repo_name stringlengths 6 67 | path stringlengths 5 185 | copies stringlengths 1 3 | size stringlengths 4 6 | content stringlengths 1.02k 962k | license stringclasses 15 values |
|---|---|---|---|---|---|
draperjames/qtpandas | setup.py | 1 | 4056 | # from __future__ import unicode_literals
# from __future__ import print_function
# from __future__ import division
# from __future__ import absolute_import
#
# from builtins import open
# from future import standard_library
# standard_library.install_aliases()
from setuptools import setup, find_packages
from setuptools.command.test import test as TestCommand
import io
import codecs
import os
import re
import sys
# TODO: Remove the commented out loop below. Users should take care of the
# PySide or PyQt requirements before they install that way they can decide what
# works best for them. Then we can just use qtpy through the compat.py file
# to handle whatever Qt solution they picked.
# has_qt4 = True
# try:
# # sip is only needed for PyQt4, they should be imported together.
# # If we can let's remove all of the references to PyQt or Pyside in favor
# # of qtpy.
# import PyQt4
# # import sip
# except ImportError as e:
# has_qt4 = False
#
# try:
# import PySide
# except ImportError as e:
# if not has_qt4:
# # We know we failed to import PyQt4/sip...
# # And we failed to import pyside.
# raise ImportError("\n\nPlease install PyQt4 and sip or PySide")
# else:
# print("Using PyQt4")
# Resolve the directory containing this setup.py.
here = os.path.abspath(os.path.dirname(__file__))
# Extract __version__ from qtpandas/__init__.py without importing the
# package (importing would pull in Qt at install time).
# NOTE: the original used open(..., 'rU'); the 'U' mode was deprecated
# since Python 3.4 and removed in Python 3.11.  A context manager also
# guarantees the file is closed even if the parsing below raises.
with open(os.path.join(here, 'qtpandas', '__init__.py')) as version_file:
    __version__ = re.sub(
        r".*\b__version__\s+=\s+'([^']+)'.*",
        r'\1',
        # Take the first line that mentions __version__ and strip the
        # assignment away, leaving only the version string itself.
        [line.strip() for line in version_file if '__version__' in line].pop(0)
    )
def read(*filenames, **kwargs):
    """Concatenate the contents of *filenames* into one string.

    Keyword arguments:
        encoding: text encoding used to open each file (default 'utf-8').
        sep: separator placed between file contents (default '\\n').
    """
    encoding = kwargs.get('encoding', 'utf-8')
    sep = kwargs.get('sep', '\n')

    def _slurp(name):
        # Open/close each file eagerly so handles are not left dangling.
        with io.open(name, encoding=encoding) as handle:
            return handle.read()

    return sep.join(_slurp(name) for name in filenames)
# Short one-paragraph summary shown on PyPI.
short_description = """Utilities to use pandas (the data analysis / manipulation
library for Python) with Qt."""
# Prefer the real README for the long description; fall back to a stub
# when the file is absent (e.g. installing from an sdist without it).
try:
    long_description = read('README.md')
except IOError:
    long_description = "See README.md where installed."
class PyTest(TestCommand):
    """``python setup.py test`` command that delegates to pytest."""

    def finalize_options(self):
        # Let setuptools do its normal option handling first, then force
        # an empty argument list so pytest uses its own discovery.
        TestCommand.finalize_options(self)
        self.test_args = []
        self.test_suite = True

    def run_tests(self):
        # Imported here because pytest is only needed when tests run.
        import pytest
        sys.exit(pytest.main(self.test_args))
# Test-only dependencies.  The Qt binding (PyQt/PySide) choice is left to
# the user on purpose -- see the TODO near the top of this file.
tests_require = [
    "pandas >= 0.17.1",
    'easygui',
    'pyqt',
    # 'pyside',
    'pytest',
    'pytest-qt',
    'pytest-cov',
    'future',
    # 'python-magic==0.4.6'
]
# Package metadata and install configuration (setuptools).
setup(
    name='qtpandas',
    version=__version__,
    url='https://github.com/draperjames/qtpandas',
    license='MIT License',
    namespace_packages=['qtpandas'],
    author='Matthias Ludwig, Marcel Radischat, Zeke Barge, James Draper',
    tests_require=tests_require,
    install_requires=[
        "pandas >= 0.17.1",
        'easygui',
        'pytest',
        'pytest-qt>=1.2.2',
        # qtpy abstracts over PyQt4/PyQt5/PySide so users pick their Qt.
        'qtpy',
        'future',
        'pytest-cov',
        # 'python-magic==0.4.6'
    ],
    # 'python setup.py test' is routed to pytest via the PyTest class above.
    cmdclass={'test': PyTest},
    author_email='james.draper@duke.edu',
    description=short_description,
    long_description=long_description,
    include_package_data=True,
    packages=['qtpandas'],
    platforms='any',
    test_suite='tests',
    classifiers=[
        'Programming Language :: Python',
        'Development Status :: 4 - Beta',
        'Natural Language :: English',
        'Environment :: X11 Applications :: Qt',
        'Intended Audience :: Developers',
        'License :: OSI Approved :: MIT License',
        'Operating System :: OS Independent',
        'Topic :: Software Development :: Libraries :: Python Modules',
        'Topic :: Software Development :: User Interfaces'
    ],
    extras_require={
        'testing': tests_require,
    }
)
| mit |
shyamalschandra/scikit-learn | examples/missing_values.py | 71 | 3055 | """
======================================================
Imputing missing values before building an estimator
======================================================
This example shows that imputing the missing values can give better results
than discarding the samples containing any missing value.
Imputing does not always improve the predictions, so please check via cross-validation.
Sometimes dropping rows or using marker values is more effective.
Missing values can be replaced by the mean, the median or the most frequent
value using the ``strategy`` hyper-parameter.
The median is a more robust estimator for data with high magnitude variables
which could dominate results (otherwise known as a 'long tail').
Script output::
Score with the entire dataset = 0.56
Score without the samples containing missing values = 0.48
Score after imputation of the missing values = 0.55
In this case, imputing helps the classifier get close to the original score.
"""
import numpy as np
from sklearn.datasets import load_boston
from sklearn.ensemble import RandomForestRegressor
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import Imputer
from sklearn.model_selection import cross_val_score
# Fixed seed so the injected missingness pattern is reproducible.
rng = np.random.RandomState(0)
# NOTE(review): load_boston was removed from scikit-learn 1.2 -- this
# example only runs against older releases.
dataset = load_boston()
X_full, y_full = dataset.data, dataset.target
n_samples = X_full.shape[0]
n_features = X_full.shape[1]
# Estimate the score on the entire dataset, with no missing values
estimator = RandomForestRegressor(random_state=0, n_estimators=100)
score = cross_val_score(estimator, X_full, y_full).mean()
print("Score with the entire dataset = %.2f" % score)
# Add missing values in 75% of the lines
missing_rate = 0.75
# Cast to int: np.floor returns a float, and NumPy rejects floats as
# array lengths / sample counts.
n_missing_samples = int(np.floor(n_samples * missing_rate))
# Boolean mask marking which samples will receive a missing value.
# Use the builtin bool: the np.bool alias was removed in NumPy 1.24.
missing_samples = np.hstack((np.zeros(n_samples - n_missing_samples,
                                      dtype=bool),
                             np.ones(n_missing_samples,
                                     dtype=bool)))
rng.shuffle(missing_samples)
# For each corrupted sample, pick one feature (column) to blank out.
missing_features = rng.randint(0, n_features, n_missing_samples)
# Estimate the score without the lines containing missing values
# (i.e. simply drop every corrupted sample before cross-validating).
X_filtered = X_full[~missing_samples, :]
y_filtered = y_full[~missing_samples]
estimator = RandomForestRegressor(random_state=0, n_estimators=100)
score = cross_val_score(estimator, X_filtered, y_filtered).mean()
print("Score without the samples containing missing values = %.2f" % score)
# Estimate the score after imputation of the missing values
# Missing entries are encoded as 0, then replaced by the per-column mean
# inside the pipeline so imputation happens within each CV fold.
X_missing = X_full.copy()
X_missing[np.where(missing_samples)[0], missing_features] = 0
y_missing = y_full.copy()
# NOTE(review): Imputer was removed in scikit-learn 0.22; the modern
# equivalent is sklearn.impute.SimpleImputer.
estimator = Pipeline([("imputer", Imputer(missing_values=0,
                                          strategy="mean",
                                          axis=0)),
                      ("forest", RandomForestRegressor(random_state=0,
                                                       n_estimators=100))])
score = cross_val_score(estimator, X_missing, y_missing).mean()
print("Score after imputation of the missing values = %.2f" % score)
| bsd-3-clause |
timsf/bayeslib | testing/mix.py | 1 | 1601 | import pdb
import importlib
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from bayeslib.backends.blocked_gibbs import normal_mixture as mixn, multinomial_mixture as mixm
import bayeslib.api_batch.mixtures as mix
plt.ion()  # interactive plotting: figures must not block this script
## test normal mixture on artificial dataset
# Synthetic 3-variate, 2-component normal mixture with known parameters.
nobs, ncomp, nvar = 100, 2, 3
pi = np.array([0.8, 0.2])
mu = np.array([(1, 2, 3), (-1, -2, -3)])
sig2 = np.array([(0.1, 0.2, 0.3), (0.1, 0.2, 0.3)])
Z = np.random.choice(ncomp, nobs, p=pi)
Y = np.random.normal(mu[Z], np.sqrt(sig2[Z]))
# Flat priors for component weights and emission parameters.
comp0 = (np.ones(ncomp),)
# NOTE(review): list-multiply repeats the SAME ndarray object 4 times
# (all entries aliased); confirm the backend never mutates them in place.
emit0 = [np.ones((ncomp, nvar))] * 4
importlib.reload(mixn)
mode = mixn.maximize(100, 2, Y, comp0, emit0)
draws = mixn.sample(100, 2, Y, comp0, emit0)
## test normal mixture API
importlib.reload(mix)
mod = mix.NormalMixture(2, Y, 500, 100, {}, {})
## test multinomial mixture on artificial dataset
# Same layout, but observations are multinomial counts (10 trials each).
nobs, ncomp, nvar = 100, 2, 3
pi = np.array([0.8, 0.2])
phi = np.array([(0.3, 0.3, 0.4), (0.1, 0.8, 0.1)])
Z = np.random.choice(ncomp, nobs, p=pi)
Y = np.array([np.random.multinomial(10, phi_i) for phi_i in phi[Z]])
comp0 = (np.ones(ncomp),)
emit0 = (np.ones((ncomp, nvar)),)
importlib.reload(mixm)
mode = mixm.maximize(100, 2, Y, comp0, emit0)
draws = mixm.sample(100, 2, Y, comp0, emit0)
## test multinomial mixture API
importlib.reload(mix)
mod = mix.MultinomialMixture(2, Y, 500, 100, {}, {})
# ## test multinomial IS
# from bayeslib.backends.isample import multinomial_mixture as mixis
# importlib.reload(mixis)
# state = mixis.init(int(1e3), comp0, emit0)
# for y in Y:
# state = mixis.update(y[np.newaxis], *state[0], state[1])
| gpl-3.0 |
kenshay/ImageScript | ProgramData/SystemFiles/Python/Lib/site-packages/matplotlib/backends/tkagg.py | 10 | 1250 | from __future__ import (absolute_import, division, print_function,
unicode_literals)
import six
from six.moves import tkinter as Tk
import numpy as np
from matplotlib.backends import _tkagg
def blit(photoimage, aggimage, bbox=None, colormode=1):
    """Copy an agg image buffer into a Tk PhotoImage via the _tkagg extension.

    ``bbox`` optionally restricts the copied region; ``colormode`` is
    passed straight through to the Tcl "PyAggImagePhoto" command.
    """
    tk = photoimage.tk
    if bbox is not None:
        bbox_array = bbox.__array__()
    else:
        bbox_array = None
    data = np.asarray(aggimage)
    try:
        # The extension command dereferences the arrays through their raw
        # CPython object addresses -- hence the id(...) arguments.
        tk.call(
            "PyAggImagePhoto", photoimage,
            id(data), colormode, id(bbox_array))
    except Tk.TclError:
        # "PyAggImagePhoto" not registered in this interpreter yet:
        # initialise _tkagg against it, then retry exactly once.
        try:
            try:
                _tkagg.tkinit(tk.interpaddr(), 1)
            except AttributeError:
                # Tkinter build without interpaddr(); fall back to id(tk).
                _tkagg.tkinit(id(tk), 0)
            tk.call("PyAggImagePhoto", photoimage,
                    id(data), colormode, id(bbox_array))
        except (ImportError, AttributeError, Tk.TclError):
            raise
def test(aggimage):
    """Manual smoke test: display *aggimage* in a Tk canvas.

    Intentionally never returns -- it spins a hand-rolled event loop.
    """
    import time
    r = Tk.Tk()
    c = Tk.Canvas(r, width=aggimage.width, height=aggimage.height)
    c.pack()
    p = Tk.PhotoImage(width=aggimage.width, height=aggimage.height)
    blit(p, aggimage)
    c.create_image(aggimage.width,aggimage.height,image=p)
    blit(p, aggimage)
    # Busy-loop event pump; blocks forever by design.
    while 1: r.update_idletasks()
| gpl-3.0 |
pxsdirac/tushare | tushare/datayes/equity.py | 17 | 6857 | # -*- coding:utf-8 -*-
"""
通联数据
Created on 2015/08/24
@author: Jimmy Liu
@group : waditu
@contact: jimmysoa@sina.cn
"""
from pandas.compat import StringIO
import pandas as pd
from tushare.util import vars as vs
from tushare.util.common import Client
from tushare.util import upass as up
class Equity():
    """DataYes (datayes/通联数据) equity API wrapper.

    Each method fills a URL template from ``tushare.util.vars``, sends it
    through ``self.client`` and returns a ``pandas.DataFrame`` -- or
    ``None`` on a non-200 response (see ``_ret_data``).
    """
    def __init__(self, client=None):
        # Reuse an injected client, otherwise build one from the saved token.
        if client is None:
            self.client = Client(up.get_token())
        else:
            self.client = client
    def Equ(self, equTypeCD='', secID='', ticker='', listStatusCD='', field=''):
        """
        Fetch basic equity information: trading code and short name, share
        type, listing status, listing board and listing date.  The listing
        status is the latest value only; historical changes are not shown.
        """
        code, result = self.client.getData(vs.EQU%(equTypeCD, secID, ticker, listStatusCD, field))
        return _ret_data(code, result)
    def EquAllot(self, isAllotment='', secID='', ticker='', beginDate='', endDate='', field=''):
        """
        Fetch the history of rights issues (allotments): each plan's terms,
        its progress, how many proposals were announced, and whether the
        allotment finally succeeded.
        """
        code, result = self.client.getData(vs.EQUALLOT%(isAllotment, secID, ticker,
                                                        beginDate, endDate, field))
        return _ret_data(code, result)
    def EquDiv(self, eventProcessCD='', exDivDate='', secID='', ticker='', beginDate='',
               endDate='', field=''):
        """
        Fetch the dividend history (cash payouts, bonus shares,
        capitalization of reserves): proposal terms, implementation
        progress and the number of historical dividend announcements.
        """
        code, result = self.client.getData(vs.EQUDIV%(eventProcessCD, exDivDate,
                                                      secID, ticker, beginDate, endDate, field))
        return _ret_data(code, result)
    def EquIndustry(self, industry='', industryID='', industryVersionCD='', secID='',
                    ticker='', intoDate='', field=''):
        """
        Look up the industry classification of a stock, given a security ID
        or a trading code.
        """
        code, result = self.client.getData(vs.EQUINDUSTRY%(industry, industryID, industryVersionCD,
                                                           secID, ticker, intoDate, field))
        return _ret_data(code, result)
    def EquIPO(self, eventProcessCD='', secID='', ticker='', field=''):
        """
        Fetch basic IPO information: the progress of the initial public
        offering and its final result.
        """
        code, result = self.client.getData(vs.EQUIPO%(eventProcessCD, secID, ticker, field))
        return _ret_data(code, result)
    def EquRef(self, secID='', ticker='', beginDate='', endDate='', eventProcessCD='', field=''):
        """
        Fetch basic information on the split-share structure reform: reform
        progress, the implementation plan, and changes in tradable shares.
        """
        code, result = self.client.getData(vs.EQUREF%(secID, ticker, beginDate, endDate,
                                                      eventProcessCD, field))
        return _ret_data(code, result)
    def EquRetud(self, listStatusCD='', secID='', ticker='', beginDate='',
                 dailyReturnNoReinvLower='', dailyReturnNoReinvUpper='',
                 dailyReturnReinvLower='', dailyReturnReinvUpper='',
                 endDate='', isChgPctl='', field=''):
        """
        Fetch daily stock returns, including the listing status on the
        trading day, daily quotes and ex-dividend / ex-rights basics.
        """
        code, result = self.client.getData(vs.EQURETUD%(listStatusCD, secID, ticker,
                                                        beginDate, dailyReturnNoReinvLower,
                                                        dailyReturnNoReinvUpper,
                                                        dailyReturnReinvLower,
                                                        dailyReturnReinvUpper,
                                                        endDate, isChgPctl, field))
        return _ret_data(code, result)
    def EquSplits(self, secID='', ticker='', beginDate='', endDate='', field=''):
        """
        Fetch basic information on share splits and reverse splits.
        """
        code, result = self.client.getData(vs.EQUSPLITS%(secID, ticker, beginDate,
                                                         endDate, field))
        return _ret_data(code, result)
    def FstTotal(self, beginDate='', endDate='', exchangeCD='', field=''):
        """
        Fetch the SSE/SZSE per-trading-day margin trading summary (volume
        and value).  Data for the previous trading day is available on the
        current one.
        """
        code, result = self.client.getData(vs.FSTTOTAL%(beginDate, endDate,
                                                        exchangeCD, field))
        return _ret_data(code, result)
    def FstDetail(self, secID='', ticker='', beginDate='', endDate='', field=''):
        """
        Fetch the SSE/SZSE per-trading-day margin trading detail for each
        security: underlying security info plus financing / short-selling
        value and volume.  Data for the previous trading day is available
        on the current one.
        """
        code, result = self.client.getData(vs.FSTDETAIL%(secID, ticker,
                                                         beginDate, endDate, field))
        return _ret_data(code, result)
    def EquShare(self, secID='', ticker='', beginDate='', endDate='',
                 partyID='', field=''):
        """
        Fetch a listed company's share capital structure and its historical
        changes.
        """
        code, result = self.client.getData(vs.EQUSHARE%(secID, ticker,
                                                        beginDate, endDate,
                                                        partyID, field))
        return _ret_data(code, result)
    def SecST(self, secID='', ticker='', beginDate='', endDate='', field=''):
        """
        Fetch the ST-flag history of stocks over a date range, given
        security IDs or trading codes (multiple values supported, at most
        50 tickers per call).
        """
        code, result = self.client.getData(vs.SECST%(secID, ticker,
                                                     beginDate, endDate, field))
        return _ret_data(code, result)
def _ret_data(code, result):
if code==200:
result = result.decode('utf-8') if vs.PY3 else result
df = pd.read_csv(StringIO(result))
return df
else:
print(result)
return None
| bsd-3-clause |
pypot/scikit-learn | sklearn/tests/test_multiclass.py | 72 | 24581 | import numpy as np
import scipy.sparse as sp
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_false
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_warns
from sklearn.utils.testing import ignore_warnings
from sklearn.utils.testing import assert_greater
from sklearn.multiclass import OneVsRestClassifier
from sklearn.multiclass import OneVsOneClassifier
from sklearn.multiclass import OutputCodeClassifier
from sklearn.multiclass import fit_ovr
from sklearn.multiclass import fit_ovo
from sklearn.multiclass import fit_ecoc
from sklearn.multiclass import predict_ovr
from sklearn.multiclass import predict_ovo
from sklearn.multiclass import predict_ecoc
from sklearn.multiclass import predict_proba_ovr
from sklearn.metrics import precision_score
from sklearn.metrics import recall_score
from sklearn.preprocessing import LabelBinarizer
from sklearn.svm import LinearSVC, SVC
from sklearn.naive_bayes import MultinomialNB
from sklearn.linear_model import (LinearRegression, Lasso, ElasticNet, Ridge,
Perceptron, LogisticRegression)
from sklearn.tree import DecisionTreeClassifier, DecisionTreeRegressor
from sklearn.grid_search import GridSearchCV
from sklearn.pipeline import Pipeline
from sklearn import svm
from sklearn import datasets
from sklearn.externals.six.moves import zip
# Shared module-level fixture: a deterministically shuffled copy of the
# 3-class iris dataset, used by nearly every test below.
iris = datasets.load_iris()
rng = np.random.RandomState(0)
perm = rng.permutation(iris.target.size)
iris.data = iris.data[perm]
iris.target = iris.target[perm]
n_classes = 3
# OvR must refuse to predict before fit and reject multioutput targets.
def test_ovr_exceptions():
    ovr = OneVsRestClassifier(LinearSVC(random_state=0))
    assert_raises(ValueError, ovr.predict, [])
    with ignore_warnings():
        assert_raises(ValueError, predict_ovr, [LinearSVC(), MultinomialNB()],
                      LabelBinarizer(), [])
    # Fail on multioutput data
    assert_raises(ValueError, OneVsRestClassifier(MultinomialNB()).fit,
                  np.array([[1, 0], [0, 1]]),
                  np.array([[1, 2], [3, 1]]))
    assert_raises(ValueError, OneVsRestClassifier(MultinomialNB()).fit,
                  np.array([[1, 0], [0, 1]]),
                  np.array([[1.5, 2.4], [3.1, 0.8]]))
# OvR fits one estimator per class and matches the base classifier when
# the base classifier is already multiclass-capable.
def test_ovr_fit_predict():
    # A classifier which implements decision_function.
    ovr = OneVsRestClassifier(LinearSVC(random_state=0))
    pred = ovr.fit(iris.data, iris.target).predict(iris.data)
    assert_equal(len(ovr.estimators_), n_classes)
    clf = LinearSVC(random_state=0)
    pred2 = clf.fit(iris.data, iris.target).predict(iris.data)
    assert_equal(np.mean(iris.target == pred), np.mean(iris.target == pred2))
    # A classifier which implements predict_proba.
    ovr = OneVsRestClassifier(MultinomialNB())
    pred = ovr.fit(iris.data, iris.target).predict(iris.data)
    assert_greater(np.mean(iris.target == pred), 0.65)
def test_ovr_ovo_regressor():
    # test that ovr and ovo work on regressors which don't have a decision_function
    ovr = OneVsRestClassifier(DecisionTreeRegressor())
    pred = ovr.fit(iris.data, iris.target).predict(iris.data)
    assert_equal(len(ovr.estimators_), n_classes)
    assert_array_equal(np.unique(pred), [0, 1, 2])
    # we are doing something sensible
    assert_greater(np.mean(pred == iris.target), .9)
    ovr = OneVsOneClassifier(DecisionTreeRegressor())
    pred = ovr.fit(iris.data, iris.target).predict(iris.data)
    # OvO trains one estimator per pair of classes.
    assert_equal(len(ovr.estimators_), n_classes * (n_classes - 1) / 2)
    assert_array_equal(np.unique(pred), [0, 1, 2])
    # we are doing something sensible
    assert_greater(np.mean(pred == iris.target), .9)
# Sparse multilabel targets must give the same predictions as dense ones,
# for every scipy sparse matrix format.
def test_ovr_fit_predict_sparse():
    for sparse in [sp.csr_matrix, sp.csc_matrix, sp.coo_matrix, sp.dok_matrix,
                   sp.lil_matrix]:
        base_clf = MultinomialNB(alpha=1)
        X, Y = datasets.make_multilabel_classification(n_samples=100,
                                                       n_features=20,
                                                       n_classes=5,
                                                       n_labels=3,
                                                       length=50,
                                                       allow_unlabeled=True,
                                                       return_indicator=True,
                                                       random_state=0)
        X_train, Y_train = X[:80], Y[:80]
        X_test = X[80:]
        clf = OneVsRestClassifier(base_clf).fit(X_train, Y_train)
        Y_pred = clf.predict(X_test)
        clf_sprs = OneVsRestClassifier(base_clf).fit(X_train, sparse(Y_train))
        Y_pred_sprs = clf_sprs.predict(X_test)
        assert_true(clf.multilabel_)
        assert_true(sp.issparse(Y_pred_sprs))
        assert_array_equal(Y_pred_sprs.toarray(), Y_pred)
        # Test predict_proba
        Y_proba = clf_sprs.predict_proba(X_test)
        # predict assigns a label if the probability that the
        # sample has the label is greater than 0.5.
        pred = Y_proba > .5
        assert_array_equal(pred, Y_pred_sprs.toarray())
        # Test decision_function
        clf_sprs = OneVsRestClassifier(svm.SVC()).fit(X_train, sparse(Y_train))
        dec_pred = (clf_sprs.decision_function(X_test) > 0).astype(int)
        assert_array_equal(dec_pred, clf_sprs.predict(X_test).toarray())
def test_ovr_always_present():
    # Test that ovr works with classes that are always present or absent.
    # Note: tests is the case where _ConstantPredictor is utilised
    X = np.ones((10, 2))
    X[:5, :] = 0
    # Build an indicator matrix where two features are always on.
    # As list of lists, it would be: [[int(i >= 5), 2, 3] for i in range(10)]
    y = np.zeros((10, 3))
    y[5:, 0] = 1
    y[:, 1] = 1
    y[:, 2] = 1
    ovr = OneVsRestClassifier(LogisticRegression())
    # A degenerate (constant) label must trigger a UserWarning on fit.
    assert_warns(UserWarning, ovr.fit, X, y)
    y_pred = ovr.predict(X)
    assert_array_equal(np.array(y_pred), np.array(y))
    y_pred = ovr.decision_function(X)
    assert_equal(np.unique(y_pred[:, -2:]), 1)
    y_pred = ovr.predict_proba(X)
    assert_array_equal(y_pred[:, -1], np.ones(X.shape[0]))
    # y has a constantly absent label
    y = np.zeros((10, 2))
    y[5:, 0] = 1  # variable label
    ovr = OneVsRestClassifier(LogisticRegression())
    assert_warns(UserWarning, ovr.fit, X, y)
    y_pred = ovr.predict_proba(X)
    assert_array_equal(y_pred[:, -1], np.zeros(X.shape[0]))
# Multiclass OvR on a toy set where each feature maps directly to a label;
# exercised with classifiers and regressors as base estimators.
def test_ovr_multiclass():
    # Toy dataset where features correspond directly to labels.
    X = np.array([[0, 0, 5], [0, 5, 0], [3, 0, 0], [0, 0, 6], [6, 0, 0]])
    y = ["eggs", "spam", "ham", "eggs", "ham"]
    Y = np.array([[0, 0, 1],
                  [0, 1, 0],
                  [1, 0, 0],
                  [0, 0, 1],
                  [1, 0, 0]])
    classes = set("ham eggs spam".split())
    for base_clf in (MultinomialNB(), LinearSVC(random_state=0),
                     LinearRegression(), Ridge(),
                     ElasticNet()):
        clf = OneVsRestClassifier(base_clf).fit(X, y)
        assert_equal(set(clf.classes_), classes)
        y_pred = clf.predict(np.array([[0, 0, 4]]))[0]
        assert_equal(set(y_pred), set("eggs"))
        # test input as label indicator matrix
        clf = OneVsRestClassifier(base_clf).fit(X, Y)
        y_pred = clf.predict([[0, 0, 4]])[0]
        assert_array_equal(y_pred, [0, 0, 1])
# Binary OvR, with both string labels and an indicator column vector.
def test_ovr_binary():
    # Toy dataset where features correspond directly to labels.
    X = np.array([[0, 0, 5], [0, 5, 0], [3, 0, 0], [0, 0, 6], [6, 0, 0]])
    y = ["eggs", "spam", "spam", "eggs", "spam"]
    Y = np.array([[0, 1, 1, 0, 1]]).T
    classes = set("eggs spam".split())
    def conduct_test(base_clf, test_predict_proba=False):
        clf = OneVsRestClassifier(base_clf).fit(X, y)
        assert_equal(set(clf.classes_), classes)
        y_pred = clf.predict(np.array([[0, 0, 4]]))[0]
        assert_equal(set(y_pred), set("eggs"))
        if test_predict_proba:
            X_test = np.array([[0, 0, 4]])
            probabilities = clf.predict_proba(X_test)
            assert_equal(2, len(probabilities[0]))
            assert_equal(clf.classes_[np.argmax(probabilities, axis=1)],
                         clf.predict(X_test))
        # test input as label indicator matrix
        clf = OneVsRestClassifier(base_clf).fit(X, Y)
        y_pred = clf.predict([[3, 0, 0]])[0]
        assert_equal(y_pred, 1)
    for base_clf in (LinearSVC(random_state=0), LinearRegression(),
                     Ridge(), ElasticNet()):
        conduct_test(base_clf)
    for base_clf in (MultinomialNB(), SVC(probability=True),
                     LogisticRegression()):
        conduct_test(base_clf, test_predict_proba=True)
# Multilabel OvR, with the deprecated list-of-tuples target format.
@ignore_warnings
def test_ovr_multilabel():
    # Toy dataset where features correspond directly to labels.
    X = np.array([[0, 4, 5], [0, 5, 0], [3, 3, 3], [4, 0, 6], [6, 0, 0]])
    y = [["spam", "eggs"], ["spam"], ["ham", "eggs", "spam"],
         ["ham", "eggs"], ["ham"]]
    # y = [[1, 2], [1], [0, 1, 2], [0, 2], [0]]
    Y = np.array([[0, 1, 1],
                  [0, 1, 0],
                  [1, 1, 1],
                  [1, 0, 1],
                  [1, 0, 0]])
    classes = set("ham eggs spam".split())
    for base_clf in (MultinomialNB(), LinearSVC(random_state=0),
                     LinearRegression(), Ridge(),
                     ElasticNet(), Lasso(alpha=0.5)):
        # test input as lists of tuples
        clf = assert_warns(DeprecationWarning,
                           OneVsRestClassifier(base_clf).fit,
                           X, y)
        assert_equal(set(clf.classes_), classes)
        y_pred = clf.predict([[0, 4, 4]])[0]
        assert_equal(set(y_pred), set(["spam", "eggs"]))
        assert_true(clf.multilabel_)
        # test input as label indicator matrix
        clf = OneVsRestClassifier(base_clf).fit(X, Y)
        y_pred = clf.predict([[0, 4, 4]])[0]
        assert_array_equal(y_pred, [0, 1, 1])
        assert_true(clf.multilabel_)
# OvR with a kernel SVC: one estimator per class, decent accuracy.
def test_ovr_fit_predict_svc():
    ovr = OneVsRestClassifier(svm.SVC())
    ovr.fit(iris.data, iris.target)
    assert_equal(len(ovr.estimators_), 3)
    assert_greater(ovr.score(iris.data, iris.target), .9)
# Micro-averaged precision/recall on a multilabel set, with and without
# fully unlabeled samples allowed.
def test_ovr_multilabel_dataset():
    base_clf = MultinomialNB(alpha=1)
    for au, prec, recall in zip((True, False), (0.51, 0.66), (0.51, 0.80)):
        X, Y = datasets.make_multilabel_classification(n_samples=100,
                                                       n_features=20,
                                                       n_classes=5,
                                                       n_labels=2,
                                                       length=50,
                                                       allow_unlabeled=au,
                                                       return_indicator=True,
                                                       random_state=0)
        X_train, Y_train = X[:80], Y[:80]
        X_test, Y_test = X[80:], Y[80:]
        clf = OneVsRestClassifier(base_clf).fit(X_train, Y_train)
        Y_pred = clf.predict(X_test)
        assert_true(clf.multilabel_)
        assert_almost_equal(precision_score(Y_test, Y_pred, average="micro"),
                            prec,
                            decimal=2)
        assert_almost_equal(recall_score(Y_test, Y_pred, average="micro"),
                            recall,
                            decimal=2)
# predict_proba on multilabel data; estimators lacking predict_proba
# must raise AttributeError instead.
def test_ovr_multilabel_predict_proba():
    base_clf = MultinomialNB(alpha=1)
    for au in (False, True):
        X, Y = datasets.make_multilabel_classification(n_samples=100,
                                                       n_features=20,
                                                       n_classes=5,
                                                       n_labels=3,
                                                       length=50,
                                                       allow_unlabeled=au,
                                                       return_indicator=True,
                                                       random_state=0)
        X_train, Y_train = X[:80], Y[:80]
        X_test = X[80:]
        clf = OneVsRestClassifier(base_clf).fit(X_train, Y_train)
        # decision function only estimator. Fails in current implementation.
        decision_only = OneVsRestClassifier(svm.SVR()).fit(X_train, Y_train)
        assert_raises(AttributeError, decision_only.predict_proba, X_test)
        # Estimator with predict_proba disabled, depending on parameters.
        decision_only = OneVsRestClassifier(svm.SVC(probability=False))
        decision_only.fit(X_train, Y_train)
        assert_raises(AttributeError, decision_only.predict_proba, X_test)
        Y_pred = clf.predict(X_test)
        Y_proba = clf.predict_proba(X_test)
        # predict assigns a label if the probability that the
        # sample has the label is greater than 0.5.
        pred = Y_proba > .5
        assert_array_equal(pred, Y_pred)
# Single-label case: probabilities sum to one and argmax matches predict.
def test_ovr_single_label_predict_proba():
    base_clf = MultinomialNB(alpha=1)
    X, Y = iris.data, iris.target
    X_train, Y_train = X[:80], Y[:80]
    X_test = X[80:]
    clf = OneVsRestClassifier(base_clf).fit(X_train, Y_train)
    # decision function only estimator. Fails in current implementation.
    decision_only = OneVsRestClassifier(svm.SVR()).fit(X_train, Y_train)
    assert_raises(AttributeError, decision_only.predict_proba, X_test)
    Y_pred = clf.predict(X_test)
    Y_proba = clf.predict_proba(X_test)
    assert_almost_equal(Y_proba.sum(axis=1), 1.0)
    # predict assigns a label if the probability that the
    # sample has the label is greater than 0.5.
    pred = np.array([l.argmax() for l in Y_proba])
    assert_false((pred - Y_pred).any())
# decision_function sign must agree with predict, multilabel case.
def test_ovr_multilabel_decision_function():
    X, Y = datasets.make_multilabel_classification(n_samples=100,
                                                   n_features=20,
                                                   n_classes=5,
                                                   n_labels=3,
                                                   length=50,
                                                   allow_unlabeled=True,
                                                   return_indicator=True,
                                                   random_state=0)
    X_train, Y_train = X[:80], Y[:80]
    X_test = X[80:]
    clf = OneVsRestClassifier(svm.SVC()).fit(X_train, Y_train)
    assert_array_equal((clf.decision_function(X_test) > 0).astype(int),
                       clf.predict(X_test))
# decision_function sign must agree with predict, binary case.
def test_ovr_single_label_decision_function():
    X, Y = datasets.make_classification(n_samples=100,
                                        n_features=20,
                                        random_state=0)
    X_train, Y_train = X[:80], Y[:80]
    X_test = X[80:]
    clf = OneVsRestClassifier(svm.SVC()).fit(X_train, Y_train)
    assert_array_equal(clf.decision_function(X_test).ravel() > 0,
                       clf.predict(X_test))
# GridSearchCV must be able to tune the wrapped estimator's parameters
# through the estimator__C naming convention.
def test_ovr_gridsearch():
    ovr = OneVsRestClassifier(LinearSVC(random_state=0))
    Cs = [0.1, 0.5, 0.8]
    cv = GridSearchCV(ovr, {'estimator__C': Cs})
    cv.fit(iris.data, iris.target)
    best_C = cv.best_estimator_.estimators_[0].C
    assert_true(best_C in Cs)
def test_ovr_pipeline():
    # Test with pipeline of length one
    # This test is needed because the multiclass estimators may fail to detect
    # the presence of predict_proba or decision_function.
    clf = Pipeline([("tree", DecisionTreeClassifier())])
    ovr_pipe = OneVsRestClassifier(clf)
    ovr_pipe.fit(iris.data, iris.target)
    ovr = OneVsRestClassifier(DecisionTreeClassifier())
    ovr.fit(iris.data, iris.target)
    assert_array_equal(ovr.predict(iris.data), ovr_pipe.predict(iris.data))
# coef_ must be exposed with the right shape for dense and sparse input,
# without densifying sparse coefficients.
def test_ovr_coef_():
    for base_classifier in [SVC(kernel='linear', random_state=0), LinearSVC(random_state=0)]:
        # SVC has sparse coef with sparse input data
        ovr = OneVsRestClassifier(base_classifier)
        for X in [iris.data, sp.csr_matrix(iris.data)]:
            # test with dense and sparse coef
            ovr.fit(X, iris.target)
            shape = ovr.coef_.shape
            assert_equal(shape[0], n_classes)
            assert_equal(shape[1], iris.data.shape[1])
            # don't densify sparse coefficients
            assert_equal(sp.issparse(ovr.estimators_[0].coef_), sp.issparse(ovr.coef_))
def test_ovr_coef_exceptions():
    # Not fitted exception!
    ovr = OneVsRestClassifier(LinearSVC(random_state=0))
    # lambda is needed because we don't want coef_ to be evaluated right away
    assert_raises(ValueError, lambda x: ovr.coef_, None)
    # Doesn't have coef_ exception!
    ovr = OneVsRestClassifier(DecisionTreeClassifier())
    ovr.fit(iris.data, iris.target)
    assert_raises(AttributeError, lambda x: ovr.coef_, None)
# OvO must refuse to predict before fit.
def test_ovo_exceptions():
    ovo = OneVsOneClassifier(LinearSVC(random_state=0))
    assert_raises(ValueError, ovo.predict, [])
def test_ovo_fit_on_list():
    # Test that OneVsOne fitting works with a list of targets and yields the
    # same output as predict from an array
    ovo = OneVsOneClassifier(LinearSVC(random_state=0))
    prediction_from_array = ovo.fit(iris.data, iris.target).predict(iris.data)
    prediction_from_list = ovo.fit(iris.data,
                                   list(iris.target)).predict(iris.data)
    assert_array_equal(prediction_from_array, prediction_from_list)
# OvO trains one estimator per pair of classes, for both
# decision_function- and predict_proba-based base classifiers.
def test_ovo_fit_predict():
    # A classifier which implements decision_function.
    ovo = OneVsOneClassifier(LinearSVC(random_state=0))
    ovo.fit(iris.data, iris.target).predict(iris.data)
    assert_equal(len(ovo.estimators_), n_classes * (n_classes - 1) / 2)
    # A classifier which implements predict_proba.
    ovo = OneVsOneClassifier(MultinomialNB())
    ovo.fit(iris.data, iris.target).predict(iris.data)
    assert_equal(len(ovo.estimators_), n_classes * (n_classes - 1) / 2)
# The OvO decision function = vote counts + normalized confidences;
# verify the votes and that confidences break most ties.
def test_ovo_decision_function():
    n_samples = iris.data.shape[0]
    ovo_clf = OneVsOneClassifier(LinearSVC(random_state=0))
    ovo_clf.fit(iris.data, iris.target)
    decisions = ovo_clf.decision_function(iris.data)
    assert_equal(decisions.shape, (n_samples, n_classes))
    assert_array_equal(decisions.argmax(axis=1), ovo_clf.predict(iris.data))
    # Compute the votes
    votes = np.zeros((n_samples, n_classes))
    k = 0
    for i in range(n_classes):
        for j in range(i + 1, n_classes):
            pred = ovo_clf.estimators_[k].predict(iris.data)
            votes[pred == 0, i] += 1
            votes[pred == 1, j] += 1
            k += 1
    # Extract votes and verify
    assert_array_equal(votes, np.round(decisions))
    for class_idx in range(n_classes):
        # For each sample and each class, there only 3 possible vote levels
        # because they are only 3 distinct class pairs thus 3 distinct
        # binary classifiers.
        # Therefore, sorting predictions based on votes would yield
        # mostly tied predictions:
        assert_true(set(votes[:, class_idx]).issubset(set([0., 1., 2.])))
        # The OVO decision function on the other hand is able to resolve
        # most of the ties on this data as it combines both the vote counts
        # and the aggregated confidence levels of the binary classifiers
        # to compute the aggregate decision function. The iris dataset
        # has 150 samples with a couple of duplicates. The OvO decisions
        # can resolve most of the ties:
        assert_greater(len(np.unique(decisions[:, class_idx])), 146)
# Same GridSearchCV round-trip as the OvR variant above.
def test_ovo_gridsearch():
    ovo = OneVsOneClassifier(LinearSVC(random_state=0))
    Cs = [0.1, 0.5, 0.8]
    cv = GridSearchCV(ovo, {'estimator__C': Cs})
    cv.fit(iris.data, iris.target)
    best_C = cv.best_estimator_.estimators_[0].C
    assert_true(best_C in Cs)
def test_ovo_ties():
    # Test that ties are broken using the decision function,
    # not defaulting to the smallest label
    X = np.array([[1, 2], [2, 1], [-2, 1], [-2, -1]])
    y = np.array([2, 0, 1, 2])
    multi_clf = OneVsOneClassifier(Perceptron(shuffle=False))
    ovo_prediction = multi_clf.fit(X, y).predict(X)
    ovo_decision = multi_clf.decision_function(X)
    # Classifiers are in order 0-1, 0-2, 1-2
    # Use decision_function to compute the votes and the normalized
    # sum_of_confidences, which is used to disambiguate when there is a tie in
    # votes.
    votes = np.round(ovo_decision)
    normalized_confidences = ovo_decision - votes
    # For the first point, there is one vote per class
    assert_array_equal(votes[0, :], 1)
    # For the rest, there is no tie and the prediction is the argmax
    assert_array_equal(np.argmax(votes[1:], axis=1), ovo_prediction[1:])
    # For the tie, the prediction is the class with the highest score
    assert_equal(ovo_prediction[0], normalized_confidences[0].argmax())
def test_ovo_ties2():
    # test that ties can not only be won by the first two labels
    X = np.array([[1, 2], [2, 1], [-2, 1], [-2, -1]])
    y_ref = np.array([2, 0, 1, 2])
    # cycle through labels so that each label wins once
    for i in range(3):
        y = (y_ref + i) % 3
        multi_clf = OneVsOneClassifier(Perceptron(shuffle=False))
        ovo_prediction = multi_clf.fit(X, y).predict(X)
        assert_equal(ovo_prediction[0], i % 3)
def test_ovo_string_y():
    # Test that the OvO doesn't mess up the encoding of string labels
    X = np.eye(4)
    y = np.array(['a', 'b', 'c', 'd'])
    ovo = OneVsOneClassifier(LinearSVC())
    ovo.fit(X, y)
    assert_array_equal(y, ovo.predict(X))
# ECOC must refuse to predict before fit.
def test_ecoc_exceptions():
    ecoc = OutputCodeClassifier(LinearSVC(random_state=0))
    assert_raises(ValueError, ecoc.predict, [])
# ECOC trains n_classes * code_size estimators.
def test_ecoc_fit_predict():
    # A classifier which implements decision_function.
    ecoc = OutputCodeClassifier(LinearSVC(random_state=0),
                                code_size=2, random_state=0)
    ecoc.fit(iris.data, iris.target).predict(iris.data)
    assert_equal(len(ecoc.estimators_), n_classes * 2)
    # A classifier which implements predict_proba.
    ecoc = OutputCodeClassifier(MultinomialNB(), code_size=2, random_state=0)
    ecoc.fit(iris.data, iris.target).predict(iris.data)
    assert_equal(len(ecoc.estimators_), n_classes * 2)
# Same GridSearchCV round-trip as the OvR/OvO variants above.
def test_ecoc_gridsearch():
    ecoc = OutputCodeClassifier(LinearSVC(random_state=0),
                                random_state=0)
    Cs = [0.1, 0.5, 0.8]
    cv = GridSearchCV(ecoc, {'estimator__C': Cs})
    cv.fit(iris.data, iris.target)
    best_C = cv.best_estimator_.estimators_[0].C
    assert_true(best_C in Cs)
@ignore_warnings
def test_deprecated():
    """Deprecated module-level fit_*/predict_* helpers must agree with the
    corresponding meta-estimator classes.

    @ignore_warnings is presumably suppressing the deprecation warnings the
    legacy helpers emit — TODO confirm against the decorator's definition.
    """
    base_estimator = DecisionTreeClassifier(random_state=0)
    X, Y = iris.data, iris.target
    X_train, Y_train = X[:80], Y[:80]
    X_test = X[80:]
    # Each entry: (meta-estimator class, deprecated fit helper,
    #              deprecated predict helper, deprecated proba helper or None)
    all_metas = [
        (OneVsRestClassifier, fit_ovr, predict_ovr, predict_proba_ovr),
        (OneVsOneClassifier, fit_ovo, predict_ovo, None),
        (OutputCodeClassifier, fit_ecoc, predict_ecoc, None),
    ]
    for MetaEst, fit_func, predict_func, proba_func in all_metas:
        try:
            # First try passing random_state; a TypeError means this
            # meta-estimator / helper pair does not accept it, so retry
            # without the keyword.
            meta_est = MetaEst(base_estimator,
                               random_state=0).fit(X_train, Y_train)
            fitted_return = fit_func(base_estimator, X_train, Y_train,
                                     random_state=0)
        except TypeError:
            meta_est = MetaEst(base_estimator).fit(X_train, Y_train)
            fitted_return = fit_func(base_estimator, X_train, Y_train)
        if len(fitted_return) == 2:
            # Two-tuple return: (estimators, classes-or-label-binarizer).
            estimators_, classes_or_lb = fitted_return
            assert_almost_equal(predict_func(estimators_, classes_or_lb,
                                             X_test),
                                meta_est.predict(X_test))
            if proba_func is not None:
                assert_almost_equal(proba_func(estimators_, X_test,
                                               is_multilabel=False),
                                    meta_est.predict_proba(X_test))
        else:
            # Three-tuple return (ECOC): additionally carries the codebook.
            estimators_, classes_or_lb, codebook = fitted_return
            assert_almost_equal(predict_func(estimators_, classes_or_lb,
                                             codebook, X_test),
                                meta_est.predict(X_test))
| bsd-3-clause |
amolkahat/pandas | pandas/tests/test_algos.py | 2 | 71102 | # -*- coding: utf-8 -*-
import numpy as np
import pytest
from numpy.random import RandomState
from numpy import nan
from datetime import datetime
from itertools import permutations
import struct
from pandas import (Series, Categorical, CategoricalIndex,
Timestamp, DatetimeIndex, Index, IntervalIndex)
import pandas as pd
from pandas import compat
from pandas._libs import (groupby as libgroupby, algos as libalgos,
hashtable as ht)
from pandas.compat import lrange, range
import pandas.core.algorithms as algos
import pandas.core.common as com
import pandas.util.testing as tm
import pandas.util._test_decorators as td
from pandas.core.dtypes.dtypes import CategoricalDtype as CDT
from pandas.compat.numpy import np_array_datetime64_compat
from pandas.util.testing import assert_almost_equal
class TestMatch(object):
    """Tests for algos.match: position of each value in a lookup table,
    with a configurable sentinel for misses."""

    def test_ints(self):
        table = np.array([0, 2, 1])
        queries = np.array([0, 1, 2, 2, 0, 1, 3, 0])
        # Default miss sentinel is -1 and the result stays integral.
        matched = algos.match(queries, table)
        tm.assert_numpy_array_equal(
            matched, np.array([0, 2, 1, 1, 0, 2, -1, 0], dtype=np.int64))
        # An np.nan sentinel forces a float result.
        matched = Series(algos.match(queries, table, np.nan))
        tm.assert_series_equal(
            matched, Series(np.array([0, 2, 1, 1, 0, 2, np.nan, 0])))
        float_queries = Series(np.arange(5), dtype=np.float32)
        matched = algos.match(float_queries, [2, 4])
        tm.assert_numpy_array_equal(
            matched, np.array([-1, -1, 0, -1, 1], dtype=np.int64))
        matched = Series(algos.match(float_queries, [2, 4], np.nan))
        tm.assert_series_equal(
            matched, Series(np.array([np.nan, np.nan, 0, np.nan, 1])))

    def test_strings(self):
        table = ['foo', 'bar', 'baz']
        queries = ['bar', 'foo', 'qux', 'foo', 'bar', 'baz', 'qux']
        matched = algos.match(queries, table)
        tm.assert_numpy_array_equal(
            matched, np.array([1, 0, -1, 0, 1, 2, -1], dtype=np.int64))
        matched = Series(algos.match(queries, table, np.nan))
        tm.assert_series_equal(
            matched, Series(np.array([1, 0, np.nan, 0, 1, 2, np.nan])))
class TestFactorize(object):
    """Tests for factorize: encoding values as (integer labels, uniques)."""
    def test_basic(self):
        # Without sort=True, uniques come back in order of first appearance.
        labels, uniques = algos.factorize(['a', 'b', 'b', 'a', 'a', 'c', 'c',
                                           'c'])
        tm.assert_numpy_array_equal(
            uniques, np.array(['a', 'b', 'c'], dtype=object))
        # sort=True orders the uniques and remaps labels accordingly.
        labels, uniques = algos.factorize(['a', 'b', 'b', 'a',
                                           'a', 'c', 'c', 'c'], sort=True)
        exp = np.array([0, 1, 1, 0, 0, 2, 2, 2], dtype=np.intp)
        tm.assert_numpy_array_equal(labels, exp)
        exp = np.array(['a', 'b', 'c'], dtype=object)
        tm.assert_numpy_array_equal(uniques, exp)
        # Reversed integer range: unsorted keeps appearance order...
        labels, uniques = algos.factorize(list(reversed(range(5))))
        exp = np.array([0, 1, 2, 3, 4], dtype=np.intp)
        tm.assert_numpy_array_equal(labels, exp)
        exp = np.array([4, 3, 2, 1, 0], dtype=np.int64)
        tm.assert_numpy_array_equal(uniques, exp)
        # ...while sort=True yields ascending uniques.
        labels, uniques = algos.factorize(list(reversed(range(5))), sort=True)
        exp = np.array([4, 3, 2, 1, 0], dtype=np.intp)
        tm.assert_numpy_array_equal(labels, exp)
        exp = np.array([0, 1, 2, 3, 4], dtype=np.int64)
        tm.assert_numpy_array_equal(uniques, exp)
        # Same pair of checks for float values.
        labels, uniques = algos.factorize(list(reversed(np.arange(5.))))
        exp = np.array([0, 1, 2, 3, 4], dtype=np.intp)
        tm.assert_numpy_array_equal(labels, exp)
        exp = np.array([4., 3., 2., 1., 0.], dtype=np.float64)
        tm.assert_numpy_array_equal(uniques, exp)
        labels, uniques = algos.factorize(list(reversed(np.arange(5.))),
                                          sort=True)
        exp = np.array([4, 3, 2, 1, 0], dtype=np.intp)
        tm.assert_numpy_array_equal(labels, exp)
        exp = np.array([0., 1., 2., 3., 4.], dtype=np.float64)
        tm.assert_numpy_array_equal(uniques, exp)
    def test_mixed(self):
        # doc example reshaping.rst
        x = Series(['A', 'A', np.nan, 'B', 3.14, np.inf])
        labels, uniques = algos.factorize(x)
        # NaN maps to the -1 sentinel, not to a slot in uniques.
        exp = np.array([0, 0, -1, 1, 2, 3], dtype=np.intp)
        tm.assert_numpy_array_equal(labels, exp)
        exp = Index(['A', 'B', 3.14, np.inf])
        tm.assert_index_equal(uniques, exp)
        labels, uniques = algos.factorize(x, sort=True)
        exp = np.array([2, 2, -1, 3, 0, 1], dtype=np.intp)
        tm.assert_numpy_array_equal(labels, exp)
        exp = Index([3.14, np.inf, 'A', 'B'])
        tm.assert_index_equal(uniques, exp)
    def test_datelike(self):
        # M8: datetime64 values round-trip through a DatetimeIndex.
        v1 = Timestamp('20130101 09:00:00.00004')
        v2 = Timestamp('20130101')
        x = Series([v1, v1, v1, v2, v2, v1])
        labels, uniques = algos.factorize(x)
        exp = np.array([0, 0, 0, 1, 1, 0], dtype=np.intp)
        tm.assert_numpy_array_equal(labels, exp)
        exp = DatetimeIndex([v1, v2])
        tm.assert_index_equal(uniques, exp)
        labels, uniques = algos.factorize(x, sort=True)
        exp = np.array([1, 1, 1, 0, 0, 1], dtype=np.intp)
        tm.assert_numpy_array_equal(labels, exp)
        exp = DatetimeIndex([v2, v1])
        tm.assert_index_equal(uniques, exp)
        # period
        v1 = pd.Period('201302', freq='M')
        v2 = pd.Period('201303', freq='M')
        x = Series([v1, v1, v1, v2, v2, v1])
        # periods are not 'sorted' as they are converted back into an index
        labels, uniques = algos.factorize(x)
        exp = np.array([0, 0, 0, 1, 1, 0], dtype=np.intp)
        tm.assert_numpy_array_equal(labels, exp)
        tm.assert_index_equal(uniques, pd.PeriodIndex([v1, v2]))
        labels, uniques = algos.factorize(x, sort=True)
        exp = np.array([0, 0, 0, 1, 1, 0], dtype=np.intp)
        tm.assert_numpy_array_equal(labels, exp)
        tm.assert_index_equal(uniques, pd.PeriodIndex([v1, v2]))
        # GH 5986: timedeltas factorize like datetimes.
        v1 = pd.to_timedelta('1 day 1 min')
        v2 = pd.to_timedelta('1 day')
        x = Series([v1, v2, v1, v1, v2, v2, v1])
        labels, uniques = algos.factorize(x)
        exp = np.array([0, 1, 0, 0, 1, 1, 0], dtype=np.intp)
        tm.assert_numpy_array_equal(labels, exp)
        tm.assert_index_equal(uniques, pd.to_timedelta([v1, v2]))
        labels, uniques = algos.factorize(x, sort=True)
        exp = np.array([1, 0, 1, 1, 0, 0, 1], dtype=np.intp)
        tm.assert_numpy_array_equal(labels, exp)
        tm.assert_index_equal(uniques, pd.to_timedelta([v2, v1]))
    def test_factorize_nan(self):
        # nan should map to na_sentinel, not reverse_indexer[na_sentinel]
        # rizer.factorize should not raise an exception if na_sentinel indexes
        # outside of reverse_indexer
        key = np.array([1, 2, 1, np.nan], dtype='O')
        rizer = ht.Factorizer(len(key))
        for na_sentinel in (-1, 20):
            ids = rizer.factorize(key, sort=True, na_sentinel=na_sentinel)
            expected = np.array([0, 1, 0, na_sentinel], dtype='int32')
            assert len(set(key)) == len(set(expected))
            # NaN positions in the input line up with sentinel positions.
            tm.assert_numpy_array_equal(pd.isna(key),
                                        expected == na_sentinel)
        # nan still maps to na_sentinel when sort=False
        key = np.array([0, np.nan, 1], dtype='O')
        na_sentinel = -1
        # TODO(wesm): unused?
        ids = rizer.factorize(key, sort=False, na_sentinel=na_sentinel)  # noqa
        expected = np.array([2, -1, 0], dtype='int32')
        assert len(set(key)) == len(set(expected))
        tm.assert_numpy_array_equal(pd.isna(key), expected == na_sentinel)
    @pytest.mark.parametrize("data,expected_label,expected_level", [
        (
            [(1, 1), (1, 2), (0, 0), (1, 2), 'nonsense'],
            [0, 1, 2, 1, 3],
            [(1, 1), (1, 2), (0, 0), 'nonsense']
        ),
        (
            [(1, 1), (1, 2), (0, 0), (1, 2), (1, 2, 3)],
            [0, 1, 2, 1, 3],
            [(1, 1), (1, 2), (0, 0), (1, 2, 3)]
        ),
        (
            [(1, 1), (1, 2), (0, 0), (1, 2)],
            [0, 1, 2, 1],
            [(1, 1), (1, 2), (0, 0)]
        )
    ])
    def test_factorize_tuple_list(self, data, expected_label, expected_level):
        # GH9454: tuples (including ragged/mixed) factorize as objects.
        result = pd.factorize(data)
        tm.assert_numpy_array_equal(result[0],
                                    np.array(expected_label, dtype=np.intp))
        expected_level_array = com.asarray_tuplesafe(expected_level,
                                                     dtype=object)
        tm.assert_numpy_array_equal(result[1], expected_level_array)
    def test_complex_sorting(self):
        # gh 12666 - check no segfault
        x17 = np.array([complex(i) for i in range(17)], dtype=object)
        # Complex numbers are unorderable: sort=True must raise, not crash.
        pytest.raises(TypeError, algos.factorize, x17[::-1], sort=True)
    def test_float64_factorize(self, writable):
        data = np.array([1.0, 1e8, 1.0, 1e-8, 1e8, 1.0], dtype=np.float64)
        # `writable` fixture toggles the array's writeability flag;
        # factorize must work on read-only arrays too.
        data.setflags(write=writable)
        exp_labels = np.array([0, 1, 0, 2, 1, 0], dtype=np.intp)
        exp_uniques = np.array([1.0, 1e8, 1e-8], dtype=np.float64)
        labels, uniques = algos.factorize(data)
        tm.assert_numpy_array_equal(labels, exp_labels)
        tm.assert_numpy_array_equal(uniques, exp_uniques)
    def test_uint64_factorize(self, writable):
        # Values above int64 range must not overflow.
        data = np.array([2**64 - 1, 1, 2**64 - 1], dtype=np.uint64)
        data.setflags(write=writable)
        exp_labels = np.array([0, 1, 0], dtype=np.intp)
        exp_uniques = np.array([2**64 - 1, 1], dtype=np.uint64)
        labels, uniques = algos.factorize(data)
        tm.assert_numpy_array_equal(labels, exp_labels)
        tm.assert_numpy_array_equal(uniques, exp_uniques)
    def test_int64_factorize(self, writable):
        # Extremes of the int64 range.
        data = np.array([2**63 - 1, -2**63, 2**63 - 1], dtype=np.int64)
        data.setflags(write=writable)
        exp_labels = np.array([0, 1, 0], dtype=np.intp)
        exp_uniques = np.array([2**63 - 1, -2**63], dtype=np.int64)
        labels, uniques = algos.factorize(data)
        tm.assert_numpy_array_equal(labels, exp_labels)
        tm.assert_numpy_array_equal(uniques, exp_uniques)
    def test_string_factorize(self, writable):
        data = np.array(['a', 'c', 'a', 'b', 'c'],
                        dtype=object)
        data.setflags(write=writable)
        exp_labels = np.array([0, 1, 0, 2, 1], dtype=np.intp)
        exp_uniques = np.array(['a', 'c', 'b'], dtype=object)
        labels, uniques = algos.factorize(data)
        tm.assert_numpy_array_equal(labels, exp_labels)
        tm.assert_numpy_array_equal(uniques, exp_uniques)
    def test_object_factorize(self, writable):
        # None, np.nan and pd.NaT all map to the -1 sentinel.
        data = np.array(['a', 'c', None, np.nan, 'a', 'b', pd.NaT, 'c'],
                        dtype=object)
        data.setflags(write=writable)
        exp_labels = np.array([0, 1, -1, -1, 0, 2, -1, 1], dtype=np.intp)
        exp_uniques = np.array(['a', 'c', 'b'], dtype=object)
        labels, uniques = algos.factorize(data)
        tm.assert_numpy_array_equal(labels, exp_labels)
        tm.assert_numpy_array_equal(uniques, exp_uniques)
    def test_deprecate_order(self):
        # gh 19727 - check warning is raised for deprecated keyword, order.
        # Test not valid once order keyword is removed.
        data = np.array([2**63, 1, 2**63], dtype=np.uint64)
        with tm.assert_produces_warning(expected_warning=FutureWarning):
            algos.factorize(data, order=True)
        # No warning without the deprecated keyword.
        with tm.assert_produces_warning(False):
            algos.factorize(data)
    @pytest.mark.parametrize('data', [
        np.array([0, 1, 0], dtype='u8'),
        np.array([-2**63, 1, -2**63], dtype='i8'),
        np.array(['__nan__', 'foo', '__nan__'], dtype='object'),
    ])
    def test_parametrized_factorize_na_value_default(self, data):
        # arrays that include the NA default for that type, but isn't used.
        l, u = algos.factorize(data)
        expected_uniques = data[[0, 1]]
        expected_labels = np.array([0, 1, 0], dtype=np.intp)
        tm.assert_numpy_array_equal(l, expected_labels)
        tm.assert_numpy_array_equal(u, expected_uniques)
    @pytest.mark.parametrize('data, na_value', [
        (np.array([0, 1, 0, 2], dtype='u8'), 0),
        (np.array([1, 0, 1, 2], dtype='u8'), 1),
        (np.array([-2**63, 1, -2**63, 0], dtype='i8'), -2**63),
        (np.array([1, -2**63, 1, 0], dtype='i8'), 1),
        (np.array(['a', '', 'a', 'b'], dtype=object), 'a'),
        (np.array([(), ('a', 1), (), ('a', 2)], dtype=object), ()),
        (np.array([('a', 1), (), ('a', 1), ('a', 2)], dtype=object),
         ('a', 1)),
    ])
    def test_parametrized_factorize_na_value(self, data, na_value):
        # Explicit na_value: occurrences of it get the -1 sentinel.
        l, u = algos._factorize_array(data, na_value=na_value)
        expected_uniques = data[[1, 3]]
        expected_labels = np.array([-1, 0, -1, 1], dtype=np.intp)
        tm.assert_numpy_array_equal(l, expected_labels)
        tm.assert_numpy_array_equal(u, expected_uniques)
class TestUnique(object):
    """Tests for algos.unique / pd.unique across dtypes and containers."""
    def test_ints(self):
        arr = np.random.randint(0, 100, size=50)
        result = algos.unique(arr)
        assert isinstance(result, np.ndarray)
    def test_objects(self):
        arr = np.random.randint(0, 100, size=50).astype('O')
        result = algos.unique(arr)
        assert isinstance(result, np.ndarray)
    def test_object_refcount_bug(self):
        # Repeated calls must not corrupt refcounts of the list elements.
        lst = ['A', 'B', 'C', 'D', 'E']
        for i in range(1000):
            len(algos.unique(lst))
    def test_on_index_object(self):
        # unique on a MultiIndex returns its unique tuple values.
        mindex = pd.MultiIndex.from_arrays([np.arange(5).repeat(5), np.tile(
            np.arange(5), 5)])
        expected = mindex.values
        expected.sort()
        mindex = mindex.repeat(2)
        result = pd.unique(mindex)
        result.sort()
        tm.assert_almost_equal(result, expected)
    def test_datetime64_dtype_array_returned(self):
        # GH 9431: dtype must be preserved for Index, Series and ndarray.
        expected = np_array_datetime64_compat(
            ['2015-01-03T00:00:00.000000000+0000',
             '2015-01-01T00:00:00.000000000+0000'],
            dtype='M8[ns]')
        dt_index = pd.to_datetime(['2015-01-03T00:00:00.000000000',
                                   '2015-01-01T00:00:00.000000000',
                                   '2015-01-01T00:00:00.000000000'])
        result = algos.unique(dt_index)
        tm.assert_numpy_array_equal(result, expected)
        assert result.dtype == expected.dtype
        s = Series(dt_index)
        result = algos.unique(s)
        tm.assert_numpy_array_equal(result, expected)
        assert result.dtype == expected.dtype
        arr = s.values
        result = algos.unique(arr)
        tm.assert_numpy_array_equal(result, expected)
        assert result.dtype == expected.dtype
    def test_timedelta64_dtype_array_returned(self):
        # GH 9431: same dtype-preservation checks for timedelta64.
        expected = np.array([31200, 45678, 10000], dtype='m8[ns]')
        td_index = pd.to_timedelta([31200, 45678, 31200, 10000, 45678])
        result = algos.unique(td_index)
        tm.assert_numpy_array_equal(result, expected)
        assert result.dtype == expected.dtype
        s = Series(td_index)
        result = algos.unique(s)
        tm.assert_numpy_array_equal(result, expected)
        assert result.dtype == expected.dtype
        arr = s.values
        result = algos.unique(arr)
        tm.assert_numpy_array_equal(result, expected)
        assert result.dtype == expected.dtype
    def test_uint64_overflow(self):
        # Values above int64 range must survive unchanged.
        s = Series([1, 2, 2**63, 2**63], dtype=np.uint64)
        exp = np.array([1, 2, 2**63], dtype=np.uint64)
        tm.assert_numpy_array_equal(algos.unique(s), exp)
    def test_nan_in_object_array(self):
        duplicated_items = ['a', np.nan, 'c', 'c']
        result = pd.unique(duplicated_items)
        expected = np.array(['a', np.nan, 'c'], dtype=object)
        tm.assert_numpy_array_equal(result, expected)
    def test_categorical(self):
        # we are expecting to return in the order
        # of appearance
        expected = Categorical(list('bac'), categories=list('bac'))
        # we are expecting to return in the order
        # of the categories
        expected_o = Categorical(
            list('bac'), categories=list('abc'), ordered=True)
        # GH 15939
        c = Categorical(list('baabc'))
        result = c.unique()
        tm.assert_categorical_equal(result, expected)
        result = algos.unique(c)
        tm.assert_categorical_equal(result, expected)
        c = Categorical(list('baabc'), ordered=True)
        result = c.unique()
        tm.assert_categorical_equal(result, expected_o)
        result = algos.unique(c)
        tm.assert_categorical_equal(result, expected_o)
        # Series of categorical dtype
        s = Series(Categorical(list('baabc')), name='foo')
        result = s.unique()
        tm.assert_categorical_equal(result, expected)
        result = pd.unique(s)
        tm.assert_categorical_equal(result, expected)
        # CI -> return CI
        ci = CategoricalIndex(Categorical(list('baabc'),
                                          categories=list('bac')))
        expected = CategoricalIndex(expected)
        result = ci.unique()
        tm.assert_index_equal(result, expected)
        result = pd.unique(ci)
        tm.assert_index_equal(result, expected)
    def test_datetime64tz_aware(self):
        # GH 15939: tz-aware values keep their tz; Series.unique returns
        # an object ndarray of Timestamps, Index.unique a DatetimeIndex.
        result = Series(
            Index([Timestamp('20160101', tz='US/Eastern'),
                   Timestamp('20160101', tz='US/Eastern')])).unique()
        expected = np.array([Timestamp('2016-01-01 00:00:00-0500',
                                       tz='US/Eastern')], dtype=object)
        tm.assert_numpy_array_equal(result, expected)
        result = Index([Timestamp('20160101', tz='US/Eastern'),
                        Timestamp('20160101', tz='US/Eastern')]).unique()
        expected = DatetimeIndex(['2016-01-01 00:00:00'],
                                 dtype='datetime64[ns, US/Eastern]', freq=None)
        tm.assert_index_equal(result, expected)
        result = pd.unique(
            Series(Index([Timestamp('20160101', tz='US/Eastern'),
                          Timestamp('20160101', tz='US/Eastern')])))
        expected = np.array([Timestamp('2016-01-01 00:00:00-0500',
                                       tz='US/Eastern')], dtype=object)
        tm.assert_numpy_array_equal(result, expected)
        result = pd.unique(Index([Timestamp('20160101', tz='US/Eastern'),
                                  Timestamp('20160101', tz='US/Eastern')]))
        expected = DatetimeIndex(['2016-01-01 00:00:00'],
                                 dtype='datetime64[ns, US/Eastern]', freq=None)
        tm.assert_index_equal(result, expected)
    def test_order_of_appearance(self):
        # 9346
        # light testing of guarantee of order of appearance
        # these also are the doc-examples
        result = pd.unique(Series([2, 1, 3, 3]))
        tm.assert_numpy_array_equal(result,
                                    np.array([2, 1, 3], dtype='int64'))
        result = pd.unique(Series([2] + [1] * 5))
        tm.assert_numpy_array_equal(result,
                                    np.array([2, 1], dtype='int64'))
        result = pd.unique(Series([Timestamp('20160101'),
                                   Timestamp('20160101')]))
        expected = np.array(['2016-01-01T00:00:00.000000000'],
                            dtype='datetime64[ns]')
        tm.assert_numpy_array_equal(result, expected)
        result = pd.unique(Index(
            [Timestamp('20160101', tz='US/Eastern'),
             Timestamp('20160101', tz='US/Eastern')]))
        expected = DatetimeIndex(['2016-01-01 00:00:00'],
                                 dtype='datetime64[ns, US/Eastern]',
                                 freq=None)
        tm.assert_index_equal(result, expected)
        result = pd.unique(list('aabc'))
        expected = np.array(['a', 'b', 'c'], dtype=object)
        tm.assert_numpy_array_equal(result, expected)
        result = pd.unique(Series(Categorical(list('aabc'))))
        expected = Categorical(list('abc'))
        tm.assert_categorical_equal(result, expected)
    @pytest.mark.parametrize("arg ,expected", [
        (('1', '1', '2'), np.array(['1', '2'], dtype=object)),
        (('foo',), np.array(['foo'], dtype=object))
    ])
    def test_tuple_with_strings(self, arg, expected):
        # see GH 17108
        result = pd.unique(arg)
        tm.assert_numpy_array_equal(result, expected)
    def test_obj_none_preservation(self):
        # GH 20866: None must come back as None, not converted to NaN.
        arr = np.array(['foo', None], dtype=object)
        result = pd.unique(arr)
        expected = np.array(['foo', None], dtype=object)
        tm.assert_numpy_array_equal(result, expected, strict_nan=True)
    def test_signed_zero(self):
        # GH 21866
        a = np.array([-0.0, 0.0])
        result = pd.unique(a)
        expected = np.array([-0.0])  # 0.0 and -0.0 are equivalent
        tm.assert_numpy_array_equal(result, expected)
    def test_different_nans(self):
        # GH 21866
        # create different nans from bit-patterns:
        NAN1 = struct.unpack("d", struct.pack("=Q", 0x7ff8000000000000))[0]
        NAN2 = struct.unpack("d", struct.pack("=Q", 0x7ff8000000000001))[0]
        assert NAN1 != NAN1
        assert NAN2 != NAN2
        a = np.array([NAN1, NAN2])  # NAN1 and NAN2 are equivalent
        result = pd.unique(a)
        expected = np.array([np.nan])
        tm.assert_numpy_array_equal(result, expected)
    def test_first_nan_kept(self):
        # GH 22295
        # create different nans from bit-patterns:
        bits_for_nan1 = 0xfff8000000000001
        bits_for_nan2 = 0x7ff8000000000001
        NAN1 = struct.unpack("d", struct.pack("=Q", bits_for_nan1))[0]
        NAN2 = struct.unpack("d", struct.pack("=Q", bits_for_nan2))[0]
        assert NAN1 != NAN1
        assert NAN2 != NAN2
        for el_type in [np.float64, np.object]:
            a = np.array([NAN1, NAN2], dtype=el_type)
            result = pd.unique(a)
            assert result.size == 1
            # use bit patterns to identify which nan was kept:
            result_nan_bits = struct.unpack("=Q",
                                            struct.pack("d", result[0]))[0]
            assert result_nan_bits == bits_for_nan1
    def test_do_not_mangle_na_values(self, unique_nulls_fixture,
                                     unique_nulls_fixture2):
        # GH 22295: distinct null objects must not be collapsed or replaced.
        if unique_nulls_fixture is unique_nulls_fixture2:
            return  # skip it, values not unique
        a = np.array([unique_nulls_fixture,
                      unique_nulls_fixture2], dtype=np.object)
        result = pd.unique(a)
        assert result.size == 2
        assert a[0] is unique_nulls_fixture
        assert a[1] is unique_nulls_fixture2
class TestIsin(object):
    """Tests for algos.isin membership semantics, in particular NaN
    handling (GH 22160 / 22119 / 21866)."""
    def test_invalid(self):
        # Scalars (non-list-likes) are rejected on either side.
        pytest.raises(TypeError, lambda: algos.isin(1, 1))
        pytest.raises(TypeError, lambda: algos.isin(1, [1]))
        pytest.raises(TypeError, lambda: algos.isin([1], 1))
    def test_basic(self):
        # list / ndarray / Series / set containers all behave the same.
        result = algos.isin([1, 2], [1])
        expected = np.array([True, False])
        tm.assert_numpy_array_equal(result, expected)
        result = algos.isin(np.array([1, 2]), [1])
        expected = np.array([True, False])
        tm.assert_numpy_array_equal(result, expected)
        result = algos.isin(Series([1, 2]), [1])
        expected = np.array([True, False])
        tm.assert_numpy_array_equal(result, expected)
        result = algos.isin(Series([1, 2]), Series([1]))
        expected = np.array([True, False])
        tm.assert_numpy_array_equal(result, expected)
        result = algos.isin(Series([1, 2]), {1})
        expected = np.array([True, False])
        tm.assert_numpy_array_equal(result, expected)
        result = algos.isin(['a', 'b'], ['a'])
        expected = np.array([True, False])
        tm.assert_numpy_array_equal(result, expected)
        result = algos.isin(Series(['a', 'b']), Series(['a']))
        expected = np.array([True, False])
        tm.assert_numpy_array_equal(result, expected)
        result = algos.isin(Series(['a', 'b']), {'a'})
        expected = np.array([True, False])
        tm.assert_numpy_array_equal(result, expected)
        # No cross-type matches between strings and ints.
        result = algos.isin(['a', 'b'], [1])
        expected = np.array([False, False])
        tm.assert_numpy_array_equal(result, expected)
    def test_i8(self):
        # datetime64 and timedelta64 go through the i8 fast path.
        arr = pd.date_range('20130101', periods=3).values
        result = algos.isin(arr, [arr[0]])
        expected = np.array([True, False, False])
        tm.assert_numpy_array_equal(result, expected)
        result = algos.isin(arr, arr[0:2])
        expected = np.array([True, True, False])
        tm.assert_numpy_array_equal(result, expected)
        result = algos.isin(arr, set(arr[0:2]))
        expected = np.array([True, True, False])
        tm.assert_numpy_array_equal(result, expected)
        arr = pd.timedelta_range('1 day', periods=3).values
        result = algos.isin(arr, [arr[0]])
        expected = np.array([True, False, False])
        tm.assert_numpy_array_equal(result, expected)
        result = algos.isin(arr, arr[0:2])
        expected = np.array([True, True, False])
        tm.assert_numpy_array_equal(result, expected)
        result = algos.isin(arr, set(arr[0:2]))
        expected = np.array([True, True, False])
        tm.assert_numpy_array_equal(result, expected)
    def test_large(self):
        # Large inputs take the hashtable (not sort-based) code path.
        s = pd.date_range('20000101', periods=2000000, freq='s').values
        result = algos.isin(s, s[0:2])
        expected = np.zeros(len(s), dtype=bool)
        expected[0] = True
        expected[1] = True
        tm.assert_numpy_array_equal(result, expected)
    def test_categorical_from_codes(self):
        # GH 16639
        vals = np.array([0, 1, 2, 0])
        cats = ['a', 'b', 'c']
        Sd = Series(Categorical(1).from_codes(vals, cats))
        St = Series(Categorical(1).from_codes(np.array([0, 1]), cats))
        expected = np.array([True, True, False, True])
        result = algos.isin(Sd, St)
        tm.assert_numpy_array_equal(expected, result)
    def test_same_nan_is_in(self):
        # GH 22160
        # nan is special: "a is b" does not imply "a == b"; still,
        # isin() should follow python's "np.nan in [nan] == True"
        # casting to np.float64 (yielding another nan float object)
        # somewhere on the way could jeopardize this behavior
        comps = [np.nan]  # could be casted to float64
        values = [np.nan]
        expected = np.array([True])
        result = algos.isin(comps, values)
        tm.assert_numpy_array_equal(expected, result)
    def test_same_object_is_in(self):
        # GH 22160
        # there could be special treatment for nans
        # the user however could define a custom class
        # with similar behavior, then we at least should
        # fall back to usual python's behavior: "a in [a] == True"
        class LikeNan(object):
            # FIX: __eq__ must accept the comparand; the previous
            # one-argument signature (def __eq__(self)) raised TypeError
            # whenever equality was actually evaluated instead of
            # returning False. Behavior is unchanged: never equal.
            def __eq__(self, other):
                return False
            def __hash__(self):
                return 0
        a, b = LikeNan(), LikeNan()
        # same object -> True
        tm.assert_numpy_array_equal(algos.isin([a], [a]), np.array([True]))
        # different objects -> False
        tm.assert_numpy_array_equal(algos.isin([a], [b]), np.array([False]))
    def test_different_nans(self):
        # GH 22160
        # all nans are handled as equivalent
        comps = [float('nan')]
        values = [float('nan')]
        assert comps[0] is not values[0]  # different nan-objects
        # as list of python-objects:
        result = algos.isin(comps, values)
        tm.assert_numpy_array_equal(np.array([True]), result)
        # as object-array:
        result = algos.isin(np.asarray(comps, dtype=np.object),
                            np.asarray(values, dtype=np.object))
        tm.assert_numpy_array_equal(np.array([True]), result)
        # as float64-array:
        result = algos.isin(np.asarray(comps, dtype=np.float64),
                            np.asarray(values, dtype=np.float64))
        tm.assert_numpy_array_equal(np.array([True]), result)
    def test_no_cast(self):
        # GH 22160
        # ensure 42 is not casted to a string
        comps = ['ss', 42]
        values = ['42']
        expected = np.array([False, False])
        result = algos.isin(comps, values)
        tm.assert_numpy_array_equal(expected, result)
    @pytest.mark.parametrize("empty", [[], Series(), np.array([])])
    def test_empty(self, empty):
        # see gh-16991: empty `values` means nothing is a member.
        vals = Index(["a", "b"])
        expected = np.array([False, False])
        result = algos.isin(vals, empty)
        tm.assert_numpy_array_equal(expected, result)
    def test_different_nan_objects(self):
        # GH 22119: the string 'nan' and complex nan must not match float nan.
        comps = np.array(['nan', np.nan * 1j, float('nan')], dtype=np.object)
        vals = np.array([float('nan')], dtype=np.object)
        expected = np.array([False, False, True])
        result = algos.isin(comps, vals)
        tm.assert_numpy_array_equal(expected, result)
    def test_different_nans_as_float64(self):
        # GH 21866
        # create different nans from bit-patterns,
        # these nans will land in different buckets in the hash-table
        # if no special care is taken
        NAN1 = struct.unpack("d", struct.pack("=Q", 0x7ff8000000000000))[0]
        NAN2 = struct.unpack("d", struct.pack("=Q", 0x7ff8000000000001))[0]
        assert NAN1 != NAN1
        assert NAN2 != NAN2
        # check that NAN1 and NAN2 are equivalent:
        arr = np.array([NAN1, NAN2], dtype=np.float64)
        lookup1 = np.array([NAN1], dtype=np.float64)
        result = algos.isin(arr, lookup1)
        expected = np.array([True, True])
        tm.assert_numpy_array_equal(result, expected)
        lookup2 = np.array([NAN2], dtype=np.float64)
        result = algos.isin(arr, lookup2)
        expected = np.array([True, True])
        tm.assert_numpy_array_equal(result, expected)
class TestValueCounts(object):
    """Tests for algos.value_counts / Series.value_counts."""
    def test_value_counts(self):
        np.random.seed(1234)
        from pandas.core.reshape.tile import cut
        arr = np.random.randn(4)
        factor = cut(arr, 4)
        # assert isinstance(factor, n)
        result = algos.value_counts(factor)
        breaks = [-1.194, -0.535, 0.121, 0.777, 1.433]
        index = IntervalIndex.from_breaks(breaks).astype(CDT(ordered=True))
        expected = Series([1, 1, 1, 1], index=index)
        tm.assert_series_equal(result.sort_index(), expected.sort_index())
    def test_value_counts_bins(self):
        # bins=N buckets the values into N intervals before counting.
        s = [1, 2, 3, 4]
        result = algos.value_counts(s, bins=1)
        expected = Series([4],
                          index=IntervalIndex.from_tuples([(0.996, 4.0)]))
        tm.assert_series_equal(result, expected)
        result = algos.value_counts(s, bins=2, sort=False)
        expected = Series([2, 2],
                          index=IntervalIndex.from_tuples([(0.996, 2.5),
                                                           (2.5, 4.0)]))
        tm.assert_series_equal(result, expected)
    def test_value_counts_dtypes(self):
        # int 1 and float 1. count as the same value...
        result = algos.value_counts([1, 1.])
        assert len(result) == 1
        result = algos.value_counts([1, 1.], bins=1)
        assert len(result) == 1
        # ...but the string '1' stays distinct in an object array.
        result = algos.value_counts(Series([1, 1., '1']))  # object
        assert len(result) == 2
        # bins on non-numeric data is invalid.
        pytest.raises(TypeError, lambda s: algos.value_counts(s, bins=1),
                      ['1', 1])
    def test_value_counts_nat(self):
        # NaT is excluded unless dropna=False.
        td = Series([np.timedelta64(10000), pd.NaT], dtype='timedelta64[ns]')
        dt = pd.to_datetime(['NaT', '2014-01-01'])
        for s in [td, dt]:
            vc = algos.value_counts(s)
            vc_with_na = algos.value_counts(s, dropna=False)
            assert len(vc) == 1
            assert len(vc_with_na) == 2
        exp_dt = Series({Timestamp('2014-01-01 00:00:00'): 1})
        tm.assert_series_equal(algos.value_counts(dt), exp_dt)
        # TODO same for (timedelta)
    def test_value_counts_datetime_outofbounds(self):
        # GH 13663: out-of-ns-bounds datetimes stay as python objects.
        s = Series([datetime(3000, 1, 1), datetime(5000, 1, 1),
                    datetime(5000, 1, 1), datetime(6000, 1, 1),
                    datetime(3000, 1, 1), datetime(3000, 1, 1)])
        res = s.value_counts()
        exp_index = Index([datetime(3000, 1, 1), datetime(5000, 1, 1),
                           datetime(6000, 1, 1)], dtype=object)
        exp = Series([3, 2, 1], index=exp_index)
        tm.assert_series_equal(res, exp)
        # GH 12424
        res = pd.to_datetime(Series(['2362-01-01', np.nan]),
                             errors='ignore')
        exp = Series(['2362-01-01', np.nan], dtype=object)
        tm.assert_series_equal(res, exp)
    def test_categorical(self):
        s = Series(Categorical(list('aaabbc')))
        result = s.value_counts()
        expected = Series([3, 2, 1], index=CategoricalIndex(['a', 'b', 'c']))
        tm.assert_series_equal(result, expected, check_index_type=True)
        # preserve order?
        s = s.cat.as_ordered()
        result = s.value_counts()
        expected.index = expected.index.as_ordered()
        tm.assert_series_equal(result, expected, check_index_type=True)
    def test_categorical_nans(self):
        s = Series(Categorical(list('aaaaabbbcc')))  # 4,3,2,1 (nan)
        s.iloc[1] = np.nan
        result = s.value_counts()
        expected = Series([4, 3, 2], index=CategoricalIndex(
            ['a', 'b', 'c'], categories=['a', 'b', 'c']))
        tm.assert_series_equal(result, expected, check_index_type=True)
        # dropna=False appends the NaN count at the end.
        result = s.value_counts(dropna=False)
        expected = Series([
            4, 3, 2, 1
        ], index=CategoricalIndex(['a', 'b', 'c', np.nan]))
        tm.assert_series_equal(result, expected, check_index_type=True)
        # out of order
        s = Series(Categorical(
            list('aaaaabbbcc'), ordered=True, categories=['b', 'a', 'c']))
        s.iloc[1] = np.nan
        result = s.value_counts()
        expected = Series([4, 3, 2], index=CategoricalIndex(
            ['a', 'b', 'c'], categories=['b', 'a', 'c'], ordered=True))
        tm.assert_series_equal(result, expected, check_index_type=True)
        result = s.value_counts(dropna=False)
        expected = Series([4, 3, 2, 1], index=CategoricalIndex(
            ['a', 'b', 'c', np.nan], categories=['b', 'a', 'c'], ordered=True))
        tm.assert_series_equal(result, expected, check_index_type=True)
    def test_categorical_zeroes(self):
        # keep the `d` category with 0
        s = Series(Categorical(
            list('bbbaac'), categories=list('abcd'), ordered=True))
        result = s.value_counts()
        expected = Series([3, 2, 1, 0], index=Categorical(
            ['b', 'a', 'c', 'd'], categories=list('abcd'), ordered=True))
        tm.assert_series_equal(result, expected, check_index_type=True)
    def test_dropna(self):
        # https://github.com/pandas-dev/pandas/issues/9443#issuecomment-73719328
        tm.assert_series_equal(
            Series([True, True, False]).value_counts(dropna=True),
            Series([2, 1], index=[True, False]))
        tm.assert_series_equal(
            Series([True, True, False]).value_counts(dropna=False),
            Series([2, 1], index=[True, False]))
        tm.assert_series_equal(
            Series([True, True, False, None]).value_counts(dropna=True),
            Series([2, 1], index=[True, False]))
        tm.assert_series_equal(
            Series([True, True, False, None]).value_counts(dropna=False),
            Series([2, 1, 1], index=[True, False, np.nan]))
        tm.assert_series_equal(
            Series([10.3, 5., 5.]).value_counts(dropna=True),
            Series([2, 1], index=[5., 10.3]))
        tm.assert_series_equal(
            Series([10.3, 5., 5.]).value_counts(dropna=False),
            Series([2, 1], index=[5., 10.3]))
        tm.assert_series_equal(
            Series([10.3, 5., 5., None]).value_counts(dropna=True),
            Series([2, 1], index=[5., 10.3]))
        # 32-bit linux has a different ordering
        if not compat.is_platform_32bit():
            result = Series([10.3, 5., 5., None]).value_counts(dropna=False)
            expected = Series([2, 1, 1], index=[5., 10.3, np.nan])
            tm.assert_series_equal(result, expected)
    def test_value_counts_normalized(self):
        # GH12558: normalize=True returns relative frequencies.
        s = Series([1, 2, np.nan, np.nan, np.nan])
        dtypes = (np.float64, np.object, 'M8[ns]')
        for t in dtypes:
            s_typed = s.astype(t)
            result = s_typed.value_counts(normalize=True, dropna=False)
            expected = Series([0.6, 0.2, 0.2],
                              index=Series([np.nan, 2.0, 1.0], dtype=t))
            tm.assert_series_equal(result, expected)
            result = s_typed.value_counts(normalize=True, dropna=True)
            expected = Series([0.5, 0.5],
                              index=Series([2.0, 1.0], dtype=t))
            tm.assert_series_equal(result, expected)
    def test_value_counts_uint64(self):
        arr = np.array([2**63], dtype=np.uint64)
        expected = Series([1], index=[2**63])
        result = algos.value_counts(arr)
        tm.assert_series_equal(result, expected)
        # Mixing negative ints with > int64-range values -> object path.
        arr = np.array([-1, 2**63], dtype=object)
        expected = Series([1, 1], index=[-1, 2**63])
        result = algos.value_counts(arr)
        # 32-bit linux has a different ordering
        if not compat.is_platform_32bit():
            tm.assert_series_equal(result, expected)
class TestDuplicated(object):
    # Tests for algos.duplicated and the Index/Series .duplicated methods,
    # covering the keep='first' (default) / keep='last' / keep=False modes.

    def test_duplicated_with_nas(self):
        # NaN entries must match each other when detecting duplicates
        keys = np.array([0, 1, np.nan, 0, 2, np.nan], dtype=object)

        result = algos.duplicated(keys)
        expected = np.array([False, False, False, True, False, True])
        tm.assert_numpy_array_equal(result, expected)

        # keep='first' is the default: first occurrence is not flagged
        result = algos.duplicated(keys, keep='first')
        expected = np.array([False, False, False, True, False, True])
        tm.assert_numpy_array_equal(result, expected)

        # keep='last': the last occurrence is the one retained
        result = algos.duplicated(keys, keep='last')
        expected = np.array([True, False, True, False, False, False])
        tm.assert_numpy_array_equal(result, expected)

        # keep=False: every member of a duplicated group is flagged
        result = algos.duplicated(keys, keep=False)
        expected = np.array([True, False, True, True, False, True])
        tm.assert_numpy_array_equal(result, expected)

        # tuples containing NaN must also compare element-wise
        keys = np.empty(8, dtype=object)
        for i, t in enumerate(zip([0, 0, np.nan, np.nan] * 2,
                                  [0, np.nan, 0, np.nan] * 2)):
            keys[i] = t

        result = algos.duplicated(keys)
        falses = [False] * 4
        trues = [True] * 4
        expected = np.array(falses + trues)
        tm.assert_numpy_array_equal(result, expected)

        result = algos.duplicated(keys, keep='last')
        expected = np.array(trues + falses)
        tm.assert_numpy_array_equal(result, expected)

        result = algos.duplicated(keys, keep=False)
        expected = np.array(trues + trues)
        tm.assert_numpy_array_equal(result, expected)

    # one case per dtype family: int, float (with NaN), complex, object
    # strings, and uint64 beyond the int64 range
    @pytest.mark.parametrize('case', [
        np.array([1, 2, 1, 5, 3,
                  2, 4, 1, 5, 6]),
        np.array([1.1, 2.2, 1.1, np.nan, 3.3,
                  2.2, 4.4, 1.1, np.nan, 6.6]),
        np.array([1 + 1j, 2 + 2j, 1 + 1j, 5 + 5j, 3 + 3j,
                  2 + 2j, 4 + 4j, 1 + 1j, 5 + 5j, 6 + 6j]),
        np.array(['a', 'b', 'a', 'e', 'c',
                  'b', 'd', 'a', 'e', 'f'], dtype=object),
        np.array([1, 2**63, 1, 3**5, 10, 2**63, 39, 1, 3**5, 7],
                 dtype=np.uint64),
    ])
    def test_numeric_object_likes(self, case):
        # every case above shares the same duplication pattern, so the
        # expected masks are fixed regardless of dtype
        exp_first = np.array([False, False, True, False, False,
                              True, False, True, True, False])
        exp_last = np.array([True, True, True, True, False,
                             False, False, False, False, False])
        exp_false = exp_first | exp_last

        res_first = algos.duplicated(case, keep='first')
        tm.assert_numpy_array_equal(res_first, exp_first)

        res_last = algos.duplicated(case, keep='last')
        tm.assert_numpy_array_equal(res_last, exp_last)

        res_false = algos.duplicated(case, keep=False)
        tm.assert_numpy_array_equal(res_false, exp_false)

        # index: plain and categorical wrappers must agree with algos
        for idx in [Index(case), Index(case, dtype='category')]:
            res_first = idx.duplicated(keep='first')
            tm.assert_numpy_array_equal(res_first, exp_first)

            res_last = idx.duplicated(keep='last')
            tm.assert_numpy_array_equal(res_last, exp_last)

            res_false = idx.duplicated(keep=False)
            tm.assert_numpy_array_equal(res_false, exp_false)

        # series
        for s in [Series(case), Series(case, dtype='category')]:
            res_first = s.duplicated(keep='first')
            tm.assert_series_equal(res_first, Series(exp_first))

            res_last = s.duplicated(keep='last')
            tm.assert_series_equal(res_last, Series(exp_last))

            res_false = s.duplicated(keep=False)
            tm.assert_series_equal(res_false, Series(exp_false))

    def test_datetime_likes(self):
        # same duplication pattern as the numeric cases, expressed with
        # datetimes/timedeltas; NaT plays the role of NaN
        dt = ['2011-01-01', '2011-01-02', '2011-01-01', 'NaT', '2011-01-03',
              '2011-01-02', '2011-01-04', '2011-01-01', 'NaT', '2011-01-06']
        td = ['1 days', '2 days', '1 days', 'NaT', '3 days',
              '2 days', '4 days', '1 days', 'NaT', '6 days']

        cases = [np.array([Timestamp(d) for d in dt]),
                 np.array([Timestamp(d, tz='US/Eastern') for d in dt]),
                 np.array([pd.Period(d, freq='D') for d in dt]),
                 np.array([np.datetime64(d) for d in dt]),
                 np.array([pd.Timedelta(d) for d in td])]

        exp_first = np.array([False, False, True, False, False,
                              True, False, True, True, False])
        exp_last = np.array([True, True, True, True, False,
                             False, False, False, False, False])
        exp_false = exp_first | exp_last

        for case in cases:
            res_first = algos.duplicated(case, keep='first')
            tm.assert_numpy_array_equal(res_first, exp_first)

            res_last = algos.duplicated(case, keep='last')
            tm.assert_numpy_array_equal(res_last, exp_last)

            res_false = algos.duplicated(case, keep=False)
            tm.assert_numpy_array_equal(res_false, exp_false)

            # index: also check the object-dtype view of each case
            for idx in [Index(case), Index(case, dtype='category'),
                        Index(case, dtype=object)]:
                res_first = idx.duplicated(keep='first')
                tm.assert_numpy_array_equal(res_first, exp_first)

                res_last = idx.duplicated(keep='last')
                tm.assert_numpy_array_equal(res_last, exp_last)

                res_false = idx.duplicated(keep=False)
                tm.assert_numpy_array_equal(res_false, exp_false)

            # series
            for s in [Series(case), Series(case, dtype='category'),
                      Series(case, dtype=object)]:
                res_first = s.duplicated(keep='first')
                tm.assert_series_equal(res_first, Series(exp_first))

                res_last = s.duplicated(keep='last')
                tm.assert_series_equal(res_last, Series(exp_last))

                res_false = s.duplicated(keep=False)
                tm.assert_series_equal(res_false, Series(exp_false))

    def test_unique_index(self):
        # a unique index reports is_unique and an all-False duplicated mask
        cases = [Index([1, 2, 3]), pd.RangeIndex(0, 3)]
        for case in cases:
            assert case.is_unique is True
            tm.assert_numpy_array_equal(case.duplicated(),
                                        np.array([False, False, False]))

    @pytest.mark.parametrize('arr, unique', [
        ([(0, 0), (0, 1), (1, 0), (1, 1), (0, 0), (0, 1), (1, 0), (1, 1)],
         [(0, 0), (0, 1), (1, 0), (1, 1)]),
        ([('b', 'c'), ('a', 'b'), ('a', 'b'), ('b', 'c')],
         [('b', 'c'), ('a', 'b')]),
        ([('a', 1), ('b', 2), ('a', 3), ('a', 1)],
         [('a', 1), ('b', 2), ('a', 3)]),
    ])
    def test_unique_tuples(self, arr, unique):
        # https://github.com/pandas-dev/pandas/issues/16519
        # pd.unique must treat tuples as scalars, keeping first-seen order.
        # Build the expected object array by assignment so the tuples are
        # not unpacked into a 2-D array.
        expected = np.empty(len(unique), dtype=object)
        expected[:] = unique

        result = pd.unique(arr)
        tm.assert_numpy_array_equal(result, expected)
class GroupVarTestMixin(object):
    # Shared tests for the libgroupby.group_var_* kernels.  Concrete
    # subclasses supply ``algo`` (the cython function), ``dtype`` and
    # ``rtol`` and set ``__test__ = True``.

    def test_group_var_generic_1d(self):
        prng = RandomState(1234)  # fixed seed for reproducibility

        # 5 groups of 3 values each; out/counts are filled in place
        out = (np.nan * np.ones((5, 1))).astype(self.dtype)
        counts = np.zeros(5, dtype='int64')
        values = 10 * prng.rand(15, 1).astype(self.dtype)
        labels = np.tile(np.arange(5), (3, )).astype('int64')

        # reference: per-group sample variance (ddof=1) via numpy
        expected_out = (np.squeeze(values)
                        .reshape((5, 3), order='F')
                        .std(axis=1, ddof=1) ** 2)[:, np.newaxis]
        expected_counts = counts + 3

        self.algo(out, counts, values, labels)
        assert np.allclose(out, expected_out, self.rtol)
        tm.assert_numpy_array_equal(counts, expected_counts)

    def test_group_var_generic_1d_flat_labels(self):
        prng = RandomState(1234)

        # degenerate case: a single group containing all 5 values
        out = (np.nan * np.ones((1, 1))).astype(self.dtype)
        counts = np.zeros(1, dtype='int64')
        values = 10 * prng.rand(5, 1).astype(self.dtype)
        labels = np.zeros(5, dtype='int64')

        expected_out = np.array([[values.std(ddof=1) ** 2]])
        expected_counts = counts + 5

        self.algo(out, counts, values, labels)

        assert np.allclose(out, expected_out, self.rtol)
        tm.assert_numpy_array_equal(counts, expected_counts)

    def test_group_var_generic_2d_all_finite(self):
        prng = RandomState(1234)

        # two columns, 5 groups of 2 rows each, no NaNs
        out = (np.nan * np.ones((5, 2))).astype(self.dtype)
        counts = np.zeros(5, dtype='int64')
        values = 10 * prng.rand(10, 2).astype(self.dtype)
        labels = np.tile(np.arange(5), (2, )).astype('int64')

        expected_out = np.std(values.reshape(2, 5, 2), ddof=1, axis=0) ** 2
        expected_counts = counts + 2

        self.algo(out, counts, values, labels)
        assert np.allclose(out, expected_out, self.rtol)
        tm.assert_numpy_array_equal(counts, expected_counts)

    def test_group_var_generic_2d_some_nan(self):
        prng = RandomState(1234)

        # second column is entirely NaN -> its variances must stay NaN
        out = (np.nan * np.ones((5, 2))).astype(self.dtype)
        counts = np.zeros(5, dtype='int64')
        values = 10 * prng.rand(10, 2).astype(self.dtype)
        values[:, 1] = np.nan
        labels = np.tile(np.arange(5), (2, )).astype('int64')

        expected_out = np.vstack([values[:, 0]
                                  .reshape(5, 2, order='F')
                                  .std(ddof=1, axis=1) ** 2,
                                  np.nan * np.ones(5)]).T.astype(self.dtype)
        expected_counts = counts + 2

        self.algo(out, counts, values, labels)
        tm.assert_almost_equal(out, expected_out, check_less_precise=6)

        tm.assert_numpy_array_equal(counts, expected_counts)

    def test_group_var_constant(self):
        # Regression test from GH 10448: constant input must give a
        # variance of exactly 0, never a tiny negative number.
        out = np.array([[np.nan]], dtype=self.dtype)
        counts = np.array([0], dtype='int64')
        values = 0.832845131556193 * np.ones((3, 1), dtype=self.dtype)
        labels = np.zeros(3, dtype='int64')

        self.algo(out, counts, values, labels)

        assert counts[0] == 3
        assert out[0, 0] >= 0
        tm.assert_almost_equal(out[0, 0], 0.0)
class TestGroupVarFloat64(GroupVarTestMixin):
    # float64 instantiation of the shared group_var tests
    __test__ = True

    algo = libgroupby.group_var_float64
    dtype = np.float64
    rtol = 1e-5

    def test_group_var_large_inputs(self):
        prng = RandomState(1234)

        # values with a huge common offset (10**12): checks the kernel is
        # numerically stable; uniform variance should come out near 1/12
        out = np.array([[np.nan]], dtype=self.dtype)
        counts = np.array([0], dtype='int64')
        values = (prng.rand(10 ** 6) + 10 ** 12).astype(self.dtype)
        values.shape = (10 ** 6, 1)
        labels = np.zeros(10 ** 6, dtype='int64')

        self.algo(out, counts, values, labels)

        assert counts[0] == 10 ** 6
        tm.assert_almost_equal(out[0, 0], 1.0 / 12, check_less_precise=True)
class TestGroupVarFloat32(GroupVarTestMixin):
    # float32 instantiation; looser rtol for the lower precision
    __test__ = True

    algo = libgroupby.group_var_float32
    dtype = np.float32
    rtol = 1e-2
class TestHashTable(object):
    # Tests for the pandas._libs.hashtable classes (ht.*): lookup,
    # hashing edge cases, vector resizing, unique and factorize.

    def test_lookup_nan(self, writable):
        xs = np.array([2.718, 3.14, np.nan, -7, 5, 2, 3])
        # GH 21688 ensure we can deal with readonly memory views
        xs.setflags(write=writable)
        m = ht.Float64HashTable()
        m.map_locations(xs)
        # every value (including NaN) must map back to its own position
        tm.assert_numpy_array_equal(m.lookup(xs), np.arange(len(xs),
                                                            dtype=np.int64))

    def test_add_signed_zeros(self):
        # GH 21866 inconsistent hash-function for float64
        # default hash-function would lead to different hash-buckets
        # for 0.0 and -0.0 if there are more than 2^30 hash-buckets
        # but this would mean 16GB
        N = 4  # 12 * 10**8 would trigger the error, if you have enough memory
        m = ht.Float64HashTable(N)
        m.set_item(0.0, 0)
        m.set_item(-0.0, 0)
        assert len(m) == 1  # 0.0 and -0.0 are equivalent

    def test_add_different_nans(self):
        # GH 21866 inconsistent hash-function for float64
        # create different nans from bit-patterns:
        NAN1 = struct.unpack("d", struct.pack("=Q", 0x7ff8000000000000))[0]
        NAN2 = struct.unpack("d", struct.pack("=Q", 0x7ff8000000000001))[0]
        assert NAN1 != NAN1
        assert NAN2 != NAN2
        # default hash function would lead to different hash-buckets
        # for NAN1 and NAN2 even if there are only 4 buckets:
        m = ht.Float64HashTable()
        m.set_item(NAN1, 0)
        m.set_item(NAN2, 0)
        assert len(m) == 1  # NAN1 and NAN2 are equivalent

    def test_lookup_overflow(self, writable):
        # uint64 keys above int64 range must still round-trip
        xs = np.array([1, 2, 2**63], dtype=np.uint64)
        # GH 21688 ensure we can deal with readonly memory views
        xs.setflags(write=writable)
        m = ht.UInt64HashTable()
        m.map_locations(xs)
        tm.assert_numpy_array_equal(m.lookup(xs), np.arange(len(xs),
                                                            dtype=np.int64))

    def test_get_unique(self):
        # Series.unique on uint64 keeps first occurrences, incl. 2**63
        s = Series([1, 2, 2**63, 2**63], dtype=np.uint64)
        exp = np.array([1, 2, 2**63], dtype=np.uint64)
        tm.assert_numpy_array_equal(s.unique(), exp)

    @pytest.mark.parametrize('nvals', [0, 10])  # resizing to 0 is special case
    @pytest.mark.parametrize('htable, uniques, dtype, safely_resizes', [
        (ht.PyObjectHashTable, ht.ObjectVector, 'object', False),
        (ht.StringHashTable, ht.ObjectVector, 'object', True),
        (ht.Float64HashTable, ht.Float64Vector, 'float64', False),
        (ht.Int64HashTable, ht.Int64Vector, 'int64', False),
        (ht.UInt64HashTable, ht.UInt64Vector, 'uint64', False)])
    def test_vector_resize(self, writable, htable, uniques, dtype,
                           safely_resizes, nvals):
        # Test for memory errors after internal vector
        # reallocations (GH 7157)
        vals = np.array(np.random.randn(1000), dtype=dtype)

        # GH 21688 ensures we can deal with read-only memory views
        vals.setflags(write=writable)

        # initialise instances; cannot initialise in parametrization,
        # as otherwise external views would be held on the array (which is
        # one of the things this test is checking)
        htable = htable()
        uniques = uniques()

        # get_labels may append to uniques
        htable.get_labels(vals[:nvals], uniques, 0, -1)
        # to_array() sets an external_view_exists flag on uniques.
        tmp = uniques.to_array()
        oldshape = tmp.shape

        # subsequent get_labels() calls can no longer append to it
        # (except for StringHashTables + ObjectVector)
        if safely_resizes:
            htable.get_labels(vals, uniques, 0, -1)
        else:
            with tm.assert_raises_regex(ValueError, 'external reference.*'):
                htable.get_labels(vals, uniques, 0, -1)

        uniques.to_array()  # should not raise here
        assert tmp.shape == oldshape

    @pytest.mark.parametrize('htable, tm_dtype', [
        (ht.PyObjectHashTable, 'String'),
        (ht.StringHashTable, 'String'),
        (ht.Float64HashTable, 'Float'),
        (ht.Int64HashTable, 'Int'),
        (ht.UInt64HashTable, 'UInt')])
    def test_hashtable_unique(self, htable, tm_dtype, writable):
        # output of maker has guaranteed unique elements
        maker = getattr(tm, 'make' + tm_dtype + 'Index')
        s = Series(maker(1000))
        if htable == ht.Float64HashTable:
            # add NaN for float column
            s.loc[500] = np.nan
        elif htable == ht.PyObjectHashTable:
            # use different NaN types for object column
            s.loc[500:502] = [np.nan, None, pd.NaT]

        # create duplicated selection
        s_duplicated = s.sample(frac=3, replace=True).reset_index(drop=True)
        s_duplicated.values.setflags(write=writable)

        # drop_duplicates has own cython code (hash_table_func_helper.pxi)
        # and is tested separately; keeps first occurrence like ht.unique()
        expected_unique = s_duplicated.drop_duplicates(keep='first').values
        result_unique = htable().unique(s_duplicated.values)
        tm.assert_numpy_array_equal(result_unique, expected_unique)

    @pytest.mark.parametrize('htable, tm_dtype', [
        (ht.PyObjectHashTable, 'String'),
        (ht.StringHashTable, 'String'),
        (ht.Float64HashTable, 'Float'),
        (ht.Int64HashTable, 'Int'),
        (ht.UInt64HashTable, 'UInt')])
    def test_hashtable_factorize(self, htable, tm_dtype, writable):
        # output of maker has guaranteed unique elements
        maker = getattr(tm, 'make' + tm_dtype + 'Index')
        s = Series(maker(1000))
        if htable == ht.Float64HashTable:
            # add NaN for float column
            s.loc[500] = np.nan
        elif htable == ht.PyObjectHashTable:
            # use different NaN types for object column
            s.loc[500:502] = [np.nan, None, pd.NaT]

        # create duplicated selection
        s_duplicated = s.sample(frac=3, replace=True).reset_index(drop=True)
        s_duplicated.values.setflags(write=writable)
        na_mask = s_duplicated.isna().values

        result_inverse, result_unique = htable().factorize(s_duplicated.values)

        # drop_duplicates has own cython code (hash_table_func_helper.pxi)
        # and is tested separately; keeps first occurrence like ht.factorize()
        # since factorize removes all NaNs, we do the same here
        expected_unique = s_duplicated.dropna().drop_duplicates().values
        tm.assert_numpy_array_equal(result_unique, expected_unique)

        # reconstruction can only succeed if the inverse is correct. Since
        # factorize removes the NaNs, those have to be excluded here as well
        result_reconstruct = result_unique[result_inverse[~na_mask]]
        expected_reconstruct = s_duplicated.dropna().values
        tm.assert_numpy_array_equal(result_reconstruct, expected_reconstruct)
def test_quantile():
    """algos.quantile must agree whether given a Series or its ndarray."""
    data = Series(np.random.randn(100))
    probs = [0, .25, .5, .75, 1.]
    from_series = algos.quantile(data, probs)
    from_values = algos.quantile(data.values, probs)
    tm.assert_almost_equal(from_series, from_values)
def test_unique_label_indices():
    """unique_label_indices matches np.unique's first-occurrence indices,
    except that -1 labels (the NA sentinel) are dropped entirely."""
    labels = np.random.randint(1, 1 << 10, 1 << 15).astype('i8')

    # no sentinels present: identical to np.unique's return_index result
    got = ht.unique_label_indices(labels)
    want = np.unique(labels, return_index=True)[1]
    tm.assert_numpy_array_equal(got, want, check_dtype=False)

    # sprinkle in -1 sentinels: np.unique sorts them first, so slicing off
    # the leading entry gives the expected answer without the sentinel
    labels[np.random.choice(len(labels), 10)] = -1
    got = ht.unique_label_indices(labels)
    want = np.unique(labels, return_index=True)[1][1:]
    tm.assert_numpy_array_equal(got, want, check_dtype=False)
class TestRank(object):
    # Tests for the ranking helpers in algos / libalgos.

    @td.skip_if_no_scipy
    def test_scipy_compat(self):
        # rank_1d_float64 must agree with scipy.stats.rankdata, with
        # non-finite entries ranked last and reported as NaN
        from scipy.stats import rankdata

        def _check(arr):
            mask = ~np.isfinite(arr)
            arr = arr.copy()
            result = libalgos.rank_1d_float64(arr)
            # scipy reference: replace non-finite by +inf so they rank last,
            # then blank those positions out again
            arr[mask] = np.inf
            exp = rankdata(arr)
            exp[mask] = nan
            assert_almost_equal(result, exp)

        _check(np.array([nan, nan, 5., 5., 5., nan, 1, 2, 3, nan]))
        _check(np.array([4., nan, 5., 5., 5., nan, 1, 2, 4., nan]))

    def test_basic(self):
        # ranks are returned as float64 for every integer input dtype
        exp = np.array([1, 2], dtype=np.float64)

        for dtype in np.typecodes['AllInteger']:
            s = Series([1, 100], dtype=dtype)
            tm.assert_numpy_array_equal(algos.rank(s), exp)

    def test_uint64_overflow(self):
        # 2**63 (beyond int64) must still rank correctly
        exp = np.array([1, 2], dtype=np.float64)

        for dtype in [np.float64, np.uint64]:
            s = Series([1, 2**63], dtype=dtype)
            tm.assert_numpy_array_equal(algos.rank(s), exp)

    def test_too_many_ndims(self):
        # arrays of rank 3 or more are rejected with a clear message
        arr = np.array([[[1, 2, 3], [4, 5, 6], [7, 8, 9]]])
        msg = "Array with ndim > 2 are not supported"

        with tm.assert_raises_regex(TypeError, msg):
            algos.rank(arr)
def test_pad_backfill_object_segfault():
    """Regression check: pad/backfill on empty object arrays must not
    segfault, and must return the documented fill indexers."""
    empty = np.array([], dtype='O')
    nonempty = np.array([datetime(2010, 12, 31)], dtype='O')

    # pad: empty source -> all -1; empty target -> empty indexer
    result = libalgos.pad_object(empty, nonempty)
    tm.assert_numpy_array_equal(result, np.array([-1], dtype=np.int64))

    result = libalgos.pad_object(nonempty, empty)
    tm.assert_numpy_array_equal(result, np.array([], dtype=np.int64))

    # backfill behaves symmetrically
    result = libalgos.backfill_object(empty, nonempty)
    tm.assert_numpy_array_equal(result, np.array([-1], dtype=np.int64))

    result = libalgos.backfill_object(nonempty, empty)
    tm.assert_numpy_array_equal(result, np.array([], dtype=np.int64))
def test_arrmap():
    """arrmap_object applies a Python predicate elementwise -> bool array."""
    labels = np.array(['foo', 'foo', 'bar', 'bar', 'baz', 'qux'], dtype='O')
    mapped = libalgos.arrmap_object(labels, lambda value: value in ['foo', 'bar'])
    assert mapped.dtype == np.bool_
class TestTseriesUtil(object):
    # Tests for the reindexing fill helpers; several entries are
    # placeholders that have never been implemented.

    def test_combineFunc(self):
        pass

    def test_reindex(self):
        pass

    def test_isna(self):
        pass

    def test_groupby(self):
        pass

    def test_groupby_withnull(self):
        pass

    def test_backfill(self):
        # backfill maps each new label to the next old label >= it,
        # or -1 when there is none
        old = Index([1, 5, 10])
        new = Index(lrange(12))

        filler = libalgos.backfill_int64(old.values, new.values)

        expect_filler = np.array([0, 0, 1, 1, 1, 1,
                                  2, 2, 2, 2, 2, -1], dtype=np.int64)
        tm.assert_numpy_array_equal(filler, expect_filler)

        # corner case: all new labels above the old range -> all -1
        old = Index([1, 4])
        new = Index(lrange(5, 10))
        filler = libalgos.backfill_int64(old.values, new.values)

        expect_filler = np.array([-1, -1, -1, -1, -1], dtype=np.int64)
        tm.assert_numpy_array_equal(filler, expect_filler)

    def test_pad(self):
        # pad maps each new label to the previous old label <= it,
        # or -1 when there is none
        old = Index([1, 5, 10])
        new = Index(lrange(12))

        filler = libalgos.pad_int64(old.values, new.values)

        expect_filler = np.array([-1, 0, 0, 0, 0, 1,
                                  1, 1, 1, 1, 2, 2], dtype=np.int64)
        tm.assert_numpy_array_equal(filler, expect_filler)

        # corner case: all new labels below the old range -> all -1
        old = Index([5, 10])
        new = Index(lrange(5))
        filler = libalgos.pad_int64(old.values, new.values)
        expect_filler = np.array([-1, -1, -1, -1, -1], dtype=np.int64)
        tm.assert_numpy_array_equal(filler, expect_filler)
def test_is_lexsorted():
    # The first key is sorted (descending groups of 3s, 2s, 1s, 0s) but the
    # second key is descending within each group, so the pair of label
    # arrays is NOT lexsorted and is_lexsorted must return False.
    failure = [
        np.array([3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
                  3, 3,
                  3, 3,
                  3, 3, 3, 3, 3, 3, 3, 3, 2, 2, 2, 2, 2, 2, 2, 2,
                  2, 2, 2, 2, 2, 2, 2,
                  2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
                  1, 1, 1, 1, 1, 1, 1,
                  1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
                  1, 1, 1, 1, 1, 1, 1,
                  1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
                  0, 0, 0, 0, 0, 0, 0,
                  0, 0, 0, 0, 0, 0, 0, 0, 0], dtype='int64'),
        np.array([30, 29, 28, 27, 26, 25, 24, 23, 22, 21, 20, 19, 18, 17, 16,
                  15, 14,
                  13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0, 30, 29, 28,
                  27, 26, 25, 24, 23, 22, 21, 20, 19, 18, 17, 16, 15, 14, 13,
                  12, 11,
                  10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0, 30, 29, 28, 27, 26, 25,
                  24, 23, 22, 21, 20, 19, 18, 17, 16, 15, 14, 13, 12, 11, 10,
                  9, 8,
                  7, 6, 5, 4, 3, 2, 1, 0, 30, 29, 28, 27, 26, 25, 24, 23, 22,
                  21, 20, 19, 18, 17, 16, 15, 14, 13, 12, 11, 10, 9, 8, 7,
                  6, 5,
                  4, 3, 2, 1, 0], dtype='int64')]

    assert (not libalgos.is_lexsorted(failure))
def test_groupsort_indexer():
    """groupsort_indexer must act as a stable sort and emit int64 indexers."""
    a = np.random.randint(0, 1000, 100).astype(np.int64)
    b = np.random.randint(0, 1000, 100).astype(np.int64)

    # single key: equivalent to a stable argsort (mergesort); the cython
    # routine always returns int64 while np.argsort returns platform int
    got = libalgos.groupsort_indexer(a, 1000)[0]
    want = np.argsort(a, kind='mergesort').astype(np.int64)
    tm.assert_numpy_array_equal(got, want)

    # composite key: equivalent to np.lexsort over (b, a), again int64
    key = a * 1000 + b
    got = libalgos.groupsort_indexer(key, 1000000)[0]
    want = np.lexsort((b, a)).astype(np.int64)
    tm.assert_numpy_array_equal(got, want)
def test_infinity_sort():
    # GH 13445
    # numpy's argsort can be unhappy if something is less than
    # itself. Instead, let's give our infinities a self-consistent
    # ordering, but outside the float extended real line.
    Inf = libalgos.Infinity()
    NegInf = libalgos.NegInfinity()

    ref_nums = [NegInf, float("-inf"), -1e100, 0, 1e100, float("inf"), Inf]

    # Infinity is >= everything and strictly > everything but itself
    assert all(Inf >= x for x in ref_nums)
    assert all(Inf > x or x is Inf for x in ref_nums)
    assert Inf >= Inf and Inf == Inf
    assert not Inf < Inf and not Inf > Inf
    # equality holds across distinct instances
    assert libalgos.Infinity() == libalgos.Infinity()
    assert not libalgos.Infinity() != libalgos.Infinity()

    # NegInfinity mirrors the above at the bottom of the ordering
    assert all(NegInf <= x for x in ref_nums)
    assert all(NegInf < x or x is NegInf for x in ref_nums)
    assert NegInf <= NegInf and NegInf == NegInf
    assert not NegInf < NegInf and not NegInf > NegInf
    assert libalgos.NegInfinity() == libalgos.NegInfinity()
    assert not libalgos.NegInfinity() != libalgos.NegInfinity()

    # every permutation must sort back to the reference ordering
    for perm in permutations(ref_nums):
        assert sorted(perm) == ref_nums

    # smoke tests
    np.array([libalgos.Infinity()] * 32).argsort()
    np.array([libalgos.NegInfinity()] * 32).argsort()
def test_infinity_against_nan():
    """Comparisons of the Infinity sentinels with NaN follow IEEE NaN
    semantics: every ordered comparison and == is False, only != is True."""
    Inf = libalgos.Infinity()
    NegInf = libalgos.NegInfinity()

    for sentinel in (Inf, NegInf):
        assert not sentinel > np.nan
        assert not sentinel >= np.nan
        assert not sentinel < np.nan
        assert not sentinel <= np.nan
        assert not sentinel == np.nan
        assert sentinel != np.nan
def test_ensure_platform_int():
    """An array that is already platform-int must be returned as-is
    (identity, no copy)."""
    values = np.arange(100, dtype=np.intp)
    assert libalgos.ensure_platform_int(values) is values
def test_int64_add_overflow():
    # see gh-14068
    # checked_add_with_arr must raise OverflowError whenever the int64
    # addition would wrap, unless the offending positions are masked out.
    msg = "Overflow in int64 addition"
    m = np.iinfo(np.int64).max
    n = np.iinfo(np.int64).min

    # scalar and array operands overflowing at the top and bottom
    with tm.assert_raises_regex(OverflowError, msg):
        algos.checked_add_with_arr(np.array([m, m]), m)
    with tm.assert_raises_regex(OverflowError, msg):
        algos.checked_add_with_arr(np.array([m, m]), np.array([m, m]))
    with tm.assert_raises_regex(OverflowError, msg):
        algos.checked_add_with_arr(np.array([n, n]), n)
    with tm.assert_raises_regex(OverflowError, msg):
        algos.checked_add_with_arr(np.array([n, n]), np.array([n, n]))
    with tm.assert_raises_regex(OverflowError, msg):
        algos.checked_add_with_arr(np.array([m, n]), np.array([n, n]))
    # a mask on one position must not hide an overflow at the other
    with tm.assert_raises_regex(OverflowError, msg):
        algos.checked_add_with_arr(np.array([m, m]), np.array([m, m]),
                                   arr_mask=np.array([False, True]))
    with tm.assert_raises_regex(OverflowError, msg):
        algos.checked_add_with_arr(np.array([m, m]), np.array([m, m]),
                                   b_mask=np.array([False, True]))
    with tm.assert_raises_regex(OverflowError, msg):
        algos.checked_add_with_arr(np.array([m, m]), np.array([m, m]),
                                   arr_mask=np.array([False, True]),
                                   b_mask=np.array([False, True]))
    # NaN in an int context warns, then still overflows
    with tm.assert_raises_regex(OverflowError, msg):
        with tm.assert_produces_warning(RuntimeWarning):
            algos.checked_add_with_arr(np.array([m, m]),
                                       np.array([np.nan, m]))

    # Check that the nan boolean arrays override whether or not
    # the addition overflows. We don't check the result but just
    # the fact that an OverflowError is not raised.
    with pytest.raises(AssertionError):
        with tm.assert_raises_regex(OverflowError, msg):
            algos.checked_add_with_arr(np.array([m, m]), np.array([m, m]),
                                       arr_mask=np.array([True, True]))
    with pytest.raises(AssertionError):
        with tm.assert_raises_regex(OverflowError, msg):
            algos.checked_add_with_arr(np.array([m, m]), np.array([m, m]),
                                       b_mask=np.array([True, True]))
    with pytest.raises(AssertionError):
        with tm.assert_raises_regex(OverflowError, msg):
            algos.checked_add_with_arr(np.array([m, m]), np.array([m, m]),
                                       arr_mask=np.array([True, False]),
                                       b_mask=np.array([False, True]))
class TestMode(object):
    # Tests for algos.mode across dtypes; the result is always sorted and
    # may contain several values when they tie for the highest count.

    def test_no_mode(self):
        # empty input -> empty float64 Series
        exp = Series([], dtype=np.float64)
        tm.assert_series_equal(algos.mode([]), exp)

    def test_mode_single(self):
        # GH 15714: a single value (or a single repeated value) is its
        # own mode, preserving the input dtype
        exp_single = [1]
        data_single = [1]

        exp_multi = [1]
        data_multi = [1, 1]

        for dt in np.typecodes['AllInteger'] + np.typecodes['Float']:
            s = Series(data_single, dtype=dt)
            exp = Series(exp_single, dtype=dt)
            tm.assert_series_equal(algos.mode(s), exp)

            s = Series(data_multi, dtype=dt)
            exp = Series(exp_multi, dtype=dt)
            tm.assert_series_equal(algos.mode(s), exp)

        exp = Series([1], dtype=np.int)
        tm.assert_series_equal(algos.mode([1]), exp)

        # all-unique input: every value ties, so all are returned
        exp = Series(['a', 'b', 'c'], dtype=np.object)
        tm.assert_series_equal(algos.mode(['a', 'b', 'c']), exp)

    def test_number_mode(self):
        exp_single = [1]
        data_single = [1] * 5 + [2] * 3  # 1 wins outright

        exp_multi = [1, 3]
        data_multi = [1] * 5 + [2] * 3 + [3] * 5  # 1 and 3 tie at 5

        for dt in np.typecodes['AllInteger'] + np.typecodes['Float']:
            s = Series(data_single, dtype=dt)
            exp = Series(exp_single, dtype=dt)
            tm.assert_series_equal(algos.mode(s), exp)

            s = Series(data_multi, dtype=dt)
            exp = Series(exp_multi, dtype=dt)
            tm.assert_series_equal(algos.mode(s), exp)

    def test_strobj_mode(self):
        # bytes ('c') and str/object dtypes all support mode
        exp = ['b']
        data = ['a'] * 2 + ['b'] * 3

        s = Series(data, dtype='c')
        exp = Series(exp, dtype='c')
        tm.assert_series_equal(algos.mode(s), exp)

        exp = ['bar']
        data = ['foo'] * 2 + ['bar'] * 3

        for dt in [str, object]:
            s = Series(data, dtype=dt)
            exp = Series(exp, dtype=dt)
            tm.assert_series_equal(algos.mode(s), exp)

    def test_datelike_mode(self):
        # all-unique datetimes: everything is returned, sorted
        exp = Series(['1900-05-03', '2011-01-03',
                      '2013-01-02'], dtype="M8[ns]")
        s = Series(['2011-01-03', '2013-01-02',
                    '1900-05-03'], dtype='M8[ns]')
        tm.assert_series_equal(algos.mode(s), exp)

        # two dates tie at 2 occurrences
        exp = Series(['2011-01-03', '2013-01-02'], dtype='M8[ns]')
        s = Series(['2011-01-03', '2013-01-02', '1900-05-03',
                    '2011-01-03', '2013-01-02'], dtype='M8[ns]')
        tm.assert_series_equal(algos.mode(s), exp)

    def test_timedelta_mode(self):
        # all-unique timedeltas: everything is returned, sorted
        exp = Series(['-1 days', '0 days', '1 days'],
                     dtype='timedelta64[ns]')
        s = Series(['1 days', '-1 days', '0 days'],
                   dtype='timedelta64[ns]')
        tm.assert_series_equal(algos.mode(s), exp)

        exp = Series(['2 min', '1 day'], dtype='timedelta64[ns]')
        s = Series(['1 day', '1 day', '-1 day', '-1 day 2 min',
                    '2 min', '2 min'], dtype='timedelta64[ns]')
        tm.assert_series_equal(algos.mode(s), exp)

    def test_mixed_dtype(self):
        # mixed int/str input still finds the most frequent value
        exp = Series(['foo'])
        s = Series([1, 'foo', 'foo'])
        tm.assert_series_equal(algos.mode(s), exp)

    def test_uint64_overflow(self):
        # values beyond int64 range (2**63) must not overflow
        exp = Series([2**63], dtype=np.uint64)
        s = Series([1, 2**63, 2**63], dtype=np.uint64)
        tm.assert_series_equal(algos.mode(s), exp)

        exp = Series([1, 2**63], dtype=np.uint64)
        s = Series([1, 2**63], dtype=np.uint64)
        tm.assert_series_equal(algos.mode(s), exp)

    def test_categorical(self):
        # Categorical mode preserves the categories
        c = Categorical([1, 2])
        exp = c
        tm.assert_categorical_equal(algos.mode(c), exp)
        tm.assert_categorical_equal(c.mode(), exp)

        c = Categorical([1, 'a', 'a'])
        exp = Categorical(['a'], categories=[1, 'a'])
        tm.assert_categorical_equal(algos.mode(c), exp)
        tm.assert_categorical_equal(c.mode(), exp)

        c = Categorical([1, 1, 2, 3, 3])
        exp = Categorical([1, 3], categories=[1, 2, 3])
        tm.assert_categorical_equal(algos.mode(c), exp)
        tm.assert_categorical_equal(c.mode(), exp)

    def test_index(self):
        # mode of an Index is returned as a Series
        idx = Index([1, 2, 3])
        exp = Series([1, 2, 3], dtype=np.int64)
        tm.assert_series_equal(algos.mode(idx), exp)

        idx = Index([1, 'a', 'a'])
        exp = Series(['a'], dtype=object)
        tm.assert_series_equal(algos.mode(idx), exp)

        idx = Index([1, 1, 2, 3, 3])
        exp = Series([1, 3], dtype=np.int64)
        tm.assert_series_equal(algos.mode(idx), exp)

        exp = Series(['2 min', '1 day'], dtype='timedelta64[ns]')
        idx = Index(['1 day', '1 day', '-1 day', '-1 day 2 min',
                     '2 min', '2 min'], dtype='timedelta64[ns]')
        tm.assert_series_equal(algos.mode(idx), exp)
| bsd-3-clause |
klocey/ScalingMicroBiodiversity | fig-scripts/AppFigs/Fig1_Variants/DataSetComparison.py | 2 | 3727 | from __future__ import division
import os
import sys
import matplotlib.pyplot as plt
from matplotlib.pyplot import setp
mydir = os.path.expanduser("~/GitHub/MicrobialScaling/")
# function for setting the colors of the box plots pairs
# function for setting the colors of the box plots pairs
def setBoxColors(bp):
    """Color the first box of a boxplot pair blue and the second red.

    ``bp`` is the artist dict returned by ``plt.boxplot`` for a pair of
    boxes: ``boxes``/``medians`` hold one artist per box, while ``caps``
    and ``whiskers`` hold two per box. Modifies the artists in place.
    """
    # box i owns caps/whiskers at indices 2*i and 2*i + 1
    for i, color in enumerate(('blue', 'red')):
        setp(bp['boxes'][i], color=color)
        setp(bp['medians'][i], color=color)
        for j in (2 * i, 2 * i + 1):
            setp(bp['caps'][j], color=color)
            setp(bp['whiskers'][j], color=color)
    return
# Build one boxplot figure per diversity metric comparing microbial vs
# macrobial datasets (intercepts and exponents of the scaling fits).
datasets = []
metrics = ['rarity', 'dominance', 'evenness', 'richness']

#GoodNames = ['BIGN', 'SED', 'BOVINE','CHU', 'LAUB', 'CHINA', 'CATLIN', 'FUNGI', 'HUMAN', 'HYDRO', 'HMP', 'EMPopen', 'BBS', 'CBC', 'MCDB', 'GENTRY', 'FIA']
# whitelist of dataset short names to include in the comparison
GoodNames = ['BCLS', 'CHINA', 'CATLIN', 'HUMAN', 'FUNGI', 'HYDRO', 'EMPopen', 'HMP', 'BBS', 'CBC', 'MCDB', 'GENTRY', 'FIA']

for m in metrics:
    # per-kind accumulators: mean N, mean S, fit intercepts, fit exponents
    micNlist, micSlist, micIntList, micCoefList = [[], [], [], []]
    macNlist, macSlist, macIntList, macCoefList = [[], [], [], []]

    #IN = open(mydir + 'output/SummaryPerDataset_NoMicrobe1s.txt','r')
    IN = open(mydir + 'output/SummaryPerDataset.txt','r')
    for data in IN:
        # whitespace-separated columns: name kind metric avgN avgS Int Coef
        data = data.split()
        name, kind, metric, avgN, avgS, Int, Coef = data

        # skip datasets that are not on the whitelist
        if name in GoodNames: pass
        else: continue

        if metric == m and kind == 'micro':
            micNlist.append(float(avgN))
            micSlist.append(float(avgS))
            micIntList.append(float(Int))
            micCoefList.append(float(Coef))

        elif metric == m and kind == 'macro':
            macNlist.append(float(avgN))
            macSlist.append(float(avgS))
            macIntList.append(float(Int))
            macCoefList.append(float(Coef))
            #print name, avgN, avgS

    IN.close()

    fig = plt.figure()
    ax = plt.axes()
    #plt.hold(True)

    # first boxplot pair: intercepts (micro at x=1, macro at x=2)
    Ints = [micIntList, macIntList]
    bp = plt.boxplot(Ints, positions = [1, 2], widths = 0.6)
    setBoxColors(bp)

    # second boxplot pair: exponents (micro at x=4, macro at x=5)
    Coefs = [micCoefList, macCoefList]
    bp = plt.boxplot(Coefs, positions = [4, 5], widths = 0.6)
    setBoxColors(bp)

    # set axes limits and labels
    plt.xlim(0, 10)
    #plt.ylim(0, 9)
    #plt.yscale('log')
    ax.set_xticklabels(['Intercepts', 'Exponents'])#, 'avg N', 'avg S'])
    ax.set_xticks([1.5, 4.5])#, 7.5, 10.5])

    # draw temporary red and blue lines and use them to create a legend
    hB, = plt.plot([1,1],'b-')
    hR, = plt.plot([1,1],'r-')
    plt.legend((hB, hR),('Microbes', 'Macrobes'))

    if m == 'dominance':
        # NOTE(review): dashed reference line at y=1.0 -- presumably a
        # theoretical bound for the dominance exponent; confirm with authors
        ax.axhline(1.0, 0, 1., ls = '--', c = '0.3')

    plt.title(m)

    # hide the legend helper lines so they do not appear in the plot
    hB.set_visible(False)
    hR.set_visible(False)

    #plt.savefig(mydir+'/figs/appendix/DatasetComparison/'+m+'_NoMicrobeSingletons_ClosedRef.png', dpi=600, bbox_inches = "tight")
    #plt.savefig(mydir+'/figs/appendix/DatasetComparison/'+m+'_NoMicrobeSingletons_OpenRef.png', dpi=600, bbox_inches = "tight")
    #plt.savefig(mydir+'/figs/appendix/DatasetComparison/'+m+'_ClosedRef.png', dpi=600, bbox_inches = "tight")
    plt.savefig(mydir+'/figs/appendix/DatasetComparison/'+m+'_OpenRef.png', dpi=600, bbox_inches = "tight")

    plt.show()
| gpl-3.0 |
facom/AstrodynTools | tides/potential-contours.py | 1 | 1071 | #!/usr/bin/env python
#-*-coding:utf-8-*-
from constants import *
from numpy import *
from matplotlib.pyplot import *
###################################################
#UTILITIES
###################################################
# angle conversion factors (radians <-> degrees)
DEG = pi / 180
RAD = 180 / pi

def P2(psi):
    """Second-order Legendre polynomial evaluated at angle psi (radians)."""
    return (3 * cos(psi) ** 2 - 1) / 2
###################################################
#SCRIPT
###################################################
# Contour the gravitational potential of a homogeneous (optionally
# slightly flattened) body of radius C and density rho.
C = Rearth
rho = rhoearth
# flattening amplitude; multiplied by 0 here to plot the spherical case
# (set to 0.2 to see the distorted equipotentials)
eps2 = 0.2 * 0

#%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
#GRID
#%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
Nx = 100
X = linspace(-1.2 * C, 1.2 * C, Nx)  # was a hard-coded 100; keep tied to Nx
Ny = 100
Y = linspace(-1.2 * C, 1.2 * C, Ny)  # likewise tied to Ny

XM, YM = meshgrid(X, Y)
VM = zeros((Nx, Ny))
for i in range(Nx):  # range (not xrange) so the script also runs on Python 3
    x = X[i]
    for j in range(Ny):
        y = Y[j]
        # NOTE(review): arctan(y/x) would fail at x == 0, but the symmetric
        # even-sized grid never hits x == 0 exactly; and since P2 depends
        # only on cos^2(theta), the quadrant ambiguity of arctan is harmless.
        theta = arctan(y / x)
        r = sqrt(x ** 2 + y ** 2)
        if r < C:
            # interior potential: homogeneous-sphere term plus the
            # quadrupole correction scaled by eps2
            V = -4 * pi / 3 * C ** 3 * rho * Gconst * ((3 * C ** 2 - r ** 2) / (2 * C ** 3) + 3. / 5 * (r ** 2 / C ** 3) * eps2 * P2(theta))
        else:
            # exterior potential: point-mass term plus quadrupole correction
            V = -4 * pi / 3 * C ** 3 * rho * Gconst * (1 / r + 3. / 5 * C ** 2 / r ** 3 * eps2 * P2(theta))
        # note index order: rows of VM correspond to y, columns to x
        VM[j, i] = V

close("all")
figure(figsize=(8, 8))
contour(XM / C, YM / C, VM)
savefig("potential-contour.png")
| gpl-2.0 |
boomsbloom/dtm-fmri | DTM/for_gensim/lib/python2.7/site-packages/pandas/util/print_versions.py | 7 | 4898 | import os
import platform
import sys
import struct
import subprocess
import codecs
import locale
import importlib
def get_sys_info():
    """Return system information as a list of ``(label, value)`` tuples."""
    blob = []
    # get full commit hash; only attempted from inside a pandas git checkout
    commit = None
    if os.path.isdir(".git") and os.path.isdir("pandas"):
        try:
            pipe = subprocess.Popen('git log --format="%H" -n 1'.split(" "),
                                    stdout=subprocess.PIPE,
                                    stderr=subprocess.PIPE)
            so, serr = pipe.communicate()
        except (OSError, ValueError):
            # git missing or Popen misconfigured: report the commit as None
            # (narrowed from a bare ``except:`` that also swallowed
            # KeyboardInterrupt/SystemExit).
            pass
        else:
            if pipe.returncode == 0:
                commit = so
                try:
                    commit = so.decode('utf-8')
                except ValueError:
                    pass
                commit = commit.strip().strip('"')
    blob.append(('commit', commit))
    try:
        (sysname, nodename, release,
         version, machine, processor) = platform.uname()
        blob.extend([
            ("python", "%d.%d.%d.%s.%s" % sys.version_info[:]),
            ("python-bits", struct.calcsize("P") * 8),
            ("OS", "%s" % (sysname)),
            ("OS-release", "%s" % (release)),
            # ("Version", "%s" % (version)),
            ("machine", "%s" % (machine)),
            ("processor", "%s" % (processor)),
            ("byteorder", "%s" % sys.byteorder),
            ("LC_ALL", "%s" % os.environ.get('LC_ALL', "None")),
            ("LANG", "%s" % os.environ.get('LANG', "None")),
            ("LOCALE", "%s.%s" % locale.getlocale()),
        ])
    except Exception:
        # Platform/locale probing is best-effort; never fail version reporting.
        pass
    return blob
def show_versions(as_json=False):
    """Print versions of pandas and its dependencies.

    Parameters
    ----------
    as_json : bool or str, default False
        * False : print a plain-text report to stdout
        * True  : print the report as a JSON dict to stdout
        * str   : path of a file to write the JSON report to
    """
    sys_info = get_sys_info()
    deps = [
        # (MODULE_NAME, f(mod) -> mod version)
        ("pandas", lambda mod: mod.__version__),
        ("nose", lambda mod: mod.__version__),
        ("pip", lambda mod: mod.__version__),
        ("setuptools", lambda mod: mod.__version__),
        ("Cython", lambda mod: mod.__version__),
        ("numpy", lambda mod: mod.version.version),
        ("scipy", lambda mod: mod.version.version),
        ("statsmodels", lambda mod: mod.__version__),
        ("xarray", lambda mod: mod.__version__),
        ("IPython", lambda mod: mod.__version__),
        ("sphinx", lambda mod: mod.__version__),
        ("patsy", lambda mod: mod.__version__),
        ("dateutil", lambda mod: mod.__version__),
        ("pytz", lambda mod: mod.VERSION),
        ("blosc", lambda mod: mod.__version__),
        ("bottleneck", lambda mod: mod.__version__),
        ("tables", lambda mod: mod.__version__),
        ("numexpr", lambda mod: mod.__version__),
        ("matplotlib", lambda mod: mod.__version__),
        ("openpyxl", lambda mod: mod.__version__),
        ("xlrd", lambda mod: mod.__VERSION__),
        ("xlwt", lambda mod: mod.__VERSION__),
        ("xlsxwriter", lambda mod: mod.__version__),
        ("lxml", lambda mod: mod.etree.__version__),
        ("bs4", lambda mod: mod.__version__),
        ("html5lib", lambda mod: mod.__version__),
        ("httplib2", lambda mod: mod.__version__),
        ("apiclient", lambda mod: mod.__version__),
        ("sqlalchemy", lambda mod: mod.__version__),
        ("pymysql", lambda mod: mod.__version__),
        ("psycopg2", lambda mod: mod.__version__),
        ("jinja2", lambda mod: mod.__version__),
        ("boto", lambda mod: mod.__version__),
        ("pandas_datareader", lambda mod: mod.__version__)
    ]
    deps_blob = list()
    for (modname, ver_f) in deps:
        try:
            if modname in sys.modules:
                mod = sys.modules[modname]
            else:
                mod = importlib.import_module(modname)
            ver = ver_f(mod)
            deps_blob.append((modname, ver))
        except Exception:
            # Missing module or unexpected version attribute: report None.
            # (Narrowed from a bare ``except:``.)
            deps_blob.append((modname, None))
    if as_json:
        try:
            import json
        except ImportError:
            import simplejson as json
        j = dict(system=dict(sys_info), dependencies=dict(deps_blob))
        if as_json is True:
            print(j)
        else:
            # Any other truthy value is treated as an output file path.
            with codecs.open(as_json, "wb", encoding='utf8') as f:
                json.dump(j, f, indent=2)
    else:
        print("\nINSTALLED VERSIONS")
        print("------------------")
        for k, stat in sys_info:
            print("%s: %s" % (k, stat))
        print("")
        for k, stat in deps_blob:
            print("%s: %s" % (k, stat))
def main():
    """Command-line entry point: parse ``-j/--json`` and show versions."""
    from optparse import OptionParser
    cli = OptionParser()
    cli.add_option("-j", "--json", metavar="FILE", nargs=1,
                   help="Save output as JSON into file, pass in "
                        "'-' to output to stdout")
    opts, _ = cli.parse_args()
    # "-" means "JSON on stdout" rather than a file path.
    target = True if opts.json == "-" else opts.json
    show_versions(as_json=target)
    return 0
# Allow running this module as a script; the exit status comes from main().
if __name__ == "__main__":
    sys.exit(main())
| mit |
jdmcbr/blaze | blaze/expr/collections.py | 2 | 20040 | from __future__ import absolute_import, division, print_function
from functools import partial
from itertools import chain
import datashape
from datashape import (
DataShape, Option, Record, Unit, dshape, var, Fixed, Var, promote, object_,
)
from datashape.predicates import isscalar, iscollection, isrecord
from toolz import (
isdistinct, frequencies, concat as tconcat, unique, get, first, compose,
keymap,
)
import toolz.curried.operator as op
from odo.utils import copydoc
from .core import common_subexpression
from .expressions import Expr, ElemWise, label, Field
from .expressions import dshape_method_list
from ..compatibility import zip_longest, _strtypes
from ..utils import listpack
__all__ = ['Sort', 'Distinct', 'Head', 'Merge', 'IsIn', 'isin', 'distinct',
'merge', 'head', 'sort', 'Join', 'join', 'transform', 'Concat',
'concat', 'Tail', 'tail']
class Sort(Expr):
    """ Table in sorted order
    Examples
    --------
    >>> from blaze import symbol
    >>> accounts = symbol('accounts', 'var * {name: string, amount: int}')
    >>> accounts.sort('amount', ascending=False).schema
    dshape("{name: string, amount: int32}")
    Some backends support sorting by arbitrary rowwise tables, e.g.
    >>> accounts.sort(-accounts.amount) # doctest: +SKIP
    """
    __slots__ = '_hash', '_child', '_key', 'ascending'

    @property
    def dshape(self):
        # Sorting reorders rows; shape and measure are unchanged.
        return self._child.dshape

    @property
    def key(self):
        # Default to the first field when no key was given.  Uses equality
        # rather than the original ``self._key is ()``: identity against a
        # literal tuple only works because CPython interns the empty tuple
        # (and emits SyntaxWarning on Python >= 3.8).
        if self._key == () or self._key is None:
            return self._child.fields[0]
        if isinstance(self._key, tuple):
            return list(self._key)
        else:
            return self._key

    def _len(self):
        return self._child._len()

    @property
    def _name(self):
        return self._child._name

    def __str__(self):
        return "%s.sort(%s, ascending=%s)" % (self._child, repr(self._key),
                                              self.ascending)
def sort(child, key=None, ascending=True):
    """Sort a collection.

    Parameters
    ----------
    key : str, list of str, or Expr
        What to sort by: a single field name (``t.sort('amount')``), a
        list of field names (``t.sort(['name', 'amount'])``), or an
        expression (``t.sort(-t.amount)``).
    ascending : bool, optional
        Sort in increasing order when True (the default).
    """
    # Non-record measures (plain vectors) sort by the values themselves.
    if not isrecord(child.dshape.measure):
        key = None
    # Normalize lists to tuples so the expression node stays hashable.
    normalized = tuple(key) if isinstance(key, list) else key
    return Sort(child, normalized, ascending)
class Distinct(Expr):
    """ Remove duplicate elements from an expression
    Parameters
    ----------
    on : tuple of :class:`~blaze.expr.expressions.Field`
        The subset of fields or names of fields to be distinct on.
    Examples
    --------
    >>> from blaze import symbol
    >>> t = symbol('t', 'var * {name: string, amount: int, id: int}')
    >>> e = distinct(t)
    >>> data = [('Alice', 100, 1),
    ...         ('Bob', 200, 2),
    ...         ('Alice', 100, 1)]
    >>> from blaze.compute.python import compute
    >>> sorted(compute(e, data))
    [('Alice', 100, 1), ('Bob', 200, 2)]
    Use a subset by passing `on`:
    >>> import pandas as pd
    >>> e = distinct(t, 'name')
    >>> data = pd.DataFrame([['Alice', 100, 1],
    ...                      ['Alice', 200, 2],
    ...                      ['Bob', 100, 1],
    ...                      ['Bob', 200, 2]],
    ...                     columns=['name', 'amount', 'id'])
    >>> compute(e, data)
        name  amount  id
    0  Alice     100   1
    1    Bob     100   1
    """
    __slots__ = '_hash', '_child', 'on'

    @property
    def dshape(self):
        # Deduplication makes the row count unknowable: var * child measure.
        return datashape.var * self._child.dshape.measure

    @property
    def fields(self):
        return self._child.fields

    @property
    def _name(self):
        return self._child._name

    def __str__(self):
        # Renders as distinct(child) or distinct(child, f1, f2, ...).
        return 'distinct({child}{on})'.format(
            child=self._child,
            on=(', ' if self.on else '') + ', '.join(map(str, self.on))
        )
@copydoc(Distinct)
def distinct(expr, *on):
    # Validate every "on" entry, converting Field expressions to their names.
    valid_fields = frozenset(expr.fields)
    names = []
    for item in on:
        if isinstance(item, Field):
            if item._child.isidentical(expr):
                item = item._name
            else:
                raise ValueError('{0} is not a field of {1}'.format(item,
                                                                    expr))
        if not isinstance(item, _strtypes):
            raise TypeError('on must be a name or field, not: {0}'.format(item))
        elif item not in valid_fields:
            raise ValueError('{0} is not a field of {1}'.format(item, expr))
        names.append(item)
    return Distinct(expr, tuple(names))
class _HeadOrTail(Expr):
    # Shared implementation for Head and Tail: fixes the leading dimension to
    # n and forwards length/naming to the child expression.
    __slots__ = '_hash', '_child', 'n'

    @property
    def dshape(self):
        # The first dimension becomes the fixed prefix/suffix length n.
        return self.n * self._child.dshape.subshape[0]

    def _len(self):
        # The child may hold fewer than n elements.
        return min(self._child._len(), self.n)

    @property
    def _name(self):
        return self._child._name

    def __str__(self):
        # Renders as e.g. "t.head(5)" / "t.tail(5)" from the subclass name.
        return '%s.%s(%d)' % (self._child, type(self).__name__.lower(), self.n)
class Head(_HeadOrTail):
    """ First `n` elements of collection
    Examples
    --------
    >>> from blaze import symbol
    >>> accounts = symbol('accounts', 'var * {name: string, amount: int}')
    >>> accounts.head(5).dshape
    dshape("5 * {name: string, amount: int32}")
    See Also
    --------
    blaze.expr.collections.Tail
    """
    # All behavior lives in _HeadOrTail; the subclass only supplies the name.
    pass
@copydoc(Head)
def head(child, n=10):
    # Functional form of Head; the docstring is copied from the class.
    return Head(child, n)
class Tail(_HeadOrTail):
    """ Last `n` elements of collection
    Examples
    --------
    >>> from blaze import symbol
    >>> accounts = symbol('accounts', 'var * {name: string, amount: int}')
    >>> accounts.tail(5).dshape
    dshape("5 * {name: string, amount: int32}")
    See Also
    --------
    blaze.expr.collections.Head
    """
    # All behavior lives in _HeadOrTail; the subclass only supplies the name.
    pass
@copydoc(Tail)
def tail(child, n=10):
    # Functional form of Tail; the docstring is copied from the class.
    return Tail(child, n)
def transform(t, replace=True, **kwargs):
    """Add named columns to a table.

    >>> from blaze import symbol
    >>> t = symbol('t', 'var * {x: int, y: int}')
    >>> transform(t, z=t.x + t.y).fields
    ['x', 'y', 'z']
    """
    # When replacing, drop existing columns that share a name with the new
    # ones so the merge below cannot raise on duplicate fields.
    if replace and set(t.fields).intersection(set(kwargs)):
        keep = [c for c in t.fields if c not in kwargs]
        t = t[keep]
    labeled = [v.label(k) for k, v in sorted(kwargs.items(), key=first)]
    return merge(t, *labeled)
def schema_concat(exprs):
    """Concatenate schemas together, supporting both Records and Units.

    For Unit schemas, the field name is taken from ``expr._name``.
    """
    names, values = [], []
    for expr in exprs:
        measure = expr.schema[0]
        # Unwrap Option so ?int32 contributes int32 to the merged record.
        if isinstance(measure, Option):
            measure = measure.ty
        if isinstance(measure, Record):
            names.extend(measure.names)
            values.extend(measure.types)
        elif isinstance(measure, Unit):
            names.append(expr._name)
            values.append(measure)
        else:
            raise TypeError("All schemas must have Record or Unit shape."
                            "\nGot %s" % expr.schema[0])
    return dshape(Record(list(zip(names, values))))
class Merge(ElemWise):
    """ Merge many fields together
    Examples
    --------
    >>> from blaze import symbol
    >>> accounts = symbol('accounts', 'var * {name: string, x: int, y: real}')
    >>> merge(accounts.name, z=accounts.x + accounts.y).fields
    ['name', 'z']
    """
    __slots__ = '_hash', '_child', 'children'

    @property
    def schema(self):
        # The resulting record is the concatenation of all child schemas.
        return schema_concat(self.children)

    @property
    def fields(self):
        return list(tconcat(child.fields for child in self.children))

    def _subterms(self):
        # Yield self followed by every subterm of every child.
        yield self
        for i in self.children:
            for node in i._subterms():
                yield node

    def _get_field(self, key):
        # A scalar child *is* the named field; record children are indexed.
        for child in self.children:
            if key in child.fields:
                if isscalar(child.dshape.measure):
                    return child
                else:
                    return child[key]

    def _project(self, key):
        if not isinstance(key, (tuple, list)):
            raise TypeError("Expected tuple or list, got %s" % key)
        return merge(*[self[c] for c in key])

    def _leaves(self):
        return list(unique(tconcat(i._leaves() for i in self.children)))
@copydoc(Merge)
def merge(*exprs, **kwargs):
    # A single input needs no Merge node: return it directly (labeled with
    # its keyword name when it came from kwargs).
    if len(exprs) + len(kwargs) == 1:
        if exprs:
            return exprs[0]
        if kwargs:
            [(k, v)] = kwargs.items()
            return v.label(k)
    # Get common sub expression
    exprs += tuple(label(v, k) for k, v in sorted(kwargs.items(), key=first))
    try:
        child = common_subexpression(*exprs)
    except Exception:
        raise ValueError("No common subexpression found for input expressions")
    result = Merge(child, exprs)
    # Merged fields must be unique; report every duplicated name at once.
    if not isdistinct(result.fields):
        raise ValueError(
            "Repeated columns found: " + ', '.join(
                k for k, v in frequencies(result.fields).items() if v > 1
            ),
        )
    return result
def unpack(l):
    """Return the sole element of a length-1 tuple/list/set, else ``l``.

    >>> unpack('hello')
    'hello'
    >>> unpack(['hello'])
    'hello'
    """
    if not isinstance(l, (tuple, list, set)):
        return l
    if len(l) != 1:
        return l
    return next(iter(l))
class Join(Expr):
    """ Join two tables on common columns
    Parameters
    ----------
    lhs, rhs : Expr
        Expressions to join
    on_left : str, optional
        The fields from the left side to join on.
        If no ``on_right`` is passed, then these are the fields for both
        sides.
    on_right : str, optional
        The fields from the right side to join on.
    how : {'inner', 'outer', 'left', 'right'}
        What type of join to perform.
    suffixes: pair of str
        The suffixes to be applied to the left and right sides
        in order to resolve duplicate field names.
    Examples
    --------
    >>> from blaze import symbol
    >>> names = symbol('names', 'var * {name: string, id: int}')
    >>> amounts = symbol('amounts', 'var * {amount: int, id: int}')
    Join tables based on shared column name
    >>> joined = join(names, amounts, 'id')
    Join based on different column names
    >>> amounts = symbol('amounts', 'var * {amount: int, acctNumber: int}')
    >>> joined = join(names, amounts, 'id', 'acctNumber')
    See Also
    --------
    blaze.expr.collections.Merge
    """
    __slots__ = (
        '_hash', 'lhs', 'rhs', '_on_left', '_on_right', 'how', 'suffixes',
    )
    __inputs__ = 'lhs', 'rhs'

    @property
    def on_left(self):
        # Join keys are stored as tuples for hashability; exposed as lists.
        on_left = self._on_left
        if isinstance(on_left, tuple):
            return list(on_left)
        return on_left

    @property
    def on_right(self):
        on_right = self._on_right
        if isinstance(on_right, tuple):
            return list(on_right)
        return on_right

    @property
    def schema(self):
        """
        Examples
        --------
        >>> from blaze import symbol
        >>> t = symbol('t', 'var * {name: string, amount: int}')
        >>> s = symbol('t', 'var * {name: string, id: int}')
        >>> join(t, s).schema
        dshape("{name: string, amount: int32, id: int32}")
        >>> join(t, s, how='left').schema
        dshape("{name: string, amount: int32, id: ?int32}")
        Overlapping but non-joined fields append _left, _right
        >>> a = symbol('a', 'var * {x: int, y: int}')
        >>> b = symbol('b', 'var * {x: int, y: int}')
        >>> join(a, b, 'x').fields
        ['x', 'y_left', 'y_right']
        """
        option = lambda dt: dt if isinstance(dt, Option) else Option(dt)
        # Normalize join keys to sequences of names.
        on_left = self.on_left
        if not isinstance(on_left, list):
            on_left = on_left,
        on_right = self.on_right
        if not isinstance(on_right, list):
            on_right = on_right,
        # Types of the right-hand join columns, re-keyed by the left names.
        right_types = keymap(
            dict(zip(on_right, on_left)).get,
            self.rhs.dshape.measure.dict,
        )
        # Joined columns take the promoted common type (no Option widening).
        joined = (
            (name, promote(dt, right_types[name], promote_option=False))
            for n, (name, dt) in enumerate(filter(
                compose(op.contains(on_left), first),
                self.lhs.dshape.measure.fields,
            ))
        )
        # Non-key columns carried over from each side.
        left = [
            (name, dt) for name, dt in zip(
                self.lhs.fields,
                types_of_fields(self.lhs.fields, self.lhs)
            ) if name not in on_left
        ]
        right = [
            (name, dt) for name, dt in zip(
                self.rhs.fields,
                types_of_fields(self.rhs.fields, self.rhs)
            ) if name not in on_right
        ]
        # Handle overlapping but non-joined case, e.g.
        left_other = set(name for name, dt in left if name not in on_left)
        right_other = set(name for name, dt in right if name not in on_right)
        overlap = left_other & right_other
        left_suffix, right_suffix = self.suffixes
        left = ((name + left_suffix if name in overlap else name, dt)
                for name, dt in left)
        right = ((name + right_suffix if name in overlap else name, dt)
                 for name, dt in right)
        # Outer-ish joins can produce missing values on the non-driving side.
        if self.how in ('right', 'outer'):
            left = ((name, option(dt)) for name, dt in left)
        if self.how in ('left', 'outer'):
            right = ((name, option(dt)) for name, dt in right)
        return dshape(Record(chain(joined, left, right)))

    @property
    def dshape(self):
        # TODO: think if this can be generalized
        return var * self.schema
def types_of_fields(fields, expr):
    """ Get the types of fields in an expression
    Examples
    --------
    >>> from blaze import symbol
    >>> expr = symbol('e', 'var * {x: int64, y: float32}')
    >>> types_of_fields('y', expr)
    ctype("float32")
    >>> types_of_fields(['y', 'x'], expr)
    (ctype("float32"), ctype("int64"))
    >>> types_of_fields('x', expr.x)
    ctype("int64")
    """
    if isinstance(expr.dshape.measure, Record):
        return get(fields, expr.dshape.measure)
    else:
        # Non-record measure: expr must itself be the single requested field.
        if isinstance(fields, (tuple, list, set)):
            assert len(fields) == 1
            fields, = fields
        assert fields == expr._name
        return expr.dshape.measure
@copydoc(Join)
def join(lhs, rhs, on_left=None, on_right=None,
         how='inner', suffixes=('_left', '_right')):
    # Default: join on all shared field names, in lhs field order.
    if not on_left and not on_right:
        on_left = on_right = unpack(list(sorted(
            set(lhs.fields) & set(rhs.fields),
            key=lhs.fields.index)))
    if not on_right:
        on_right = on_left
    if isinstance(on_left, tuple):
        on_left = list(on_left)
    if isinstance(on_right, tuple):
        on_right = list(on_right)
    if not on_left or not on_right:
        raise ValueError(
            "Can not Join. No shared columns between %s and %s" % (lhs, rhs),
        )
    # Each pair of join columns must promote to a common (non-object) type.
    left_types = listpack(types_of_fields(on_left, lhs))
    right_types = listpack(types_of_fields(on_right, rhs))
    if len(left_types) != len(right_types):
        raise ValueError(
            'Length of on_left=%d not equal to length of on_right=%d' % (
                len(left_types), len(right_types),
            ),
        )
    for n, promotion in enumerate(map(partial(promote, promote_option=False),
                                      left_types,
                                      right_types)):
        if promotion == object_:
            raise TypeError(
                'Schemata of joining columns do not match,'
                ' no promotion found for %s=%s and %s=%s' % (
                    on_left[n], left_types[n], on_right[n], right_types[n],
                ),
            )
    # Store join keys as tuples so the Join node stays hashable.
    _on_left = tuple(on_left) if isinstance(on_left, list) else on_left
    _on_right = (tuple(on_right) if isinstance(on_right, list)
                 else on_right)
    how = how.lower()
    if how not in ('inner', 'outer', 'left', 'right'):
        raise ValueError("How parameter should be one of "
                         "\n\tinner, outer, left, right."
                         "\nGot: %s" % how)
    return Join(lhs, rhs, _on_left, _on_right, how, suffixes)
class Concat(Expr):
    """ Stack tables on common columns
    Parameters
    ----------
    lhs, rhs : Expr
        Collections to concatenate
    axis : int, optional
        The axis to concatenate on.
    Examples
    --------
    >>> from blaze import symbol
    Vertically stack tables:
    >>> names = symbol('names', '5 * {name: string, id: int32}')
    >>> more_names = symbol('more_names', '7 * {name: string, id: int32}')
    >>> stacked = concat(names, more_names)
    >>> stacked.dshape
    dshape("12 * {name: string, id: int32}")
    Vertically stack matrices:
    >>> mat_a = symbol('a', '3 * 5 * int32')
    >>> mat_b = symbol('b', '3 * 5 * int32')
    >>> vstacked = concat(mat_a, mat_b, axis=0)
    >>> vstacked.dshape
    dshape("6 * 5 * int32")
    Horizontally stack matrices:
    >>> hstacked = concat(mat_a, mat_b, axis=1)
    >>> hstacked.dshape
    dshape("3 * 10 * int32")
    See Also
    --------
    blaze.expr.collections.Merge
    """
    __slots__ = '_hash', 'lhs', 'rhs', 'axis'
    __inputs__ = 'lhs', 'rhs'

    @property
    def dshape(self):
        # The concatenation axis gets the sum of the two lengths; every other
        # dimension and the measure come from the lhs (both sides were
        # validated as equal in `concat` before this node was built).
        axis = self.axis
        ldshape = self.lhs.dshape
        lshape = ldshape.shape
        return DataShape(
            *(lshape[:axis] + (
                _shape_add(lshape[axis], self.rhs.dshape.shape[axis]),
            ) + lshape[axis + 1:] + (ldshape.measure,))
        )
def _shape_add(a, b):
    """Add two datashape dimensions, yielding ``var`` if either is variable."""
    if any(isinstance(dim, Var) for dim in (a, b)):
        return var
    return Fixed(a.val + b.val)
@copydoc(Concat)
def concat(lhs, rhs, axis=0):
    ldshape = lhs.dshape
    rdshape = rhs.dshape
    # Element types must match exactly for stacking to make sense.
    if ldshape.measure != rdshape.measure:
        raise TypeError(
            'Mismatched measures: {l} != {r}'.format(
                l=ldshape.measure, r=rdshape.measure
            ),
        )
    # Every dimension other than the concatenation axis must agree.
    lshape = ldshape.shape
    rshape = rdshape.shape
    for n, (a, b) in enumerate(zip_longest(lshape, rshape, fillvalue=None)):
        if n != axis and a != b:
            raise TypeError(
                'Shapes are not equal along axis {n}: {a} != {b}'.format(
                    n=n, a=a, b=b,
                ),
            )
    if axis < 0 or 0 < len(lshape) <= axis:
        raise ValueError(
            "Invalid axis '{a}', must be in range: [0, {n})".format(
                a=axis, n=len(lshape)
            ),
        )
    return Concat(lhs, rhs, axis)
class IsIn(ElemWise):
    """Check if an expression contains values from a set.
    Return a boolean expression indicating whether another expression
    contains values that are members of a collection.
    Parameters
    ----------
    expr : Expr
        Expression whose elements to check for membership in `keys`
    keys : Sequence
        Elements to test against. Blaze stores this as a ``frozenset``.
    Examples
    --------
    Check if a vector contains any of 1, 2 or 3:
    >>> from blaze import symbol
    >>> t = symbol('t', '10 * int64')
    >>> expr = t.isin([1, 2, 3])
    >>> expr.dshape
    dshape("10 * bool")
    """
    __slots__ = '_hash', '_child', '_keys'

    @property
    def schema(self):
        # Elementwise membership test: one bool per element.
        return datashape.bool_

    def __str__(self):
        return '%s.%s(%s)' % (self._child, type(self).__name__.lower(),
                              self._keys)
@copydoc(IsIn)
def isin(expr, keys):
    # keys must be a concrete iterable so it can be frozen (hashable) into
    # the expression tree; expressions are explicitly rejected.
    if isinstance(keys, Expr):
        raise TypeError('keys argument cannot be an expression, '
                        'it must be an iterable object such as a list, '
                        'tuple or set')
    return IsIn(expr, frozenset(keys))
# Expose these operations as methods on expressions whose dshape satisfies
# the paired predicate (e.g. t.sort(...)/t.head(...) on collections,
# t.distinct() on 1-d data, t.isin(...) on 1-d scalar data).
dshape_method_list.extend([
    (iscollection, set([sort, head, tail])),
    (lambda ds: len(ds.shape) == 1, set([distinct])),
    (lambda ds: len(ds.shape) == 1 and isscalar(ds.measure), set([isin])),
])
| bsd-3-clause |
jimgoo/zipline-fork | zipline/protocol.py | 3 | 17052 | #
# Copyright 2013 Quantopian, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from copy import copy
from six import iteritems, iterkeys
import pandas as pd
import numpy as np
from . utils.protocol_utils import Enum
from . utils.math_utils import nanstd, nanmean, nansum
from zipline.utils.algo_instance import get_algo_instance
from zipline.utils.serialization_utils import (
VERSION_LABEL
)
# Datasource type should completely determine the other fields of a
# message with its type.
# NOTE(review): Enum comes from utils.protocol_utils; presumably it maps
# these names to distinct constant values -- confirm before relying on
# specific numeric values.
DATASOURCE_TYPE = Enum(
    'AS_TRADED_EQUITY',
    'MERGER',
    'SPLIT',
    'DIVIDEND',
    'TRADE',
    'TRANSACTION',
    'ORDER',
    'EMPTY',
    'DONE',
    'CUSTOM',
    'BENCHMARK',
    'COMMISSION',
    'CLOSE_POSITION'
)
# Expected fields/index values for a dividend Series.
DIVIDEND_FIELDS = [
    'declared_date',
    'ex_date',
    'gross_amount',
    'net_amount',
    'pay_date',
    'payment_sid',
    'ratio',
    'sid',
]
# Expected fields/index values for a dividend payment Series.
DIVIDEND_PAYMENT_FIELDS = [
    'id',
    'payment_sid',
    'cash_amount',
    'share_count',
]
def dividend_payment(data=None):
    """
    Take a dictionary whose values are in DIVIDEND_PAYMENT_FIELDS and return a
    series representing the payment of a dividend.
    Ids are assigned to each historical dividend in
    PerformanceTracker.update_dividends. They are guaranteed to be unique
    integers with the context of a single simulation. If @data is non-empty, a
    id is required to identify the historical dividend associated with this
    payment.
    Additionally, if @data is non-empty, either data['cash_amount'] should be
    nonzero or data['payment_sid'] should be an asset identifier and
    data['share_count'] should be nonzero.
    The returned Series is given its id value as a name so that concatenating
    payments results in a DataFrame indexed by id. (Note, however, that the
    name value is not used to construct an index when this series is returned
    by function passed to `DataFrame.apply`. In such a case, pandas preserves
    the index of the DataFrame on which `apply` is being called.)
    """
    return pd.Series(
        data=data,
        # Named by payment id so concatenated payments form an id-indexed
        # DataFrame (see docstring above).
        name=data['id'] if data is not None else None,
        index=DIVIDEND_PAYMENT_FIELDS,
        dtype=object,
    )
class Event(object):
    """Attribute bag with dict-style access.

    Item access (``event['x']``) delegates to attribute access
    (``event.x``), and two events compare equal exactly when their
    attribute dictionaries are equal.
    """

    def __init__(self, initial_values=None):
        # NOTE: the supplied mapping is adopted by reference (not copied),
        # preserving the historical aliasing behavior.
        if initial_values:
            self.__dict__ = initial_values

    def __getitem__(self, name):
        return getattr(self, name)

    def __setitem__(self, name, value):
        setattr(self, name, value)

    def __delitem__(self, name):
        delattr(self, name)

    def __contains__(self, name):
        return name in self.__dict__

    def __eq__(self, other):
        return hasattr(other, '__dict__') and self.__dict__ == other.__dict__

    def __repr__(self):
        return "Event({0})".format(self.__dict__)

    def keys(self):
        return self.__dict__.keys()

    def to_series(self, index=None):
        """Convert the event's fields to a pandas Series."""
        return pd.Series(self.__dict__, index=index)
class Order(Event):
    # Orders carry the same payload semantics as generic events; the subclass
    # exists so order objects can be distinguished by type.
    pass
class Portfolio(object):
    # Snapshot of algorithm-wide portfolio state; mutated in place by the
    # simulation as it advances.
    def __init__(self):
        self.capital_used = 0.0
        self.starting_cash = 0.0
        self.portfolio_value = 0.0
        self.pnl = 0.0
        self.returns = 0.0
        self.cash = 0.0
        self.positions = Positions()
        self.start_date = None
        self.positions_value = 0.0

    def __getitem__(self, key):
        # Dict-style read access to any attribute.
        return self.__dict__[key]

    def __repr__(self):
        return "Portfolio({0})".format(self.__dict__)

    def __getstate__(self):
        state_dict = copy(self.__dict__)
        # Have to convert to primitive dict
        state_dict['positions'] = dict(self.positions)
        STATE_VERSION = 1
        state_dict[VERSION_LABEL] = STATE_VERSION
        return state_dict

    def __setstate__(self, state):
        OLDEST_SUPPORTED_STATE = 1
        version = state.pop(VERSION_LABEL)
        if version < OLDEST_SUPPORTED_STATE:
            raise BaseException("Portfolio saved state is too old.")
        # Rebuild the auto-vivifying Positions container from the plain dict.
        self.positions = Positions()
        self.positions.update(state.pop('positions'))
        self.__dict__.update(state)
class Account(object):
    """Tracks information about the trading account.

    The values are updated as the algorithm runs and the set of keys stays
    fixed.  When connected to a broker, these fields can be overwritten
    with the account values the broker reports.
    """

    def __init__(self):
        # All fields start at conservative defaults; float('inf') marks
        # quantities that are unlimited until real values are reported.
        self.__dict__.update(
            settled_cash=0.0,
            accrued_interest=0.0,
            buying_power=float('inf'),
            equity_with_loan=0.0,
            total_positions_value=0.0,
            regt_equity=0.0,
            regt_margin=float('inf'),
            initial_margin_requirement=0.0,
            maintenance_margin_requirement=0.0,
            available_funds=0.0,
            excess_liquidity=0.0,
            cushion=0.0,
            day_trades_remaining=float('inf'),
            leverage=0.0,
            net_leverage=0.0,
            net_liquidation=0.0,
        )

    def __getitem__(self, key):
        return self.__dict__[key]

    def __repr__(self):
        return "Account({0})".format(self.__dict__)

    def __getstate__(self):
        # Snapshot attributes and tag with the serialization format version.
        state_dict = copy(self.__dict__)
        state_dict[VERSION_LABEL] = 1
        return state_dict

    def __setstate__(self, state):
        version = state.pop(VERSION_LABEL)
        if version < 1:
            raise BaseException("Account saved state is too old.")
        self.__dict__.update(state)
class Position(object):
    """Mutable record of a single holding, keyed by asset id (sid)."""

    def __init__(self, sid):
        self.sid = sid
        self.amount = 0
        self.cost_basis = 0.0  # per share
        self.last_sale_price = 0.0

    def __getitem__(self, key):
        return self.__dict__[key]

    def __repr__(self):
        return "Position({0})".format(self.__dict__)

    def __getstate__(self):
        # Snapshot the attribute dict and stamp it with the format version.
        state = dict(self.__dict__)
        state[VERSION_LABEL] = 1
        return state

    def __setstate__(self, state):
        version = state.pop(VERSION_LABEL)
        if version < 1:
            raise BaseException("Protocol Position saved state is too old.")
        self.__dict__.update(state)
class Positions(dict):
    # Mapping of sid -> Position that auto-creates an empty Position on the
    # first access of an unknown sid (via the dict __missing__ hook).
    def __missing__(self, key):
        pos = Position(key)
        self[key] = pos
        return pos
class SIDData(object):
    # Cache some data on the class so that this is shared for all instances of
    # siddata.

    # The dt where we cached the history.
    _history_cache_dt = None
    # _history_cache is a dict mapping fields to pd.DataFrames. This is the
    # most data we have for a given field for the _history_cache_dt.
    _history_cache = {}

    # This is the cache that is used for returns. This will have a different
    # structure than the other history cache as this is always daily.
    _returns_cache_dt = None
    _returns_cache = None

    # The last dt that we needed to cache the number of minutes.
    _minute_bar_cache_dt = None
    # If we are in minute mode, there is some cost associated with computing
    # the number of minutes that we need to pass to the bar count of history.
    # This will remain constant for a given bar and day count.
    # This maps days to number of minutes.
    _minute_bar_cache = {}

    def __init__(self, sid, initial_values=None):
        self._sid = sid
        self._freqstr = None
        # To check if we have data, we use the __len__ which depends on the
        # __dict__. Because we are forward defining the attributes needed, we
        # need to account for their entries in the __dict__.
        # We will add 1 because we need to account for the _initial_len entry
        # itself.
        self._initial_len = len(self.__dict__) + 1
        if initial_values:
            self.__dict__.update(initial_values)

    @property
    def datetime(self):
        """
        Provides an alias from data['foo'].datetime -> data['foo'].dt
        `datetime` was previously provided by adding a separate `datetime`
        member of the SIDData object via a generator that wrapped the incoming
        data feed and added the field to each equity event.
        This alias is intended to be temporary, to provide backwards
        compatibility with existing algorithms, but should be considered
        deprecated, and may be removed in the future.
        """
        return self.dt

    def get(self, name, default=None):
        # dict.get-style lookup over the event fields.
        return self.__dict__.get(name, default)

    def __getitem__(self, name):
        return self.__dict__[name]

    def __setitem__(self, name, value):
        self.__dict__[name] = value

    def __len__(self):
        # Number of data fields set beyond the bookkeeping attributes
        # created in __init__ (see _initial_len above).
        return len(self.__dict__) - self._initial_len

    def __contains__(self, name):
        return name in self.__dict__

    def __repr__(self):
        return "SIDData({0})".format(self.__dict__)

    def _get_buffer(self, bars, field='price', raw=False):
        """
        Gets the result of history for the given number of bars and field.
        This will cache the results internally.
        """
        cls = self.__class__
        algo = get_algo_instance()
        now = algo.datetime
        if now != cls._history_cache_dt:
            # For a given dt, the history call for this field will not change.
            # We have a new dt, so we should reset the cache.
            cls._history_cache_dt = now
            cls._history_cache = {}
        if field not in self._history_cache \
                or bars > len(cls._history_cache[field][0].index):
            # If we have never cached this field OR the amount of bars that we
            # need for this field is greater than the amount we have cached,
            # then we need to get more history.
            hst = algo.history(
                bars, self._freqstr, field, ffill=True,
            )
            # Assert that the column holds ints, not security objects.
            if not isinstance(self._sid, str):
                hst.columns = hst.columns.astype(int)
            self._history_cache[field] = (hst, hst.values, hst.columns)
        # Slice of only the bars needed. This is because we store the LARGEST
        # amount of history for the field, and we might request less than the
        # largest from the cache.
        buffer_, values, columns = cls._history_cache[field]
        if raw:
            sid_index = columns.get_loc(self._sid)
            return values[-bars:, sid_index]
        else:
            return buffer_[self._sid][-bars:]

    def _get_bars(self, days):
        """
        Gets the number of bars needed for the current number of days.
        Figures this out based on the algo datafrequency and caches the result.
        This caches the result by replacing this function on the object.
        This means that after the first call to _get_bars, this method will
        point to a new function object.
        """
        def daily_get_max_bars(days):
            return days

        def minute_get_max_bars(days):
            # max number of minute. regardless of current days or short
            # sessions
            return days * 390

        def daily_get_bars(days):
            return days

        def minute_get_bars(days):
            cls = self.__class__
            now = get_algo_instance().datetime
            if now != cls._minute_bar_cache_dt:
                cls._minute_bar_cache_dt = now
                cls._minute_bar_cache = {}
            if days not in cls._minute_bar_cache:
                # Cache this calculation to happen once per bar, even if we
                # use another transform with the same number of days.
                env = get_algo_instance().trading_environment
                prev = env.previous_trading_day(now)
                ds = env.days_in_range(
                    env.add_trading_days(-days + 2, prev),
                    prev,
                )
                # compute the number of minutes in the (days - 1) days before
                # today.
                # 210 minutes in an early close and 390 in a full day.
                ms = sum(210 if d in env.early_closes else 390 for d in ds)
                # Add the number of minutes for today.
                ms += int(
                    (now - env.get_open_and_close(now)[0]).total_seconds() / 60
                )
                cls._minute_bar_cache[days] = ms + 1  # Account for this minute
            return cls._minute_bar_cache[days]

        if get_algo_instance().sim_params.data_frequency == 'daily':
            self._freqstr = '1d'
            # update this method to point to the daily variant.
            self._get_bars = daily_get_bars
            self._get_max_bars = daily_get_max_bars
        else:
            self._freqstr = '1m'
            # update this method to point to the minute variant.
            self._get_bars = minute_get_bars
            self._get_max_bars = minute_get_max_bars
        # Not actually recursive because we have already cached the new method.
        return self._get_bars(days)

    def mavg(self, days):
        # Simple moving average of price over the trailing `days` days.
        bars = self._get_bars(days)
        max_bars = self._get_max_bars(days)
        prices = self._get_buffer(max_bars, raw=True)[-bars:]
        return nanmean(prices)

    def stddev(self, days):
        # Sample standard deviation (ddof=1) of trailing prices.
        bars = self._get_bars(days)
        max_bars = self._get_max_bars(days)
        prices = self._get_buffer(max_bars, raw=True)[-bars:]
        return nanstd(prices, ddof=1)

    def vwap(self, days):
        # Volume-weighted average price; NaN when total volume is zero.
        bars = self._get_bars(days)
        max_bars = self._get_max_bars(days)
        prices = self._get_buffer(max_bars, raw=True)[-bars:]
        vols = self._get_buffer(max_bars, field='volume', raw=True)[-bars:]
        vol_sum = nansum(vols)
        try:
            ret = nansum(prices * vols) / vol_sum
        except ZeroDivisionError:
            ret = np.nan
        return ret

    def returns(self):
        # Daily return from the previous close to the latest price, using a
        # per-instance (not class-level) two-day daily history cache.
        algo = get_algo_instance()
        now = algo.datetime
        if now != self._returns_cache_dt:
            self._returns_cache_dt = now
            self._returns_cache = algo.history(2, '1d', 'price', ffill=True)
        hst = self._returns_cache[self._sid]
        return (hst.iloc[-1] - hst.iloc[0]) / hst.iloc[0]
class BarData(object):
    """
    Holds the event data for all sids for a given dt.

    This is what is passed as `data` to the `handle_data` function.

    Note: Many methods are analogues of dictionary because of historical
    usage of what this replaced as a dictionary subclass.
    """

    def __init__(self, data=None):
        # Any falsy argument (None or an empty dict) gets a fresh dict.
        self._data = data or {}
        # Optional predicate used to hide sids from lookups and iteration.
        self._contains_override = None

    def __contains__(self, name):
        override = self._contains_override
        if override and not override(name):
            return False
        return name in self._data

    def has_key(self, name):
        """
        DEPRECATED: __contains__ is preferred, but this method is for
        compatibility with existing algorithms.
        """
        return name in self

    def __setitem__(self, name, value):
        self._data[name] = value

    def __getitem__(self, name):
        return self._data[name]

    def __delitem__(self, name):
        del self._data[name]

    def __iter__(self):
        for sid, data in iteritems(self._data):
            # Honour the contains override and skip empty payloads.
            if sid in self and len(data):
                yield sid

    def iterkeys(self):
        # Honour the contains override when listing sids.
        for sid in iterkeys(self._data):
            if sid in self:
                yield sid

    def keys(self):
        return list(self.iterkeys())

    def itervalues(self):
        for _sid, value in self.iteritems():
            yield value

    def values(self):
        return list(self.itervalues())

    def iteritems(self):
        for sid, value in iteritems(self._data):
            if sid in self:
                yield (sid, value)

    def items(self):
        return list(self.iteritems())

    def __len__(self):
        return len(self.keys())

    def __repr__(self):
        return '%s(%s)' % (type(self).__name__, self._data)
| apache-2.0 |
smueller18/solar-thermal-climate-system | consumer/machine-state-prediction/consumer.py | 1 | 3723 | #!/usr/bin/env python3
import os
import time
import logging.config
import pickle
import pandas as pd
from pca import PCAForPandas
import kafka_connector.avro_loop_consumer as avro_loop_consumer
from kafka_connector.avro_loop_consumer import AvroLoopConsumer
from tsfresh.feature_extraction import extract_features
from tsfresh.feature_extraction import settings
__author__ = u'Stephan Müller'
__copyright__ = u'2017, Stephan Müller'
__license__ = u'MIT'

# Absolute directory of this script; used to resolve the bundled model files.
__dirname__ = os.path.dirname(os.path.abspath(__file__))

# Connection settings, overridable via environment variables.
KAFKA_HOSTS = os.getenv("KAFKA_HOSTS", "kafka:9092")
SCHEMA_REGISTRY_URL = os.getenv("SCHEMA_REGISTRY_URL", "http://schema-registry:8082")
CONSUMER_GROUP = os.getenv("CONSUMER_GROUP", "postgres")
TOPIC_NAME = os.getenv("TOPIC_NAME", "prod.machine_learning.aggregations_10minutes")
# When set to 1, the per-message computation time is printed.
SHOW_CALCULATION_TIME = int(os.getenv("SHOW_CALCULATION_TIME", 0))
# Dict with column names
FC_PARAMETERS = os.getenv("FC_PARAMETERS", __dirname__ + "/data/fc-parameters.pkl")
# PCAForPandas object
PCA_MODEL = os.getenv("PCA_MODEL", __dirname__ + "/data/pca-model.pkl")
# ML model with predict function
ML_MODEL = os.getenv("ML_MODEL", __dirname__ + "/data/ml-model.pkl")
LOGGING_LEVEL = os.getenv("LOGGING_LEVEL", "INFO")

logging_format = "%(levelname)8s %(asctime)s %(name)s [%(filename)s:%(lineno)s - %(funcName)s() ] %(message)s"
logging.basicConfig(level=logging.getLevelName(LOGGING_LEVEL), format=logging_format)
logger = logging.getLogger('consumer')

# Load the three pickled artefacts once at start-up: feature-calculation
# parameters, the fitted PCA transformer, and the trained classifier.
with open(FC_PARAMETERS, 'rb') as f:
    fc_parameters = pickle.load(f)
with open(PCA_MODEL, 'rb') as f:
    pca_model = pickle.load(f)
with open(ML_MODEL, 'rb') as f:
    ml_model = pickle.load(f)
def handle_message(msg):
    """Classify one Kafka message and print the predicted machine state.

    Pipeline: melt the message payload into a long-format time series,
    extract tsfresh features, project them through the pre-loaded PCA model,
    and classify with the pre-loaded ML model.  Messages with a missing or
    non-dict key/value are logged and skipped.  Any unexpected error stops
    the consumer loop.
    """
    if msg.key() is None or type(msg.key()) is not dict:
        # NOTE(review): this warning also fires for non-dict (not just None) keys.
        logger.warning("Key is none. Ignoring message.")
        return
    elif msg.value() is None or type(msg.value()) is not dict:
        logger.warning("Value is none. Ignoring message.")
        return
    try:
        time_begin = time.time()
        # Wide dict-of-columns -> long (variable, value) frame; drop NaNs.
        timeseries = pd.melt(pd.DataFrame.from_dict(msg.value(), orient='index').transpose()).dropna()
        # tsfresh requires a grouping column; all rows form a single group.
        timeseries['group_id'] = 0
        if timeseries.isnull().sum().sum() > 0:
            logger.warning("at least one field of timeseries is null")
            return
        X = extract_features(timeseries, column_id='group_id', column_kind="variable", column_value="value",
                             kind_to_fc_parameters=settings.from_columns(fc_parameters))
        if X.isnull().sum().sum() > 0:
            logger.warning("at least one field of extracted features is null")
            return
        # German: "kritisch" = critical; single-sample prediction.
        kritisch = ml_model.predict(pca_model.transform(X))[0]
        time_end = time.time()
        # 'timestamp_end' is in epoch milliseconds; the prediction window is
        # the five minutes following the aggregation interval.
        start_prediction_interval = time.localtime(msg.key()['timestamp_end'] / 1000)
        end_prediction_interval = time.localtime(msg.key()['timestamp_end'] / 1000 + 60*5)
        print("Prediction for interval",
              time.strftime("%H:%M:%S", start_prediction_interval),
              "to",
              time.strftime("%H:%M:%S", end_prediction_interval),
              ":",
              "kritisch" if kritisch else "unkritisch"
              )
        if SHOW_CALCULATION_TIME == 1:
            print("time for calculation", round(time_end - time_begin, 5), "seconds")
    except Exception as e:
        # Fail hard on unexpected errors: log and stop consuming.
        logger.exception(e)
        consumer.stop()
# Build the consumer configuration: auto-commit offsets and start reading
# from the end of the topic (only new messages are processed).
config = avro_loop_consumer.default_config
config['enable.auto.commit'] = True
config['default.topic.config'] = dict()
config['default.topic.config']['auto.offset.reset'] = 'end'

# Blocks forever, dispatching each decoded Avro message to handle_message.
consumer = AvroLoopConsumer(KAFKA_HOSTS, SCHEMA_REGISTRY_URL, CONSUMER_GROUP, [TOPIC_NAME], config=config)
consumer.loop(lambda msg: handle_message(msg))
| mit |
iamshang1/Projects | Advanced_ML/Human_Activity_Recognition/LSTM/lstm_within_subject.py | 1 | 8787 | import numpy as np
import theano
import theano.tensor as T
import sys
import random
from sklearn.model_selection import train_test_split
sys.setrecursionlimit(10000)
class lstm(object):
    '''
    long short term memory network for classifying human activity
    from incremental summary statistics

    parameters:
      - binary: boolean (default False)
        use True if labels are for ambulatory/non-ambulatory
        use False if labels are for non-ambulatory/walking/running/upstairs/downstairs

    methods:
      - train(X_train, y_train)
        train lstm network
        parameters:
          - X_train: 2d numpy array
            training features output by record_fetcher_within_subject.py
          - y_train: 2d numpy array
            training labels output by record_fetcher_within_subject.py
        outputs:
          - training loss at given iteration
      - predict(X_test)
        predict label from test data
        parameters:
          - X_test: 2d numpy array
            testing features output by record_fetcher_within_subject.py
        outputs:
          - predicted labels for test features
    '''
    def __init__(self,binary=False):
        #layer 1 - lstm units
        # Input is a (timesteps x features) matrix; the [:109,:] slices below
        # imply 109 input features feeding 128 hidden units per gate.
        self.input = T.matrix()
        # W*: input-to-gate weights, U*: hidden-to-gate (recurrent) weights,
        # b*: gate biases; i/f/c/o = input, forget, candidate, output gates.
        self.Wi = theano.shared(self.ortho_weight(128)[:109,:],borrow=True)
        self.Wf = theano.shared(self.ortho_weight(128)[:109,:],borrow=True)
        self.Wc = theano.shared(self.ortho_weight(128)[:109,:],borrow=True)
        self.Wo = theano.shared(self.ortho_weight(128)[:109,:],borrow=True)
        self.Ui = theano.shared(self.ortho_weight(128),borrow=True)
        self.Uf = theano.shared(self.ortho_weight(128),borrow=True)
        self.Uc = theano.shared(self.ortho_weight(128),borrow=True)
        self.Uo = theano.shared(self.ortho_weight(128),borrow=True)
        self.bi = theano.shared(np.zeros((128,), dtype=theano.config.floatX),borrow=True)
        self.bf = theano.shared(np.zeros((128,), dtype=theano.config.floatX),borrow=True)
        self.bc = theano.shared(np.zeros((128,), dtype=theano.config.floatX),borrow=True)
        self.bo = theano.shared(np.zeros((128,), dtype=theano.config.floatX),borrow=True)
        # Initial cell state and hidden state (learned -- included in adam
        # updates below).
        self.C0 = theano.shared(np.zeros((128,), dtype=theano.config.floatX),borrow=True)
        self.h0 = theano.shared(np.zeros((128,), dtype=theano.config.floatX),borrow=True)
        #layer 2 - lstm units
        self.Wi2 = theano.shared(self.ortho_weight(128),borrow=True)
        self.Wf2 = theano.shared(self.ortho_weight(128),borrow=True)
        self.Wc2 = theano.shared(self.ortho_weight(128),borrow=True)
        self.Wo2 = theano.shared(self.ortho_weight(128),borrow=True)
        self.Ui2 = theano.shared(self.ortho_weight(128),borrow=True)
        self.Uf2 = theano.shared(self.ortho_weight(128),borrow=True)
        self.Uc2 = theano.shared(self.ortho_weight(128),borrow=True)
        self.Uo2 = theano.shared(self.ortho_weight(128),borrow=True)
        self.bi2 = theano.shared(np.zeros((128,), dtype=theano.config.floatX),borrow=True)
        self.bf2 = theano.shared(np.zeros((128,), dtype=theano.config.floatX),borrow=True)
        self.bc2 = theano.shared(np.zeros((128,), dtype=theano.config.floatX),borrow=True)
        self.bo2 = theano.shared(np.zeros((128,), dtype=theano.config.floatX),borrow=True)
        self.C02 = theano.shared(np.zeros((128,), dtype=theano.config.floatX),borrow=True)
        self.h02 = theano.shared(np.zeros((128,), dtype=theano.config.floatX),borrow=True)
        #layer 3 - softmax
        # Output dimension: 2 classes for binary labels, otherwise 5.
        if binary:
            self.W2 = theano.shared(self.ortho_weight(128)[:,:2],borrow=True)
            self.b2 = theano.shared(np.zeros((2,), dtype=theano.config.floatX),borrow=True)
        else:
            self.W2 = theano.shared(self.ortho_weight(128)[:,:5],borrow=True)
            self.b2 = theano.shared(np.zeros((5,), dtype=theano.config.floatX),borrow=True)
        self.target = T.matrix()
        #scan operation for lstm layer 1
        self.params1 = [self.Wi,self.Wf,self.Wc,self.Wo,self.Ui,self.Uf,self.Uc,self.Uo,self.bi,self.bf,self.bc,self.bo]
        [self.c1,self.h_output1],_ = theano.scan(fn=self.step,sequences=self.input,outputs_info=[self.C0,self.h0],non_sequences=self.params1)
        #scan operation for lstm layer 2
        # Layer 2 consumes layer 1's hidden-state sequence.
        self.params2 = [self.Wi2,self.Wf2,self.Wc2,self.Wo2,self.Ui2,self.Uf2,self.Uc2,self.Uo2,self.bi2,self.bf2,self.bc2,self.bo2]
        [self.c2,self.h_output2],_ = theano.scan(fn=self.step,sequences=self.h_output1,outputs_info=[self.C02,self.h02],non_sequences=self.params2)
        #final softmax, final output is average of output of last 4 timesteps
        self.output = T.nnet.softmax(T.dot(self.h_output2,self.W2)+self.b2)[-4:,:]
        self.output = T.mean(self.output,0,keepdims=True)
        #cost, updates, train, and predict
        self.cost = T.nnet.categorical_crossentropy(self.output,self.target).mean()
        self.updates = self.adam(self.cost,self.params1+self.params2+[self.h0,self.C0,self.h02,self.C02,self.W2,self.b2])
        self.train = theano.function([self.input,self.target],self.cost,updates=self.updates,allow_input_downcast=True)
        self.predict = theano.function([self.input],self.output,allow_input_downcast=True)
    def step(self,input,h0,C0,Wi,Wf,Wc,Wo,Ui,Uf,Uc,Uo,bi,bf,bc,bo):
        '''
        step function for lstm unit

        One timestep of the standard LSTM recurrence: given the current
        input and previous (h0, C0), returns the new cell state c and
        hidden state h.
        '''
        i = T.nnet.sigmoid(T.dot(input,Wi)+T.dot(h0,Ui)+bi)
        cand = T.tanh(T.dot(input,Wc)+T.dot(h0,Uc)+bc)
        f = T.nnet.sigmoid(T.dot(input,Wf)+T.dot(h0,Uf)+bf)
        # New cell state: gated candidate plus gated previous cell state.
        c = cand*i+C0*f
        o = T.nnet.sigmoid(T.dot(input,Wo)+T.dot(h0,Uo)+bo)
        h = o*T.tanh(c)
        return c,h
    def ortho_weight(self,ndim):
        '''
        orthogonal weight initialization for lstm layers

        Returns the left singular vectors of a random Gaussian matrix,
        giving an orthonormal (ndim x ndim) matrix.
        '''
        bound = np.sqrt(1./ndim)
        W = np.random.randn(ndim, ndim)*bound
        u, s, v = np.linalg.svd(W)
        return u.astype(theano.config.floatX)
    def adam(self, cost, params, lr=0.0002, b1=0.1, b2=0.01, e=1e-8):
        '''
        adaptive moment estimation gradient descent

        NOTE(review): b1/b2 here are the complements of the usual Adam
        betas (decay is applied as (1 - b1), (1 - b2)) -- confirm against
        the Kingma & Ba formulation before changing defaults.
        '''
        updates = []
        grads = T.grad(cost, params)
        # Shared timestep counter used for bias correction.
        self.i = theano.shared(np.float32(0.))
        i_t = self.i + 1.
        fix1 = 1. - (1. - b1)**i_t
        fix2 = 1. - (1. - b2)**i_t
        lr_t = lr * (T.sqrt(fix2) / fix1)
        for p, g in zip(params, grads):
            # Per-parameter first (m) and second (v) moment accumulators.
            self.m = theano.shared(p.get_value() * 0.)
            self.v = theano.shared(p.get_value() * 0.)
            m_t = (b1 * g) + ((1. - b1) * self.m)
            v_t = (b2 * T.sqr(g)) + ((1. - b2) * self.v)
            g_t = m_t / (T.sqrt(v_t) + e)
            p_t = p - (lr_t * g_t)
            updates.append((self.m, m_t))
            updates.append((self.v, v_t))
            updates.append((p, p_t))
        updates.append((self.i, i_t))
        return updates
#load data
# X: (samples x timesteps x features) array, y: one-hot labels (Python 2 script).
X = np.load('X.npy')
y = np.load('y.npy')
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.1)
# verify the required arguments are given
if (len(sys.argv) < 2):
    print 'Usage: python lstm_within_subject.py <1 for 2-category labels, 0 for 5-category labels>'
    exit(1)
if sys.argv[1] == '1':
    binary = True
elif sys.argv[1] == '0':
    binary = False
else:
    print 'Usage: python lstm_within_subject.py <1 for 2-category labels, 0 for 5-category labels>'
    exit(1)
#separate training data by label
# Grouping by class lets the training loop below sample classes uniformly
# regardless of class imbalance.
X_train_split = []
y_train_split = []
if binary:
    for i in range(2):
        idx = y_train[:,i] == 1
        X_train_split.append(X_train[idx])
        y_train_split.append(y_train[idx])
else:
    for i in range(5):
        idx = y_train[:,i] == 1
        X_train_split.append(X_train[idx])
        y_train_split.append(y_train[idx])
combined_split = zip(X_train_split,y_train_split)
#train
NN = lstm(binary=binary)
for i in range(1000000):
    #select a random training label
    idx = random.choice(combined_split)
    X_in = idx[0]
    y_in = idx[1]
    #select random training sample with that label
    idx = np.random.randint(y_in.shape[0])
    X_in = X_in[idx,:,:]
    y_in = np.expand_dims(y_in[idx,:],0)
    #train on random sample
    cost = NN.train(X_in,y_in)
    # Trailing comma + \r: overwrite the same console line each iteration.
    print "step %i training error: %.4f \r" % (i+1, cost),
    #predict every 10000 iterations
    if (i+1) % 10000 == 0:
        correct = 0
        #predict each entry in test set
        for j in range(y_test.shape[0]):
            print "predicting %i of %i in test set \r" % (j+1, y_test.shape[0]),
            pred = NN.predict(X_test[j,:,:])
            if np.argmax(pred[0]) == np.argmax(y_test[j,:]):
                correct += 1
print "step %i test accuracy: %.4f " % (i+1,float(correct)/y_test.shape[0]*100) | mit |
kwilliams-mo/iris | lib/iris/tests/test_trajectory.py | 2 | 8439 | # (C) British Crown Copyright 2010 - 2013, Met Office
#
# This file is part of Iris.
#
# Iris is free software: you can redistribute it and/or modify it under
# the terms of the GNU Lesser General Public License as published by the
# Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Iris is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with Iris. If not, see <http://www.gnu.org/licenses/>.
# import iris tests first so that some things can be initialised before importing anything else
import iris.tests as tests
import matplotlib.pyplot as plt
import numpy as np
import iris.analysis.trajectory
from iris.fileformats.manager import DataManager
import iris.tests.stock
class TestSimple(tests.IrisTest):
    """Argument-validation checks for trajectory interpolation."""
    def test_invalid_coord(self):
        # Requesting interpolation along a coordinate the cube does not
        # have must raise ValueError.
        source_cube = iris.tests.stock.realistic_4d()
        points = [('altitude', [0, 10, 50])]
        with self.assertRaises(ValueError):
            iris.analysis.trajectory.interpolate(source_cube, points, 'nearest')
class TestTrajectory(tests.IrisTest):
    """End-to-end tests for iris.analysis.trajectory on real sample data."""

    def test_trajectory_definition(self):
        # basic 2-seg line along x
        waypoints = [ {'lat':0, 'lon':0}, {'lat':0, 'lon':1}, {'lat':0, 'lon':2} ]
        trajectory = iris.analysis.trajectory.Trajectory(waypoints, sample_count=21)
        self.assertEqual(trajectory.length, 2.0)
        # Exact float expected: 21 samples over length 2 -> spacing 0.1.
        self.assertEqual(trajectory.sampled_points[19], {'lat': 0.0, 'lon': 1.9000000000000001})
        # 4-seg m-shape
        waypoints = [ {'lat':0, 'lon':0}, {'lat':1, 'lon':1}, {'lat':0, 'lon':2}, {'lat':1, 'lon':3}, {'lat':0, 'lon':4} ]
        trajectory = iris.analysis.trajectory.Trajectory(waypoints, sample_count=33)
        # Four diagonal segments of length sqrt(2) each: 4*sqrt(2).
        self.assertEqual(trajectory.length, 5.6568542494923806)
        self.assertEqual(trajectory.sampled_points[31], {'lat': 0.12499999999999989, 'lon': 3.875})

    @iris.tests.skip_data
    def test_trajectory_extraction(self):
        # Load the COLPEX data => TZYX
        path = tests.get_data_path(['PP', 'COLPEX', 'theta_and_orog_subset.pp'])
        cube = iris.load_cube(path, 'air_potential_temperature')
        cube.coord('grid_latitude').bounds = None
        cube.coord('grid_longitude').bounds = None
        # TODO: Workaround until regrid can handle factories
        cube.remove_aux_factory(cube.aux_factories[0])
        cube.remove_coord('surface_altitude')
        self.assertCML(cube, ('trajectory', 'big_cube.cml'))
        # Pull out a single point
        single_point = iris.analysis.trajectory.interpolate(
            cube, [('grid_latitude', [-0.1188]),
                   ('grid_longitude', [359.57958984])])
        self.assertCML(single_point, ('trajectory', 'single_point.cml'))
        # Extract a simple, axis-aligned trajectory that is similar to an indexing operation.
        # (It's not exactly the same because the source cube doesn't have regular spacing.)
        waypoints = [
            {'grid_latitude': -0.1188, 'grid_longitude': 359.57958984},
            {'grid_latitude': -0.1188, 'grid_longitude': 359.66870117}
        ]
        trajectory = iris.analysis.trajectory.Trajectory(waypoints, sample_count=100)

        def traj_to_sample_points(trajectory):
            # Convert sampled dict-points into the [(name, values), ...]
            # form expected by trajectory.interpolate.
            sample_points = []
            src_points = trajectory.sampled_points
            for name in src_points[0].iterkeys():
                values = [point[name] for point in src_points]
                sample_points.append((name, values))
            return sample_points

        sample_points = traj_to_sample_points(trajectory)
        trajectory_cube = iris.analysis.trajectory.interpolate(cube,
                                                               sample_points)
        self.assertCML(trajectory_cube, ('trajectory', 'constant_latitude.cml'))
        # Sanity check the results against a simple slice
        plt.plot(cube[0, 0, 10, :].data)
        plt.plot(trajectory_cube[0, 0, :].data)
        self.check_graphic()
        # Extract a zig-zag trajectory
        waypoints = [
            {'grid_latitude': -0.1188, 'grid_longitude': 359.5886},
            {'grid_latitude': -0.0828, 'grid_longitude': 359.6606},
            {'grid_latitude': -0.0468, 'grid_longitude': 359.6246},
        ]
        trajectory = iris.analysis.trajectory.Trajectory(waypoints, sample_count=100)
        sample_points = traj_to_sample_points(trajectory)
        trajectory_cube = iris.analysis.trajectory.interpolate(cube,
                                                               sample_points)
        self.assertCML(trajectory_cube, ('trajectory', 'zigzag.cml'))
        # Sanity check the results against a simple slice
        x = cube.coord('grid_longitude').points
        y = cube.coord('grid_latitude').points
        plt.pcolormesh(x, y, cube[0, 0, :, :].data)
        x = trajectory_cube.coord('grid_longitude').points
        y = trajectory_cube.coord('grid_latitude').points
        plt.scatter(x, y, c=trajectory_cube[0, 0, :].data)
        self.check_graphic()

    @iris.tests.skip_data
    def test_tri_polar(self):
        # load data
        cubes = iris.load(tests.get_data_path(['NetCDF', 'ORCA2', 'votemper.nc']))
        cube = cubes[0]
        # The netCDF file has different data types for the points and
        # bounds of 'depth'. This wasn't previously supported, so we
        # emulate that old behaviour.
        cube.coord('depth').bounds = cube.coord('depth').bounds.astype(np.float32)
        # define a latitude trajectory (put coords in a different order to the cube, just to be awkward)
        latitudes = range(-90, 90, 2)
        longitudes = [-90]*len(latitudes)
        sample_points = [('longitude', longitudes), ('latitude', latitudes)]
        # extract
        sampled_cube = iris.analysis.trajectory.interpolate(cube, sample_points)
        self.assertCML(sampled_cube, ('trajectory', 'tri_polar_latitude_slice.cml'))
        # turn it upside down for the visualisation
        plot_cube = sampled_cube[0]
        plot_cube = plot_cube[::-1, :]
        plt.clf()
        plt.pcolormesh(plot_cube.data, vmin=cube.data.min(), vmax=cube.data.max())
        plt.colorbar()
        self.check_graphic()
        # Try to request linear interpolation.
        # Not allowed, as we have multi-dimensional coords.
        self.assertRaises(iris.exceptions.CoordinateMultiDimError, iris.analysis.trajectory.interpolate, cube, sample_points, method="linear")
        # Try to request unknown interpolation.
        self.assertRaises(ValueError, iris.analysis.trajectory.interpolate, cube, sample_points, method="linekar")

    def test_hybrid_height(self):
        cube = tests.stock.simple_4d_with_hybrid_height()
        # Put a data manager on the cube so that we can test deferred loading.
        cube._data_manager = ConcreteDataManager(cube.data)
        cube._data = np.empty([])
        traj = (('grid_latitude',[20.5, 21.5, 22.5, 23.5]),
                ('grid_longitude',[31, 32, 33, 34]))
        xsec = iris.analysis.trajectory.interpolate(cube, traj, method='nearest')
        # Check that creating the trajectory hasn't led to the original
        # data being loaded.
        self.assertIsNotNone(cube._data_manager)
        self.assertCML([cube, xsec], ('trajectory', 'hybrid_height.cml'))
class ConcreteDataManager(DataManager):
    """
    Implements the DataManager interface for a real array.

    Useful for testing. Obsolete with biggus.
    """
    def __init__(self, concrete_array, deferred_slices=()):
        DataManager.__init__(self, concrete_array.shape,
                             concrete_array.dtype,
                             mdi=None, deferred_slices=deferred_slices)
        # Attach the backing array directly; object.__setattr__ bypasses
        # any attribute interception on the manager.
        object.__setattr__(self, 'concrete_array', concrete_array)

    def load(self, proxy_array):
        # Apply the accumulated deferred indexing, then guarantee a
        # C-contiguous result (copying only when necessary).
        selected = self.concrete_array[self._deferred_slice_merge()]
        return selected if selected.flags['C_CONTIGUOUS'] else selected.copy()

    def new_data_manager(self, deferred_slices):
        # Managers are immutable: indexing yields a fresh one sharing the
        # same backing array.
        return ConcreteDataManager(self.concrete_array, deferred_slices)
# Run the test suite when this module is executed directly.
if __name__ == '__main__':
    tests.main()
| gpl-3.0 |
dhruvesh13/Audio-Genre-Classification | learn.py | 1 | 4259 | import sklearn
from sklearn import linear_model
from sklearn.neighbors import KNeighborsClassifier
from sklearn.cross_validation import train_test_split
from sklearn.metrics import confusion_matrix
from sklearn.metrics import accuracy_score
import matplotlib.pyplot as plt
import scipy
import os
import sys
import glob
import numpy as np
from utils1 import GENRE_DIR, GENRE_LIST
from sklearn.externals import joblib
from random import shuffle
"""reads FFT-files and prepares X_train and y_train.
genre_list must consist of names of folders/genres consisting of the required FFT-files
base_dir must contain genre_list of directories
"""
def read_fft(genre_list, base_dir):
    """Collect FFT feature vectors and integer labels from disk.

    For each genre (a sub-folder of *base_dir*), every ``*.fft.npy`` file is
    loaded as one feature vector; its label is the genre's index in
    *genre_list*.  Returns (X, y) as numpy arrays.
    """
    features = []
    labels = []
    for label, genre in enumerate(genre_list):
        # UNIX-style pattern matching all FFT files of this genre.
        pattern = os.path.join(base_dir, genre, "*.fft.npy")
        for path in glob.glob(pattern):
            features.append(np.load(path))
            labels.append(label)
    return np.array(features), np.array(labels)
"""reads MFCC-files and prepares X_train and y_train.
genre_list must consist of names of folders/genres consisting of the required MFCC-files
base_dir must contain genre_list of directories
"""
def read_ceps(genre_list, base_dir):
    """Collect MFCC-based feature vectors and integer labels from disk.

    For each genre (a sub-folder of *base_dir*), every ``*.ceps.npy`` file
    is reduced to the mean of its middle 80% of frames (the first and last
    10% are trimmed); its label is the genre's index in *genre_list*.
    Returns (X, y) as numpy arrays and prints their sizes.
    """
    features = []
    labels = []
    for label, genre in enumerate(genre_list):
        pattern = os.path.join(base_dir, genre, "*.ceps.npy")
        for path in glob.glob(pattern):
            ceps = np.load(path)
            frame_count = len(ceps)
            lo = int(frame_count * 1 / 10)
            hi = int(frame_count * 9 / 10)
            features.append(np.mean(ceps[lo:hi], axis=0))
            labels.append(label)
    X = np.array(features)
    print(X.shape)
    print(len(labels))
    return X, np.array(labels)
def learn_and_classify(X_train, y_train, X_test, y_test, genre_list):
    """Train, evaluate, persist, and plot two classifiers on the given split.

    Fits a logistic-regression and a k-nearest-neighbours classifier,
    prints their test accuracy and confusion matrices, dumps both fitted
    models under ``saved_models/`` (the directory must exist), and shows a
    confusion-matrix plot for each.
    """
    print(len(X_train))
    print(len(X_train[0]))
    #Logistic Regression classifier
    # NOTE(review): `linear_model.logistic` is a private sklearn module path;
    # the public spelling is `linear_model.LogisticRegression` -- confirm
    # against the installed sklearn version.
    logistic_classifier = linear_model.logistic.LogisticRegression()
    logistic_classifier.fit(X_train, y_train)
    logistic_predictions = logistic_classifier.predict(X_test)
    logistic_accuracy = accuracy_score(y_test, logistic_predictions)
    logistic_cm = confusion_matrix(y_test, logistic_predictions)
    print("logistic accuracy = " + str(logistic_accuracy))
    print("logistic_cm:")
    print(logistic_cm)
    #change the pickle file when using another classifier eg model_mfcc_fft
    joblib.dump(logistic_classifier, 'saved_models/model_mfcc_log.pkl')
    #K-Nearest neighbour classifier
    knn_classifier = KNeighborsClassifier()
    knn_classifier.fit(X_train, y_train)
    knn_predictions = knn_classifier.predict(X_test)
    knn_accuracy = accuracy_score(y_test, knn_predictions)
    knn_cm = confusion_matrix(y_test, knn_predictions)
    print("knn accuracy = " + str(knn_accuracy))
    print("knn_cm:")
    print(knn_cm)
    joblib.dump(knn_classifier, 'saved_models/model_mfcc_knn.pkl')
    # NOTE(review): the second title mentions "FFT" even when this function
    # is run on MFCC features -- possibly a leftover label.
    plot_confusion_matrix(logistic_cm, "Confusion matrix", genre_list)
    plot_confusion_matrix(knn_cm, "Confusion matrix for FFT classification", genre_list)
def plot_confusion_matrix(cm, title, genre_list, cmap=plt.cm.Blues):
    """Display confusion matrix *cm* as a heatmap labelled with genres."""
    positions = np.arange(len(genre_list))
    plt.imshow(cm, interpolation='nearest', cmap=cmap)
    plt.title(title)
    plt.colorbar()
    # One tick per genre on both axes; x labels rotated for readability.
    plt.xticks(positions, genre_list, rotation=45)
    plt.yticks(positions, genre_list)
    plt.tight_layout()
    plt.ylabel('True label')
    plt.xlabel('Predicted label')
    plt.show()
def main():
    """Load MFCC features for the configured genres, split, and classify."""
    base_dir_fft = GENRE_DIR
    base_dir_mfcc = GENRE_DIR
    """list of genres (these must be folder names consisting .wav of respective genre in the base_dir)
    Change list if needed.
    """
    genre_list = [ "blues","classical","country","disco","metal"]
    #genre_list = ["classical", "jazz"] IF YOU WANT TO CLASSIFY ONLY CLASSICAL AND JAZZ
    #use FFT
    # X, y = read_fft(genre_list, base_dir_fft)
    # X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = .20)
    # print('\n******USING FFT******')
    # learn_and_classify(X_train, y_train, X_test, y_test, genre_list)
    # print('*********************\n')
    #use MFCC
    # 80/20 train/test split on the MFCC-derived features.
    X,y= read_ceps(genre_list, base_dir_mfcc)
    X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = .20)
    print("new1",X_train.shape)
    print('******USING MFCC******')
    learn_and_classify(X_train, y_train, X_test, y_test, genre_list)
    print('*********************')
if __name__ == "__main__":
main() | mit |
ankurankan/scikit-learn | examples/decomposition/plot_faces_decomposition.py | 204 | 4452 | """
============================
Faces dataset decompositions
============================
This example applies to :ref:`olivetti_faces` different unsupervised
matrix decomposition (dimension reduction) methods from the module
:py:mod:`sklearn.decomposition` (see the documentation chapter
:ref:`decompositions`) .
"""
print(__doc__)
# Authors: Vlad Niculae, Alexandre Gramfort
# License: BSD 3 clause
import logging
from time import time
from numpy.random import RandomState
import matplotlib.pyplot as plt
from sklearn.datasets import fetch_olivetti_faces
from sklearn.cluster import MiniBatchKMeans
from sklearn import decomposition
# Display progress logs on stdout
logging.basicConfig(level=logging.INFO,
                    format='%(asctime)s %(levelname)s %(message)s')

# Gallery layout: 2 rows x 3 columns => 6 components per decomposition.
n_row, n_col = 2, 3
n_components = n_row * n_col
# Olivetti faces are 64x64 grayscale images.
image_shape = (64, 64)
# Fixed seed so shuffling and stochastic estimators are reproducible.
rng = RandomState(0)

###############################################################################
# Load faces data
dataset = fetch_olivetti_faces(shuffle=True, random_state=rng)
faces = dataset.data
n_samples, n_features = faces.shape

# global centering
faces_centered = faces - faces.mean(axis=0)

# local centering
# Subtract each image's own mean so per-face brightness is removed.
faces_centered -= faces_centered.mean(axis=1).reshape(n_samples, -1)

print("Dataset consists of %d faces" % n_samples)
###############################################################################
def plot_gallery(title, images, n_col=n_col, n_row=n_row):
    """Render *images* as an n_row x n_col gallery with shared title.

    Each image uses a symmetric gray colour scale centred on zero so that
    positive and negative component weights are visually comparable.
    """
    plt.figure(figsize=(2. * n_col, 2.26 * n_row))
    plt.suptitle(title, size=16)
    for index, component in enumerate(images):
        plt.subplot(n_row, n_col, index + 1)
        # Symmetric limits: the larger of |max| and |min|.
        limit = max(component.max(), -component.min())
        plt.imshow(component.reshape(image_shape), cmap=plt.cm.gray,
                   interpolation='nearest', vmin=-limit, vmax=limit)
        plt.xticks(())
        plt.yticks(())
    plt.subplots_adjust(0.01, 0.05, 0.99, 0.93, 0.04, 0.)
###############################################################################
# List of the different estimators, whether to center and transpose the
# problem, and whether the transformer uses the clustering API.
# Each entry is a (display name, configured estimator, use_centered_data)
# triple consumed by the loop below.
estimators = [
    ('Eigenfaces - RandomizedPCA',
     decomposition.RandomizedPCA(n_components=n_components, whiten=True),
     True),

    ('Non-negative components - NMF',
     decomposition.NMF(n_components=n_components, init='nndsvda', beta=5.0,
                       tol=5e-3, sparseness='components'),
     # NMF requires non-negative input, so it runs on the raw faces.
     False),

    ('Independent components - FastICA',
     decomposition.FastICA(n_components=n_components, whiten=True),
     True),

    ('Sparse comp. - MiniBatchSparsePCA',
     decomposition.MiniBatchSparsePCA(n_components=n_components, alpha=0.8,
                                      n_iter=100, batch_size=3,
                                      random_state=rng),
     True),

    ('MiniBatchDictionaryLearning',
     decomposition.MiniBatchDictionaryLearning(n_components=15, alpha=0.1,
                                               n_iter=50, batch_size=3,
                                               random_state=rng),
     True),

    ('Cluster centers - MiniBatchKMeans',
     MiniBatchKMeans(n_clusters=n_components, tol=1e-3, batch_size=20,
                     max_iter=50, random_state=rng),
     True),

    ('Factor Analysis components - FA',
     decomposition.FactorAnalysis(n_components=n_components, max_iter=2),
     True),
]
###############################################################################
# Plot a sample of the input data

plot_gallery("First centered Olivetti faces", faces_centered[:n_components])

###############################################################################
# Do the estimation and plot it

for name, estimator, center in estimators:
    print("Extracting the top %d %s..." % (n_components, name))
    t0 = time()
    # Pick raw or centered data depending on the estimator's requirements.
    data = faces
    if center:
        data = faces_centered
    estimator.fit(data)
    train_time = (time() - t0)
    print("done in %0.3fs" % train_time)
    # Clustering estimators expose cluster_centers_ instead of components_.
    if hasattr(estimator, 'cluster_centers_'):
        components_ = estimator.cluster_centers_
    else:
        components_ = estimator.components_
    # Factor Analysis additionally provides a per-pixel noise estimate.
    if hasattr(estimator, 'noise_variance_'):
        plot_gallery("Pixelwise variance",
                     estimator.noise_variance_.reshape(1, -1), n_col=1,
                     n_row=1)
    plot_gallery('%s - Train time %.1fs' % (name, train_time),
                 components_[:n_components])

plt.show()
| bsd-3-clause |
fzalkow/scikit-learn | sklearn/utils/tests/test_linear_assignment.py | 421 | 1349 | # Author: Brian M. Clapper, G Varoquaux
# License: BSD
import numpy as np
# XXX we should be testing the public API here
from sklearn.utils.linear_assignment_ import _hungarian
def test_hungarian():
    """The Hungarian algorithm finds the expected minimum-cost assignment
    for each matrix and for its transpose."""
    matrices = [
        # Square
        ([[400, 150, 400],
          [400, 450, 600],
          [300, 225, 300]],
         850  # expected cost
         ),

        # Rectangular variant
        ([[400, 150, 400, 1],
          [400, 450, 600, 2],
          [300, 225, 300, 3]],
         452  # expected cost
         ),

        # Square
        ([[10, 10, 8],
          [9, 8, 1],
          [9, 7, 4]],
         18
         ),

        # Rectangular variant
        ([[10, 10, 8, 11],
          [9, 8, 1, 1],
          [9, 7, 4, 10]],
         15
         ),

        # n == 2, m == 0 matrix
        ([[], []],
         0
         ),
    ]

    for cost_matrix, expected_total in matrices:
        cost_matrix = np.array(cost_matrix)
        # Total cost of the assignment on the matrix itself.
        direct_total = sum(cost_matrix[r, c]
                           for r, c in _hungarian(cost_matrix))
        assert expected_total == direct_total

        # Solving the transposed problem yields (col, row) pairs that
        # index back into the original matrix with the same total cost.
        transposed_total = sum(cost_matrix[r, c]
                               for c, r in _hungarian(cost_matrix.T))
        assert expected_total == transposed_total
| bsd-3-clause |
PeterRochford/SkillMetrics | Examples/taylor3.py | 1 | 4261 | '''
How to create a Taylor diagram with labeled data points and modified axes
A third example of how to create a Taylor diagram given one set of
reference observations and multiple model predictions for the quantity.
This example is a variation on the first example (taylor1) where now the
data points are labeled and axes properties are specified. The number format
is also specified for the RMS contour labels.
All functions in the Skill Metrics library are designed to only work with
one-dimensional arrays, e.g. time series of observations at a selected
location. The one-dimensional data are read in as dictionaries via a
pickle file: ref['data'], pred1['data'], pred2['data'],
and pred3['data']. The plot is written to a file in Portable Network
Graphics (PNG) format.
The reference data used in this example are cell concentrations of a
phytoplankton collected from cruise surveys at selected locations and
time. The model predictions are from three different simulations that
have been space-time interpolated to the location and time of the sample
collection. Details on the contents of the dictionary (once loaded) can
be obtained by simply executing the following two statements
>> key_to_value_lengths = {k:len(v) for k, v in ref.items()}
>> print key_to_value_lengths
{'units': 6, 'longitude': 57, 'jday': 57, 'date': 57, 'depth': 57,
'station': 57, 'time': 57, 'latitude': 57, 'data': 57}
Author: Peter A. Rochford
Symplectic, LLC
www.thesymplectic.com
Created on Dec 6, 2016
@author: prochford@thesymplectic.com
'''
import matplotlib.pyplot as plt
import numpy as np
import pickle
import skill_metrics as sm
from sys import version_info
def load_obj(name):
    """Load a pickled object from ``<name>.pkl`` (Python 2) or
    ``<name>.pkl3`` (Python 3)."""
    suffix = 'pkl' if version_info[0] == 2 else 'pkl3'
    with open('{0}.{1}'.format(name, suffix), 'rb') as handle:
        return pickle.load(handle)
class Container(object):
    """Simple record bundling three model predictions and one reference."""
    def __init__(self, pred1, pred2, pred3, ref):
        # Store each argument verbatim under the matching attribute name.
        for attr, value in zip(('pred1', 'pred2', 'pred3', 'ref'),
                               (pred1, pred2, pred3, ref)):
            setattr(self, attr, value)
if __name__ == '__main__':
    # Close any previously open graphics windows
    # ToDo: fails to work within Eclipse
    plt.close('all')

    # Read data from pickle file (dict-of-dicts produced by load_obj).
    data = load_obj('taylor_data')

    # Calculate statistics for Taylor diagram
    # The first array element (e.g. taylor_stats1[0]) corresponds to the
    # reference series while the second and subsequent elements
    # (e.g. taylor_stats1[1:]) are those for the predicted series.
    taylor_stats1 = sm.taylor_statistics(data.pred1,data.ref,'data')
    taylor_stats2 = sm.taylor_statistics(data.pred2,data.ref,'data')
    taylor_stats3 = sm.taylor_statistics(data.pred3,data.ref,'data')

    # Store statistics in arrays: index 0 is always the reference,
    # indices 1-3 are the three model predictions.
    sdev = np.array([taylor_stats1['sdev'][0], taylor_stats1['sdev'][1],
                     taylor_stats2['sdev'][1], taylor_stats3['sdev'][1]])
    crmsd = np.array([taylor_stats1['crmsd'][0], taylor_stats1['crmsd'][1],
                      taylor_stats2['crmsd'][1], taylor_stats3['crmsd'][1]])
    ccoef = np.array([taylor_stats1['ccoef'][0], taylor_stats1['ccoef'][1],
                      taylor_stats2['ccoef'][1], taylor_stats3['ccoef'][1]])

    # Specify labels for points in a cell array (M1 for model prediction 1,
    # etc.). Note that a label needs to be specified for the reference even
    # though it is not used.
    label = ['Non-Dimensional Observation', 'M1', 'M2', 'M3']

    '''
    Produce the Taylor diagram
    Label the points and change the axis options for SDEV, CRMSD, and CCOEF.
    For an exhaustive list of options to customize your diagram,
    please call the function at a Python command line:
    >> taylor_diagram
    '''
    # Denser correlation ticks near 1.0, where the diagram is most curved.
    intervalsCOR = np.concatenate((np.arange(0,1.0,0.2),
                                   [0.9, 0.95, 0.99, 1]))
    sm.taylor_diagram(sdev,crmsd,ccoef, markerLabel = label,
                      tickRMS = np.arange(0,60,20),
                      tickSTD = np.arange(0,55,5), tickCOR = intervalsCOR,
                      rmslabelformat = ':.1f')

    # Write plot to file
    plt.savefig('taylor3.png')

    # Show plot
    plt.show()
| gpl-3.0 |
axelmagn/metrics | ai-metrics/aimetrics/metrics.py | 1 | 7181 | import json
import numpy as np
from sklearn.cross_validation import (StratifiedShuffleSplit, StratifiedKFold,
KFold)
from sklearn.preprocessing import binarize, normalize
from sklearn.metrics import (accuracy_score, roc_curve, roc_auc_score,
f1_score, classification_report)
from tornado import gen
from tornado.httpclient import AsyncHTTPClient, HTTPError, HTTPClient
from urllib.parse import urljoin
from .conf import get_conf
from .estimator import RemoteBSTClassifier
_conf = get_conf()['aimetrics']['metrics']
@gen.coroutine
def fetch_data(base_url, client_id, project_id, **kwargs):
    """Fetch all labeled records from cloudant for a project

    Arguments
    ---------
    base_url : str
    client_id : str
    project_id : str
    **kwargs : **dict
        Any additional keyword arguments are passed to
        tornado.httpclient.AsyncHTTPClient.fetch. This is where
        authentication credentials can be specified.

    Returns
    -------
    dict with keys 'features' (input column names), 'classes' (output
    label names), 'X' (feature matrix) and 'y' (label matrix).
    """
    http_client = AsyncHTTPClient()
    # URL template and HTTP verb come from the package configuration.
    url_suffix = _conf['data']['url_suffix'].format(client_id=client_id,
                                                    project_id=project_id)
    url = urljoin(base_url, url_suffix)
    method = _conf['data']['method']
    response = yield http_client.fetch(url, method=method, **kwargs)
    data = json.loads(response.body.decode('utf-8'))
    # Column order is taken from the FIRST record only; assumes every
    # record shares the same 'input'/'output' schema -- TODO confirm
    # with the upstream service.
    features = data[0]['input'].keys()
    classes = data[0]['output'].keys()
    # print("DATA: " + response.body.decode('utf-8'))
    X = np.asarray([[row['input'][k] for k in features] for row in data])
    # Missing output labels default to 0 (negative class).
    y = np.asarray([[row['output'].get(k, 0) for k in classes] for row in data])
    return {
        "features": features,
        "classes": classes,
        "X": X,
        'y': y,
    }
@gen.coroutine
def remote_classifier_report(base_url, model_type, client_id, project_id,
                             model_params=None, auth_username=None,
                             auth_password=None, threshold=0.5,
                             destroy_model=True, save_model=False):
    """Evaluate model performances on a specific BST project dataset.

    Performs 5-fold cross-validation using all classified records from the
    provided client and project ID, and returns a list of evaluation metrics
    from each run.

    Parameters
    ----------
    base_url : str
        the base URL of the remote API.
    model_type : str
        The model type to use on the remote API. Refer to the bst.ai project
        for available options.
    client_id : str
        The client's BlackSage Tech ID
    project_id : str
        The client's project BlackSage Tech ID
    auth_username : str (default: None)
        The username to use for basic authentication.
    auth_password : str (default: None)
        The password to use for basic authentication.
    model_params : dict (default: {})
        Any model parameters for the remote classifier. Refer to the bst.ai
        project for available options.
    threshold : float (default: 0.5)
        The threshold at which to consider a probability prediction a positive
        classification for use in metrics which take binary input.
    destroy_model : boolean (default: True)
        If True, the trained remote model is destroyed after evaluation is
        complete.
    save_model : boolean (default: False)
        If true, a serialization of the model is attached to the output
        dictionary under the key `model`.
    """
    data = yield fetch_data(base_url, client_id, project_id,
                            auth_username=auth_username,
                            auth_password=auth_password)
    # NOTE(review): `normalize` is applied to the TARGET matrix y as well
    # as to X; row-normalizing a multilabel indicator target looks
    # suspicious -- confirm this is intended before trusting the scores.
    X, y = normalize(data['X']), normalize(data['y'])
    # import ipdb; ipdb.set_trace()  # DEBUG
    """
    tv_ind, test_ind = StratifiedShuffleSplit(y, 1, 0.2)[0]
    X_tv, X_test = X[tv_ind], X[test_ind]
    y_tv, y_test = y[tv_ind], y[test_ind]
    """
    #skf = StratifiedKFold(y, 5, True)
    # Legacy (pre-0.18) scikit-learn CV API: KFold(n, n_folds, shuffle).
    skf = KFold(y.shape[0], 5, True)
    cv_results = []
    for train_ind, test_ind in skf:
        X_train, X_test = X[train_ind], X[test_ind]
        y_train, y_test = y[train_ind], y[test_ind]
        result = yield remote_classifier_metrics(
            base_url, model_type, X_train,
            y_train, X_test, y_test, data['classes'],
            model_params=model_params, destroy_model=destroy_model,
            threshold=threshold, save_model=save_model)
        cv_results.append(result)
    return {"cross_validation": cv_results}
@gen.coroutine
def remote_classifier_metrics(base_url, model_type, X_train, y_train, X_test,
                              y_test, data_classes, model_params=None,
                              destroy_model=True, threshold=0.5,
                              save_model=False):
    """Train and evaluate a single model with the provided data.

    Parameters
    ----------
    base_url : str
        the base URL of the remote API.
    model_type : str
        The model type to use on the remote API. Refer to the bst.ai project
        for available options.
    X_train : np.ndarray
        Training feature vectors
    y_train : np.ndarray
        Training target vectors
    X_test : np.ndarray
        Testing feature vectors
    y_test : np.ndarray
        Testing target vectors
    data_classes : [str..]
        Class labels for y targets
    model_params : dict (default: {})
        Any model parameters for the remote classifier. Refer to the bst.ai
        project for available options.
    destroy_model : boolean (default: True)
        If True, the trained remote model is destroyed after evaluation is
        complete.
    threshold : float (default: 0.5)
        The threshold at which to consider a probability prediction a positive
        classification for use in metrics which take binary input.
    save_model : boolean (default: False)
        If true, a serialization of the model is attached to the output
        dictionary under the key `model`.

    Returns: A dictionary of evaluation metrics for the trained model.
    """
    # create a new classifier and object to store results
    clf = RemoteBSTClassifier(base_url, model_type, model_params=model_params)
    result = {}
    try:
        result['train_error'] = yield clf.async_fit(X_train, y_train)
        y_pred_proba = yield clf.async_predict_proba(X_test)
        if save_model:
            result['model'] = yield clf.get_model()
        # Turn probabilities into hard 0/1 predictions for the
        # threshold-based metrics below.
        y_pred = binarize(y_pred_proba, threshold)
        result['acc'] = accuracy_score(y_test, y_pred)
        result['f1_score'] = f1_score(y_test, y_pred)
        result['classification_report'] = classification_report(y_test, y_pred)
        roc = {}
        for i, label in enumerate(data_classes):
            y_test_i = y_test[:, i]
            # skip tests with no actual values
            if np.sum(y_test_i) == 0:
                continue
            fpr, tpr, thresh = roc_curve(y_test[:, i], y_pred_proba[:, i])
            roc[label] = {
                "fpr": list(fpr),
                "tpr": list(tpr),
                "threshold": list(thresh),
            }
        result['roc'] = roc
        try:
            result['roc_auc'] = roc_auc_score(y_test, y_pred_proba)
        except ValueError:
            # roc_auc_score raises ValueError when some class has only a
            # single outcome in y_test.  Previously this was a bare
            # `except:` which also swallowed KeyboardInterrupt/SystemExit.
            result['roc_auc'] = None
    finally:
        # Always clean up the remote model, even on failure.
        if destroy_model:
            yield clf.destroy_model()
    return result
| apache-2.0 |
aelsen/Systems_Integration_EMG | scripts_acquisition/myo.py | 8 | 3086 | from __future__ import print_function
from collections import Counter, deque
import sys
import time
import numpy as np
try:
from sklearn import neighbors, svm
HAVE_SK = True
except ImportError:
HAVE_SK = False
from common import *
from myo_raw import MyoRaw
SUBSAMPLE = 3
K = 15
class NNClassifier(object):
    '''A wrapper for sklearn's nearest-neighbor classifier that stores
    training data in vals0, ..., vals9.dat.'''

    def __init__(self):
        # Touch the ten per-class data files so later reads never fail
        # on a missing file.
        for i in range(10):
            with open('vals%d.dat' % i, 'ab') as f: pass
        self.read_data()

    def store_data(self, cls, vals):
        # Append one 8-channel EMG sample (8 unsigned shorts) to the file
        # for class `cls`, then retrain on the enlarged dataset.
        # NOTE(review): `pack` comes from the star import of `common`;
        # presumably struct.pack -- confirm.
        with open('vals%d.dat' % cls, 'ab') as f:
            f.write(pack('8H', *vals))

        self.train(np.vstack([self.X, vals]), np.hstack([self.Y, [cls]]))

    def read_data(self):
        # Rebuild (X, Y) from the per-class files; the file index is the
        # class label.
        X = []
        Y = []
        for i in range(10):
            X.append(np.fromfile('vals%d.dat' % i, dtype=np.uint16).reshape((-1, 8)))
            Y.append(i + np.zeros(X[-1].shape[0]))

        self.train(np.vstack(X), np.hstack(Y))

    def train(self, X, Y):
        self.X = X
        self.Y = Y
        # Only build the KNN index once there is enough data to subsample
        # (every SUBSAMPLE-th row is used for fitting).
        if HAVE_SK and self.X.shape[0] >= K * SUBSAMPLE:
            self.nn = neighbors.KNeighborsClassifier(n_neighbors=K, algorithm='kd_tree')
            self.nn.fit(self.X[::SUBSAMPLE], self.Y[::SUBSAMPLE])
        else:
            self.nn = None

    def nearest(self, d):
        # Brute-force 1-NN fallback used when sklearn is unavailable.
        dists = ((self.X - d)**2).sum(1)
        ind = dists.argmin()
        return self.Y[ind]

    def classify(self, d):
        # Report class 0 ("rest") until enough samples have been collected.
        if self.X.shape[0] < K * SUBSAMPLE: return 0
        if not HAVE_SK: return self.nearest(d)

        return int(self.nn.predict(d)[0])
class Myo(MyoRaw):
    '''Adds higher-level pose classification and handling onto MyoRaw.'''

    # Length of the sliding window of recent classifier outputs used to
    # debounce pose changes.
    HIST_LEN = 25

    def __init__(self, cls, tty=None):
        MyoRaw.__init__(self, tty)
        self.cls = cls

        # Bounded deque plus a parallel Counter acting as a running
        # histogram of the last HIST_LEN classifications.
        self.history = deque([0] * Myo.HIST_LEN, Myo.HIST_LEN)
        self.history_cnt = Counter(self.history)
        self.add_emg_handler(self.emg_handler)
        self.last_pose = None

        self.pose_handlers = []

    def emg_handler(self, emg, moving):
        # Classify the raw EMG sample and slide the window: the oldest
        # entry falls out of the histogram as the newest is appended.
        y = self.cls.classify(emg)
        self.history_cnt[self.history[0]] -= 1
        self.history_cnt[y] += 1
        self.history.append(y)

        r, n = self.history_cnt.most_common(1)[0]
        # Fire a pose change only when the winner clearly dominates the
        # previous pose (hysteresis margin of 5) and holds a window
        # majority -- avoids flicker between adjacent poses.
        if self.last_pose is None or (n > self.history_cnt[self.last_pose] + 5 and n > Myo.HIST_LEN / 2):
            self.on_raw_pose(r)
            self.last_pose = r

    def add_raw_pose_handler(self, h):
        self.pose_handlers.append(h)

    def on_raw_pose(self, pose):
        # Fan the debounced pose out to every registered handler.
        for h in self.pose_handlers:
            h(pose)
if __name__ == '__main__':
    import subprocess

    # Optional first CLI argument is the serial TTY of the Myo dongle.
    m = Myo(NNClassifier(), sys.argv[1] if len(sys.argv) >= 2 else None)
    m.add_raw_pose_handler(print)

    def page(pose):
        # Map poses 5/6 to Page Down / Page Up key presses via `xte`
        # (requires the xautomation package on the host).
        if pose == 5:
            subprocess.call(['xte', 'key Page_Down'])
        elif pose == 6:
            subprocess.call(['xte', 'key Page_Up'])
    m.add_raw_pose_handler(page)

    m.connect()
    # Pump the device event loop forever.
    while True:
        m.run()
| mit |
Adai0808/scikit-learn | benchmarks/bench_multilabel_metrics.py | 276 | 7138 | #!/usr/bin/env python
"""
A comparison of multilabel target formats and metrics over them
"""
from __future__ import division
from __future__ import print_function
from timeit import timeit
from functools import partial
import itertools
import argparse
import sys
import matplotlib.pyplot as plt
import scipy.sparse as sp
import numpy as np
from sklearn.datasets import make_multilabel_classification
from sklearn.metrics import (f1_score, accuracy_score, hamming_loss,
jaccard_similarity_score)
from sklearn.utils.testing import ignore_warnings
# Metric name -> callable(y_true, y_pred); partials pin the averaging mode.
METRICS = {
    'f1': partial(f1_score, average='micro'),
    'f1-by-sample': partial(f1_score, average='samples'),
    'accuracy': accuracy_score,
    'hamming': hamming_loss,
    'jaccard': jaccard_similarity_score,
}

# Format name -> converter from a dense indicator matrix to that
# multilabel representation.
FORMATS = {
    'sequences': lambda y: [list(np.flatnonzero(s)) for s in y],
    'dense': lambda y: y,
    'csr': lambda y: sp.csr_matrix(y),
    'csc': lambda y: sp.csc_matrix(y),
}
@ignore_warnings
def benchmark(metrics=tuple(v for k, v in sorted(METRICS.items())),
              formats=tuple(v for k, v in sorted(FORMATS.items())),
              samples=1000, classes=4, density=.2,
              n_times=5):
    """Times metric calculations for a number of inputs

    Parameters
    ----------
    metrics : array-like of callables (1d or 0d)
        The metric functions to time.

    formats : array-like of callables (1d or 0d)
        These may transform a dense indicator matrix into multilabel
        representation.

    samples : array-like of ints (1d or 0d)
        The number of samples to generate as input.

    classes : array-like of ints (1d or 0d)
        The number of classes in the input.

    density : array-like of ints (1d or 0d)
        The density of positive labels in the input.

    n_times : int
        Time calling the metric n_times times.

    Returns
    -------
    array of floats shaped like (metrics, formats, samples, classes, density)
        Time in seconds.
    """
    # Scalars and sequences are both accepted; promote everything to 1d.
    metrics = np.atleast_1d(metrics)
    samples = np.atleast_1d(samples)
    classes = np.atleast_1d(classes)
    density = np.atleast_1d(density)
    formats = np.atleast_1d(formats)

    out = np.zeros((len(metrics), len(formats), len(samples), len(classes),
                    len(density)), dtype=float)

    it = itertools.product(samples, classes, density)
    for i, (s, c, d) in enumerate(it):
        # Two independently seeded draws play the roles of truth/prediction.
        _, y_true = make_multilabel_classification(n_samples=s, n_features=1,
                                                   n_classes=c, n_labels=d * c,
                                                   random_state=42)
        _, y_pred = make_multilabel_classification(n_samples=s, n_features=1,
                                                   n_classes=c, n_labels=d * c,
                                                   random_state=84)
        for j, f in enumerate(formats):
            f_true = f(y_true)
            f_pred = f(y_pred)
            for k, metric in enumerate(metrics):
                t = timeit(partial(metric, f_true, f_pred), number=n_times)
                # itertools.product enumerates (samples, classes, density)
                # in row-major order, matching the layout of the trailing
                # three axes, so flat[i] addresses the correct cell.
                out[k, j].flat[i] = t
    return out
def _tabulate(results, metrics, formats):
"""Prints results by metric and format
Uses the last ([-1]) value of other fields
"""
column_width = max(max(len(k) for k in formats) + 1, 8)
first_width = max(len(k) for k in metrics)
head_fmt = ('{:<{fw}s}' + '{:>{cw}s}' * len(formats))
row_fmt = ('{:<{fw}s}' + '{:>{cw}.3f}' * len(formats))
print(head_fmt.format('Metric', *formats,
cw=column_width, fw=first_width))
for metric, row in zip(metrics, results[:, :, -1, -1, -1]):
print(row_fmt.format(metric, *row,
cw=column_width, fw=first_width))
def _plot(results, metrics, formats, title, x_ticks, x_label,
          format_markers=('x', '|', 'o', '+'),
          metric_colors=('c', 'm', 'y', 'k', 'g', 'r', 'b')):
    """
    Plot the results by metric, format and some other variable given by
    x_label
    """
    fig = plt.figure('scikit-learn multilabel metrics benchmarks')
    plt.title(title)
    ax = fig.add_subplot(111)
    for i, metric in enumerate(metrics):
        for j, format in enumerate(formats):
            # One curve per (metric, format) pair: the marker identifies
            # the format, the color the metric (colors cycle past 7).
            ax.plot(x_ticks, results[i, j].flat,
                    label='{}, {}'.format(metric, format),
                    marker=format_markers[j],
                    color=metric_colors[i % len(metric_colors)])
    ax.set_xlabel(x_label)
    ax.set_ylabel('Time (s)')
    ax.legend()
    plt.show()
if __name__ == "__main__":
ap = argparse.ArgumentParser()
ap.add_argument('metrics', nargs='*', default=sorted(METRICS),
help='Specifies metrics to benchmark, defaults to all. '
'Choices are: {}'.format(sorted(METRICS)))
ap.add_argument('--formats', nargs='+', choices=sorted(FORMATS),
help='Specifies multilabel formats to benchmark '
'(defaults to all).')
ap.add_argument('--samples', type=int, default=1000,
help='The number of samples to generate')
ap.add_argument('--classes', type=int, default=10,
help='The number of classes')
ap.add_argument('--density', type=float, default=.2,
help='The average density of labels per sample')
ap.add_argument('--plot', choices=['classes', 'density', 'samples'],
default=None,
help='Plot time with respect to this parameter varying '
'up to the specified value')
ap.add_argument('--n-steps', default=10, type=int,
help='Plot this many points for each metric')
ap.add_argument('--n-times',
default=5, type=int,
help="Time performance over n_times trials")
args = ap.parse_args()
if args.plot is not None:
max_val = getattr(args, args.plot)
if args.plot in ('classes', 'samples'):
min_val = 2
else:
min_val = 0
steps = np.linspace(min_val, max_val, num=args.n_steps + 1)[1:]
if args.plot in ('classes', 'samples'):
steps = np.unique(np.round(steps).astype(int))
setattr(args, args.plot, steps)
if args.metrics is None:
args.metrics = sorted(METRICS)
if args.formats is None:
args.formats = sorted(FORMATS)
results = benchmark([METRICS[k] for k in args.metrics],
[FORMATS[k] for k in args.formats],
args.samples, args.classes, args.density,
args.n_times)
_tabulate(results, args.metrics, args.formats)
if args.plot is not None:
print('Displaying plot', file=sys.stderr)
title = ('Multilabel metrics with %s' %
', '.join('{0}={1}'.format(field, getattr(args, field))
for field in ['samples', 'classes', 'density']
if args.plot != field))
_plot(results, args.metrics, args.formats, title, steps, args.plot)
| bsd-3-clause |
Garrett-R/scikit-learn | sklearn/neighbors/regression.py | 39 | 10464 | """Nearest Neighbor Regression"""
# Authors: Jake Vanderplas <vanderplas@astro.washington.edu>
# Fabian Pedregosa <fabian.pedregosa@inria.fr>
# Alexandre Gramfort <alexandre.gramfort@inria.fr>
# Sparseness support by Lars Buitinck <L.J.Buitinck@uva.nl>
# Multi-output support by Arnaud Joly <a.joly@ulg.ac.be>
#
# License: BSD 3 clause (C) INRIA, University of Amsterdam
import numpy as np
from .base import _get_weights, _check_weights, NeighborsBase, KNeighborsMixin
from .base import RadiusNeighborsMixin, SupervisedFloatMixin
from ..base import RegressorMixin
from ..utils import check_array
class KNeighborsRegressor(NeighborsBase, KNeighborsMixin,
                          SupervisedFloatMixin,
                          RegressorMixin):
    """Regression based on k-nearest neighbors.

    The target is predicted by local interpolation of the targets
    associated of the nearest neighbors in the training set.

    Parameters
    ----------
    n_neighbors : int, optional (default = 5)
        Number of neighbors to use by default for :meth:`k_neighbors` queries.

    weights : str or callable
        weight function used in prediction.  Possible values:

        - 'uniform' : uniform weights.  All points in each neighborhood
          are weighted equally.
        - 'distance' : weight points by the inverse of their distance.
          in this case, closer neighbors of a query point will have a
          greater influence than neighbors which are further away.
        - [callable] : a user-defined function which accepts an
          array of distances, and returns an array of the same shape
          containing the weights.

        Uniform weights are used by default.

    algorithm : {'auto', 'ball_tree', 'kd_tree', 'brute'}, optional
        Algorithm used to compute the nearest neighbors:

        - 'ball_tree' will use :class:`BallTree`
        - 'kd_tree' will use :class:`KDtree`
        - 'brute' will use a brute-force search.
        - 'auto' will attempt to decide the most appropriate algorithm
          based on the values passed to :meth:`fit` method.

        Note: fitting on sparse input will override the setting of
        this parameter, using brute force.

    leaf_size : int, optional (default = 30)
        Leaf size passed to BallTree or KDTree.  This can affect the
        speed of the construction and query, as well as the memory
        required to store the tree.  The optimal value depends on the
        nature of the problem.

    metric : string or DistanceMetric object (default='minkowski')
        the distance metric to use for the tree.  The default metric is
        minkowski, and with p=2 is equivalent to the standard Euclidean
        metric. See the documentation of the DistanceMetric class for a
        list of available metrics.

    p : integer, optional (default = 2)
        Power parameter for the Minkowski metric. When p = 1, this is
        equivalent to using manhattan_distance (l1), and euclidean_distance
        (l2) for p = 2. For arbitrary p, minkowski_distance (l_p) is used.

    metric_params: dict, optional (default = None)
        additional keyword arguments for the metric function.

    Examples
    --------
    >>> X = [[0], [1], [2], [3]]
    >>> y = [0, 0, 1, 1]
    >>> from sklearn.neighbors import KNeighborsRegressor
    >>> neigh = KNeighborsRegressor(n_neighbors=2)
    >>> neigh.fit(X, y) # doctest: +ELLIPSIS
    KNeighborsRegressor(...)
    >>> print(neigh.predict([[1.5]]))
    [ 0.5]

    See also
    --------
    NearestNeighbors
    RadiusNeighborsRegressor
    KNeighborsClassifier
    RadiusNeighborsClassifier

    Notes
    -----
    See :ref:`Nearest Neighbors <neighbors>` in the online documentation
    for a discussion of the choice of ``algorithm`` and ``leaf_size``.

    .. warning::

       Regarding the Nearest Neighbors algorithms, if it is found that two
       neighbors, neighbor `k+1` and `k`, have identical distances but
       but different labels, the results will depend on the ordering of the
       training data.

    http://en.wikipedia.org/wiki/K-nearest_neighbor_algorithm
    """

    def __init__(self, n_neighbors=5, weights='uniform',
                 algorithm='auto', leaf_size=30,
                 p=2, metric='minkowski', metric_params=None, **kwargs):
        self._init_params(n_neighbors=n_neighbors,
                          algorithm=algorithm,
                          leaf_size=leaf_size, metric=metric, p=p,
                          metric_params=metric_params, **kwargs)
        # Validate the weighting scheme once at construction time.
        self.weights = _check_weights(weights)

    def predict(self, X):
        """Predict the target for the provided data

        Parameters
        ----------
        X : array or matrix, shape = [n_samples, n_features]

        Returns
        -------
        y : array of int, shape = [n_samples] or [n_samples, n_outputs]
            Target values
        """
        X = check_array(X, accept_sparse='csr')

        neigh_dist, neigh_ind = self.kneighbors(X)

        # None means uniform weighting; otherwise an array aligned with
        # neigh_dist.
        weights = _get_weights(neigh_dist, self.weights)

        _y = self._y
        if _y.ndim == 1:
            # Promote 1d targets to a column so single- and multi-output
            # targets share one code path.
            _y = _y.reshape((-1, 1))

        if weights is None:
            # Uniform weights: plain mean over the k neighbors.
            y_pred = np.mean(_y[neigh_ind], axis=1)
        else:
            # Weighted average computed column-by-column.
            # NOTE(review): np.float is deprecated in modern NumPy; kept
            # as-is (historic vendored sklearn code).
            y_pred = np.empty((X.shape[0], _y.shape[1]), dtype=np.float)
            denom = np.sum(weights, axis=1)

            for j in range(_y.shape[1]):
                num = np.sum(_y[neigh_ind, j] * weights, axis=1)
                y_pred[:, j] = num / denom

        if self._y.ndim == 1:
            y_pred = y_pred.ravel()

        return y_pred
class RadiusNeighborsRegressor(NeighborsBase, RadiusNeighborsMixin,
                               SupervisedFloatMixin,
                               RegressorMixin):
    """Regression based on neighbors within a fixed radius.

    The target is predicted by local interpolation of the targets
    associated of the nearest neighbors in the training set.

    Parameters
    ----------
    radius : float, optional (default = 1.0)
        Range of parameter space to use by default for :meth`radius_neighbors`
        queries.

    weights : str or callable
        weight function used in prediction.  Possible values:

        - 'uniform' : uniform weights.  All points in each neighborhood
          are weighted equally.
        - 'distance' : weight points by the inverse of their distance.
          in this case, closer neighbors of a query point will have a
          greater influence than neighbors which are further away.
        - [callable] : a user-defined function which accepts an
          array of distances, and returns an array of the same shape
          containing the weights.

        Uniform weights are used by default.

    algorithm : {'auto', 'ball_tree', 'kd_tree', 'brute'}, optional
        Algorithm used to compute the nearest neighbors:

        - 'ball_tree' will use :class:`BallTree`
        - 'kd_tree' will use :class:`KDtree`
        - 'brute' will use a brute-force search.
        - 'auto' will attempt to decide the most appropriate algorithm
          based on the values passed to :meth:`fit` method.

        Note: fitting on sparse input will override the setting of
        this parameter, using brute force.

    leaf_size : int, optional (default = 30)
        Leaf size passed to BallTree or KDTree.  This can affect the
        speed of the construction and query, as well as the memory
        required to store the tree.  The optimal value depends on the
        nature of the problem.

    metric : string or DistanceMetric object (default='minkowski')
        the distance metric to use for the tree.  The default metric is
        minkowski, and with p=2 is equivalent to the standard Euclidean
        metric. See the documentation of the DistanceMetric class for a
        list of available metrics.

    p : integer, optional (default = 2)
        Power parameter for the Minkowski metric. When p = 1, this is
        equivalent to using manhattan_distance (l1), and euclidean_distance
        (l2) for p = 2. For arbitrary p, minkowski_distance (l_p) is used.

    metric_params: dict, optional (default = None)
        additional keyword arguments for the metric function.

    Examples
    --------
    >>> X = [[0], [1], [2], [3]]
    >>> y = [0, 0, 1, 1]
    >>> from sklearn.neighbors import RadiusNeighborsRegressor
    >>> neigh = RadiusNeighborsRegressor(radius=1.0)
    >>> neigh.fit(X, y) # doctest: +ELLIPSIS
    RadiusNeighborsRegressor(...)
    >>> print(neigh.predict([[1.5]]))
    [ 0.5]

    See also
    --------
    NearestNeighbors
    KNeighborsRegressor
    KNeighborsClassifier
    RadiusNeighborsClassifier

    Notes
    -----
    See :ref:`Nearest Neighbors <neighbors>` in the online documentation
    for a discussion of the choice of ``algorithm`` and ``leaf_size``.

    http://en.wikipedia.org/wiki/K-nearest_neighbor_algorithm
    """

    def __init__(self, radius=1.0, weights='uniform',
                 algorithm='auto', leaf_size=30,
                 p=2, metric='minkowski', metric_params=None, **kwargs):
        self._init_params(radius=radius,
                          algorithm=algorithm,
                          leaf_size=leaf_size,
                          p=p, metric=metric, metric_params=metric_params,
                          **kwargs)
        # Validate the weighting scheme once at construction time.
        self.weights = _check_weights(weights)

    def predict(self, X):
        """Predict the target for the provided data

        Parameters
        ----------
        X : array or matrix, shape = [n_samples, n_features]

        Returns
        -------
        y : array of int, shape = [n_samples] or [n_samples, n_outputs]
            Target values
        """
        X = check_array(X, accept_sparse='csr')

        neigh_dist, neigh_ind = self.radius_neighbors(X)

        weights = _get_weights(neigh_dist, self.weights)

        _y = self._y
        if _y.ndim == 1:
            # Promote 1d targets to a column so single- and multi-output
            # targets share one code path.
            _y = _y.reshape((-1, 1))

        # NOTE(review): a query point with NO neighbors inside the radius
        # produces a mean/average over an empty index list here (NaN with
        # a RuntimeWarning); callers should choose a radius large enough
        # to avoid empty neighborhoods.
        if weights is None:
            y_pred = np.array([np.mean(_y[ind, :], axis=0)
                               for ind in neigh_ind])
        else:
            y_pred = np.array([(np.average(_y[ind, :], axis=0,
                                           weights=weights[i]))
                               for (i, ind) in enumerate(neigh_ind)])

        if self._y.ndim == 1:
            y_pred = y_pred.ravel()

        return y_pred
| bsd-3-clause |
m2dsupsdlclass/lectures-labs | labs/03_neural_recsys/movielens_paramsearch.py | 1 | 9625 | from math import floor, ceil
from time import time
from pathlib import Path
from zipfile import ZipFile
from urllib.request import urlretrieve
from contextlib import contextmanager
import random
from pprint import pprint
import json
import numpy as np
import pandas as pd
import joblib
from sklearn.model_selection import ParameterGrid
from sklearn.model_selection import train_test_split
from sklearn.metrics import mean_squared_error
from sklearn.metrics import mean_absolute_error
import tensorflow as tf
from keras.layers import Input, Embedding, Flatten, merge, Dense, Dropout
from keras.layers import BatchNormalization
from keras.models import Model
from dask import delayed, compute
DEFAULT_LOSS = 'cross_entropy'

ML_100K_URL = "http://files.grouplens.org/datasets/movielens/ml-100k.zip"
ML_100K_FILENAME = Path(ML_100K_URL.rsplit('/', 1)[1])
ML_100K_FOLDER = Path('ml-100k')
RESULTS_FILENAME = 'results.json'
MODEL_FILENAME = 'model.h5'

# Download and unpack the MovieLens 100k archive on first run.  Module
# import intentionally has this side effect -- this is a benchmark script.
if not ML_100K_FILENAME.exists():
    print('Downloading %s to %s...' % (ML_100K_URL, ML_100K_FILENAME))
    urlretrieve(ML_100K_URL, ML_100K_FILENAME.name)

if not ML_100K_FOLDER.exists():
    print('Extracting %s to %s...' % (ML_100K_FILENAME, ML_100K_FOLDER))
    ZipFile(ML_100K_FILENAME.name).extractall('.')

# u.data holds tab-separated (user, item, rating, timestamp) tuples.
all_ratings = pd.read_csv(ML_100K_FOLDER / 'u.data', sep='\t',
                          names=["user_id", "item_id", "rating", "timestamp"])

# Hyper-parameter values used whenever a key is absent from a sampled
# combination.
DEFAULT_PARAMS = dict(
    embedding_size=16,
    hidden_size=64,
    n_hidden=4,
    dropout_embedding=0.3,
    dropout_hidden=0.3,
    use_batchnorm=True,
    loss=DEFAULT_LOSS,
    optimizer='adam',
    batch_size=64,
)

# Ranges shared by both branches of the search space below.
COMMON_SEARCH_SPACE = dict(
    embedding_size=[16, 32, 64, 128],
    dropout_embedding=[0, 0.2, 0.5],
    dropout_hidden=[0, 0.2, 0.5],
    use_batchnorm=[True, False],
    loss=['mse', 'mae', 'cross_entropy'],
    batch_size=[16, 32, 64, 128],
)

# Two sub-grids: a linear model (no hidden layer) and MLPs of varying
# depth and width.
SEARCH_SPACE = [
    dict(n_hidden=[0], **COMMON_SEARCH_SPACE),
    dict(n_hidden=[1, 2, 3, 4, 5],
         hidden_size=[32, 64, 128, 256, 512],
         **COMMON_SEARCH_SPACE),
]
def bootstrap_ci(func, data_args, ci_range=(0.025, 0.975), n_iter=10000,
                 random_state=0):
    """Bootstrap a confidence interval for ``func(*data_args)``.

    Parameters
    ----------
    func : callable
        Statistic to evaluate on each resampled dataset.
    data_args : list of array-likes
        Arrays to resample jointly; all must share the same first
        dimension (rows are resampled with the same indices).
    ci_range : (float, float)
        Lower and upper quantiles of the bootstrap distribution.
    n_iter : int
        Number of bootstrap resamples.
    random_state : int
        Seed for the resampling RNG (results are deterministic).

    Returns
    -------
    (low, high) : tuple
        Empirical quantiles of the bootstrap distribution of the statistic.
    """
    rng = np.random.RandomState(random_state)
    n_samples = data_args[0].shape[0]
    results = []
    for i in range(n_iter):
        # sample n_samples out of n_samples with replacement.
        # BUG FIX: randint's `high` bound is exclusive, so the previous
        # `randint(0, n_samples - 1, ...)` could never draw the last row.
        idx = rng.randint(0, n_samples, n_samples)
        resampled_args = [np.asarray(arg)[idx] for arg in data_args]
        results.append(func(*resampled_args))
    results = np.sort(results)
    low_idx = floor(ci_range[0] * n_iter)
    # Clamp so ci_range[1] == 1.0 does not index past the end.
    high_idx = min(ceil(ci_range[1] * n_iter), n_iter - 1)
    return (results[low_idx], results[high_idx])
def make_model(user_input_dim, item_input_dim,
               embedding_size=16, hidden_size=64, n_hidden=4,
               dropout_embedding=0.3, dropout_hidden=0.3,
               optimizer='adam', loss=DEFAULT_LOSS, **ignored_args):
    """Build a Keras rating model over (user_id, item_id) pairs.

    Both ids are embedded, concatenated and fed through ``n_hidden``
    relu layers.  With ``loss='cross_entropy'`` the 5 rating levels are
    treated as classes (targets must be shifted to 0-4); otherwise a
    single linear output is regressed with the given loss.

    Extra keyword arguments (e.g. batch_size, split_idx) are accepted
    and ignored so a full hyper-parameter dict can be splatted in.

    NOTE: written against the legacy Keras 1.x functional API
    (``merge``, ``output_dim``, ``Model(input=..., output=...)``).
    """
    use_batchnorm = ignored_args.pop('use_batchnorm', True)
    user_id_input = Input(shape=[1], name='user')
    item_id_input = Input(shape=[1], name='item')

    user_embedding = Embedding(output_dim=embedding_size,
                               input_dim=user_input_dim,
                               input_length=1,
                               name='user_embedding')(user_id_input)
    item_embedding = Embedding(output_dim=embedding_size,
                               input_dim=item_input_dim,
                               input_length=1,
                               name='item_embedding')(item_id_input)
    user_vecs = Flatten()(user_embedding)
    item_vecs = Flatten()(item_embedding)

    input_vecs = merge([user_vecs, item_vecs], mode='concat')
    x = Dropout(dropout_embedding)(input_vecs)
    for i in range(n_hidden):
        x = Dense(hidden_size, activation='relu')(x)
        # Regularize only BETWEEN hidden layers, not after the last one.
        if i < n_hidden - 1:
            x = Dropout(dropout_hidden)(x)
            if use_batchnorm:
                x = BatchNormalization()(x)

    if loss == 'cross_entropy':
        y = Dense(output_dim=5, activation='softmax')(x)
        model = Model(input=[user_id_input, item_id_input], output=y)
        # BUG FIX: the `optimizer` parameter was previously ignored and
        # 'adam' was hard-coded in both compile() calls.
        model.compile(optimizer=optimizer,
                      loss='sparse_categorical_crossentropy')
    else:
        y = Dense(output_dim=1)(x)
        model = Model(input=[user_id_input, item_id_input], output=y)
        model.compile(optimizer=optimizer, loss=loss)
    return model
@contextmanager
def transactional_open(path, mode='wb'):
    """Open ``path`` for writing through a temporary sibling file.

    Data goes to ``<name>.tmp`` and is renamed to the final name only
    when the ``with`` block exits cleanly, so readers never observe a
    half-written file.  On an exception the rename is skipped and the
    partial ``.tmp`` file is left behind.
    """
    staging = path.with_name(path.name + '.tmp')
    handle = staging.open(mode=mode)
    try:
        yield handle
    finally:
        handle.close()
    staging.rename(path)
@contextmanager
def transactional_fname(path):
    """Yield the name of a temporary sibling file and rename it onto
    ``path`` once the ``with`` block completes without raising.

    Useful for APIs (e.g. ``model.save``) that take a filename rather
    than a file object.
    """
    staging = path.with_name('{}.tmp'.format(path.name))
    yield str(staging)
    staging.rename(path)
def _compute_scores(model, prefix, user_id, item_id, rating, loss):
    """Score model predictions against true ratings.

    Returns ``(scores, preds)`` where ``scores`` maps
    ``'<prefix>_mse'/'<prefix>_mae'`` plus a bootstrap CI on the MAE,
    and ``preds`` are the (de-classified) predicted ratings.
    """
    preds = model.predict([user_id, item_id])
    if loss == 'cross_entropy':
        # Softmax output: map class probabilities back to a 1-5 rating.
        preds = preds.argmax(axis=1) + 1
    ci_low, ci_high = bootstrap_ci(mean_absolute_error, [preds, rating])
    scores = {
        prefix + '_mse': mean_squared_error(preds, rating),
        prefix + '_mae': mean_absolute_error(preds, rating),
        prefix + '_mae_ci_min': ci_low,
        prefix + '_mae_ci_max': ci_high,
    }
    return scores, preds
def evaluate_one(**kwargs):
    """Run one hyper-parameter evaluation inside its own TF session."""
    # Create a single threaded TF session for this Python thread:
    # parallelism is leveraged at a coarser level with dask
    session = tf.Session(
        # graph=tf.Graph(),
        config=tf.ConfigProto(intra_op_parallelism_threads=1))
    with session.as_default():
        # graph-level deterministic weights init
        tf.set_random_seed(0)
        _evaluate_one(**kwargs)
def _evaluate_one(**kwargs):
    """Train one hyper-parameter combination and persist its metrics.

    Merges ``kwargs`` over ``DEFAULT_PARAMS``, trains a model in 4
    rounds of 5 epochs each on a deterministic 80/20 split (selected by
    ``split_idx``), and after every round writes results.json, the
    model, and the prediction arrays under
    ``results/<digest>/<epochs>/``.  Returns the params digest.
    """
    params = DEFAULT_PARAMS.copy()
    params.update(kwargs)
    params_digest = joblib.hash(params)
    results = params.copy()
    results['digest'] = params_digest
    results_folder = Path('results')
    results_folder.mkdir(exist_ok=True)
    folder = results_folder.joinpath(params_digest)
    folder.mkdir(exist_ok=True)
    if len(list(folder.glob("*/results.json"))) == 4:
        # All 4 checkpoints already exist for this parameter digest.
        print('Skipping')
        # BUG FIX: previously this only printed and then fell through,
        # re-training the model despite the "Skipping" message.
        return params_digest

    split_idx = params.get('split_idx', 0)
    print("Evaluating model on split #%d:" % split_idx)
    pprint(params)
    ratings_train, ratings_test = train_test_split(
        all_ratings, test_size=0.2, random_state=split_idx)

    max_user_id = all_ratings['user_id'].max()
    max_item_id = all_ratings['item_id'].max()
    user_id_train = ratings_train['user_id']
    item_id_train = ratings_train['item_id']
    rating_train = ratings_train['rating']

    user_id_test = ratings_test['user_id']
    item_id_test = ratings_test['item_id']
    rating_test = ratings_test['rating']

    loss = params.get('loss', DEFAULT_LOSS)
    if loss == 'cross_entropy':
        # Classification targets must live in 0-4 rather than 1-5.
        target_train = rating_train - 1
    else:
        target_train = rating_train

    model = make_model(max_user_id + 1, max_item_id + 1, **params)
    results['model_size'] = sum(w.size for w in model.get_weights())

    nb_epoch = 5
    epochs = 0
    for i in range(4):
        epochs += nb_epoch
        t0 = time()
        model.fit([user_id_train, item_id_train], target_train,
                  batch_size=params['batch_size'],
                  nb_epoch=nb_epoch, shuffle=True, verbose=False)
        epoch_duration = (time() - t0) / nb_epoch
        train_scores, train_preds = _compute_scores(
            model, 'train', user_id_train, item_id_train, rating_train, loss)
        results.update(train_scores)
        test_scores, test_preds = _compute_scores(
            model, 'test', user_id_test, item_id_test, rating_test, loss)
        results.update(test_scores)
        results['epoch_duration'] = epoch_duration
        results['epochs'] = epochs

        subfolder = folder.joinpath("%03d" % epochs)
        subfolder.mkdir(exist_ok=True)

        # Transactional results saving to avoid file corruption on ctrl-c
        results_filepath = subfolder.joinpath(RESULTS_FILENAME)
        with transactional_open(results_filepath, mode='w') as f:
            json.dump(results, f)

        model_filepath = subfolder.joinpath(MODEL_FILENAME)
        with transactional_fname(model_filepath) as fname:
            model.save(fname)

        # Save predictions and true labels to be able to recompute new
        # scores later
        with transactional_open(subfolder / 'test_preds.npy', mode='wb') as f:
            np.save(f, test_preds)
        with transactional_open(subfolder / 'train_preds.npy', mode='wb') as f:
            # BUG FIX: this file previously saved test_preds.
            np.save(f, train_preds)
        with transactional_open(subfolder / 'ratings.npy', mode='wb') as f:
            np.save(f, rating_test)
    return params_digest
def _model_complexity_proxy(params):
# Quick approximation of the number of tunable parameter to rank models
# by increasing complexity
embedding_size = params['embedding_size']
n_hidden = params['n_hidden']
if n_hidden == 0:
return embedding_size * 2
else:
hidden_size = params['hidden_size']
return (2 * embedding_size * hidden_size +
(n_hidden - 1) * hidden_size ** 2)
if __name__ == "__main__":
    # Randomly sample n_params hyper-parameter combinations from the grid,
    # with a fixed seed so the sampled set is reproducible across runs.
    seed = 0
    n_params = 500
    all_combinations = list(ParameterGrid(SEARCH_SPACE))
    random.Random(seed).shuffle(all_combinations)
    sampled_params = all_combinations[:n_params]
    # Evaluate cheap models first so partial results appear early.
    sampled_params.sort(key=_model_complexity_proxy)
    evaluations = []
    for params in sampled_params:
        # 3 train/test splits per parameter set for variance estimation
        for split_idx in range(3):
            evaluations.append(delayed(evaluate_one)(
                split_idx=split_idx, **params))
    # delayed/compute: presumably dask (or joblib) lazy evaluation -- the
    # evaluations only run (possibly in parallel) at this call. TODO confirm.
    compute(*evaluations)
| mit |
JosmanPS/scikit-learn | benchmarks/bench_plot_nmf.py | 206 | 5890 | """
Benchmarks of Non-Negative Matrix Factorization
"""
from __future__ import print_function
from collections import defaultdict
import gc
from time import time
import numpy as np
from scipy.linalg import norm
from sklearn.decomposition.nmf import NMF, _initialize_nmf
from sklearn.datasets.samples_generator import make_low_rank_matrix
from sklearn.externals.six.moves import xrange
def alt_nnmf(V, r, max_iter=1000, tol=1e-3, R=None):
    '''
    A, S = nnmf(X, r, tol=1e-3, R=None)
    Implement Lee & Seung's algorithm
    Parameters
    ----------
    V : 2-ndarray, [n_samples, n_features]
        input matrix
    r : integer
        number of latent features
    max_iter : integer, optional
        maximum number of iterations (default: 1000)
    tol : double
        tolerance threshold for early exit (when the update factor is within
        tol of 1., the function exits)
    R : "svd", integer, RandomState or None, optional
        initialization: "svd" uses an NNDSVD initialization, an integer
        seeds a fresh RandomState, None uses numpy's global random state
    Returns
    -------
    A : 2-ndarray, [n_samples, r]
        Component part of the factorization
    S : 2-ndarray, [r, n_features]
        Data part of the factorization
    Reference
    ---------
    "Algorithms for Non-negative Matrix Factorization"
    by Daniel D Lee, Sebastian H Seung
    (available at http://citeseer.ist.psu.edu/lee01algorithms.html)
    '''
    # Nomenclature in the function follows Lee & Seung
    eps = 1e-5
    n, m = V.shape
    if R == "svd":
        W, H = _initialize_nmf(V, r)
    else:
        # Bug fix: an integer seed was documented but previously passed
        # through unchanged (no .standard_normal method) -- convert it.
        if R is None:
            R = np.random.mtrand._rand
        elif not hasattr(R, 'standard_normal'):
            R = np.random.RandomState(R)
        W = np.abs(R.standard_normal((n, r)))
        H = np.abs(R.standard_normal((r, m)))
    # range instead of py2-only xrange; multiplicative updates with eps
    # guarding against division by zero
    for i in range(max_iter):
        updateH = np.dot(W.T, V) / (np.dot(np.dot(W.T, W), H) + eps)
        H *= updateH
        updateW = np.dot(V, H.T) / (np.dot(W, np.dot(H, H.T)) + eps)
        W *= updateW
        if i % 10 == 0:
            # early exit when the multiplicative factors settle near 1
            max_update = max(updateW.max(), updateH.max())
            if abs(1. - max_update) < tol:
                break
    return W, H
def report(error, time):
    """Print the Frobenius reconstruction loss and the elapsed wall time."""
    for line in ("Frobenius loss: %.5f" % error, "Took: %.2fs" % time):
        print(line)
    print()
def benchmark(samples_range, features_range, rank=50, tolerance=1e-5):
    """Time several NMF variants on low-rank random matrices.

    Parameters
    ----------
    samples_range, features_range : iterables of int
        grid of (n_samples, n_features) problem sizes to benchmark
    rank : int
        effective rank of the generated test matrices
    tolerance : float
        convergence tolerance passed to every solver

    Returns
    -------
    timeset, err : dict of list
        per-variant fit times and reconstruction errors, one entry per
        (n_samples, n_features) pair
    """
    timeset = defaultdict(lambda: [])
    err = defaultdict(lambda: [])
    # (result key, banner printed before the run, extra NMF kwargs);
    # the four sklearn-based variants only differ by init strategy, so
    # drive them from data instead of four copy-pasted stanzas.
    # (The previously unused `it`/`max_it` counters were removed.)
    nmf_variants = [
        ('nndsvd-nmf', "benchmarking nndsvd-nmf: ", dict(init='nndsvd')),
        ('nndsvda-nmf', "benchmarking nndsvda-nmf: ", dict(init='nndsvda')),
        ('nndsvdar-nmf', "benchmarking nndsvdar-nmf: ", dict(init='nndsvdar')),
        ('random-nmf', "benchmarking random-nmf", dict(init=None, max_iter=1000)),
    ]
    for n_samples in samples_range:
        for n_features in features_range:
            print("%2d samples, %2d features" % (n_samples, n_features))
            print('=======================')
            X = np.abs(make_low_rank_matrix(n_samples, n_features,
                                            effective_rank=rank,
                                            tail_strength=0.2))
            for label, banner, extra_kwargs in nmf_variants:
                gc.collect()
                print(banner)
                tstart = time()
                m = NMF(n_components=30, tol=tolerance, **extra_kwargs).fit(X)
                tend = time() - tstart
                timeset[label].append(tend)
                err[label].append(m.reconstruction_err_)
                report(m.reconstruction_err_, tend)
            # Lee & Seung reference implementation defined above
            gc.collect()
            print("benchmarking alt-random-nmf")
            tstart = time()
            W, H = alt_nnmf(X, r=30, R=None, tol=tolerance)
            tend = time() - tstart
            timeset['alt-random-nmf'].append(tend)
            err['alt-random-nmf'].append(np.linalg.norm(X - np.dot(W, H)))
            report(norm(X - np.dot(W, H)), tend)
    return timeset, err
if __name__ == '__main__':
    from mpl_toolkits.mplot3d import axes3d  # register the 3d projection
    axes3d
    import matplotlib.pyplot as plt

    # Bug fix: np.int was deprecated/removed in modern NumPy; plain int
    # produces the same integer grid.
    samples_range = np.linspace(50, 500, 3).astype(int)
    features_range = np.linspace(50, 500, 3).astype(int)
    timeset, err = benchmark(samples_range, features_range)

    # one 3d surface figure for timings, one for reconstruction errors
    for i, results in enumerate((timeset, err)):
        fig = plt.figure('scikit-learn Non-Negative Matrix Factorization benchmark results')
        ax = fig.gca(projection='3d')
        # Bug fix: dict.iteritems() does not exist on Python 3 -- items()
        # works on both Python 2 and 3.
        for c, (label, timings) in zip('rbgcm', sorted(results.items())):
            X, Y = np.meshgrid(samples_range, features_range)
            Z = np.asarray(timings).reshape(samples_range.shape[0],
                                            features_range.shape[0])
            # plot the actual surface
            ax.plot_surface(X, Y, Z, rstride=8, cstride=8, alpha=0.3,
                            color=c)
            # dummy point plot to stick the legend to since surface plot do not
            # support legends (yet?)
            ax.plot([1], [1], [1], color=c, label=label)
        ax.set_xlabel('n_samples')
        ax.set_ylabel('n_features')
        zlabel = 'Time (s)' if i == 0 else 'reconstruction error'
        ax.set_zlabel(zlabel)
        ax.legend()
    plt.show()
| bsd-3-clause |
JLHulme/MhoPerformance | Voltage_plot.py | 1 | 3051 | #Required imports
import math
import matplotlib.pyplot as plt
# Create the time axis (in cycles) for a 30-cycle simulation window.
length = 30  # cycles
sampleRate = 4  # samples per cycle
time = []

# global definition of the 'a' operator (unit phasor at 120 degrees),
# used to derive phase quantities from positive-sequence values
deg120 = (math.pi/180) * 120
a = math.cos(deg120)+math.sin(deg120)*1j

# Bug fix: the time axis previously divided by a hard-coded 4.0; derive it
# from sampleRate so changing the rate keeps the axis consistent.
for x in range(sampleRate*length):
    time.append(x/float(sampleRate))
#Define function for voltage memory
class V_Mem:
vMem = complex(0+0j)
vMemN2 = complex(0+0j)
vMemN1 = complex(0+0j)
def __init__(self, startVoltage):
self.vMem = startVoltage
self.vMemN1 = startVoltage
self.vMemN2 = startVoltage
def updateVoltage(self, currentVoltage):
self.vMemN2 = self.vMemN1
self.vMemN1 = self.vMem
self.vMem = (1.0/16.0)*currentVoltage + (15.0/16.0)*self.vMemN2 #+ instead of - as we dont update measured phasor
def getVoltage(self):
return self.vMem
#create a class for to use as a Mho object
class Phase_Mho:
    """Self-polarized AB phase mho distance element using positive-sequence
    memory voltage (V_Mem) as the polarizing quantity.

    NOTE(review): phase quantities are reconstructed from the positive-
    sequence voltage only (balanced-fault assumption) -- confirm intent.
    """
    #v1Mem = V_Mem
    #Z1L
    #mho
    def __init__(self, initialV1Mem, lineZ1):
        # initialV1Mem: pre-fault positive-sequence voltage (seeds the memory)
        # lineZ1: positive-sequence line impedance (secondary ohms)
        self.v1Mem = V_Mem(initialV1Mem)
        self.Z1L = lineZ1
    def update(self, V1Fault, IA, IB):
        """Advance the memory filter one sample and recompute the mho reach
        estimate from the AB-loop voltage/current quantities."""
        #fault values = [v1Mem, IA, IB]
        self.v1Mem.updateVoltage(V1Fault)
        #print(faultValues)
        currentV1Mem = self.v1Mem.getVoltage()
        #print(currentV1Mem)
        VA = V1Fault #V1F
        VB = (a**2) * V1Fault #V1F@-120
        VAB = VA - VB
        IAB = IA - IB
        # polarizing voltages built from the decaying memory phasor
        VPolA = currentV1Mem
        VPolB = currentV1Mem * (a**2)
        VPolAB = VPolA - VPolB
        #print(VAB)
        #print(VA)
        #print(VB)
        # m = Re[VAB * conj(VPolAB)] / Re[conj(VPolAB) * IAB * 1@lineAngle]
        torqNum = VAB * VPolAB.conjugate()
        #print(torqNum.real)
        torqDen = VPolAB.conjugate() * IAB * (self.Z1L / abs(self.Z1L))
        #print(torqDen.real)
        self.mho = torqNum.real / torqDen.real
        #print(self.mho)
    def getMho(self):
        # latest reach estimate; update() must be called at least once first
        return self.mho
    def getVMem(self):
        # current (decaying) memory voltage phasor
        return self.v1Mem.getVoltage()
#Simulation values
#Prefault voltage
V1 = 230000/math.sqrt(3) #VLN
#Fault values (primary volts/amps, rectangular form; polar in comments):
IA = 568.2-2673j #2733@-78
IB = -2599+844.5j #2733@162
V1F = 4173-11464j #12200@-70
lineImp = 0.0843+0.817j #0.82@84.1
#convert to secondary values via instrument-transformer ratios
PTR = 2000  # potential transformer ratio
CTR = 400   # current transformer ratio
IA = IA / CTR
IB = IB / CTR
V1F = V1F / PTR
V1 = V1 / PTR
lineImp = lineImp * CTR / PTR
#create relay mho obj seeded with the pre-fault voltage
rlyMho = Phase_Mho(V1, lineImp)
#simulate: constant fault quantities applied each sample; the memory
#voltage decays toward the fault voltage over time
rlyImpedance = []
rlySetting = []
rlyVMem = []
vtest = []
zone2_setting = 0.53  # zone-2 reach setting -- TODO confirm units (per-unit of Z1L?)
for t in time:
    rlyMho.update(V1F, IA, IB)
    rlyImpedance.append(rlyMho.getMho())
    rlySetting.append(zone2_setting)
    v = rlyMho.getVMem()
    vtest.append(v)
    rlyVMem.append(abs(v))
#plot the memory-voltage magnitude over time
plt.plot(time, rlyVMem , 'b', label="V1MEM")
#plt.plot(time, rlySetting, 'r--', label="Trip Setting")
#plt.axvline(3.5, color='k', linestyle='--', label="3.5cy")
#plt.axvline(5.5, color='b', linestyle='--', label="5.5cy")
plt.xlabel('Time (cycles)')
plt.ylabel('Measured Voltage (Vsec)')
plt.legend(shadow=True, loc=4)
plt.show()
| bsd-2-clause |
paninski-lab/yass | src/yass/cluster/cluster.py | 1 | 45893 | # Class to do parallelized clustering
import os
import numpy as np
import networkx as nx
from sklearn.decomposition import PCA
from scipy.spatial import cKDTree
from scipy.stats import chi2
from yass.template import shift_chans, align_get_shifts_with_ref
from yass import mfm
from yass.util import absolute_path_to_asset
import warnings
# Globally silence warnings: filter Future/User warnings and additionally
# monkey-patch warnings.warn with a no-op so *all* library warnings are
# suppressed process-wide (intentional for parallel cluster workers).
warnings.simplefilter(action='ignore', category=FutureWarning)
warnings.filterwarnings("ignore", category=UserWarning)
def warn(*args, **kwargs):
    # no-op replacement for warnings.warn
    pass
warnings.warn = warn
class Cluster(object):
"""Class for doing clustering."""
def __init__(self, data_in, analysis=False):
    """Run the whole clustering pipeline for one channel/unit worker.

    data_in : tuple unpacked by load_data (flags, CONFIG, readers, paths).
    analysis : when True, only load data (no clustering) for offline use.
    """
    # load data and check if prev completed
    if self.load_data(data_in): return
    if analysis: return
    # local channel clustering
    if self.verbose:
        print("START LOCAL")
    #
    #print (" Triage value: ", self.triage_value)
    # neighbour channel clustering
    self.initialize(indices_in=np.arange(len(self.spike_times_original)),
                    local=True)
    self.cluster(current_indices=np.arange(len(self.indices_in)),
                 local=True,
                 gen=0,
                 branch=0,
                 hist=[])
    #self.finish_plotting()
    if False:
        # NOTE(review): dead branch -- saves local clustering results to
        # disk only when manually enabled.
        save_dir = self.filename_postclustering
        save_dir = save_dir[:save_dir[:(save_dir.rfind('/'))].rfind('/')]
        save_dir = os.path.join(save_dir, 'local_clustering_result')
        if not os.path.exists(save_dir):
            os.makedirs(save_dir)
        orig_fname = self.filename_postclustering
        fname_save = os.path.join(save_dir, orig_fname[(orig_fname.rfind('/')+1):])
        indices_train_local = np.copy(self.indices_train)
        templates_local = []
        for indices_train_k in self.indices_train:
            template = self.get_templates_on_all_channels(indices_train_k)
            templates_local.append(template)
        # save clusters
        self.save_result_local(indices_train_local, templates_local, fname_save)
    if self.full_run:
        if self.verbose:
            print('START DISTANT')
        # distant channel clustering: re-cluster each local cluster using
        # all channels (not just the neighbourhood of self.channel)
        indices_train_local = np.copy(self.indices_train)
        indices_train_final = []
        templates_final = []
        for ii, indices_train_k in enumerate(indices_train_local):
            #if self.verbose: print("\nchan/unit {}, UNIT {}/{}".format(self.channel, ii, len(spike_train_local)))
            self.distant_ii = ii
            self.initialize(indices_in=indices_train_k,
                            local=False)
            # resume the branch history recorded during local clustering
            self.cluster(current_indices=np.arange(len(self.indices_in)), local=False,
                         gen=self.history_local_final[ii][0]+1,
                         branch=self.history_local_final[ii][1],
                         hist=self.history_local_final[ii][1:])
            #self.finish_plotting(local_unit_id=ii)
            indices_train_final += self.indices_train
            templates_final += self.templates
    else:
        indices_train_final = []
        templates_final = []
        for indices_train_k in self.indices_train:
            template = self.get_templates_on_all_channels(indices_train_k)
            templates_final.append(template)
            indices_train_final.append(indices_train_k)
    if (self.full_run) and (not self.raw_data):
        # recompute templates on all channels for residual-based runs
        templates_final_2 = []
        indices_train_final_2 = []
        for indices_train_k in indices_train_final:
            template = self.get_templates_on_all_channels(indices_train_k)
            templates_final_2.append(template)
            indices_train_final_2.append(indices_train_k)
        templates_final = templates_final_2
        indices_train_final = indices_train_final_2
    # save clusters
    self.save_result(indices_train_final, templates_final)
def cluster(self, current_indices, local, gen, branch, hist):
    ''' Recursive clustering function
    channel: current channel being clusterd
    wf = wf_PCA: denoised waveforms (# spikes, # time points, # chans)
    sic = spike_indices of spikes on current channel
    gen = generation of cluster; increases with each clustering step
    hist = is the current branch parent history
    '''
    # stop recursion when too few spikes remain
    if self.min(current_indices.shape[0]): return
    if self.verbose:
        print("chan "+str(self.channel)+', gen '+str(gen)+', branch: ' +
              str(branch)+', # spikes: '+ str(current_indices.shape[0]))
    # featurize #1: PCA-project the denoised waveforms of this subset
    pca_wf = self.featurize_step(gen, current_indices, current_indices, local)
    # knn triage (raw-data mode only; deconv spikes skip it)
    if self.raw_data:
        idx_keep = self.knn_triage_step(gen, pca_wf)
        pca_wf = pca_wf[idx_keep]
        current_indices = current_indices[idx_keep]
    ## subsample if too many
    #pca_wf_subsample = self.subsample_step(gen, pca_wf_triage)
    ## run mfm
    #vbParam1 = self.run_mfm(gen, pca_wf_subsample)
    vbParam2 = self.run_mfm(gen, pca_wf)
    ## recover spikes using soft-assignments
    #idx_recovered, vbParam2 = self.recover_step(gen, vbParam1, pca_wf)
    #if self.min(idx_recovered.shape[0]): return
    ## if recovered spikes < total spikes, do further indexing
    #if idx_recovered.shape[0] < pca_wf.shape[0]:
    #    current_indices = current_indices[idx_recovered]
    #    pca_wf = pca_wf[idx_recovered]
    # connecting clusters: merge mfm components into connected groups
    if vbParam2.rhat.shape[1] > 1:
        cc_assignment, stability, idx_keep = self.get_cc_and_stability(vbParam2)
        current_indices = current_indices[idx_keep]
        pca_wf = pca_wf[idx_keep]
    else:
        cc_assignment = np.zeros(pca_wf.shape[0], 'int32')
        stability = [1]
    # save generic metadata containing current branch info
    self.save_metadata(pca_wf, vbParam2, cc_assignment, current_indices, local,
                       gen, branch, hist)
    # single cluster: terminate this branch
    if len(stability) == 1:
        self.single_cluster_step(current_indices, pca_wf, local,
                                 gen, branch, hist)
    # multiple clusters: recurse into each connected component
    else:
        self.multi_cluster_step(current_indices, pca_wf, local,
                                cc_assignment, gen, branch, hist)
def save_metadata(self, pca_wf_all, vbParam, cc_label, current_indices, local,
gen, branch, hist):
self.pca_post_triage_post_recovery.append(pca_wf_all)
self.gen_label.append(cc_label)
self.gen_local.append(local)
#self.vbPar_muhat.append(vbParam2.muhat)
self.vbPar_rhat.append(vbParam.rhat)
# save history for every clustered distributions
size_ = 2
size_ += len(hist)
temp = np.zeros(size_, 'int32')
temp[0]=gen
temp[1:-1]=hist
temp[-1]=branch
self.hist.append(temp)
# save history again if local clustering converges in order to do
# distant clustering tracking
self.hist_local = temp
if gen==0 and local:
#self.pca_wf_allchans = self.pca_wf_allchans#[current_indices]
self.indices_gen0 = current_indices
def min(self, n_spikes):
''' Function that checks if spikes left are lower than min_spikes
'''
if n_spikes < self.min_spikes:
return True
return False
def load_data(self, data_in):
    ''' Unpack inputs and initialize fixed/run parameters.

    Returns True when this unit should be skipped (results already on
    disk, or too few spikes); False when clustering should proceed.
    *******************************************
    ************ LOADED PARAMETERS ************
    *******************************************
    '''
    # load all input
    self.raw_data = data_in[0]    # True: cluster raw traces; False: residual+templates
    self.full_run = data_in[1]    # True: also run distant-channel clustering
    self.CONFIG = data_in[2]
    self.reader_raw = data_in[3]
    self.reader_resid = data_in[4]
    self.filename_postclustering = data_in[5]
    if os.path.exists(self.filename_postclustering):
        return True
    else:
        # data_in[6]: npz file with precomputed spikes/waveforms
        input_data = np.load(data_in[6])
        self.spike_times_original = input_data['spike_times']
        self.min_spikes = input_data['min_spikes']
        # if there is no spike to cluster, finish
        if len(self.spike_times_original) < self.min_spikes:
            return True
        self.wf_global = input_data['wf']
        self.denoised_wf = input_data['denoised_wf']
        self.shifts = input_data['shifts']
        self.channel = input_data['channel']
        if not self.raw_data:
            # deconvolution metadata needed to rebuild clean waveforms
            self.template_ids_in = input_data['template_ids_in']
            self.templates_in = input_data['templates_in']
            self.shifts = input_data['shifts']
            self.scales = input_data['scales']
    ''' ******************************************
        *********** FIXED PARAMETERS *************
        ******************************************
    '''
    # These are not user/run specific, should be stayed fixed
    self.verbose = False
    self.selected_PCA_rank = 5
    # threshold at which to set soft assignments to 0
    self.assignment_delete_threshold = 0.001
    # spike size
    self.spike_size = self.CONFIG.spike_size
    self.center_spike_size = self.CONFIG.center_spike_size
    self.neighbors = self.CONFIG.neigh_channels
    self.triage_value = self.CONFIG.cluster.knn_triage
    # random subsample, remove edge spikes
    #self.clean_input_data()
    # if there is no spike to cluster, finish
    if len(self.spike_times_original) == 0:
        return True
    ''' ******************************************
        *********** SAVING PARAMETERS ************
        ******************************************
    '''
    # flag to load all chans waveforms and featurizat for ari's work
    # Cat: TODO: I don't think we use the meta data from Ari's project any longer; delete?
    self.ari_flag = False
    self.wf_global_allchans = None
    self.pca_wf_allchans = None
    self.indices_gen0 = None
    self.data_to_fit = None
    self.pca_wf_gen0 = None
    # list that holds all the final clustered indices for the premerge clusters
    self.clustered_indices_local = []
    self.clustered_indices_distant = []
    # keep track of local idx source for distant clustering in order to
    # index into original distribution indexes
    self.distant_ii = None
    # initialize metadata saves; easier to do here than using local flags + conditional
    self.pca_post_triage_post_recovery=[]
    self.vbPar_rhat=[]
    #self.vbPar_muhat=[]
    self.hist=[]
    self.gen_label = []
    self.gen_local = []
    # this list track the first clustering indexes
    self.history_local_final=[]
    # return flag that clustering not yet complete
    return False
def clean_input_data(self):
    """Random-subsample to at most CONFIG.cluster.max_n_spikes and drop
    spikes too close to the recording edges.

    NOTE(review): currently not called (commented out in load_data).
    """
    # limit clustering to at most 50,000 spikes
    max_spikes = self.CONFIG.cluster.max_n_spikes
    if len(self.spike_times_original)>max_spikes:
        idx_sampled = np.random.choice(
            a=np.arange(len(self.spike_times_original)),
            size=max_spikes,
            replace=False)
        self.spike_times_original = self.spike_times_original[idx_sampled]
    else:
        idx_sampled = np.arange(len(self.spike_times_original))
    # limit indexes away from edge of recording so full waveforms fit
    idx_inbounds = np.where(np.logical_and(
        self.spike_times_original>=self.spike_size//2,
        self.spike_times_original<(self.reader_raw.rec_len-self.spike_size)))[0]
    self.spike_times_original = self.spike_times_original[
        idx_inbounds].astype('int32')
    # clean upsampled ids if available (keep metadata aligned with spikes)
    if not self.raw_data:
        self.template_ids_in = self.template_ids_in[
            idx_sampled][idx_inbounds].astype('int32')
def initialize(self, indices_in, local):
    """Reset per-run state and prepare waveforms for clustering.

    indices_in : indices (into spike_times_original) of spikes to cluster.
    local : True for neighbourhood-channel clustering (waveforms already
            loaded by load_data); False for distant clustering (load,
            align and denoise here).
    """
    # reset spike_train and templates for both local and distant clustering
    self.indices_train = []
    self.templates = []
    self.indices_in = indices_in
    self.neighbor_chans = np.where(self.neighbors[self.channel])[0]
    if local:
        # initialize
        self.loaded_channels = self.neighbor_chans
    else:
        # load waveforms
        if len(self.indices_in) > 0:
            self.load_waveforms(local)
            # align waveforms
            self.align_step(local)
            # denoise waveforms on active channels
            self.denoise_step(local)
def load_waveforms(self, local):
    ''' Waveforms only loaded once in gen0 before local clustering starts

    local : True -> load only the neighbourhood channels of self.channel;
            False -> load every channel.
    Sets self.wf_global and may shrink self.indices_in when some spikes
    cannot be read (e.g. too close to the recording edges).
    '''
    if self.verbose:
        print ("chan "+str(self.channel)+", loading {} waveforms".format(
            len(self.indices_in)))
    if local:
        self.loaded_channels = self.neighbor_chans
    else:
        self.loaded_channels = np.arange(self.reader_raw.n_channels)
    # load waveforms from raw data
    spike_times = self.spike_times_original[self.indices_in]
    if self.raw_data:
        self.wf_global, skipped_idx = self.reader_raw.read_waveforms(
            spike_times, self.spike_size, self.loaded_channels)
    # or from residual and add templates
    else:
        resid_, skipped_idx = self.reader_resid.read_waveforms(
            spike_times, self.spike_size, self.loaded_channels)
        # index per-spike deconvolution metadata, dropping skipped spikes
        # (bug fix: these three lookups were previously computed twice --
        # once redundantly before the read and again after)
        template_ids_in_ = np.delete(
            self.template_ids_in[self.indices_in], skipped_idx)
        shifts_ = np.delete(self.shifts[self.indices_in], skipped_idx)
        scales_ = np.delete(self.scales[self.indices_in], skipped_idx)
        # align residuals
        #resid_ = shift_chans(resid_, -shifts_)
        # make clean wfs: residual + scaled template = denoised spike
        temp_ = self.templates_in[:,:,self.loaded_channels]
        self.wf_global = (resid_ +
                          scales_[:, None, None]*temp_[template_ids_in_])
    # Cat: TODO: we're cliping the waveforms at 1000 SU; need to check this
    # clip waveforms; seems necessary for neuropixel probe due to artifacts
    self.wf_global = self.wf_global.clip(min=-1000, max=1000)
    # delete any spikes that could not be loaded in previous step
    if len(skipped_idx)>0:
        self.indices_in = np.delete(self.indices_in, skipped_idx)
def align_step(self, local):
    """Temporally align waveforms.

    local=True computes best shifts against a reference on the main
    channel and caches them in self.shifts; local=False reuses the
    cached shifts so distant clustering stays consistent with local.
    """
    if self.verbose:
        print ("chan "+str(self.channel)+", aligning")
    # align waveforms by finding best shfits
    if local:
        mc = np.where(self.loaded_channels==self.channel)[0][0]
        best_shifts = align_get_shifts_with_ref(
            self.wf_global[:, :, mc])
        self.shifts[self.indices_in] = best_shifts
    else:
        best_shifts = self.shifts[self.indices_in]
    self.wf_global = shift_chans(self.wf_global, best_shifts)
    if self.ari_flag:
        pass
        #self.wf_global_allchans = shift_chans(self.wf_global_allchans,
        #                                      best_shifts)
def denoise_step(self, local):
if local:
self.denoise_step_local()
else:
self.denoise_step_distant3()
if self.verbose:
print ("chan "+str(self.channel)+", waveorms denoised to {} dimensions".format(self.denoised_wf.shape[1]))
def denoise_step_local(self):
    """Project each channel's waveform onto a precomputed PCA basis and
    normalize by per-basis noise std; flattens to (n_spikes, n_features).

    NOTE(review): pca_main_components_/pca_sec_components_ and their
    noise stds are assumed to be set externally before this call.
    """
    # align, note: aligning all channels to max chan which is appended to the end
    # note: max chan is first from feat_chans above, ensure order is preserved
    # note: don't want for wf array to be used beyond this function
    # Alignment: upsample max chan only; linear shift other chans
    n_data, _, n_chans = self.wf_global.shape
    self.denoised_wf = np.zeros((n_data, self.pca_main_components_.shape[0], n_chans),
                                dtype='float32')
    for ii in range(n_chans):
        # the main channel uses the "main" PCA basis, neighbours "sec"
        if self.loaded_channels[ii] == self.channel:
            self.denoised_wf[:, :, ii] = np.matmul(
                self.wf_global[:, :, ii],
                self.pca_main_components_.T)/self.pca_main_noise_std[np.newaxis]
        else:
            self.denoised_wf[:, :, ii] = np.matmul(
                self.wf_global[:, :, ii],
                self.pca_sec_components_.T)/self.pca_sec_noise_std[np.newaxis]
    self.denoised_wf = np.reshape(self.denoised_wf, [n_data, -1])
    #energy = np.median(np.square(self.denoised_wf), axis=0)
    #good_features = np.where(energy > 0.5)[0]
    #if len(good_features) < self.selected_PCA_rank:
    #    good_features = np.argsort(energy)[-self.selected_PCA_rank:]
    #self.denoised_wf = self.denoised_wf[:, good_features]
def denoise_step_distant(self):
    """Reduce waveforms to (time, channel) locations with high median
    energy, keeping at most 3 time points per channel."""
    # active locations with negative energy
    energy = np.median(np.square(self.wf_global), axis=0)
    good_t, good_c = np.where(energy > 0.5)
    # limit to max_timepoints per channel
    max_timepoints = 3
    unique_channels = np.unique(good_c)
    idx_keep = np.zeros(len(good_t), 'bool')
    for channel in unique_channels:
        idx_temp = np.where(good_c == channel)[0]
        if len(idx_temp) > max_timepoints:
            # keep the max_timepoints highest-energy samples on this channel
            idx_temp = idx_temp[
                np.argsort(
                    energy[good_t[idx_temp], good_c[idx_temp]]
                )[-max_timepoints:]]
        idx_keep[idx_temp] = True
    good_t = good_t[idx_keep]
    good_c = good_c[idx_keep]
    if len(good_t) == 0:
        # nothing above threshold: fall back to the single max-energy location
        good_t, good_c = np.where(energy == np.max(energy))
    self.denoised_wf = self.wf_global[:, good_t, good_c]
def denoise_step_distant2(self):
    """Alternative distant denoising: keep (time, channel) locations where
    the median template dips below -0.5, restricted to those connected to
    the template minimum, max 3 time points per channel."""
    # active locations with negative energy
    #energy = np.median(np.square(self.wf_global), axis=0)
    #template = np.median(self.wf_global, axis=0)
    #good_t, good_c = np.where(np.logical_and(energy > 0.5, template < - 0.5))
    template = np.median(self.wf_global, axis=0)
    good_t, good_c = np.where(template < -0.5)
    if len(good_t) > self.selected_PCA_rank:
        t_diff = 1
        # lowest among all
        #main_c_loc = np.where(good_c==self.channel)[0]
        #max_chan_energy = energy[good_t[main_c_loc]][:,self.channel]
        #index = main_c_loc[np.argmax(max_chan_energy)]
        index = template[good_t, good_c].argmin()
        # keep only locations connected (in time/channel space) to the minimum
        keep = connecting_points(np.vstack((good_t, good_c)).T, index, self.neighbors, t_diff)
        good_t = good_t[keep]
        good_c = good_c[keep]
        # limit to max_timepoints per channel
        max_timepoints = 3
        unique_channels = np.unique(good_c)
        idx_keep = np.zeros(len(good_t), 'bool')
        for channel in unique_channels:
            idx_temp = np.where(good_c == channel)[0]
            if len(idx_temp) > max_timepoints:
                # keep the most negative template samples on this channel
                idx_temp = idx_temp[np.argsort(
                    template[good_t[idx_temp], good_c[idx_temp]])[:max_timepoints]]
            idx_keep[idx_temp] = True
        good_t = good_t[idx_keep]
        good_c = good_c[idx_keep]
        self.denoised_wf = self.wf_global[:, good_t, good_c]
    else:
        # too few active locations: fall back to the top entries of the
        # flattened median template
        idx = np.argsort(template.reshape(-1))[-self.selected_PCA_rank:]
        self.denoised_wf = self.wf_global.reshape(self.wf_global.shape[0], -1)[:, idx]
def denoise_step_distant3(self):
    """Distant denoising used by denoise_step: restrict waveforms to the
    central time window and keep (time, channel) locations with large
    median amplitude or high MAD-based variability."""
    center_idx = slice(self.spike_size//2 - self.center_spike_size//2,
                       self.spike_size//2 + self.center_spike_size//2 + 1)
    wf_global_center = self.wf_global[:, center_idx]
    energy = np.median(wf_global_center, axis=0)
    # (commented-out exploration of an older selection heuristic)
    #max_energy = np.min(energy, axis=0)
    #main_channel_loc = np.where(self.loaded_channels == self.channel)[0][0]
    # max_energy_loc is n x 2 matrix, where each row has time point and channel info
    #th = np.max((-2, max_energy[main_channel_loc]))
    #max_energy_loc_c = np.where(max_energy <= th)[0]
    #max_energy_loc_t = energy.argmin(axis=0)[max_energy_loc_c]
    #max_energy_loc = np.hstack((max_energy_loc_t[:, np.newaxis],
    #                            max_energy_loc_c[:, np.newaxis]))
    #t_diff = 3
    #index = np.where(max_energy_loc[:, 1]== main_channel_loc)[0][0]
    #keep = connecting_points(max_energy_loc, index, self.neighbors, t_diff)
    # locations with |median amplitude| above 2 (standardized units)
    max_energy_loc = np.vstack(np.where(np.abs(energy) > 2)).T
    # also high MAD points (0.67449 converts MAD to an std estimate)
    mad_var = np.square(np.median(np.abs(wf_global_center - energy[None]),
                                  axis=0)/0.67449)
    high_mad_loc = np.vstack(np.where(mad_var > 1.5)).T
    if len(max_energy_loc) > 0 and len(high_mad_loc) > 0:
        max_energy_loc = np.unique(np.vstack((max_energy_loc, high_mad_loc)), axis=0)
    elif len(high_mad_loc) > 0:
        max_energy_loc = high_mad_loc
    if len(max_energy_loc) < self.selected_PCA_rank:
        # not enough candidates: take the largest |median| entries instead
        idx_in = np.argsort(np.abs(energy).reshape(-1))[::-1][:self.selected_PCA_rank]
        self.denoised_wf = wf_global_center.reshape(wf_global_center.shape[0], -1)[:, idx_in]
    else:
        self.denoised_wf = wf_global_center[:, max_energy_loc[:,0], max_energy_loc[:,1]]
    # (older channel-connectivity logic kept for reference)
    #if np.sum(keep) >= self.selected_PCA_rank:
    #    max_energy_loc = max_energy_loc[keep]
    #else:
    #    idx_sorted = np.argsort(
    #        energy[max_energy_loc[:,0], max_energy_loc[:,1]])[-self.selected_PCA_rank:]
    #    max_energy_loc = max_energy_loc[idx_sorted]
    # exclude main and secondary channels
    #if np.sum(~np.in1d(max_energy_loc[:,1], self.neighbor_chans)) > 0:
    #    max_energy_loc = max_energy_loc[~np.in1d(max_energy_loc[:,1], self.neighbor_chans)]
    #else:
    #    max_energy_loc = max_energy_loc[max_energy_loc[:,1]==main_channel_loc]
    # denoised wf in distant channel clustering is
    # the most active time point in each active channels
    #self.denoised_wf = self.wf_global[:, max_energy_loc[:,0], max_energy_loc[:,1]]
    #self.denoised_wf = np.zeros((self.wf_global.shape[0], len(max_energy_loc)), dtype='float32')
    #for ii in range(len(max_energy_loc)):
    #    self.denoised_wf[:, ii] = self.wf_global[:, max_energy_loc[ii,0], max_energy_loc[ii,1]]
def featurize_step(self, gen, indices_to_feat, indices_to_transform, local):
    ''' Indices hold the index of the current spike times relative all spikes

    Fits a PCA basis on denoised_wf[indices_to_feat] and projects
    denoised_wf[indices_to_transform]; returns the projected features
    (float32, rank <= selected_PCA_rank).
    '''
    if self.verbose:
        print("chan "+str(self.channel)+', gen '+str(gen)+', featurizing')
    # find high variance area.
    # Including low variance dimensions can lead to overfitting
    # (splitting based on collisions)
    rank = min(len(indices_to_feat), self.denoised_wf.shape[1], self.selected_PCA_rank)
    #stds = np.std(self.denoised_wf[indices_to_feat], axis=0)
    #good_d = np.where(stds > 1.05)[0]
    #if len(good_d) < rank:
    #    good_d = np.argsort(stds)[::-1][:rank]
    pca = PCA(n_components=rank)
    #pca.fit(self.denoised_wf[indices_to_feat][:, good_d])
    #pca_wf = pca.transform(
    #    self.denoised_wf[indices_to_transform][:, good_d]).astype('float32')
    pca.fit(self.denoised_wf[indices_to_feat])
    pca_wf = pca.transform(
        self.denoised_wf[indices_to_transform]).astype('float32')
    if gen==0 and local:
        # save gen0 distributions before triaging
        #data_to_fit = self.denoised_wf[:, good_d]
        #n_samples, n_features = data_to_fit.shape
        #pca = PCA(n_components=min(self.selected_PCA_rank, n_features))
        #pca_wf_gen0 = pca.fit_transform(data_to_fit)
        #self.pca_wf_gen0 = pca_wf_gen0.copy()
        self.pca_wf_gen0 = pca_wf.copy()
    if self.ari_flag and gen==0 and local:
        # Cat: TODO: do this only once per channel
        # Also, do not index into wf_global_allchans; that's done at completion
        #if self.wf_global_allchans.shape[1] > self.selected_PCA_rank:
        # denoise global data:
        # NOTE(review): denoise_step_distant_all_chans is defined elsewhere
        wf_global_denoised = self.denoise_step_distant_all_chans()
        # flatten data over last 2 dimensions first
        n_data, _ = wf_global_denoised.shape
        wf_allchans_2D = wf_global_denoised
        stds = np.std(wf_allchans_2D, axis=0)
        good_d = np.where(stds > 1.05)[0]
        if len(good_d) < self.selected_PCA_rank:
            good_d = np.argsort(stds)[::-1][:self.selected_PCA_rank]
        data_to_fit = wf_allchans_2D[:, good_d]
        n_samples, n_features = data_to_fit.shape
        pca = PCA(n_components=min(self.selected_PCA_rank, n_features))
        # keep original uncompressed data
        self.data_to_fit = data_to_fit
        # compress data to selectd pca rank
        self.pca_wf_allchans = pca.fit_transform(data_to_fit)
    return pca_wf
def subsample_step(self, gen, pca_wf):
    """Subsample pca_wf down to at most self.max_mfm_spikes rows using a
    coreset selection; returns pca_wf unchanged when already small enough.

    (Cleanup: the old uniform-random fallback was dead code behind an
    `if True:` branch and has been removed.)
    """
    if self.verbose:
        print("chan "+str(self.channel)+', gen '+str(gen)+', random subsample')
    if pca_wf.shape[0] > self.max_mfm_spikes:
        idx_subsampled = coreset(pca_wf, self.max_mfm_spikes)
        pca_wf = pca_wf[idx_subsampled]
    return pca_wf
def run_mfm(self, gen, pca_wf):
    """Fit the masked finite mixture model on featurized waveforms and
    return the variational parameter object (vbParam)."""
    # trivial mask/group: every spike fully observed, one group per spike
    mask = np.ones((pca_wf.shape[0], 1))
    group = np.arange(pca_wf.shape[0])
    vbParam = mfm.spikesort(pca_wf[:,:,np.newaxis],
                            mask,
                            group,
                            self.CONFIG)
    if self.verbose:
        print("chan "+ str(self.channel)+', gen '\
              +str(gen)+", "+str(vbParam.rhat.shape[1])+" clusters from ",pca_wf.shape)
    return vbParam
def knn_triage_step(self, gen, pca_wf):
    """Drop the knn-triage fraction of outlier points; return the kept
    row indices into pca_wf."""
    if self.verbose:
        print("chan "+str(self.channel)+', gen '+str(gen)+', knn triage')
    # percentile of points to keep, e.g. triage_value=0.05 -> keep 95%
    keep_pct = 100*(1-self.triage_value)
    if pca_wf.shape[0] <= 1/self.triage_value:
        # too few points for the triage fraction to remove anything
        return np.arange(pca_wf.shape[0])
    flags = knn_triage(keep_pct, pca_wf)
    return np.where(flags==1)[0]
# Cat: TODO: remove this function?! also it seems like it's setting global triage value not just local
def knn_triage_dynamic(self, gen, vbParam, pca_wf):
    """Adaptive knn triage: derive a distance threshold from synthetic
    samples drawn from the fitted mixture components, then keep spikes
    whose summed knn distance falls below it. Sets self.triage_value."""
    # only use components with enough expected spikes
    ids = np.where(vbParam.nuhat > self.min_spikes)[0]
    if ids.size <= 1:
        self.triage_value = 0
        return np.arange(pca_wf.shape[0])
    muhat = vbParam.muhat[:,ids,0].T
    cov = vbParam.invVhat[:,:,ids,0].T / vbParam.nuhat[ids,np.newaxis, np.newaxis]
    # Cat: TODO: move to CONFIG/init function
    min_spikes = min(self.min_spikes_triage, pca_wf.shape[0]//ids.size) ##needs more systematic testing, working on it
    pca_wf_temp = np.zeros([min_spikes*cov.shape[0], cov.shape[1]])
    #assignment_temp = np.zeros(min_spikes*cov.shape[0], dtype = int)
    for i in range(cov.shape[0]):
        # draw min_spikes synthetic points from each cluster's Gaussian
        pca_wf_temp[i*min_spikes:(i+1)*min_spikes]= np.random.multivariate_normal(muhat[i], cov[i], min_spikes)
        #assignment_temp[i*min_spikes:(i+1)*min_spikes] = i
    kdist_temp = knn_dist(pca_wf_temp)
    kdist_temp = kdist_temp[:,1:]
    # per-cluster 90th percentile of summed knn distance on synthetic data
    median_distances = np.zeros([cov.shape[0]])
    for i in range(median_distances.shape[0]):
        #median_distances[i] = np.median(np.median(kdist_temp[i*min_spikes:(i+1)*min_spikes], axis = 0), axis = 0)
        median_distances[i] = np.percentile(np.sum(kdist_temp[i*min_spikes:(i+1)*min_spikes], axis = 1), 90)
    ## The percentile value also needs to be tested, value of 50 and scale of 1.2 works wells
    kdist = np.sum(knn_dist(pca_wf)[:, 1:], axis=1)
    # never triage so aggressively that fewer than min_spikes survive
    min_threshold = np.percentile(kdist, 100*float(self.CONFIG.cluster.min_spikes)/len(kdist))
    threshold = max(np.median(median_distances), min_threshold)
    idx_keep = kdist <= threshold
    self.triage_value = 1.0 - idx_keep.sum()/idx_keep.size
    if np.sum(idx_keep) < self.min_spikes:
        raise ValueError("{} kept out of {}, min thresh: {}, actual threshold {}, max dist {}".format(idx_keep.sum(),idx_keep.size, min_threshold, threshold, np.max(kdist)))
    if self.verbose:
        print("chan "+str(self.channel)+', gen '+str(gen)+', '+str(np.round(self.triage_value*100))+'% triaged from adaptive knn triage')
    return np.where(idx_keep)[0]
def recover_step(self, gen, vbParam, pca_wf_all):
# for post-deconv reclustering, we can safely cluster only 10k spikes or less
idx_recovered, vbParam = self.recover_spikes(vbParam, pca_wf_all)
if self.verbose:
print ("chan "+ str(self.channel)+', gen '+str(gen)+", recovered ",
str(idx_recovered.shape[0])+ " spikes")
return idx_recovered, vbParam
def recover_spikes(self, vbParam, pca, maha_dist=1):
"""Recover spikes from the full data set that fall within a chi-square
Mahalanobis radius of at least one cluster, and prune/renormalize rhat.

Returns (idx_recovered, vbParam) where idx_recovered indexes rows of
``pca`` that are kept.  NOTE(review): the ``maha_dist`` parameter is
never used in the body — confirm whether it was meant to scale
``threshold``.
"""
N, D = pca.shape
# Cat: TODO: check if this maha thresholding recovering distance is good
# 99th-percentile chi-square radius for D-dimensional features
threshold = np.sqrt(chi2.ppf(0.99, D))
# update rhat on full data
maskedData = mfm.maskData(pca[:,:,np.newaxis], np.ones([N, 1]), np.arange(N))
vbParam.update_local(maskedData)
# calculate mahalanobis distance
maha = mfm.calc_mahalonobis(vbParam, pca[:,:,np.newaxis])
# keep any spike that is within threshold of at least one cluster
idx_recovered = np.where(~np.all(maha >= threshold, axis=1))[0]
vbParam.rhat = vbParam.rhat[idx_recovered]
# zero out low assignment vals
if True:
vbParam.rhat[vbParam.rhat < self.assignment_delete_threshold] = 0
# renormalize responsibilities so every row sums to 1 again
vbParam.rhat = vbParam.rhat/np.sum(vbParam.rhat,
1, keepdims=True)
return idx_recovered, vbParam
def calculate_stability(self, rhat):
    """Per-cluster stability score.

    For each cluster, the mean soft-assignment (responsibility) over the
    spikes that have a strictly positive responsibility for that cluster.
    Clusters with no such spikes keep a stability of 0.
    """
    n_clusters = rhat.shape[1]
    stability = np.zeros(n_clusters)
    has_weight = rhat > 0.0
    for k in range(n_clusters):
        members = has_weight[:, k]
        n_members = members.sum()
        if n_members == 0.0:
            continue
        # average of rhat restricted to its nonzero entries
        stability[k] = rhat[members, k].sum() / n_members
    return stability
def get_k_cc(self, maha, maha_thresh_min, k_target):
"""Find a Mahalanobis threshold that yields exactly ``k_target``
strongly connected components, returning (cc, threshold).

Phase 1 raises the threshold by integer steps until the component
count drops to (or below) ``k_target``; phase 2 bisects between the
last two integer thresholds.  Raises ValueError when the precondition
(``maha_thresh_min`` gives exactly k_target+1 components) is violated
or when bisection fails to converge after 1000 iterations.
"""
# it assumes that maha_thresh_min gives
# at least k+1 number of connected components
k_now = k_target + 1
if len(self.get_cc(maha, maha_thresh_min)) != k_now:
raise ValueError("something is not right")
# phase 1: coarse integer scan upwards until <= k_target components
maha_thresh = maha_thresh_min
while k_now > k_target:
maha_thresh += 1
cc = self.get_cc(maha, maha_thresh)
k_now = len(cc)
if k_now == k_target:
return cc, maha_thresh
else:
# overshot: bracket the answer between maha_thresh-1 and maha_thresh
maha_thresh_max = maha_thresh
maha_thresh_min = maha_thresh - 1
if len(self.get_cc(maha, maha_thresh_min)) <= k_target:
raise ValueError("something is not right")
# phase 2: bisection inside the bracket
ctr = 0
maha_thresh_max_init = maha_thresh_max
while True:
ctr += 1
maha_thresh = (maha_thresh_max + maha_thresh_min)/2.0
cc = self.get_cc(maha, maha_thresh)
k_now = len(cc)
if k_now == k_target:
return cc, maha_thresh
elif k_now > k_target:
maha_thresh_min = maha_thresh
elif k_now < k_target:
maha_thresh_max = maha_thresh
if ctr > 1000:
# dump diagnostics before giving up
print(k_now, k_target, maha_thresh, maha_thresh_max_init)
print(cc)
print(len(self.get_cc(maha, maha_thresh+0.001)))
print(len(self.get_cc(maha, maha_thresh-0.001)))
raise ValueError("something is not right")
def get_cc(self, maha, maha_thresh):
    """Group cluster indices into strongly connected components of the
    directed graph whose edges i->j exist where maha[i, j] < maha_thresh.

    Returns a list of components, each a list of cluster indices.
    """
    graph = nx.DiGraph()
    graph.add_nodes_from(range(maha.shape[0]))
    src, dst = np.where(maha < maha_thresh)
    graph.add_edges_from(zip(src, dst))
    return [list(units) for units in nx.strongly_connected_components(graph)]
def cluster_annealing(self, vbParam):
"""Merge unstable clusters by annealing over connected components.

Starting from K clusters, repeatedly group clusters into k_target
connected components (k_target = K-1 down to 2) of the symmetrized
Mahalanobis graph, pooling responsibilities per component, until all
components are stable (>0.9) or only two remain.

Returns (hard assignment per spike, per-component stability, cc list).
"""
N, K = vbParam.rhat.shape
stability = self.calculate_stability(vbParam.rhat)
# nothing to anneal: already binary split or everything is stable
if (K == 2) or np.all(stability > 0.9):
cc = [[k] for k in range(K)]
return vbParam.rhat.argmax(1), stability, cc
# symmetrized pairwise Mahalanobis distance between cluster centers
maha = mfm.calc_mahalonobis(vbParam, vbParam.muhat.transpose((1,0,2)))
maha = np.maximum(maha, maha.T)
#N, K = vbParam.rhat.shape
#mu = np.copy(vbParam.muhat[:,:,0].T)
#mudiff = mu[:,np.newaxis] - mu
#prec = vbParam.Vhat[:,:,:,0].T * vbParam.nuhat[:,np.newaxis, np.newaxis]
#maha = np.matmul(np.matmul(mudiff[:, :, np.newaxis], prec[:, np.newaxis]), mudiff[:, :, :, np.newaxis])[:, :, 0, 0]
# decrease number of connected components one at a time.
# in any step if all components are stables, stop and return
# otherwise, go until there are only two connected components and return it
maha_thresh_min = 0
for k_target in range(K-1, 1, -1):
# get connected components with k_target number of them
cc, maha_thresh_min = self.get_k_cc(maha, maha_thresh_min, k_target)
# calculate soft assignment for each cc
rhat_cc = np.zeros([N,len(cc)])
for i, units in enumerate(cc):
rhat_cc[:, i] = np.sum(vbParam.rhat[:, units], axis=1)
# drop tiny responsibilities, then renormalize rows
rhat_cc[rhat_cc<0.001] = 0.0
rhat_cc = rhat_cc/np.sum(rhat_cc,axis =1 ,keepdims = True)
# calculate stability for each component
# and make decision
stability = self.calculate_stability(rhat_cc)
if np.all(stability>0.9) or k_target == 2:
return rhat_cc.argmax(1), stability, cc
def get_cc_and_stability(self, vbParam):
"""Anneal clusters, then iteratively drop connected components that
have fewer than ``self.min_spikes`` spikes and re-anneal the rest.

Returns (cc_assignment, stability, idx_keep) where idx_keep maps the
surviving spikes back into the original spike ordering.
"""
cc_assignment, stability, cc = self.cluster_annealing(vbParam)
n_counts = np.zeros(len(cc), 'int32')
unique_ccs, n_counts_unique = np.unique(cc_assignment, return_counts=True)
n_counts[unique_ccs] = n_counts_unique
idx_keep = np.arange(len(cc_assignment))
# keep pruning while at least one component is too small but another is big enough
while np.min(n_counts) < self.min_spikes and np.max(n_counts) >= self.min_spikes:
cc_keep = np.where(n_counts >= self.min_spikes)[0]
idx_keep_current = np.where(np.in1d(cc_assignment, cc_keep))[0]
vbParam.rhat = vbParam.rhat[idx_keep_current]
# flatten the original cluster ids contained in the surviving components
k_keep = np.hstack([cc[c] for c in cc_keep])
if len(k_keep) > 1:
# restrict the variational parameters to the surviving clusters
vbParam.rhat = vbParam.rhat[:,k_keep]
vbParam.rhat = vbParam.rhat/np.sum(vbParam.rhat, axis=1, keepdims=True)
vbParam.muhat = vbParam.muhat[:, k_keep]
vbParam.Vhat = vbParam.Vhat[:, :, k_keep]
vbParam.nuhat = vbParam.nuhat[k_keep]
cc_assignment, stability, cc = self.cluster_annealing(vbParam)
n_counts = np.zeros(np.max(cc_assignment)+1, 'int32')
unique_ccs, n_counts_unique = np.unique(cc_assignment, return_counts=True)
n_counts[unique_ccs] = n_counts_unique
else:
# single cluster left: everything collapses to one assignment
cc_assignment = np.zeros(len(idx_keep_current), 'int32')
stability = [1]
n_counts = [len(idx_keep_current)]
idx_keep = idx_keep[idx_keep_current]
return cc_assignment, stability, idx_keep
def single_cluster_step(self, current_indices, pca_wf, local,
gen, branch, hist):
"""Finalize a leaf cluster: compute its mean template and, if its max
channel is a neighbor channel (or we are not on raw data), append the
spike indices and template to the running results.

Mutates self.indices_train / self.templates and the local/distant
bookkeeping lists; saves nothing when the max channel check fails.
"""
# exclude units whose maximum channel is not on the current
# clustered channel; but only during clustering, not during deconv
template = np.mean(self.wf_global[current_indices], axis=0)
#template = stats.trim_mean(self.wf_global[current_indices],
# 0.1, axis=0)
#template = np.mean(self.wf_global[current_indices], axis=0)
assignment = np.zeros(len(current_indices))
# channel with the largest peak-to-peak amplitude of the template
mc = self.loaded_channels[np.argmax(template.ptp(0))]
if (mc in self.neighbor_chans) or (not self.raw_data):
N = len(self.indices_train)
if self.verbose:
print("chan "+str(self.channel)+', gen '+str(gen)+", >>> cluster "+
str(N)+" saved, size: "+str(len(assignment))+"<<<")
print ("")
self.indices_train.append(self.indices_in[current_indices])
self.templates.append(template)
# save meta data only for initial local group
if local:
self.clustered_indices_local.append(current_indices)
# save the history chain for a completed unit clusterd locally
# this is by distant clustering step by appending to list
self.history_local_final.append(self.hist_local)
else:
# if distant cluster step, use indexes from local step
self.clustered_indices_distant.append(
self.clustered_indices_local[self.distant_ii][current_indices])
else:
if self.verbose:
print (" chan "+str(self.channel)+", template has maxchan "+str(mc),
" skipping ...")
def multi_cluster_step(self, current_indices, pca_wf, local, cc_assignment,
gen, branch_current, hist):
"""Split spikes by their component assignment and recursively re-run
``self.cluster`` on each sub-group at generation gen+1, extending the
branch history chain per child.
"""
# if self.plotting and gen<20:
# self.plot_clustering_scatter(gen, pca_wf, cc_assignment,
# stability, 'multi split')
# Cat: TODO: unclear how much memory this saves
pca_wf = None
for branch_next, clust in enumerate(np.unique(cc_assignment)):
idx = np.where(cc_assignment==clust)[0]
if self.verbose:
print("chan "+str(self.channel)+', gen '+str(gen)+
", reclustering cluster with "+ str(idx.shape[0]) +' spikes')
# add current branch info for child process
# Cat: TODO: this list append is not pythonic
local_hist=list(hist)
local_hist.append(branch_current)
self.cluster(current_indices[idx],local, gen+1, branch_next,
local_hist)
def get_templates_on_all_channels(self, indices_in):
"""Compute the mean waveform template over all channels for the given
spike indices, by temporarily forcing raw-data loading/alignment.

Restores ``self.raw_data`` before returning the template.
"""
self.indices_in = indices_in
local=False
# temporarily change raw_data option to True
# NOTE(review): np.copy on a scalar/bool returns a 0-d array; the flag
# is only truth-tested afterwards so this works, but a plain copy of
# the original value would be clearer — confirm raw_data's type.
self_raw_data_orig = np.copy(self.raw_data)
self.raw_data = True
self.load_waveforms(local)
self.align_step(local)
template = np.mean(self.wf_global, axis=0)
# change raw_data option back to the orignal
self.raw_data = self_raw_data_orig
return template
def check_max_chan(self, template):
    """Return True when the channel with the template's largest
    peak-to-peak amplitude is one of this cluster's neighbor channels.

    Uses the ``np.ptp`` function rather than the ``ndarray.ptp`` method,
    which was removed in NumPy 2.0, and returns a plain Python bool as
    before.
    """
    mc = np.ptp(template, 0).argmax()
    return bool(np.any(self.neighbor_chans == mc))
def save_result(self, indices_train, templates):
"""Save shifted spike trains and templates to the post-clustering .npz
file, then strip ALL attributes off this object to release memory.

After this call the instance is unusable.
"""
# Cat: TODO: note clustering is done on PCA denoised waveforms but
# templates are computed on original raw signal
# recompute templates to contain full width information...
# fixes numpy bugs
# subtract per-spike alignment shifts from the original spike times
spike_train = [self.spike_times_original[indices] - self.shifts[indices] for indices in indices_train]
# NOTE(review): the else branch below is dead code (condition is a
# literal True); it preserves a richer debug dump — confirm before removing.
if True:
np.savez(self.filename_postclustering,
spiketime=spike_train,
templates=templates
)
else:
pca_post_triage_post_recovery = np.empty(
len(self.pca_post_triage_post_recovery), dtype=object)
pca_post_triage_post_recovery[:] = self.pca_post_triage_post_recovery
vbPar_rhat = np.empty(
len(self.vbPar_rhat), dtype=object)
vbPar_rhat[:] = self.vbPar_rhat
np.savez(self.filename_postclustering,
spiketime=spike_train,
templates=templates,
gen0_fullrank = self.data_to_fit,
pca_wf_gen0=self.pca_wf_gen0,
pca_wf_gen0_allchans=self.pca_wf_allchans,
clustered_indices_local=self.clustered_indices_local,
clustered_indices_distant=self.clustered_indices_distant,
pca_post_triage_post_recovery = pca_post_triage_post_recovery,
vbPar_rhat = vbPar_rhat,
gen_label = self.gen_label,
gen_local = self.gen_local,
#vbPar_muhat = self.vbPar_muhat,
hist = self.hist,
indices_gen0=self.indices_gen0,
#spike_index_prerecluster=self.original_indices,
#templates_prerecluster=self.template_original
)
if self.verbose:
print(self.filename_postclustering)
print("**** starting spikes: {}, found # clusters: {}".format(
len(self.spike_times_original), len(spike_train)))
# Cat: TODO: are these redundant?
# delete every instance attribute to free memory held by large arrays
keys = []
for key in self.__dict__:
keys.append(key)
for key in keys:
delattr(self, key)
def save_result_local(self, indices_train, templates, fname_save):
    """Shift each spike train by its alignment offsets and save the
    trains plus their templates to ``fname_save`` as an .npz archive.
    """
    spike_train = []
    for indices in indices_train:
        spike_train.append(self.spike_times_original[indices] - self.shifts[indices])
    np.savez(fname_save, spiketime=spike_train, templates=templates)
def knn_triage(th, pca_wf):
    """Boolean keep-mask for points whose summed distance to their 5
    nearest neighbours (k=6 query includes self at distance 0) lies at or
    below the ``th``-th percentile of that statistic.
    """
    neighbour_dists, _ = cKDTree(pca_wf).query(pca_wf, k=6)
    total_dist = neighbour_dists.sum(axis=1)
    return total_dist <= np.percentile(total_dist, th)
def knn_dist(pca_wf):
    """Distances from each point to its 30 nearest neighbours; column 0
    is the point itself (distance 0)."""
    dist, _ = cKDTree(pca_wf).query(pca_wf, k=30)
    return dist
def connecting_points(points, index, neighbors, t_diff, keep=None):
    """Flood-fill the connected component containing ``points[index]``.

    Two points connect when their channels are adjacent per the
    ``neighbors`` boolean matrix and their times differ by at most
    ``t_diff``.  ``points`` is an (N, 2) array of (time, channel) rows.
    Mutates and returns the boolean ``keep`` mask (allocated when None).

    Fix: the original recursed once per connected point, which raises
    RecursionError on large components; this version uses an explicit
    stack with identical results.
    """
    if keep is None:
        keep = np.zeros(len(points), 'bool')
    stack = [index]
    while stack:
        cur = stack.pop()
        if keep[cur]:
            continue
        keep[cur] = True
        # points on channels adjacent to the current point's channel
        same_area = np.where(neighbors[points[cur, 1]][points[:, 1]])[0]
        # ... of those, the ones within t_diff in time
        close = same_area[np.abs(points[same_area, 0] - points[cur, 0]) <= t_diff]
        stack.extend(close.tolist())
    return keep
def coreset(data, m, K=3, delta=0.01):
"""Sample an m-point coreset of ``data`` using sensitivity-based
importance sampling seeded by a k-means++ initialization.

Returns the sampled row indices (weights computation is commented out).
"""
# number of k-means++ restarts needed for failure probability delta
p = int(np.ceil(np.log2(1/delta)))
B = kmeans_init(data, K, p)
a = 16*(np.log2(K) + 2)
N = data.shape[0]
# squared distance of every point to every seed center
dists = np.sum(np.square(data[:, None] - B[None]), axis=2)
label = dists.argmin(1)
dists = dists.min(1)
dists_sum = np.sum(dists)
dists_sum_k = np.zeros(K)
for j in range(N):
dists_sum_k[label[j]] += dists[j]
# NOTE(review): if any of the K seed clusters ends up empty, np.unique
# returns fewer than K counts and the division below misaligns — confirm
# whether kmeans_init guarantees non-empty clusters.
_, n_data_k = np.unique(label, return_counts=True)
# per-point sensitivity upper bound -> sampling distribution
s = a*dists + 2*(a*dists_sum_k/n_data_k + dists_sum/n_data_k)[label]
p = s/sum(s)
idx_coreset = np.random.choice(N, size=m, replace=False, p=p)
#weights = 1/(m*p[idx_coreset])
#weights[weights<1] = 1
#weights = np.ones(m)
return idx_coreset#, weights
def kmeans_init(data, K, n_iter):
"""k-means++ seeding: run ``n_iter`` independent initializations of K
centers and return the set with the smallest total squared distance.
"""
N, D = data.shape
centers = np.zeros((n_iter, K, D))
dists = np.zeros(n_iter)
for ctr in range(n_iter):
# first center: uniform random point
ii = np.random.choice(N, size=1, replace=True, p=np.ones(N)/float(N))
C = data[ii]
L = np.zeros(N, 'int16')
for i in range(1, K):
# NOTE(review): D (the dimensionality) is shadowed here by the
# distance vector; harmless because D is not reused, but rename
# would be clearer.
D = data - C[L]
D = np.sum(D*D, axis=1) #L2 dist
# sample the next center proportionally to squared distance
ii = np.random.choice(N, size=1, replace=True, p=D/np.sum(D))
C = np.concatenate((C, data[ii]), axis=0)
# re-label each point to its nearest current center
L = np.argmax(
2 * np.dot(C, data.T) - np.sum(C*C, axis=1)[:, np.newaxis],
axis=0)
centers[ctr] = C
D = data - C[L]
dists[ctr] = np.sum(np.square(D))
return centers[np.argmin(dists)]
| apache-2.0 |
benhoff/vexbot | setup.py | 2 | 3549 | import os
import re
from setuptools import find_packages, setup
from vexbot.extension_metadata import extensions
# Extract __version__ from the package without importing it.
VERSIONFILE = 'vexbot/_version.py'
# NOTE(review): file handle is never closed; a `with open(...)` block
# would be safer — confirm before changing.
verstrline = open(VERSIONFILE, 'rt').read()
VSRE = r"^__version__ = ['\"]([^'\"]*)['\"]"
mo = re.search(VSRE, verstrline, re.M)
if mo:
verstr = mo.group(1)
else:
raise RuntimeError("Unable to find version string in {}".format(VERSIONFILE))
# directory = os.path.abspath(os.path.dirname(__file__))
"""
with open(os.path.join(directory, 'README.rst')) as f:
long_description = f.read()
"""
# Build the 'vexbot_extensions' entry-point strings: "name=path [extras]".
str_form = '{}={}{}'
extensions_ = []
for name, extension in extensions.items():
extras = extension.get('extras')
if extras is None:
extras = ''
# FIXME: This will error out weirdly if there's not a list
else:
extras = ', '.join(extras)
extras = ' [' + extras + ']'
line = str_form.format(name, extension['path'], extras)
extensions_.append(line)
setup(
name="vexbot",
version=verstr,
description='Python personal assistant',
# long_description=long_description,
url='https://github.com/benhoff/vexbot',
license='GPL3',
classifiers=[
'Development Status :: 2 - Pre-Alpha',
'Intended Audience :: Developers',
'License :: OSI Approved :: GNU General Public License v3 (GPLv3)',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Operating System :: POSIX :: Linux'],
author='Ben Hoff',
author_email='beohoff@gmail.com',
entry_points={'console_scripts': ['vexbot=vexbot.adapters.shell.__main__:main',
'vexbot_robot=vexbot.__main__:main',
'vexbot_irc=vexbot.adapters.irc.__main__:main',
'vexbot_xmpp=vexbot.adapters.xmpp:main',
'vexbot_socket_io=vexbot.adapters.socket_io.__main__:main',
'vexbot_youtube=vexbot.adapters.youtube:main',
'vexbot_stackoverflow=vexbot.adapters.stackoverflow:main',
'vexbot_generate_certificates=vexbot.util.generate_certificates:main',
'vexbot_generate_unit_file=vexbot.util.generate_config_file:main'],
'vexbot_extensions': extensions_},
packages=find_packages(), # exclude=['docs', 'tests']
install_requires=[
# 'pluginmanager>=0.4.1',
'pyzmq',
'vexmessage>=0.4.0',
'rx',
'tblib', # traceback serilization
'tornado', # zmq asnyc framework
'prompt_toolkit>=2.0.0', # shell
],
extras_require={
'nlp': ['wheel', 'spacy', 'sklearn', 'sklearn_crfsuite', 'scipy'],
'socket_io': ['requests', 'websocket-client'],
'summarization': ['gensim', 'newspaper3k'],
'youtube': ['google-api-python-client'],
'dev': ['flake8', 'twine', 'wheel', 'pygments', 'sphinx'],
'xmpp': ['sleekxmpp', 'dnspython'],
'process_name': ['setproctitle'],
'speechtotext': ['speechtotext'],
'digitalocean': ['python-digitalocean'],
'process_manager': ['pydbus'],
'command_line': ['pygments'],
'microphone': ['microphone'],
'database': ['vexstorage'],
'gui': ['chatimusmaximus'],
'entity': ['duckling'],
'irc': ['irc3'],
'system': ['psutil'],
}
)
| gpl-3.0 |
DavidTingley/ephys-processing-pipeline | installation/klustaviewa-0.3.0/build/lib.linux-x86_64-2.7/kwiklib/dataio/tests/test_klustersloader.py | 2 | 17397 | """Unit tests for loader module."""
# -----------------------------------------------------------------------------
# Imports
# -----------------------------------------------------------------------------
import os
from collections import Counter
import numpy as np
import numpy.random as rnd
import pandas as pd
import shutil
from nose.tools import with_setup
from kwiklib.dataio.tests.mock_data import (
nspikes, nclusters, nsamples, nchannels, fetdim, TEST_FOLDER,
setup, teardown)
from kwiklib.dataio import (KlustersLoader, read_clusters, save_clusters,
find_filename, find_indices, filename_to_triplet, triplet_to_filename,
read_cluster_info, save_cluster_info, read_group_info, save_group_info,
renumber_clusters, reorder, convert_to_clu, select, get_indices,
check_dtype, check_shape, get_array, load_text,
find_filename_or_new)
# -----------------------------------------------------------------------------
# Tests
# -----------------------------------------------------------------------------
def test_find_filename():
    """find_filename resolves the matching .spk file for various .clu paths."""
    dir = '/my/path/'
    files = [
        'blabla.aclu.1',
        'blabla_test.aclu.1',
        'blabla_test2.aclu.1',
        'blabla_test3.aclu.3',
        'blabla.spk.1',
        'blabla_test.spk.1',
        'blabla_test.spk.1',
    ]
    cases = [
        ('blabla.clu.1', dir + 'blabla.spk.1'),
        ('blabla_test.clu.1', dir + 'blabla_test.spk.1'),
        ('blabla_test2.clu.1', dir + 'blabla_test.spk.1'),
        ('blabla_test3.clu.1', dir + 'blabla_test.spk.1'),
        ('blabla_test3.clu.3', None),
    ]
    for requested, expected in cases:
        result = find_filename(dir + requested, 'spk', files=files, dir=dir)
        assert result == expected
def test_find_filename2():
    """find_filename also resolves the .spk file from a bare .xml path."""
    dir = '/my/path/'
    files = [
        'blabla.aclu.2',
        'blabla_test.aclu.2',
        'blabla_test2.aclu.2',
        'blabla_test3.aclu.3',
        'blabla.spk.2',
        'blabla_test.spk.2',
    ]
    result = find_filename(dir + 'blabla_test.xml', 'spk', files=files, dir=dir)
    assert result == dir + 'blabla_test.spk.2'
def test_find_filename_or_new():
    """find_filename_or_new builds a fresh .acluinfo name when none exists."""
    dir = '/my/path/'
    files = [
        'blabla.aclu.1',
        'blabla_test.aclu.1',
        'blabla_test2.aclu.1',
        'blabla_test3.aclu.3',
        'blabla.spk.1',
        'blabla_test.spk.1',
        'blabla_test.spk.1',
    ]
    result = find_filename_or_new(dir + 'blabla.xml', 'acluinfo',
                                  files=files, dir=dir)
    assert result == '/my/path/blabla.acluinfo.1'
def test_find_indices():
    """find_indices lists the suffix indices present for a base name."""
    dir = '/my/path/'
    files = [
        'blabla.aclu.2',
        'blabla_test.aclu.2',
        'blabla_test.spk.4',
        'blabla_test2.aclu.2',
        'blabla.aclu.9',
        'blabla_test3.aclu.3',
        'blabla.spk.2',
        'blabla_test.spk.2',
    ]
    assert find_indices(dir + 'blabla_test.xml', files=files, dir=dir) == [2, 4]
def test_triplets():
    """filename <-> triplet conversion round-trips and supports index swaps."""
    original = 'my/path/blabla.aclu.2'
    parts = filename_to_triplet(original)
    assert triplet_to_filename(parts) == original
    # Swapping the index component rebuilds the filename with the new index.
    assert triplet_to_filename(parts[:2] + ('34',)) == 'my/path/blabla.aclu.34'
def test_clusters():
    """Cluster arrays survive a save/load round-trip, header included."""
    dir = TEST_FOLDER
    clufile = os.path.join(dir, 'test.aclu.1')
    clufile2 = os.path.join(dir, 'test.aclu.1.saved')
    clusters = read_clusters(clufile)
    assert clusters.dtype == np.int32
    assert clusters.shape == (1000,)
    # Round-trip through disk.
    save_clusters(clufile2, clusters)
    reloaded = read_clusters(clufile2)
    assert np.array_equal(clusters, reloaded)
    # The header row (skiprows=0 keeps it) must round-trip as well.
    with_header = load_text(clufile, np.int32, skiprows=0)
    with_header2 = load_text(clufile2, np.int32, skiprows=0)
    assert np.array_equal(with_header, with_header2)
def test_reorder():
    """reorder maps each cluster id to its position in the permutation."""
    clusters = np.random.randint(size=1000, low=10, high=100)
    unique_ids = np.unique(clusters)
    permutation = unique_ids[np.random.permutation(len(unique_ids))]
    reordered = reorder(clusters, permutation)
    # Pick one id and check its spikes received the permuted position.
    chosen = unique_ids[len(unique_ids) // 2]
    new_label = np.nonzero(permutation == chosen)[0][0]
    assert np.all(reordered[clusters == chosen] == new_label)
def te_SKIP_st_renumber_clusters():
"""DISABLED test (name deliberately mangled so nose skips it): checks
that renumber_clusters reassigns ids contiguously starting at 2, ordered
by group, and carries the color/group info over to the new numbering.
"""
# Create clusters.
clusters = np.random.randint(size=20, low=10, high=100)
clusters_unique = np.unique(clusters)
n = len(clusters_unique)
# Create cluster info.
cluster_info = np.zeros((n, 3), dtype=np.int32)
cluster_info[:, 0] = clusters_unique
cluster_info[:, 1] = np.mod(np.arange(n, dtype=np.int32), 35) + 1
# Set groups.
k = n // 3
cluster_info[:k, 2] = 1
cluster_info[k:2 * n // 3, 2] = 0
cluster_info[2 * k:, 2] = 2
cluster_info[n // 2, 2] = 1
cluster_info = pd.DataFrame({
'color': cluster_info[:, 1],
'group': cluster_info[:, 2]},
dtype=np.int32, index=cluster_info[:, 0])
# Renumber
clusters_renumbered, cluster_info_renumbered = renumber_clusters(clusters,
cluster_info)
# Test.
c0 = clusters_unique[k] # group 0
c1 = clusters_unique[0] # group 1
c2 = clusters_unique[2 * k] # group 2
cm = clusters_unique[n // 2] # group 1
c0next = clusters_unique[k + 1]
c1next = clusters_unique[0 + 1]
c2next = clusters_unique[2 * k + 1]
# New order:
# c0 ... cm-1, cm+1, ..., c2-1, c1, ..., c0-1, cm, c2, ...
assert np.array_equal(clusters == c0, clusters_renumbered == 0 + 2)
assert np.array_equal(clusters == c0next,
clusters_renumbered == 1 + 2)
assert np.array_equal(clusters == c1, clusters_renumbered == k - 1 + 2)
assert np.array_equal(clusters == c1next,
clusters_renumbered == k + 2)
assert np.array_equal(clusters == c2, clusters_renumbered == 2 * k + 2)
assert np.array_equal(clusters == c2next,
clusters_renumbered == 2 * k + 1 + 2)
assert np.array_equal(get_indices(cluster_info_renumbered),
np.arange(n) + 2)
# Increasing groups with the new numbering.
assert np.all(np.diff(get_array(cluster_info_renumbered)[:,1]) >= 0)
assert np.all(select(cluster_info_renumbered, 0 + 2) ==
select(cluster_info, c0))
assert np.all(select(cluster_info_renumbered, 1 + 2) ==
select(cluster_info, c0next))
assert np.all(select(cluster_info_renumbered, k - 1 + 2) ==
select(cluster_info, c1))
assert np.all(select(cluster_info_renumbered, k + 2) ==
select(cluster_info, c1next))
assert np.all(select(cluster_info_renumbered, 2 * k + 2) ==
select(cluster_info, c2))
assert np.all(select(cluster_info_renumbered, 2 * k + 1 + 2) ==
select(cluster_info, c2next))
def test_convert_to_clu():
"""convert_to_clu must map noise-group clusters to 0 and MUA-group
clusters to 1 in the exported .clu numbering."""
clusters = np.random.randint(size=1000, low=10, high=100)
# force the reserved ids 2 and 3 to exist in the cluster array
clusters0 = clusters == 10
clusters1 = clusters == 20
clusters[clusters0] = 2
clusters[clusters1] = 3
clusters_unique = np.unique(clusters)
n = len(clusters_unique)
# random group (0=noise, 1=MUA, 2/3=sorted) per cluster
cluster_groups = np.random.randint(size=n, low=0, high=4)
noise = np.in1d(clusters, clusters_unique[np.nonzero(cluster_groups == 0)[0]])
mua = np.in1d(clusters, clusters_unique[np.nonzero(cluster_groups == 1)[0]])
cluster_info = pd.DataFrame({'group': cluster_groups,
'color': np.zeros(n, dtype=np.int32)},
index=clusters_unique,
dtype=np.int32)
clusters_new = convert_to_clu(clusters, cluster_info['group'])
assert np.array_equal(clusters_new == 0, noise)
assert np.array_equal(clusters_new == 1, mua)
def test_cluster_info():
    """Per-cluster color/group info round-trips through disk."""
    dir = TEST_FOLDER
    clufile = os.path.join(dir, 'test.aclu.1')
    cluinfofile = os.path.join(dir, 'test.acluinfo.1')
    clusters = read_clusters(clufile)
    indices = np.unique(clusters)
    colors = np.random.randint(low=0, high=10, size=len(indices))
    groups = np.random.randint(low=0, high=2, size=len(indices))
    cluster_info = pd.DataFrame({'color': pd.Series(colors, index=indices),
                                 'group': pd.Series(groups, index=indices)})
    save_cluster_info(cluinfofile, cluster_info)
    reloaded = read_cluster_info(cluinfofile)
    assert np.array_equal(cluster_info.values, reloaded.values)
def test_group_info():
    """The (index, name) group table round-trips through disk."""
    groupinfofile = os.path.join(TEST_FOLDER, 'test.groups.1')
    table = np.zeros((4, 2), dtype=object)
    table[:, 0] = np.arange(4) + 1
    table[:, 1] = np.array(['Noise', 'MUA', 'Good', 'Unsorted'],
                           dtype=object)
    group_info = pd.DataFrame(table)
    save_group_info(groupinfofile, group_info)
    reloaded = read_group_info(groupinfofile)
    assert np.array_equal(group_info.values, reloaded.values)
def test_klusters_loader_1():
"""Smoke-test KlustersLoader on the mock data set: every accessor
returns an array of the expected shape and dtype."""
# Open the mock data.
dir = TEST_FOLDER
xmlfile = os.path.join(dir, 'test.xml')
l = KlustersLoader(filename=xmlfile)
# Get full data sets.
features = l.get_features()
# features_some = l.get_some_features()
masks = l.get_masks()
waveforms = l.get_waveforms()
clusters = l.get_clusters()
spiketimes = l.get_spiketimes()
nclusters = len(Counter(clusters))
probe = l.get_probe()
cluster_colors = l.get_cluster_colors()
cluster_groups = l.get_cluster_groups()
group_colors = l.get_group_colors()
group_names = l.get_group_names()
cluster_sizes = l.get_cluster_sizes()
# Check the shape of the data sets.
# ---------------------------------
assert check_shape(features, (nspikes, nchannels * fetdim + 1))
# assert features_some.shape[1] == nchannels * fetdim + 1
assert check_shape(masks, (nspikes, nchannels))
assert check_shape(waveforms, (nspikes, nsamples, nchannels))
assert check_shape(clusters, (nspikes,))
assert check_shape(spiketimes, (nspikes,))
assert check_shape(probe, (nchannels, 2))
assert check_shape(cluster_colors, (nclusters,))
assert check_shape(cluster_groups, (nclusters,))
assert check_shape(group_colors, (4,))
assert check_shape(group_names, (4,))
assert check_shape(cluster_sizes, (nclusters,))
# Check the data type of the data sets.
# -------------------------------------
assert check_dtype(features, np.float32)
assert check_dtype(masks, np.float32)
# HACK: Panel has no dtype(s) attribute
# assert check_dtype(waveforms, np.float32)
assert check_dtype(clusters, np.int32)
assert check_dtype(spiketimes, np.float32)
assert check_dtype(probe, np.float32)
assert check_dtype(cluster_colors, np.int32)
assert check_dtype(cluster_groups, np.int32)
assert check_dtype(group_colors, np.int32)
assert check_dtype(group_names, object)
assert check_dtype(cluster_sizes, np.int32)
l.close()
def test_klusters_loader_2():
"""Selecting one cluster restricts every accessor to that cluster's
spikes, and per-spike waveform sub-selection stays consistent."""
# Open the mock data.
dir = TEST_FOLDER
xmlfile = os.path.join(dir, 'test.xml')
l = KlustersLoader(filename=xmlfile)
# Get full data sets.
features = l.get_features()
masks = l.get_masks()
waveforms = l.get_waveforms()
clusters = l.get_clusters()
spiketimes = l.get_spiketimes()
nclusters = len(Counter(clusters))
probe = l.get_probe()
cluster_colors = l.get_cluster_colors()
cluster_groups = l.get_cluster_groups()
group_colors = l.get_group_colors()
group_names = l.get_group_names()
cluster_sizes = l.get_cluster_sizes()
# Check selection.
# ----------------
# NOTE(review): Python-2 idiom — under Python 3 `/` yields a float
# index; confirm the intended interpreter (should likely be `//`).
index = nspikes / 2
waveform = select(waveforms, index)
cluster = clusters[index]
spikes_in_cluster = np.nonzero(clusters == cluster)[0]
nspikes_in_cluster = len(spikes_in_cluster)
l.select(clusters=[cluster])
# Check the size of the selected data.
# ------------------------------------
assert check_shape(l.get_features(), (nspikes_in_cluster,
nchannels * fetdim + 1))
assert check_shape(l.get_masks(full=True), (nspikes_in_cluster,
nchannels * fetdim + 1))
assert check_shape(l.get_waveforms(),
(nspikes_in_cluster, nsamples, nchannels))
assert check_shape(l.get_clusters(), (nspikes_in_cluster,))
assert check_shape(l.get_spiketimes(), (nspikes_in_cluster,))
# Check waveform sub selection.
# -----------------------------
waveforms_selected = l.get_waveforms()
assert np.array_equal(get_array(select(waveforms_selected, index)),
get_array(waveform))
l.close()
def test_klusters_loader_control():
"""Exercise the loader's mutation API: reassigning spikes, changing
cluster/group colors, groups, and names, and adding/removing clusters
and groups.  Steps are order-dependent."""
# Open the mock data.
dir = TEST_FOLDER
xmlfile = os.path.join(dir, 'test.xml')
l = KlustersLoader(filename=xmlfile)
# Take all spikes in cluster 3.
spikes = get_indices(l.get_clusters(clusters=3))
# Put them in cluster 4.
l.set_cluster(spikes, 4)
spikes_new = get_indices(l.get_clusters(clusters=4))
# Ensure all spikes in old cluster 3 are now in cluster 4.
assert np.all(np.in1d(spikes, spikes_new))
# Change cluster groups.
clusters = [2, 3, 4]
group = 0
l.set_cluster_groups(clusters, group)
groups = l.get_cluster_groups(clusters)
assert np.all(groups == group)
# Change cluster colors.
clusters = [2, 3, 4]
color = 12
l.set_cluster_colors(clusters, color)
colors = l.get_cluster_colors(clusters)
assert np.all(colors == color)
# Change group name.
group = 0
name = l.get_group_names(group)
name_new = 'Noise new'
assert name == 'Noise'
l.set_group_names(group, name_new)
assert l.get_group_names(group) == name_new
# Change group color.
groups = [1, 2]
colors = l.get_group_colors(groups)
color_new = 10
l.set_group_colors(groups, color_new)
assert np.all(l.get_group_colors(groups) == color_new)
# Add cluster and group.
spikes = get_indices(l.get_clusters(clusters=3))[:10]
# Create new group 100.
l.add_group(100, 'New group', 10)
# Create new cluster 10000 and put it in group 100.
l.add_cluster(10000, 100, 10)
# Put some spikes in the new cluster.
l.set_cluster(spikes, 10000)
clusters = l.get_clusters(spikes=spikes)
assert np.all(clusters == 10000)
groups = l.get_cluster_groups(10000)
assert groups == 100
# Move the spikes back out before deleting the temporary cluster.
l.set_cluster(spikes, 2)
# Remove the new cluster and group.
l.remove_cluster(10000)
l.remove_group(100)
assert np.all(~np.in1d(10000, l.get_clusters()))
assert np.all(~np.in1d(100, l.get_cluster_groups()))
l.close()
@with_setup(setup)
def test_klusters_save():
"""WARNING: this test should occur at the end of the module since it
changes the mock data sets.

Assigns alternating spikes to clusters 2/3, sets colors/groups, saves,
and verifies the persisted .aclu/.acluinfo files reflect the changes.
"""
# Open the mock data.
dir = TEST_FOLDER
xmlfile = os.path.join(dir, 'test.xml')
l = KlustersLoader(filename=xmlfile)
clusters = l.get_clusters()
cluster_colors = l.get_cluster_colors()
cluster_groups = l.get_cluster_groups()
group_colors = l.get_group_colors()
group_names = l.get_group_names()
# Set clusters.
indices = get_indices(clusters)
l.set_cluster(indices[::2], 2)
l.set_cluster(indices[1::2], 3)
# Set cluster info.
cluster_indices = l.get_clusters_unique()
l.set_cluster_colors(cluster_indices[::2], 10)
l.set_cluster_colors(cluster_indices[1::2], 20)
l.set_cluster_groups(cluster_indices[::2], 1)
l.set_cluster_groups(cluster_indices[1::2], 0)
# Save.
l.remove_empty_clusters()
l.save()
# Re-read what was written to disk and verify it.
clusters = read_clusters(l.filename_aclu)
cluster_info = read_cluster_info(l.filename_acluinfo)
assert np.all(clusters[::2] == 2)
assert np.all(clusters[1::2] == 3)
assert np.array_equal(cluster_info.index, cluster_indices)
assert np.all(cluster_info.values[::2, 0] == 10)
assert np.all(cluster_info.values[1::2, 0] == 20)
assert np.all(cluster_info.values[::2, 1] == 1)
assert np.all(cluster_info.values[1::2, 1] == 0)
l.close()
| gpl-3.0 |
zigahertz/2013-Sep-HR-ML-sprint | py/randomforest.py | 1 | 3790 | """ random forest """
import numpy as np
import csv as csv
from sklearn.ensemble import RandomForestClassifier
# NOTE(review): this script is Python 2 (`.next()`, `xrange`, `print`
# statements, text csv opened in 'rb') — it will not run under Python 3
# without porting.
csv_file_object = csv.reader(open('train.csv', 'rb')) #Load in the training csv file
header = csv_file_object.next() #Skip the first line as it is a header
train_data=[] #Create a variable called 'train_data'
for row in csv_file_object: #Step through each row in the csv file
train_data.append(row[1:]) #adding each row to the data variable
train_data = np.array(train_data) #Then convert from a list to an array
#I need to convert all strings to integer classifiers:
#Male = 1, female = 0:
train_data[train_data[0::,3]=='male',3] = 1
train_data[train_data[0::,3]=='female',3] = 0
#embark c=0, s=1, q=2
train_data[train_data[0::,10] =='C',10] = 0
train_data[train_data[0::,10] =='S',10] = 1
train_data[train_data[0::,10] =='Q',10] = 2
#I need to fill in the gaps of the data and make it complete.
#So where there is no price, I will assume price on median of that class
#Where there is no age I will give median of all ages
#All the ages with no data make the median of the data
train_data[train_data[0::,4] == '',4] = np.median(train_data[train_data[0::,4]\
!= '',4].astype(np.float))
#All missing embarks just make them embark from most common place
train_data[train_data[0::,10] == '',10] = np.round(np.mean(train_data[train_data[0::,10]\
!= '',10].astype(np.float)))
train_data = np.delete(train_data,[2,7,9],1) #remove the name data, cabin and ticket
#I need to do the same with the test data now so that the columns are in the same
#as the training data
test_file_object = csv.reader(open('test.csv', 'rb')) #Load in the test csv file
header = test_file_object.next() #Skip the first line as it is a header
test_data=[] #Create a variable called 'test_data'
ids = []
for row in test_file_object: #Step through each row in the csv file
ids.append(row[0])
test_data.append(row[1:]) #adding each row to the data variable
test_data = np.array(test_data) #Then convert from a list to an array
#I need to convert all strings to integer classifiers:
#Male = 1, female = 0:
test_data[test_data[0::,2]=='male',2] = 1
test_data[test_data[0::,2]=='female',2] = 0
#ebark c=0, s=1, q=2
test_data[test_data[0::,9] =='C',9] = 0 #Note this is not ideal, in more complex 3 is not 3 tmes better than 1 than 2 is 2 times better than 1
test_data[test_data[0::,9] =='S',9] = 1
test_data[test_data[0::,9] =='Q',9] = 2
#All the ages with no data make the median of the data
test_data[test_data[0::,3] == '',3] = np.median(test_data[test_data[0::,3]\
!= '',3].astype(np.float))
#All missing embarks just make them embark from most common place
test_data[test_data[0::,9] == '',9] = np.round(np.mean(test_data[test_data[0::,9]\
!= '',9].astype(np.float)))
#All the missing prices assume median of their respective class
for i in xrange(np.size(test_data[0::,0])):
if test_data[i,7] == '':
test_data[i,7] = np.median(test_data[(test_data[0::,7] != '') &\
(test_data[0::,0] == test_data[i,0])\
,7].astype(np.float))
test_data = np.delete(test_data,[1,6,8],1) #remove the name data, cabin and ticket
#The data is now ready to go. So lets train then test!
print 'Training '
forest = RandomForestClassifier(n_estimators=100)
forest = forest.fit(train_data[0::,1::],\
train_data[0::,0])
print 'Predicting'
output = forest.predict(test_data)
# Write (PassengerId, Survived) pairs to the submission file.
open_file_object = csv.writer(open("randomforest.csv", "wb"))
open_file_object.writerow(["PassengerId","Survived"])
open_file_object.writerows(zip(ids, output))
| mit |
Lawrence-Liu/scikit-learn | examples/linear_model/plot_ols_3d.py | 350 | 2040 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=========================================================
Sparsity Example: Fitting only features 1 and 2
=========================================================
Features 1 and 2 of the diabetes-dataset are fitted and
plotted below. It illustrates that although feature 2
has a strong coefficient on the full model, it does not
give us much regarding `y` when compared to just feature 1
"""
print(__doc__)
# Code source: Gaël Varoquaux
# Modified for documentation by Jaques Grobler
# License: BSD 3 clause
import matplotlib.pyplot as plt
import numpy as np
from mpl_toolkits.mplot3d import Axes3D
from sklearn import datasets, linear_model
# Load the diabetes dataset and keep only features 0 and 1.
diabetes = datasets.load_diabetes()
indices = (0, 1)
# Hold out the last 20 samples as a test set.
X_train = diabetes.data[:-20, indices]
X_test = diabetes.data[-20:, indices]
y_train = diabetes.target[:-20]
y_test = diabetes.target[-20:]
# Fit an ordinary least squares model on the two selected features.
ols = linear_model.LinearRegression()
ols.fit(X_train, y_train)
###############################################################################
# Plot the figure
def plot_figs(fig_num, elev, azim, X_train, clf):
    """Draw one 3D view of the training points and the fitted OLS plane.

    Note: reads the module-level ``y_train`` for the scatter targets.
    """
    fig = plt.figure(fig_num, figsize=(4, 3))
    plt.clf()
    ax = Axes3D(fig, elev=elev, azim=azim)

    # Training samples as black crosses.
    ax.scatter(X_train[:, 0], X_train[:, 1], y_train, c='k', marker='+')

    # Evaluate the model at the four corners of a small square in feature
    # space and draw the resulting prediction surface.
    grid_x = np.array([[-.1, -.1], [.15, .15]])
    grid_y = np.array([[-.1, .15], [-.1, .15]])
    corners = np.array([[-.1, -.1, .15, .15],
                        [-.1, .15, -.1, .15]]).T
    surface_z = clf.predict(corners).reshape((2, 2))
    ax.plot_surface(grid_x, grid_y, surface_z, alpha=.5)

    ax.set_xlabel('X_1')
    ax.set_ylabel('X_2')
    ax.set_zlabel('Y')
    # Hide tick labels on all three axes.
    ax.w_xaxis.set_ticklabels([])
    ax.w_yaxis.set_ticklabels([])
    ax.w_zaxis.set_ticklabels([])
#Generate the three different figures from different views
# View 1: from above.
elev = 43.5
azim = -110
plot_figs(1, elev, azim, X_train, ols)
# View 2: edge-on along azimuth 0.
elev = -.5
azim = 0
plot_figs(2, elev, azim, X_train, ols)
# View 3: edge-on along azimuth 90.
elev = -.5
azim = 90
plot_figs(3, elev, azim, X_train, ols)
plt.show()
| bsd-3-clause |
andim/scipy | scipy/stats/morestats.py | 3 | 92737 | # Author: Travis Oliphant, 2002
#
# Further updates and enhancements by many SciPy developers.
#
from __future__ import division, print_function, absolute_import
import math
import warnings
from collections import namedtuple
import numpy as np
from numpy import (isscalar, r_, log, around, unique, asarray,
zeros, arange, sort, amin, amax, any, atleast_1d,
sqrt, ceil, floor, array, poly1d, compress,
pi, exp, ravel, angle, count_nonzero)
from numpy.testing.decorators import setastest
from scipy._lib.six import string_types
from scipy import optimize
from scipy import special
from . import statlib
from . import stats
from .stats import find_repeats
from .contingency import chi2_contingency
from . import distributions
from ._distn_infrastructure import rv_generic
__all__ = ['mvsdist',
'bayes_mvs', 'kstat', 'kstatvar', 'probplot', 'ppcc_max', 'ppcc_plot',
'boxcox_llf', 'boxcox', 'boxcox_normmax', 'boxcox_normplot',
'shapiro', 'anderson', 'ansari', 'bartlett', 'levene', 'binom_test',
'fligner', 'mood', 'wilcoxon', 'median_test',
'pdf_fromgamma', 'circmean', 'circvar', 'circstd', 'anderson_ksamp'
]
# Lightweight result containers returned by `bayes_mvs`: each pairs a point
# estimate (`statistic`) with its confidence interval (`minmax`).
Mean = namedtuple('Mean', ('statistic', 'minmax'))
Variance = namedtuple('Variance', ('statistic', 'minmax'))
Std_dev = namedtuple('Std_dev', ('statistic', 'minmax'))
def bayes_mvs(data, alpha=0.90):
    r"""
    Bayesian confidence intervals for the mean, var, and std.

    Parameters
    ----------
    data : array_like
        Input data, if multi-dimensional it is flattened to 1-D by
        `bayes_mvs`. Requires 2 or more data points.
    alpha : float, optional
        Probability that the returned confidence interval contains
        the true parameter.

    Returns
    -------
    mean_cntr, var_cntr, std_cntr : tuple
        The three results are for the mean, variance and standard
        deviation, respectively. Each result is a tuple of the form::

            (center, (lower, upper))

        with `center` the mean of the conditional pdf of the value given
        the data, and `(lower, upper)` a confidence interval, centered on
        the median, containing the estimate to a probability ``alpha``.

    Raises
    ------
    ValueError
        If `alpha` is not strictly between 0 and 1, or if `data` has
        fewer than 2 points (raised by `mvsdist`).

    See Also
    --------
    mvsdist

    Notes
    -----
    Converts data to 1-D and assumes all data has the same mean and
    variance. Uses Jeffrey's prior for variance and std.

    Equivalent to
    ``tuple((x.mean(), x.interval(alpha)) for x in mvsdist(data))``.

    References
    ----------
    T.E. Oliphant, "A Bayesian perspective on estimating mean, variance,
    and standard-deviation from data",
    http://scholarsarchive.byu.edu/facpub/278, 2006.

    Examples
    --------
    >>> from scipy import stats
    >>> data = [6, 9, 12, 7, 8, 8, 13]
    >>> mean, var, std = stats.bayes_mvs(data)
    >>> mean
    Mean(statistic=9.0, minmax=(7.1036502226125329, 10.896349777387467))
    """
    # Validate `alpha` up front so a bad argument fails fast, before the
    # (comparatively expensive) construction of the three distributions.
    if alpha >= 1 or alpha <= 0:
        raise ValueError("0 < alpha < 1 is required, but alpha=%s was given."
                         % alpha)

    m, v, s = mvsdist(data)

    m_res = Mean(m.mean(), m.interval(alpha))
    v_res = Variance(v.mean(), v.interval(alpha))
    s_res = Std_dev(s.mean(), s.interval(alpha))
    return m_res, v_res, s_res
def mvsdist(data):
    """
    'Frozen' distributions for mean, variance, and standard deviation of data.

    Parameters
    ----------
    data : array_like
        Input array. Converted to 1-D using ravel.
        Requires 2 or more data-points.

    Returns
    -------
    mdist : "frozen" distribution object
        Distribution object representing the mean of the data.
    vdist : "frozen" distribution object
        Distribution object representing the variance of the data.
    sdist : "frozen" distribution object
        Distribution object representing the standard deviation of the data.

    Raises
    ------
    ValueError
        If fewer than 2 data points are given.

    See Also
    --------
    bayes_mvs

    Notes
    -----
    Calling ``<dist>.mean()`` and ``<dist>.interval(0.90)`` on the three
    returned objects gives the same results as `bayes_mvs`.

    References
    ----------
    T.E. Oliphant, "A Bayesian perspective on estimating mean, variance,
    and standard-deviation from data",
    http://scholarsarchive.byu.edu/facpub/278, 2006.
    """
    samples = ravel(data)
    n = len(samples)
    if n < 2:
        raise ValueError("Need at least 2 data-points.")

    xbar = samples.mean()
    C = samples.var()

    if n > 1000:
        # Large-sample case: Gaussian approximations for all three.
        mdist = distributions.norm(loc=xbar, scale=math.sqrt(C / n))
        sdist = distributions.norm(loc=math.sqrt(C),
                                   scale=math.sqrt(C / (2. * n)))
        vdist = distributions.norm(loc=C, scale=math.sqrt(2.0 / n) * C)
        return mdist, vdist, sdist

    # Small-sample case: exact posterior families (Jeffreys' prior).
    dof = n - 1
    fac = n * C / 2.
    val = dof / 2.
    mdist = distributions.t(dof, loc=xbar, scale=math.sqrt(C / dof))
    sdist = distributions.gengamma(val, -2, scale=math.sqrt(fac))
    vdist = distributions.invgamma(val, scale=fac)
    return mdist, vdist, sdist
def kstat(data, n=2):
    """
    Return the nth k-statistic (1<=n<=4 so far).

    The nth k-statistic k_n is the unique symmetric unbiased estimator of
    the nth cumulant kappa_n.

    Parameters
    ----------
    data : array_like
        Input array. Note that n-D input gets flattened.
    n : int, {1, 2, 3, 4}, optional
        Default is equal to 2.

    Returns
    -------
    kstat : float
        The nth k-statistic.

    Raises
    ------
    ValueError
        If `n` is outside [1, 4] or `data` is empty.

    See Also
    --------
    kstatvar: Returns an unbiased estimator of the variance of the k-statistic.
    moment: Returns the n-th central moment about the mean for a sample.

    References
    ----------
    http://mathworld.wolfram.com/k-Statistic.html
    http://mathworld.wolfram.com/Cumulant.html
    """
    if not 1 <= n <= 4:
        raise ValueError("k-statistics only supported for 1<=n<=4")
    order = int(n)

    flat = ravel(data)
    N = flat.size
    if N == 0:
        raise ValueError("Data input must not be empty")
    # On NaN input, return NaN without emitting warnings.
    if np.isnan(np.sum(flat)):
        return np.nan

    # Power sums S[k] = sum(x**k); S[0] is unused padding.
    S = np.zeros(order + 1, np.float64)
    for k in range(1, order + 1):
        S[k] = np.sum(flat**k, axis=0)

    if order == 1:
        return S[1] * 1.0/N
    if order == 2:
        return (N*S[2] - S[1]**2.0) / (N*(N - 1.0))
    if order == 3:
        return (2*S[1]**3 - 3*N*S[1]*S[2] + N*N*S[3]) / (N*(N - 1.0)*(N - 2.0))
    # order == 4
    return ((-6*S[1]**4 + 12*N*S[1]**2 * S[2] - 3*N*(N-1.0)*S[2]**2 -
             4*N*(N+1)*S[1]*S[3] + N*N*(N+1)*S[4]) /
            (N*(N-1.0)*(N-2.0)*(N-3.0)))
def kstatvar(data, n=2):
    """
    Returns an unbiased estimator of the variance of the k-statistic.

    See `kstat` for more details of the k-statistic.

    Parameters
    ----------
    data : array_like
        Input array. Note that n-D input gets flattened.
    n : int, {1, 2}, optional
        Default is equal to 2.

    Returns
    -------
    kstatvar : float
        The nth k-statistic variance.

    Raises
    ------
    ValueError
        If `n` is not 1 or 2.

    See Also
    --------
    kstat: Returns the n-th k-statistic.
    moment: Returns the n-th central moment about the mean for a sample.
    """
    flat = ravel(data)
    N = len(flat)
    if n == 1:
        # var(k1) = k2 / N
        return kstat(flat, n=2) * 1.0/N
    if n == 2:
        # var(k2) = (2*N*k2**2 + (N-1)*k4) / (N*(N+1))
        k2 = kstat(flat, n=2)
        k4 = kstat(flat, n=4)
        return (2*N*k2**2 + (N-1)*k4) / (N*(N+1))
    raise ValueError("Only n=1 or n=2 supported.")
def _calc_uniform_order_statistic_medians(x):
"""See Notes section of `probplot` for details."""
N = len(x)
osm_uniform = np.zeros(N, dtype=np.float64)
osm_uniform[-1] = 0.5**(1.0 / N)
osm_uniform[0] = 1 - osm_uniform[-1]
i = np.arange(2, N)
osm_uniform[1:-1] = (i - 0.3175) / (N + 0.365)
return osm_uniform
def _parse_dist_kw(dist, enforce_subclass=True):
    """Parse `dist` keyword.

    Several functions take `dist` as a keyword; this utility resolves it.

    Parameters
    ----------
    dist : str or stats.distributions instance.
        A distribution object, or the name of one to look up in
        `scipy.stats.distributions`.
    enforce_subclass : bool, optional
        If True (default), `dist` needs to be a
        `_distn_infrastructure.rv_generic` instance. Setting this to False
        lets through objects that merely look like one (e.g. anything with
        a ``ppf`` method).
    """
    if isinstance(dist, rv_generic):
        return dist
    if isinstance(dist, string_types):
        # Resolve a distribution name to the actual distribution object.
        try:
            return getattr(distributions, dist)
        except AttributeError:
            raise ValueError("%s is not a valid distribution name" % dist)
    if enforce_subclass:
        msg = ("`dist` should be a stats.distributions instance or a string "
               "with the name of such a distribution.")
        raise ValueError(msg)
    return dist
def _add_axis_labels_title(plot, xlabel, ylabel, title):
"""Helper function to add axes labels and a title to stats plots"""
try:
if hasattr(plot, 'set_title'):
# Matplotlib Axes instance or something that looks like it
plot.set_title(title)
plot.set_xlabel(xlabel)
plot.set_ylabel(ylabel)
else:
# matplotlib.pyplot module
plot.title(title)
plot.xlabel(xlabel)
plot.ylabel(ylabel)
except:
# Not an MPL object or something that looks (enough) like it.
# Don't crash on adding labels or title
pass
def probplot(x, sparams=(), dist='norm', fit=True, plot=None):
    """
    Calculate quantiles for a probability plot, and optionally show the plot.

    Generates a probability plot of sample data against the quantiles of a
    specified theoretical distribution (the normal distribution by
    default), and optionally calculates and draws a best-fit line.

    Parameters
    ----------
    x : array_like
        Sample/response data from which `probplot` creates the plot.
    sparams : tuple, optional
        Distribution-specific shape parameters (shape parameters plus
        location and scale).
    dist : str or stats.distributions instance, optional
        Distribution or distribution function name. The default is 'norm'.
        Any object with a ``ppf`` method is accepted.
    fit : bool, optional
        Fit a least-squares regression (best-fit) line to the sample data
        if True (default).
    plot : object, optional
        If given, plots the quantiles and least squares fit. `plot` must
        have methods "plot" and "text" (e.g. `matplotlib.pyplot` or a
        Matplotlib Axes object). Default is None (no plot is created).

    Returns
    -------
    (osm, osr) : tuple of ndarrays
        Theoretical quantiles (osm, order statistic medians via Filliben's
        estimate) and ordered responses (osr, the sorted input `x`).
    (slope, intercept, r) : tuple of floats, optional
        Result of the least-squares fit; `r` is the square root of the
        coefficient of determination. Not returned if ``fit=False`` and
        ``plot=None``.

    Notes
    -----
    Even if `plot` is given, the figure is not shown or saved; call
    ``plt.show()`` or ``plt.savefig(...)`` afterwards.
    """
    x = np.asarray(x)
    want_fit = fit or (plot is not None)

    # Degenerate input: hand back empty arrays (plus a NaN fit if needed).
    if x.size == 0:
        return ((x, x), (np.nan, np.nan, 0.0)) if want_fit else (x, x)

    osm_uniform = _calc_uniform_order_statistic_medians(x)
    dist = _parse_dist_kw(dist, enforce_subclass=False)

    # Normalize `sparams` to a tuple of shape parameters.
    if sparams is None:
        sparams = ()
    if isscalar(sparams):
        sparams = (sparams,)
    if not isinstance(sparams, tuple):
        sparams = tuple(sparams)

    osm = dist.ppf(osm_uniform, *sparams)
    osr = sort(x)

    if want_fit:
        # Linear least-squares fit of ordered responses vs. quantiles.
        slope, intercept, r, prob, sterrest = stats.linregress(osm, osr)

    if plot is not None:
        plot.plot(osm, osr, 'bo', osm, slope*osm + intercept, 'r-')
        _add_axis_labels_title(plot, xlabel='Quantiles',
                               ylabel='Ordered Values',
                               title='Probability Plot')
        # Annotate the plot with the R^2 of the fit, near the lower right.
        xmin, xmax = amin(osm), amax(osm)
        ymin, ymax = amin(x), amax(x)
        posx = xmin + 0.70 * (xmax - xmin)
        posy = ymin + 0.01 * (ymax - ymin)
        plot.text(posx, posy, "$R^2=%1.4f$" % r**2)

    if fit:
        return (osm, osr), (slope, intercept, r)
    return osm, osr
def ppcc_max(x, brack=(0.0, 1.0), dist='tukeylambda'):
    """
    Calculate the shape parameter that maximizes the PPCC.

    The probability plot correlation coefficient (PPCC) plot can be used
    to determine the optimal shape parameter for a one-parameter family of
    distributions; this function returns the shape parameter that would
    maximize the PPCC for the given data.

    Parameters
    ----------
    x : array_like
        Input array.
    brack : tuple, optional
        Triple (a,b,c) where (a<b<c). If bracket consists of two numbers
        (a, c) then they are assumed to be a starting interval for a
        downhill bracket search (see `scipy.optimize.brent`).
    dist : str or stats.distributions instance, optional
        Distribution or distribution function name; any object with a
        ``ppf`` method is accepted. Default is ``'tukeylambda'``.

    Returns
    -------
    shape_value : float
        The shape parameter at which the probability plot correlation
        coefficient reaches its max value.

    See also
    --------
    ppcc_plot, probplot, boxcox

    References
    ----------
    J.J. Filliben, "The Probability Plot Correlation Coefficient Test for
    Normality", Technometrics, Vol. 17, pp. 111-117, 1975.
    """
    dist = _parse_dist_kw(dist)
    osm_uniform = _calc_uniform_order_statistic_medians(x)
    osr = sort(x)

    def neg_correlation(shape, medians, yvals, ppf):
        # Probability-plot x-axis values for this shape, then 1 - r of the
        # linear regression, so that minimizing this maximizes the
        # correlation.
        xvals = ppf(medians, shape)
        r, _ = stats.pearsonr(xvals, yvals)
        return 1 - r

    return optimize.brent(neg_correlation, brack=brack,
                          args=(osm_uniform, osr, dist.ppf))
def ppcc_plot(x, a, b, dist='tukeylambda', plot=None, N=80):
    """
    Calculate and optionally plot probability plot correlation coefficient.

    The PPCC plot can be used to determine the optimal shape parameter for
    a one-parameter family of distributions. It cannot be used for
    distributions without shape parameters or with multiple shape
    parameters. By default a Tukey-Lambda distribution
    (`stats.tukeylambda`) is used.

    Parameters
    ----------
    x : array_like
        Input array.
    a, b: scalar
        Lower and upper bounds of the shape parameter to use.
    dist : str or stats.distributions instance, optional
        Distribution or distribution function name; any object with a
        ``ppf`` method is accepted. Default is ``'tukeylambda'``.
    plot : object, optional
        If given, plots PPCC against the shape parameter; must have
        methods "plot" and "text" (e.g. `matplotlib.pyplot` or a
        Matplotlib Axes object). Default is None (no plot is created).
    N : int, optional
        Number of points on the horizontal axis (equally distributed from
        `a` to `b`).

    Returns
    -------
    svals : ndarray
        The shape values for which `ppcc` was calculated.
    ppcc : ndarray
        The calculated probability plot correlation coefficient values.

    Raises
    ------
    ValueError
        If ``b <= a``.

    See also
    --------
    ppcc_max, probplot, boxcox_normplot, tukeylambda
    """
    if b <= a:
        raise ValueError("`b` has to be larger than `a`.")

    # Evaluate the PPCC at N equally spaced shape values in [a, b]; the
    # correlation is the last element of probplot's fit result.
    svals = np.linspace(a, b, num=N)
    ppcc = np.empty_like(svals)
    for idx, shape in enumerate(svals):
        _, fit_result = probplot(x, shape, dist=dist, fit=True)
        ppcc[idx] = fit_result[-1]

    if plot is not None:
        plot.plot(svals, ppcc, 'x')
        _add_axis_labels_title(plot, xlabel='Shape Values',
                               ylabel='Prob Plot Corr. Coef.',
                               title='(%s) PPCC Plot' % dist)

    return svals, ppcc
def boxcox_llf(lmb, data):
    r"""The boxcox log-likelihood function.

    Parameters
    ----------
    lmb : scalar
        Parameter for Box-Cox transformation. See `boxcox` for details.
    data : array_like
        Data to calculate Box-Cox log-likelihood for. If `data` is
        multi-dimensional, the log-likelihood is calculated along the
        first axis.

    Returns
    -------
    llf : float or ndarray
        Box-Cox log-likelihood of `data` given `lmb`. A float for 1-D
        `data`, an array otherwise.

    See Also
    --------
    boxcox, probplot, boxcox_normplot, boxcox_normmax

    Notes
    -----
    The Box-Cox log-likelihood function is defined here as

    .. math::

        llf = (\lambda - 1) \sum_i(\log(x_i)) -
              N/2 \log(\sum_i (y_i - \bar{y})^2 / N),

    where ``y`` is the Box-Cox transformed input data ``x``.
    """
    arr = np.asarray(data)
    n_obs = arr.shape[0]
    if n_obs == 0:
        return np.nan

    # Transform the data, then evaluate the profile log-likelihood.
    transformed = boxcox(arr, lmb)
    mean_transformed = np.mean(transformed, axis=0)
    resid_ms = np.sum((transformed - mean_transformed)**2. / n_obs, axis=0)
    return ((lmb - 1) * np.sum(np.log(arr), axis=0) -
            n_obs / 2.0 * np.log(resid_ms))
def _boxcox_conf_interval(x, lmax, alpha):
# Need to find the lambda for which
# f(x,lmbda) >= f(x,lmax) - 0.5*chi^2_alpha;1
fac = 0.5 * distributions.chi2.ppf(1 - alpha, 1)
target = boxcox_llf(lmax, x) - fac
def rootfunc(lmbda, data, target):
return boxcox_llf(lmbda, data) - target
# Find positive endpoint of interval in which answer is to be found
newlm = lmax + 0.5
N = 0
while (rootfunc(newlm, x, target) > 0.0) and (N < 500):
newlm += 0.1
N += 1
if N == 500:
raise RuntimeError("Could not find endpoint.")
lmplus = optimize.brentq(rootfunc, lmax, newlm, args=(x, target))
# Now find negative interval in the same way
newlm = lmax - 0.5
N = 0
while (rootfunc(newlm, x, target) > 0.0) and (N < 500):
newlm -= 0.1
N += 1
if N == 500:
raise RuntimeError("Could not find endpoint.")
lmminus = optimize.brentq(rootfunc, newlm, lmax, args=(x, target))
return lmminus, lmplus
def boxcox(x, lmbda=None, alpha=None):
    r"""
    Return a positive dataset transformed by a Box-Cox power transformation.

    Parameters
    ----------
    x : ndarray
        Input array. Should be 1-dimensional, with all values positive.
    lmbda : {None, scalar}, optional
        If `lmbda` is not None, do the transformation for that value.
        If `lmbda` is None, find the lambda that maximizes the
        log-likelihood function and return it as the second output
        argument.
    alpha : {None, float}, optional
        If ``alpha`` is not None, return the ``100 * (1-alpha)%``
        confidence interval for `lmbda` as the third output argument.
        Must be between 0.0 and 1.0.

    Returns
    -------
    boxcox : ndarray
        Box-Cox power transformed array.
    maxlog : float, optional
        If the `lmbda` parameter is None, the second returned argument is
        the lambda that maximizes the log-likelihood function.
    (min_ci, max_ci) : tuple of float, optional
        If `lmbda` parameter is None and ``alpha`` is not None, the
        minimum and maximum confidence limits given ``alpha``.

    Raises
    ------
    ValueError
        If any value in `x` is not positive.

    See Also
    --------
    probplot, boxcox_normplot, boxcox_normmax, boxcox_llf

    Notes
    -----
    The Box-Cox transform is given by::

        y = (x**lmbda - 1) / lmbda,  for lmbda > 0
            log(x),                  for lmbda = 0

    `boxcox` requires the input data to be positive; add a positive
    constant to `x` beforehand if a shift is needed.

    References
    ----------
    G.E.P. Box and D.R. Cox, "An Analysis of Transformations", Journal of
    the Royal Statistical Society B, 26, 211-252 (1964).
    """
    x = np.asarray(x)
    if x.size == 0:
        return x
    if any(x <= 0):
        raise ValueError("Data must be positive.")

    # Fixed lambda: just the single transformation, nothing else returned.
    if lmbda is not None:
        return special.boxcox(x, lmbda)

    # lmbda is None: find the MLE of lambda and transform with it.
    lmax = boxcox_normmax(x, method='mle')
    y = boxcox(x, lmax)

    if alpha is None:
        return y, lmax
    # Also compute the profile-likelihood confidence interval for lambda.
    interval = _boxcox_conf_interval(x, lmax, alpha)
    return y, lmax, interval
def boxcox_normmax(x, brack=(-2.0, 2.0), method='pearsonr'):
    """Compute optimal Box-Cox transform parameter for input data.

    Parameters
    ----------
    x : array_like
        Input array.
    brack : 2-tuple, optional
        The starting interval for a downhill bracket search with
        `optimize.brent`.  Note that this is in most cases not critical; the
        final result is allowed to be outside this bracket.
    method : str, optional
        The method to determine the optimal transform parameter (`boxcox`
        ``lmbda`` parameter). Options are:

        'pearsonr'  (default)
            Maximizes the Pearson correlation coefficient between
            ``y = boxcox(x)`` and the expected values for ``y`` if `x` would
            be normally-distributed.

        'mle'
            Minimizes the log-likelihood `boxcox_llf`.  This is the method
            used in `boxcox`.

        'all'
            Use all optimization methods available, and return all results.
            Useful to compare different methods.

    Returns
    -------
    maxlog : float or ndarray
        The optimal transform parameter found.  An array instead of a scalar
        for ``method='all'``.

    See Also
    --------
    boxcox, boxcox_llf, boxcox_normplot

    Examples
    --------
    >>> from scipy import stats
    >>> import matplotlib.pyplot as plt
    >>> np.random.seed(1234)  # make this example reproducible

    Generate some data and determine optimal ``lmbda`` in various ways:

    >>> x = stats.loggamma.rvs(5, size=30) + 5
    >>> y, lmax_mle = stats.boxcox(x)
    >>> lmax_pearsonr = stats.boxcox_normmax(x)

    >>> lmax_mle
    7.177...
    >>> lmax_pearsonr
    7.916...
    >>> stats.boxcox_normmax(x, method='all')
    array([ 7.91667384,  7.17718692])

    >>> fig = plt.figure()
    >>> ax = fig.add_subplot(111)
    >>> prob = stats.boxcox_normplot(x, -10, 10, plot=ax)
    >>> ax.axvline(lmax_mle, color='r')
    >>> ax.axvline(lmax_pearsonr, color='g', ls='--')
    >>> plt.show()
    """
    def _estimate_pearsonr(data, bracket):
        # x-axis of the normal probability plot: normal order-statistic
        # medians for a sample of this size.
        osm_uniform = _calc_uniform_order_statistic_medians(data)
        xvals = distributions.norm.ppf(osm_uniform)

        def _neg_correlation(lmbda, xvals, samps):
            # Correlation between the sorted transformed sample and the
            # normal plotting positions; return ``1 - r`` so that brent's
            # minimization maximizes the correlation.
            yvals = np.sort(boxcox(samps, lmbda))
            r, _ = stats.pearsonr(xvals, yvals)
            return 1 - r

        return optimize.brent(_neg_correlation, brack=bracket,
                              args=(xvals, data))

    def _estimate_mle(data, bracket):
        def _neg_llf(lmb, samps):
            # Brent minimizes, so negate the log-likelihood.
            return -boxcox_llf(lmb, samps)

        return optimize.brent(_neg_llf, brack=bracket, args=(data,))

    def _estimate_all(data, bracket):
        # Run both estimators; order matches the documented output.
        return np.array([_estimate_pearsonr(data, bracket),
                         _estimate_mle(data, bracket)])

    estimators = {'pearsonr': _estimate_pearsonr,
                  'mle': _estimate_mle,
                  'all': _estimate_all}
    if method not in estimators:
        raise ValueError("Method %s not recognized." % method)

    return estimators[method](x, brack)
def boxcox_normplot(x, la, lb, plot=None, N=80):
    """Compute parameters for a Box-Cox normality plot, optionally show it.

    A Box-Cox normality plot shows graphically what the best transformation
    parameter is to use in `boxcox` to obtain a distribution that is close
    to normal.

    Parameters
    ----------
    x : array_like
        Input array.
    la, lb : scalar
        The lower and upper bounds for the ``lmbda`` values to pass to
        `boxcox` for Box-Cox transformations.  These are also the limits of
        the horizontal axis of the plot if that is generated.
    plot : object, optional
        If given, plots the quantiles and least squares fit.
        `plot` is an object that has to have methods "plot" and "text".
        The `matplotlib.pyplot` module or a Matplotlib Axes object can be
        used, or a custom object with the same methods.
        Default is None, which means that no plot is created.
    N : int, optional
        Number of points on the horizontal axis (equally distributed from
        `la` to `lb`).

    Returns
    -------
    lmbdas : ndarray
        The ``lmbda`` values for which a Box-Cox transform was done.
    ppcc : ndarray
        Probability Plot Correlelation Coefficient, as obtained from
        `probplot` when fitting the Box-Cox transformed input `x` against a
        normal distribution.

    See Also
    --------
    probplot, boxcox, boxcox_normmax, boxcox_llf, ppcc_max

    Notes
    -----
    Even if `plot` is given, the figure is not shown or saved by
    `boxcox_normplot`; ``plt.show()`` or ``plt.savefig('figname.png')``
    should be used after calling `probplot`.

    Examples
    --------
    >>> from scipy import stats
    >>> import matplotlib.pyplot as plt

    Generate some non-normally distributed data, and create a Box-Cox plot:

    >>> x = stats.loggamma.rvs(5, size=500) + 5
    >>> fig = plt.figure()
    >>> ax = fig.add_subplot(111)
    >>> prob = stats.boxcox_normplot(x, -20, 20, plot=ax)

    Determine and plot the optimal ``lmbda`` to transform ``x`` and plot it
    in the same plot:

    >>> _, maxlog = stats.boxcox(x)
    >>> ax.axvline(maxlog, color='r')

    >>> plt.show()
    """
    x = np.asarray(x)
    if x.size == 0:
        # Mirror `boxcox`: empty input is passed through unchanged.
        return x

    if lb <= la:
        raise ValueError("`lb` has to be larger than `la`.")

    lmbdas = np.linspace(la, lb, num=N)
    ppcc = lmbdas * 0.0
    for i, val in enumerate(lmbdas):
        # Determine for each lmbda the correlation coefficient of
        # transformed x (last element of probplot's fit results is r).
        z = boxcox(x, lmbda=val)
        _, r2 = probplot(z, dist='norm', fit=True)
        ppcc[i] = r2[-1]

    if plot is not None:
        plot.plot(lmbdas, ppcc, 'x')
        # Raw string for the TeX label: '$\lambda$' as a plain literal
        # contains the invalid escape sequence '\l', which modern Python
        # flags with a (Syntax/Deprecation)Warning.  The runtime string is
        # identical.
        _add_axis_labels_title(plot, xlabel=r'$\lambda$',
                               ylabel='Prob Plot Corr. Coef.',
                               title='Box-Cox Normality Plot')

    return lmbdas, ppcc
def shapiro(x, a=None, reta=False):
    """
    Perform the Shapiro-Wilk test for normality.

    The Shapiro-Wilk test tests the null hypothesis that the
    data was drawn from a normal distribution.

    Parameters
    ----------
    x : array_like
        Array of sample data.
    a : array_like, optional
        Array of internal parameters used in the calculation.  If these
        are not given, they will be computed internally.  If x has length
        n, then a must have length n/2.
    reta : bool, optional
        Whether or not to return the internally computed a values.  The
        default is False.

    Returns
    -------
    W : float
        The test statistic.
    p-value : float
        The p-value for the hypothesis test.
    a : array_like, optional
        If `reta` is True, then these are the internally computed "a"
        values that may be passed into this function on future calls.

    See Also
    --------
    anderson : The Anderson-Darling test for normality
    kstest : The Kolmogorov-Smirnov test for goodness of fit.

    Notes
    -----
    The algorithm used is described in [4]_ but censoring parameters as
    described are not implemented.  For N > 5000 the W test statistic is
    accurate but the p-value may not be.

    The chance of rejecting the null hypothesis when it is true is close to
    5% regardless of sample size.

    References
    ----------
    .. [1] http://www.itl.nist.gov/div898/handbook/prc/section2/prc213.htm
    .. [2] Shapiro, S. S. & Wilk, M.B (1965). An analysis of variance test for
           normality (complete samples), Biometrika, Vol. 52, pp. 591-611.
    .. [3] Razali, N. M. & Wah, Y. B. (2011) Power comparisons of Shapiro-Wilk,
           Kolmogorov-Smirnov, Lilliefors and Anderson-Darling tests, Journal
           of Statistical Modeling and Analytics, Vol. 2, pp. 21-33.
    .. [4] ALGORITHM AS R94 APPL. STATIST. (1995) VOL. 44, NO. 4.

    Examples
    --------
    >>> from scipy import stats
    >>> np.random.seed(12345678)
    >>> x = stats.norm.rvs(loc=5, scale=3, size=100)
    >>> stats.shapiro(x)
    (0.9772805571556091, 0.08144091814756393)
    """
    if a is not None or reta:
        warnings.warn("input parameters 'a' and 'reta' are scheduled to be "
                      "removed in version 0.18.0", FutureWarning)

    x = np.ravel(x)
    N = len(x)
    if N < 3:
        raise ValueError("Data must be at least length 3.")

    if a is None:
        # Let the Fortran routine compute the coefficients itself.
        init = 0
        a = zeros(N, 'f')
    else:
        if len(a) != N // 2:
            raise ValueError("len(a) must equal len(x)/2")
        # Reuse coefficients supplied by the caller.
        init = 1

    # swilk expects the sample in ascending order.
    a, w, pw, ifault = statlib.swilk(sort(x), a[:N // 2], init)
    if ifault not in [0, 2]:
        warnings.warn("Input data for shapiro has range zero. The results "
                      "may not be accurate.")
    if N > 5000:
        warnings.warn("p-value may not be accurate for N > 5000.")

    return (w, pw, a) if reta else (w, pw)
# Asymptotic critical values of the Anderson-Darling statistic used by
# `anderson` below; each array corresponds to a fixed set of significance
# levels (see the table in `anderson`'s docstring).
# Values from Stephens, M A, "EDF Statistics for Goodness of Fit and
# Some Comparisons", Journal of the American Statistical
# Association, Vol. 69, Issue 347, Sept. 1974, pp 730-737
_Avals_norm = array([0.576, 0.656, 0.787, 0.918, 1.092])
_Avals_expon = array([0.922, 1.078, 1.341, 1.606, 1.957])
# From Stephens, M A, "Goodness of Fit for the Extreme Value Distribution",
# Biometrika, Vol. 64, Issue 3, Dec. 1977, pp 583-588.
_Avals_gumbel = array([0.474, 0.637, 0.757, 0.877, 1.038])
# From Stephens, M A, "Tests of Fit for the Logistic Distribution Based
# on the Empirical Distribution Function.", Biometrika,
# Vol. 66, Issue 3, Dec. 1979, pp 591-595.
_Avals_logistic = array([0.426, 0.563, 0.660, 0.769, 0.906, 1.010])
# Named result container returned by `anderson`.
AndersonResult = namedtuple('AndersonResult', ('statistic',
                                               'critical_values',
                                               'significance_level'))
def anderson(x, dist='norm'):
    """
    Anderson-Darling test for data coming from a particular distribution

    The Anderson-Darling test is a modification of the Kolmogorov-
    Smirnov test `kstest` for the null hypothesis that a sample is
    drawn from a population that follows a particular distribution.
    For the Anderson-Darling test, the critical values depend on
    which distribution is being tested against.  This function works
    for normal, exponential, logistic, or Gumbel (Extreme Value
    Type I) distributions.

    Parameters
    ----------
    x : array_like
        array of sample data
    dist : {'norm','expon','logistic','gumbel','extreme1'}, optional
        the type of distribution to test against.  The default is 'norm'
        and 'extreme1' is a synonym for 'gumbel'

    Returns
    -------
    statistic : float
        The Anderson-Darling test statistic
    critical_values : list
        The critical values for this distribution
    significance_level : list
        The significance levels for the corresponding critical values
        in percents.  The function returns critical values for a
        differing set of significance levels depending on the
        distribution that is being tested against.

    Notes
    -----
    Critical values provided are for the following significance levels:

    normal/exponenential
        15%, 10%, 5%, 2.5%, 1%
    logistic
        25%, 10%, 5%, 2.5%, 1%, 0.5%
    Gumbel
        25%, 10%, 5%, 2.5%, 1%

    If A2 is larger than these critical values then for the corresponding
    significance level, the null hypothesis that the data come from the
    chosen distribution can be rejected.

    References
    ----------
    .. [1] http://www.itl.nist.gov/div898/handbook/prc/section2/prc213.htm
    .. [2] Stephens, M. A. (1974). EDF Statistics for Goodness of Fit and
           Some Comparisons, Journal of the American Statistical Association,
           Vol. 69, pp. 730-737.
    .. [3] Stephens, M. A. (1976). Asymptotic Results for Goodness-of-Fit
           Statistics with Unknown Parameters, Annals of Statistics, Vol. 4,
           pp. 357-369.
    .. [4] Stephens, M. A. (1977). Goodness of Fit for the Extreme Value
           Distribution, Biometrika, Vol. 64, pp. 583-588.
    .. [5] Stephens, M. A. (1977). Goodness of Fit with Special Reference
           to Tests for Exponentiality , Technical Report No. 262,
           Department of Statistics, Stanford University, Stanford, CA.
    .. [6] Stephens, M. A. (1979). Tests of Fit for the Logistic Distribution
           Based on the Empirical Distribution Function, Biometrika, Vol. 66,
           pp. 591-595.
    """
    if dist not in ['norm', 'expon', 'gumbel', 'extreme1', 'logistic']:
        raise ValueError("Invalid distribution; dist must be 'norm', "
                         "'expon', 'gumbel', 'extreme1' or 'logistic'.")
    # The statistic needs the sample sorted; the mean is computed on the
    # original (unsorted) data, which gives the same value.
    y = sort(x)
    xbar = np.mean(x, axis=0)
    N = len(y)
    # For each supported family: estimate the parameters, standardize the
    # sorted sample, map it through the fitted CDF, and pick the matching
    # significance levels / finite-sample-corrected critical values.
    if dist == 'norm':
        s = np.std(x, ddof=1, axis=0)
        w = (y - xbar) / s
        z = distributions.norm.cdf(w)
        sig = array([15, 10, 5, 2.5, 1])
        critical = around(_Avals_norm / (1.0 + 4.0/N - 25.0/N/N), 3)
    elif dist == 'expon':
        w = y / xbar
        z = distributions.expon.cdf(w)
        sig = array([15, 10, 5, 2.5, 1])
        critical = around(_Avals_expon / (1.0 + 0.6/N), 3)
    elif dist == 'logistic':
        def rootfunc(ab, xj, N):
            # Maximum-likelihood equations for the logistic location `a`
            # and scale `b`; both components are zero at the ML estimate.
            a, b = ab
            tmp = (xj - a) / b
            tmp2 = exp(tmp)
            val = [np.sum(1.0/(1+tmp2), axis=0) - 0.5*N,
                   np.sum(tmp*(1.0-tmp2)/(1+tmp2), axis=0) + N]
            return array(val)
        # Start the solver from the sample mean and standard deviation.
        sol0 = array([xbar, np.std(x, ddof=1, axis=0)])
        sol = optimize.fsolve(rootfunc, sol0, args=(x, N), xtol=1e-5)
        w = (y - sol[0]) / sol[1]
        z = distributions.logistic.cdf(w)
        sig = array([25, 10, 5, 2.5, 1, 0.5])
        critical = around(_Avals_logistic / (1.0 + 0.25/N), 3)
    else:  # (dist == 'gumbel') or (dist == 'extreme1'):
        xbar, s = distributions.gumbel_l.fit(x)
        w = (y - xbar) / s
        z = distributions.gumbel_l.cdf(w)
        sig = array([25, 10, 5, 2.5, 1])
        critical = around(_Avals_gumbel / (1.0 + 0.2/sqrt(N)), 3)
    # Standard A^2 formula on the fitted probability integral transforms.
    i = arange(1, N + 1)
    A2 = -N - np.sum((2*i - 1.0) / N * (log(z) + log(1 - z[::-1])), axis=0)
    return AndersonResult(A2, critical, sig)
def _anderson_ksamp_midrank(samples, Z, Zstar, k, n, N):
    """
    Compute A2akN equation 7 of Scholz and Stephens.

    Parameters
    ----------
    samples : sequence of 1-D array_like
        Array of sample arrays.
    Z : array_like
        Sorted array of all observations.
    Zstar : array_like
        Sorted array of unique observations.
    k : int
        Number of samples.
    n : array_like
        Number of observations in each sample.
    N : int
        Total number of observations.

    Returns
    -------
    A2aKN : float
        The A2aKN statistics of Scholz and Stephens 1987.
    """
    left = Z.searchsorted(Zstar, 'left')
    if N == Zstar.size:
        # No ties anywhere: each distinct value occurs exactly once.
        lj = 1.
    else:
        # Multiplicity of each distinct value in the pooled sample.
        lj = Z.searchsorted(Zstar, 'right') - left
    # Midrank positions of the distinct values in the pooled sample.
    Bj = left + lj / 2.

    A2akN = 0.
    for sample, ni in zip(samples, n):
        ordered = np.sort(sample)
        right_count = ordered.searchsorted(Zstar, side='right')
        # fij: multiplicity of each distinct value within this sample;
        # Mij: midrank count of sample observations not exceeding Zstar.
        fij = right_count - ordered.searchsorted(Zstar, 'left')
        Mij = right_count.astype(float) - fij / 2.
        inner = lj / float(N) * (N*Mij - Bj*ni)**2 / (Bj*(N - Bj) - N*lj/4.)
        A2akN += inner.sum() / ni

    return A2akN * (N - 1.) / N
def _anderson_ksamp_right(samples, Z, Zstar, k, n, N):
    """
    Compute A2akN equation 6 of Scholz & Stephens.

    Parameters
    ----------
    samples : sequence of 1-D array_like
        Array of sample arrays.
    Z : array_like
        Sorted array of all observations.
    Zstar : array_like
        Sorted array of unique observations.
    k : int
        Number of samples.
    n : array_like
        Number of observations in each sample.
    N : int
        Total number of observations.

    Returns
    -------
    A2KN : float
        The A2KN statistics of Scholz and Stephens 1987.
    """
    # The largest distinct value is excluded (its empirical CDF term is 1).
    Zs = Zstar[:-1]
    # lj: multiplicity of each distinct value; Bj: cumulative counts.
    lj = Z.searchsorted(Zs, 'right') - Z.searchsorted(Zs, 'left')
    Bj = lj.cumsum()

    A2kN = 0.
    for sample, ni in zip(samples, n):
        # Number of observations in this sample not exceeding each Zs.
        Mij = np.sort(sample).searchsorted(Zs, side='right')
        inner = lj / float(N) * (N * Mij - Bj * ni)**2 / (Bj * (N - Bj))
        A2kN += inner.sum() / ni

    return A2kN
# Named result container returned by `anderson_ksamp`.
Anderson_ksampResult = namedtuple('Anderson_ksampResult',
                                  ('statistic', 'critical_values',
                                   'significance_level'))
def anderson_ksamp(samples, midrank=True):
    """The Anderson-Darling test for k-samples.

    The k-sample Anderson-Darling test is a modification of the
    one-sample Anderson-Darling test. It tests the null hypothesis
    that k-samples are drawn from the same population without having
    to specify the distribution function of that population. The
    critical values depend on the number of samples.

    Parameters
    ----------
    samples : sequence of 1-D array_like
        Array of sample data in arrays.
    midrank : bool, optional
        Type of Anderson-Darling test which is computed. Default
        (True) is the midrank test applicable to continuous and
        discrete populations. If False, the right side empirical
        distribution is used.

    Returns
    -------
    statistic : float
        Normalized k-sample Anderson-Darling test statistic.
    critical_values : array
        The critical values for significance levels 25%, 10%, 5%, 2.5%, 1%.
    significance_level : float
        An approximate significance level at which the null hypothesis for
        the provided samples can be rejected.

    Raises
    ------
    ValueError
        If less than 2 samples are provided, a sample is empty, or no
        distinct observations are in the samples.

    See Also
    --------
    ks_2samp : 2 sample Kolmogorov-Smirnov test
    anderson : 1 sample Anderson-Darling test

    Notes
    -----
    [1]_ Defines three versions of the k-sample Anderson-Darling test:
    one for continuous distributions and two for discrete
    distributions, in which ties between samples may occur. The
    default of this routine is to compute the version based on the
    midrank empirical distribution function. This test is applicable
    to continuous and discrete data. If midrank is set to False, the
    right side empirical distribution is used for a test for discrete
    data. According to [1]_, the two discrete test statistics differ
    only slightly if a few collisions due to round-off errors occur in
    the test not adjusted for ties between samples.

    .. versionadded:: 0.14.0

    References
    ----------
    .. [1] Scholz, F. W and Stephens, M. A. (1987), K-Sample
           Anderson-Darling Tests, Journal of the American Statistical
           Association, Vol. 82, pp. 918-924.

    Examples
    --------
    >>> from scipy import stats
    >>> np.random.seed(314159)

    The null hypothesis that the two random samples come from the same
    distribution can be rejected at the 5% level because the returned
    test value is greater than the critical value for 5% (1.961) but
    not at the 2.5% level. The interpolation gives an approximate
    significance level of 3.1%:

    >>> stats.anderson_ksamp([np.random.normal(size=50),
    ... np.random.normal(loc=0.5, size=30)])
    (2.4615796189876105,
      array([ 0.325,  1.226,  1.961,  2.718,  3.752]),
      0.03134990135800783)

    The null hypothesis cannot be rejected for three samples from an
    identical distribution. The approximate p-value (87%) has to be
    computed by extrapolation and may not be very accurate:

    >>> stats.anderson_ksamp([np.random.normal(size=50),
    ... np.random.normal(size=30), np.random.normal(size=20)])
    (-0.73091722665244196,
      array([ 0.44925884,  1.3052767 ,  1.9434184 ,  2.57696569,  3.41634856]),
      0.8789283903979661)
    """
    k = len(samples)
    if (k < 2):
        raise ValueError("anderson_ksamp needs at least two samples")
    samples = list(map(np.asarray, samples))
    # Pooled sorted sample and its distinct values.
    Z = np.sort(np.hstack(samples))
    N = Z.size
    Zstar = np.unique(Z)
    if Zstar.size < 2:
        raise ValueError("anderson_ksamp needs more than one distinct "
                         "observation")
    n = np.array([sample.size for sample in samples])
    if any(n == 0):
        raise ValueError("anderson_ksamp encountered sample without "
                         "observations")
    # Raw statistic: tie-adjusted (midrank) or right-continuous version.
    if midrank:
        A2kN = _anderson_ksamp_midrank(samples, Z, Zstar, k, n, N)
    else:
        A2kN = _anderson_ksamp_right(samples, Z, Zstar, k, n, N)
    # H, h, g and the a..d coefficients feed the polynomial expression for
    # the variance of A2kN under the null (Scholz & Stephens 1987).
    H = (1. / n).sum()
    hs_cs = (1. / arange(N - 1, 1, -1)).cumsum()
    h = hs_cs[-1] + 1
    g = (hs_cs / arange(2, N)).sum()
    a = (4*g - 6) * (k - 1) + (10 - 6*g)*H
    b = (2*g - 4)*k**2 + 8*h*k + (2*g - 14*h - 4)*H - 8*h + 4*g - 6
    c = (6*h + 2*g - 2)*k**2 + (4*h - 4*g + 6)*k + (2*h - 6)*H + 4*h
    d = (2*h + 6)*k**2 - 4*h*k
    sigmasq = (a*N**3 + b*N**2 + c*N + d) / ((N - 1.) * (N - 2.) * (N - 3.))
    m = k - 1
    # Standardize the statistic by its null mean (m) and standard deviation.
    A2 = (A2kN - m) / math.sqrt(sigmasq)
    # The b_i values are the interpolation coefficients from Table 2
    # of Scholz and Stephens 1987
    b0 = np.array([0.675, 1.281, 1.645, 1.96, 2.326])
    b1 = np.array([-0.245, 0.25, 0.678, 1.149, 1.822])
    b2 = np.array([-0.105, -0.305, -0.362, -0.391, -0.396])
    critical = b0 + b1 / math.sqrt(m) + b2 / m
    # Quadratic fit of log(significance) against the critical values gives
    # an approximate p-value for the observed statistic.
    pf = np.polyfit(critical, log(np.array([0.25, 0.1, 0.05, 0.025, 0.01])), 2)
    if A2 < critical.min() or A2 > critical.max():
        warnings.warn("approximate p-value will be computed by extrapolation")
    p = math.exp(np.polyval(pf, A2))
    return Anderson_ksampResult(A2, critical, p)
# Named result container returned by `ansari`.
AnsariResult = namedtuple('AnsariResult', ('statistic', 'pvalue'))
def ansari(x, y):
    """
    Perform the Ansari-Bradley test for equal scale parameters

    The Ansari-Bradley test is a non-parametric test for the equality
    of the scale parameter of the distributions from which two
    samples were drawn.

    Parameters
    ----------
    x, y : array_like
        arrays of sample data

    Returns
    -------
    statistic : float
        The Ansari-Bradley test statistic
    pvalue : float
        The p-value of the hypothesis test

    See Also
    --------
    fligner : A non-parametric test for the equality of k variances
    mood : A non-parametric test for the equality of two scale parameters

    Notes
    -----
    The p-value given is exact when the sample sizes are both less than
    55 and there are no ties, otherwise a normal approximation for the
    p-value is used.

    References
    ----------
    .. [1] Sprent, Peter and N.C. Smeeton.  Applied nonparametric statistical
           methods.  3rd ed. Chapman and Hall/CRC.  2001.  Section 5.8.2.
    """
    x, y = asarray(x), asarray(y)
    n = len(x)
    m = len(y)
    if m < 1:
        raise ValueError("Not enough other observations.")
    if n < 1:
        raise ValueError("Not enough test observations.")
    N = m + n
    xy = r_[x, y]  # combine
    rank = stats.rankdata(xy)
    # Ansari-Bradley scores: ranks folded about the middle so that both
    # extremes of the pooled sample receive small scores.
    symrank = amin(array((rank, N - rank + 1)), 0)
    # Test statistic: sum of the scores belonging to the first sample.
    AB = np.sum(symrank[:n], axis=0)
    uxy = unique(xy)
    repeats = (len(uxy) != len(xy))
    # Exact null distribution (statlib.gscale) is only used for small,
    # tie-free samples.
    exact = ((m < 55) and (n < 55) and not repeats)
    if repeats and (m < 55 or n < 55):
        warnings.warn("Ties preclude use of exact statistic.")
    if exact:
        # a1 enumerates the exact frequencies of the statistic starting at
        # astart; sum one tail and double for a two-sided p-value.
        astart, a1, ifault = statlib.gscale(n, m)
        ind = AB - astart
        total = np.sum(a1, axis=0)
        if ind < len(a1)/2.0:
            # Observed statistic falls in the lower tail.
            cind = int(ceil(ind))
            if ind == cind:
                pval = 2.0 * np.sum(a1[:cind+1], axis=0) / total
            else:
                pval = 2.0 * np.sum(a1[:cind], axis=0) / total
        else:
            # Upper tail.
            find = int(floor(ind))
            if ind == floor(ind):
                pval = 2.0 * np.sum(a1[find:], axis=0) / total
            else:
                pval = 2.0 * np.sum(a1[find+1:], axis=0) / total
        return AnsariResult(AB, min(1.0, pval))
    # otherwise compute normal approximation
    if N % 2:  # N odd
        mnAB = n * (N+1.0)**2 / 4.0 / N
        varAB = n * m * (N+1.0) * (3+N**2) / (48.0 * N**2)
    else:
        mnAB = n * (N+2.0) / 4.0
        varAB = m * n * (N+2) * (N-2.0) / 48 / (N-1.0)
    if repeats:   # adjust variance estimates
        # compute np.sum(tj * rj**2,axis=0)
        fac = np.sum(symrank**2, axis=0)
        if N % 2:  # N odd
            varAB = m * n * (16*N*fac - (N+1)**4) / (16.0 * N**2 * (N-1))
        else:  # N even
            varAB = m * n * (16*fac - N*(N+2)**2) / (16.0 * N * (N-1))
    # Two-sided p-value from the normal approximation.
    z = (AB - mnAB) / sqrt(varAB)
    pval = distributions.norm.sf(abs(z)) * 2.0
    return AnsariResult(AB, pval)
# Named result container returned by `bartlett`.
BartlettResult = namedtuple('BartlettResult', ('statistic', 'pvalue'))


def bartlett(*args):
    """
    Perform Bartlett's test for equal variances

    Bartlett's test tests the null hypothesis that all input samples
    are from populations with equal variances.  For samples
    from significantly non-normal populations, Levene's test
    `levene` is more robust.

    Parameters
    ----------
    sample1, sample2,... : array_like
        arrays of sample data.  May be different lengths.

    Returns
    -------
    statistic : float
        The test statistic.
    pvalue : float
        The p-value of the test.

    See Also
    --------
    fligner : A non-parametric test for the equality of k variances
    levene : A robust parametric test for equality of k variances

    Notes
    -----
    Conover et al. (1981) examine many of the existing parametric and
    nonparametric tests by extensive simulations and they conclude that the
    tests proposed by Fligner and Killeen (1976) and Levene (1960) appear to
    be superior in terms of robustness of departures from normality and
    power [3]_.

    References
    ----------
    .. [1] http://www.itl.nist.gov/div898/handbook/eda/section3/eda357.htm
    .. [2] Snedecor, George W. and Cochran, William G. (1989), Statistical
           Methods, Eighth Edition, Iowa State University Press.
    .. [3] Park, C. and Lindsay, B. G. (1999). Robust Scale Estimation and
           Hypothesis Testing based on Quadratic Inference Function.
           Technical Report #99-03, Center for Likelihood Studies,
           Pennsylvania State University.
    .. [4] Bartlett, M. S. (1937). Properties of Sufficiency and Statistical
           Tests. Proceedings of the Royal Society of London. Series A,
           Mathematical and Physical Sciences, Vol. 160, No.901, pp. 268-282.
    """
    # Any empty sample makes the statistic undefined.
    if any(np.asanyarray(sample).size == 0 for sample in args):
        return BartlettResult(np.nan, np.nan)

    k = len(args)
    if k < 2:
        raise ValueError("Must enter at least two input sample vectors.")

    # Per-sample sizes and unbiased variances.
    Ni = np.zeros(k)
    ssq = np.zeros(k, 'd')
    for j, sample in enumerate(args):
        Ni[j] = len(sample)
        ssq[j] = np.var(sample, ddof=1)
    Ntot = np.sum(Ni, axis=0)

    # Pooled variance and the (corrected) likelihood-ratio statistic.
    spsq = np.sum((Ni - 1)*ssq, axis=0) / (1.0*(Ntot - k))
    numer = (Ntot*1.0 - k) * np.log(spsq) - np.sum((Ni - 1.0)*np.log(ssq),
                                                   axis=0)
    denom = 1.0 + 1.0/(3*(k - 1)) * ((np.sum(1.0/(Ni - 1.0), axis=0)) -
                                     1.0/(Ntot - k))
    T = numer / denom
    # Under the null, T is approximately chi-squared with k-1 dof.
    pval = distributions.chi2.sf(T, k - 1)  # 1 - cdf

    return BartlettResult(T, pval)
# Named result container returned by `levene`.
LeveneResult = namedtuple('LeveneResult', ('statistic', 'pvalue'))


def levene(*args, **kwds):
    """
    Perform Levene test for equal variances.

    The Levene test tests the null hypothesis that all input samples
    are from populations with equal variances.  Levene's test is an
    alternative to Bartlett's test `bartlett` in the case where
    there are significant deviations from normality.

    Parameters
    ----------
    sample1, sample2, ... : array_like
        The sample data, possibly with different lengths
    center : {'mean', 'median', 'trimmed'}, optional
        Which function of the data to use in the test.  The default
        is 'median'.
    proportiontocut : float, optional
        When `center` is 'trimmed', this gives the proportion of data points
        to cut from each end. (See `scipy.stats.trim_mean`.)
        Default is 0.05.

    Returns
    -------
    statistic : float
        The test statistic.
    pvalue : float
        The p-value for the test.

    Notes
    -----
    Three variations of Levene's test are possible.  The possibilities
    and their recommended usages are:

    * 'median' : Recommended for skewed (non-normal) distributions>
    * 'mean' : Recommended for symmetric, moderate-tailed distributions.
    * 'trimmed' : Recommended for heavy-tailed distributions.

    References
    ----------
    .. [1] http://www.itl.nist.gov/div898/handbook/eda/section3/eda35a.htm
    .. [2] Levene, H. (1960). In Contributions to Probability and Statistics:
           Essays in Honor of Harold Hotelling, I. Olkin et al. eds.,
           Stanford University Press, pp. 278-292.
    .. [3] Brown, M. B. and Forsythe, A. B. (1974), Journal of the American
           Statistical Association, 69, 364-367
    """
    # Parse the two supported keyword arguments by hand (signature is
    # ``*args, **kwds`` so samples can be passed positionally).
    center = 'median'
    proportiontocut = 0.05
    for kw, value in kwds.items():
        if kw == 'center':
            center = value
        elif kw == 'proportiontocut':
            proportiontocut = value
        else:
            raise TypeError("levene() got an unexpected keyword "
                            "argument '%s'" % kw)

    k = len(args)
    if k < 2:
        raise ValueError("Must enter at least two input sample vectors.")
    if center not in ['mean', 'median', 'trimmed']:
        raise ValueError("Keyword argument <center> must be 'mean', 'median'"
                         + "or 'trimmed'.")

    if center == 'trimmed':
        # Trimmed variant: trim each (sorted) sample, then use the mean.
        args = tuple(stats.trimboth(np.sort(arg), proportiontocut)
                     for arg in args)

    def _center_of(sample):
        # Measure of central tendency selected via ``center``.
        if center == 'median':
            return np.median(sample, axis=0)
        return np.mean(sample, axis=0)

    Ni = np.zeros(k)
    Yci = np.zeros(k, 'd')
    for j, sample in enumerate(args):
        Ni[j] = len(sample)
        Yci[j] = _center_of(sample)
    Ntot = np.sum(Ni, axis=0)

    # Zij: absolute deviations of each observation from its sample center.
    Zij = [abs(np.asarray(sample) - Yci[i])
           for i, sample in enumerate(args)]

    # Per-sample and grand means of the deviations.
    Zbari = np.zeros(k, 'd')
    Zbar = 0.0
    for i, dev in enumerate(Zij):
        Zbari[i] = np.mean(dev, axis=0)
        Zbar += Zbari[i] * Ni[i]
    Zbar /= Ntot

    # One-way ANOVA on the deviations gives the Levene W statistic.
    numer = (Ntot - k) * np.sum(Ni * (Zbari - Zbar)**2, axis=0)
    dvar = 0.0
    for i, dev in enumerate(Zij):
        dvar += np.sum((dev - Zbari[i])**2, axis=0)
    denom = (k - 1.0) * dvar
    W = numer / denom
    pval = distributions.f.sf(W, k - 1, Ntot - k)  # 1 - cdf

    return LeveneResult(W, pval)
@setastest(False)
def binom_test(x, n=None, p=0.5, alternative='two-sided'):
    """
    Perform a test that the probability of success is p.

    This is an exact test of the null hypothesis that the probability of
    success in a Bernoulli experiment is `p`.

    Parameters
    ----------
    x : integer or array_like
        the number of successes, or if x has length 2, it is the
        number of successes and the number of failures.
    n : integer
        the number of trials.  This is ignored if x gives both the
        number of successes and failures
    p : float, optional
        The hypothesized probability of success.  0 <= p <= 1. The
        default value is p = 0.5
    alternative : {'two-sided', 'less', 'greater'}, optional
        The alternative hypothesis.  The default is 'two-sided'.

    Returns
    -------
    p-value : float
        The p-value of the hypothesis test

    References
    ----------
    .. [1] http://en.wikipedia.org/wiki/Binomial_test
    """
    # ``astype(np.integer)`` (an abstract dtype) is deprecated by NumPy and
    # rejected by recent releases; use the concrete default integer type.
    x = atleast_1d(x).astype(np.int_)
    if len(x) == 2:
        # x gives (successes, failures); infer the number of trials.
        n = x[1] + x[0]
        x = x[0]
    elif len(x) == 1:
        x = x[0]
        if n is None or n < x:
            raise ValueError("n must be >= x")
        n = np.int_(n)
    else:
        raise ValueError("Incorrect length for x.")

    if (p > 1.0) or (p < 0.0):
        raise ValueError("p must be in range [0,1]")

    if alternative not in ('two-sided', 'less', 'greater'):
        raise ValueError("alternative not recognized\n"
                         "should be 'two-sided', 'less' or 'greater'")

    if alternative == 'less':
        return distributions.binom.cdf(x, n, p)

    if alternative == 'greater':
        return distributions.binom.sf(x - 1, n, p)

    # if alternative was neither 'less' nor 'greater', then it's 'two-sided'
    # Two-sided p-value: probability of all outcomes at most as likely as
    # the observed one.  ``rerr`` tolerates floating-point noise when
    # comparing probability masses.
    d = distributions.binom.pmf(x, n, p)
    rerr = 1 + 1e-7
    if x == p * n:
        # special case as shortcut, would also be handled by `else` below
        pval = 1.
    elif x < p * n:
        # y counts the outcomes in the opposite (upper) tail with pmf <= d.
        i = np.arange(np.ceil(p * n), n + 1)
        y = np.sum(distributions.binom.pmf(i, n, p) <= d * rerr, axis=0)
        pval = (distributions.binom.cdf(x, n, p) +
                distributions.binom.sf(n - y, n, p))
    else:
        # Opposite (lower) tail.
        i = np.arange(np.floor(p * n) + 1)
        y = np.sum(distributions.binom.pmf(i, n, p) <= d * rerr, axis=0)
        pval = (distributions.binom.cdf(y - 1, n, p) +
                distributions.binom.sf(x - 1, n, p))

    return min(1.0, pval)
def _apply_func(x, g, func):
    """Apply ``func`` to consecutive groups of ``x``.

    ``g`` lists the interior split indices into ``x``; the implicit
    boundaries ``0`` and ``len(x)`` are added (duplicates collapsed), so
    ``func`` is evaluated on each slice ``x[g[k]:g[k+1]]``.
    """
    bounds = unique(r_[0, g, len(x)])
    pieces = [func(x[lo:hi]) for lo, hi in zip(bounds[:-1], bounds[1:])]
    return asarray(pieces)
# Named result container returned by `fligner`.
FlignerResult = namedtuple('FlignerResult', ('statistic', 'pvalue'))


def fligner(*args, **kwds):
    """
    Perform Fligner-Killeen test for equality of variance.

    Fligner's test tests the null hypothesis that all input samples
    are from populations with equal variances.  Fligner-Killeen's test is
    distribution free when populations are identical [2]_.

    Parameters
    ----------
    sample1, sample2, ... : array_like
        Arrays of sample data.  Need not be the same length.
    center : {'mean', 'median', 'trimmed'}, optional
        Keyword argument controlling which function of the data is used in
        computing the test statistic.  The default is 'median'.
    proportiontocut : float, optional
        When `center` is 'trimmed', this gives the proportion of data points
        to cut from each end. (See `scipy.stats.trim_mean`.)
        Default is 0.05.

    Returns
    -------
    statistic : float
        The test statistic.
    pvalue : float
        The p-value for the hypothesis test.

    See Also
    --------
    bartlett : A parametric test for equality of k variances in normal
               samples
    levene : A robust parametric test for equality of k variances

    Notes
    -----
    As with Levene's test there are three variants of Fligner's test that
    differ by the measure of central tendency used in the test.  See `levene`
    for more information.

    Conover et al. (1981) examine many of the existing parametric and
    nonparametric tests by extensive simulations and they conclude that the
    tests proposed by Fligner and Killeen (1976) and Levene (1960) appear to
    be superior in terms of robustness of departures from normality and
    power [3]_.

    References
    ----------
    .. [1] http://www.stat.psu.edu/~bgl/center/tr/TR993.ps
    .. [2] Fligner, M.A. and Killeen, T.J. (1976). Distribution-free
           two-sample tests for scale. 'Journal of the American Statistical
           Association.' 71(353), 210-213.
    .. [3] Park, C. and Lindsay, B. G. (1999). Robust Scale Estimation and
           Hypothesis Testing based on Quadratic Inference Function.
           Technical Report #99-03, Center for Likelihood Studies,
           Pennsylvania State University.
    .. [4] Conover, W. J., Johnson, M. E. and Johnson M. M. (1981). A
           comparative study of tests for homogeneity of variances, with
           applications to the outer continental shelf biding data.
           Technometrics, 23(4), 351-361.
    """
    # Any empty sample makes the statistic undefined.
    if any(np.asanyarray(sample).size == 0 for sample in args):
        return FlignerResult(np.nan, np.nan)

    # Parse the two supported keyword arguments by hand (signature is
    # ``*args, **kwds`` so samples can be passed positionally).
    center = 'median'
    proportiontocut = 0.05
    for kw, value in kwds.items():
        if kw == 'center':
            center = value
        elif kw == 'proportiontocut':
            proportiontocut = value
        else:
            raise TypeError("fligner() got an unexpected keyword "
                            "argument '%s'" % kw)

    k = len(args)
    if k < 2:
        raise ValueError("Must enter at least two input sample vectors.")
    if center not in ['mean', 'median', 'trimmed']:
        raise ValueError("Keyword argument <center> must be 'mean', 'median'"
                         + "or 'trimmed'.")

    if center == 'trimmed':
        # Trimmed variant: trim each sample, then center on the mean.
        args = tuple(stats.trimboth(arg, proportiontocut) for arg in args)

    def _center_of(sample):
        # Measure of central tendency selected via ``center``.
        if center == 'median':
            return np.median(sample, axis=0)
        return np.mean(sample, axis=0)

    Ni = asarray([len(sample) for sample in args])
    Yci = asarray([_center_of(sample) for sample in args])
    Ntot = np.sum(Ni, axis=0)

    # Absolute deviations from each sample's center, pooled in order, with
    # ``g`` recording the group boundaries inside the pooled list.
    Zij = [abs(asarray(sample) - Yci[i]) for i, sample in enumerate(args)]
    allZij = []
    g = [0]
    for dev in Zij:
        allZij.extend(list(dev))
        g.append(len(allZij))

    # Normal scores of the (rescaled) ranks of the pooled deviations.
    ranks = stats.rankdata(allZij)
    a = distributions.norm.ppf(ranks / (2*(Ntot + 1.0)) + 0.5)

    # Mean score per sample, then a chi-squared statistic on their spread.
    Aibar = _apply_func(a, g, np.sum) / Ni
    anbar = np.mean(a, axis=0)
    varsq = np.var(a, axis=0, ddof=1)
    Xsq = np.sum(Ni * (asarray(Aibar) - anbar)**2.0, axis=0) / varsq
    pval = distributions.chi2.sf(Xsq, k - 1)  # 1 - cdf

    return FlignerResult(Xsq, pval)
def mood(x, y, axis=0):
    """Perform Mood's test for equal scale parameters.

    Mood's two-sample test is a non-parametric test of the null
    hypothesis that `x` and `y` are drawn from the same distribution
    with the same scale parameter.

    Parameters
    ----------
    x, y : array_like
        Arrays of sample data.  They may have different lengths along
        `axis`.
    axis : int, optional
        The axis along which the samples are tested.  If None, both
        inputs are flattened and the test is done on all values.

    Returns
    -------
    z : scalar or ndarray
        The z-score for the hypothesis test.  For 1-D inputs a scalar is
        returned.
    p-value : scalar ndarray
        The p-value for the hypothesis test.

    See Also
    --------
    fligner : A non-parametric test for the equality of k variances
    ansari : A non-parametric test for the equality of 2 variances
    bartlett : A parametric test for equality of k variances in normal samples
    levene : A parametric test for equality of k variances

    Notes
    -----
    The data are assumed to be drawn from probability distributions
    ``f(x)`` and ``f(x/s) / s`` respectively, for some probability
    density function f.  The null hypothesis is that ``s == 1``.

    For multi-dimensional arrays, if the inputs are of shapes
    ``(n0, n1, n2, n3)`` and ``(n0, m1, n2, n3)``, then if ``axis=1``,
    the resulting z and p values will have shape ``(n0, n2, n3)``.
    Note that ``n1`` and ``m1`` don't have to be equal, but the other
    dimensions do.
    """
    x = np.asarray(x, dtype=float)
    y = np.asarray(y, dtype=float)

    if axis is None:
        x = x.flatten()
        y = y.flatten()
        axis = 0

    # Every axis other than `axis` must agree; those axes form the shape
    # of the returned z and p arrays.
    res_shape = tuple(x.shape[ax] for ax in range(x.ndim) if ax != axis)
    y_other = tuple(y.shape[ax] for ax in range(y.ndim) if ax != axis)
    if res_shape != y_other:
        raise ValueError("Dimensions of x and y on all axes except `axis` "
                         "should match")

    n = x.shape[axis]
    m = y.shape[axis]
    N = m + n
    if N < 3:
        raise ValueError("Not enough observations.")

    # Pool the samples, bring the test axis to the front, and collapse
    # the remaining axes so rankdata can be applied column by column.
    pooled = np.concatenate((x, y), axis=axis)
    if axis != 0:
        pooled = np.rollaxis(pooled, axis)
    pooled = pooled.reshape(pooled.shape[0], -1)

    # rankdata is not vectorized, hence the explicit loop over columns.
    all_ranks = np.zeros_like(pooled)
    for col in range(pooled.shape[1]):
        all_ranks[:, col] = stats.rankdata(pooled[:, col])

    Ri = all_ranks[:n]
    M = np.sum((Ri - (N + 1.0) / 2)**2, axis=0)

    # Normal approximation to the null distribution of M.
    mnM = n * (N * N - 1.0) / 12
    varM = m * n * (N + 1.0) * (N + 2) * (N - 2) / 180
    z = (M - mnM) / sqrt(varM)

    # Two-sided p-value: sf on the right tail, cdf on the left, times 2.
    pos = z > 0
    pval = np.zeros_like(z)
    pval[pos] = 2 * distributions.norm.sf(z[pos])
    pval[~pos] = 2 * distributions.norm.cdf(z[~pos])

    if res_shape == ():
        # 1-D inputs: return scalars, not 0-D arrays.
        z = z[0]
        pval = pval[0]
    else:
        z.shape = res_shape
        pval.shape = res_shape
    return z, pval
WilcoxonResult = namedtuple('WilcoxonResult', ('statistic', 'pvalue'))


def wilcoxon(x, y=None, zero_method="wilcox", correction=False):
    """Calculate the Wilcoxon signed-rank test.

    Tests the null hypothesis that two related paired samples come from
    the same distribution; in particular, that the distribution of the
    differences ``x - y`` is symmetric about zero.  It is the
    non-parametric analogue of the paired T-test.

    Parameters
    ----------
    x : array_like
        The first set of measurements.
    y : array_like, optional
        The second set of measurements.  If omitted, `x` is taken to be
        the differences between the two sets of measurements.
    zero_method : string, {"pratt", "wilcox", "zsplit"}, optional
        How zero-differences are treated:

        "pratt":
            included in the ranking process (more conservative)
        "wilcox":
            discarded (the default)
        "zsplit":
            ranked like "pratt", but their ranks are split evenly
            between the positive and negative sides
    correction : bool, optional
        If True, apply a continuity correction by adjusting the rank
        statistic by 0.5 towards the mean when computing the
        z-statistic.  Default is False.

    Returns
    -------
    statistic : float
        The smaller of the two signed-rank sums (positive or negative
        differences).
    pvalue : float
        The two-sided p-value for the test.

    Notes
    -----
    The p-value relies on the normal approximation, so the sample
    should be large; a typical rule is to require n > 20.

    References
    ----------
    .. [1] http://en.wikipedia.org/wiki/Wilcoxon_signed-rank_test
    """
    if zero_method not in ("wilcox", "pratt", "zsplit"):
        raise ValueError("Zero method should be either 'wilcox' "
                         "or 'pratt' or 'zsplit'")

    if y is None:
        diffs = x
    else:
        x, y = map(asarray, (x, y))
        if len(x) != len(y):
            raise ValueError('Unequal N in wilcoxon. Aborting.')
        diffs = x - y

    if zero_method == "wilcox":
        # Wilcox treatment: drop every zero difference before ranking.
        diffs = compress(np.not_equal(diffs, 0), diffs, axis=-1)

    count = len(diffs)
    if count < 10:
        warnings.warn("Warning: sample size too small for normal approximation.")

    ranks = stats.rankdata(abs(diffs))
    r_plus = np.sum((diffs > 0) * ranks, axis=0)
    r_minus = np.sum((diffs < 0) * ranks, axis=0)

    if zero_method == "zsplit":
        # Split the total rank of the zero differences evenly between
        # the positive and the negative side.
        half_zero_rank = np.sum((diffs == 0) * ranks, axis=0) / 2.
        r_plus += half_zero_rank
        r_minus += half_zero_rank

    T = min(r_plus, r_minus)
    mn = count * (count + 1.) * 0.25
    se = count * (count + 1.) * (2. * count + 1.)

    if zero_method == "pratt":
        # Zero differences were ranked but do not enter the tie correction.
        ranks = ranks[diffs != 0]

    replist, repnum = find_repeats(ranks)
    if repnum.size != 0:
        # Correction for repeated elements.
        se -= 0.5 * (repnum * (repnum * repnum - 1)).sum()

    se = sqrt(se / 24)
    # Optional continuity correction of 0.5 towards the mean.
    shift = 0.5 * int(bool(correction)) * np.sign(T - mn)
    z = (T - mn - shift) / se
    prob = 2. * distributions.norm.sf(abs(z))
    return WilcoxonResult(T, prob)
@setastest(False)
def median_test(*args, **kwds):
    """
    Mood's median test.

    Test that two or more samples come from populations with the same median.

    Let ``n = len(args)`` be the number of samples.  The "grand median" of
    all the data is computed, and a contingency table is formed by
    classifying the values in each sample as being above or below the grand
    median.  The contingency table, along with `correction` and `lambda_`,
    are passed to `scipy.stats.chi2_contingency` to compute the test
    statistic and p-value.

    Parameters
    ----------
    sample1, sample2, ... : array_like
        The set of samples.  There must be at least two samples.
        Each sample must be a one-dimensional sequence containing at least
        one value.  The samples are not required to have the same length.
    ties : str, optional
        Determines how values equal to the grand median are classified in
        the contingency table.  The string must be one of::

            "below":
                Values equal to the grand median are counted as "below".
            "above":
                Values equal to the grand median are counted as "above".
            "ignore":
                Values equal to the grand median are not counted.

        The default is "below".
    correction : bool, optional
        If True, *and* there are just two samples, apply Yates' correction
        for continuity when computing the test statistic associated with
        the contingency table.  Default is True.
    lambda_ : float or str, optional.
        By default, the statistic computed in this test is Pearson's
        chi-squared statistic.  `lambda_` allows a statistic from the
        Cressie-Read power divergence family to be used instead.  See
        `power_divergence` for details.
        Default is 1 (Pearson's chi-squared statistic).

    Returns
    -------
    stat : float
        The test statistic.  The statistic that is returned is determined
        by `lambda_`.  The default is Pearson's chi-squared statistic.
    p : float
        The p-value of the test.
    m : float
        The grand median.
    table : ndarray
        The contingency table.  The shape of the table is (2, n), where
        n is the number of samples.  The first row holds the counts of the
        values above the grand median, and the second row holds the counts
        of the values below the grand median.  The table allows further
        analysis with, for example, `scipy.stats.chi2_contingency`, or with
        `scipy.stats.fisher_exact` if there are two samples, without having
        to recompute the table.

    See Also
    --------
    kruskal : Compute the Kruskal-Wallis H-test for independent samples.
    mannwhitneyu : Computes the Mann-Whitney rank test on samples x and y.

    Notes
    -----
    .. versionadded:: 0.15.0

    References
    ----------
    .. [1] Mood, A. M., Introduction to the Theory of Statistics. McGraw-Hill
        (1950), pp. 394-399.
    .. [2] Zar, J. H., Biostatistical Analysis, 5th ed. Prentice Hall (2010).
        See Sections 8.12 and 10.15.

    Examples
    --------
    A biologist runs an experiment in which there are three groups of plants.
    Group 1 has 16 plants, group 2 has 15 plants, and group 3 has 17 plants.
    Each plant produces a number of seeds.

    >>> g1 = [10, 14, 14, 18, 20, 22, 24, 25, 31, 31, 32, 39, 43, 43, 48, 49]
    >>> g2 = [28, 30, 31, 33, 34, 35, 36, 40, 44, 55, 57, 61, 91, 92, 99]
    >>> g3 = [0, 3, 9, 22, 23, 25, 25, 33, 34, 34, 40, 45, 46, 48, 62, 67, 84]
    >>> from scipy.stats import median_test
    >>> stat, p, med, tbl = median_test(g1, g2, g3)

    The median is

    >>> med
    34.0

    and the contingency table is

    >>> tbl
    array([[ 5, 10,  7],
           [11,  5, 10]])

    `p` is too large to conclude that the medians are not the same:

    >>> p
    0.12609082774093244

    The "G-test" can be performed by passing ``lambda_="log-likelihood"``:

    >>> g, p, med, tbl = median_test(g1, g2, g3, lambda_="log-likelihood")
    >>> p
    0.12224779737117837

    The median occurs several times in the data, so we'll get a different
    result if, for example, ``ties="above"`` is used:

    >>> stat, p, med, tbl = median_test(g1, g2, g3, ties="above")
    >>> p
    0.063873276069553273
    >>> tbl
    array([[ 5, 11,  9],
           [11,  4,  8]])

    This example demonstrates that if the data set is not large and there
    are values equal to the median, the p-value can be sensitive to the
    choice of `ties`.
    """
    # Pop the supported keyword arguments; anything left over is an error.
    ties = kwds.pop('ties', 'below')
    correction = kwds.pop('correction', True)
    lambda_ = kwds.pop('lambda_', None)
    if len(kwds) > 0:
        # BUG FIX: on Python 3, dict.keys() is a view and is not
        # subscriptable, so `kwds.keys()[0]` itself raised a TypeError
        # instead of producing the intended error message.  Grab an
        # arbitrary remaining key with next(iter(...)) instead.
        bad_kwd = next(iter(kwds))
        raise TypeError("median_test() got an unexpected keyword "
                        "argument %r" % bad_kwd)

    if len(args) < 2:
        raise ValueError('median_test requires two or more samples.')

    ties_options = ['below', 'above', 'ignore']
    if ties not in ties_options:
        raise ValueError("invalid 'ties' option '%s'; 'ties' must be one "
                         "of: %s" % (ties, str(ties_options)[1:-1]))

    data = [np.asarray(arg) for arg in args]

    # Validate the sizes and shapes of the arguments.
    for k, d in enumerate(data):
        if d.size == 0:
            raise ValueError("Sample %d is empty. All samples must "
                             "contain at least one value." % (k + 1))
        if d.ndim != 1:
            raise ValueError("Sample %d has %d dimensions. All "
                             "samples must be one-dimensional sequences." %
                             (k + 1, d.ndim))

    grand_median = np.median(np.concatenate(data))

    # Create the contingency table: row 0 counts values above the grand
    # median, row 1 counts values below; ties go wherever `ties` says.
    table = np.zeros((2, len(data)), dtype=np.int64)
    for k, sample in enumerate(data):
        nabove = count_nonzero(sample > grand_median)
        nbelow = count_nonzero(sample < grand_median)
        nequal = sample.size - (nabove + nbelow)
        table[0, k] += nabove
        table[1, k] += nbelow
        if ties == "below":
            table[1, k] += nequal
        elif ties == "above":
            table[0, k] += nequal

    # Check that no row or column of the table is all zero.
    # Such a table can not be given to chi2_contingency, because it would
    # have a zero in the table of expected frequencies.
    rowsums = table.sum(axis=1)
    if rowsums[0] == 0:
        raise ValueError("All values are below the grand median (%r)." %
                         grand_median)
    if rowsums[1] == 0:
        raise ValueError("All values are above the grand median (%r)." %
                         grand_median)
    if ties == "ignore":
        # We already checked that each sample has at least one value, but it
        # is possible that all those values equal the grand median.  If
        # `ties` is "ignore", that would result in a column of zeros in
        # `table`.  We check for that case here.
        zero_cols = np.where((table == 0).all(axis=0))[0]
        if len(zero_cols) > 0:
            msg = ("All values in sample %d are equal to the grand "
                   "median (%r), so they are ignored, resulting in an "
                   "empty sample." % (zero_cols[0] + 1, grand_median))
            raise ValueError(msg)

    stat, p, dof, expected = chi2_contingency(table, lambda_=lambda_,
                                              correction=correction)
    return stat, p, grand_median, table
def _hermnorm(N):
# return the negatively normalized hermite polynomials up to order N-1
# (inclusive)
# using the recursive relationship
# p_n+1 = p_n(x)' - x*p_n(x)
# and p_0(x) = 1
plist = [None] * N
plist[0] = poly1d(1)
for n in range(1, N):
plist[n] = plist[n-1].deriv() - poly1d([1, 0]) * plist[n-1]
return plist
# Note: when removing pdf_fromgamma, also remove the _hermnorm support function
@np.deprecate(message="scipy.stats.pdf_fromgamma is deprecated in scipy 0.16.0 "
              "in favour of statsmodels.distributions.ExpandedNormal.")
def pdf_fromgamma(g1, g2, g3=0.0, g4=None):
    """Build an approximate pdf from moment-based expansion coefficients.

    Constructs a polynomial-times-normal density of Gram-Charlier /
    Edgeworth type (cf. the replacement named in the deprecation message,
    ``statsmodels.distributions.ExpandedNormal``).

    Parameters
    ----------
    g1, g2, g3, g4 :
        Expansion coefficients; presumably standardized cumulant ratios
        (skewness/kurtosis-like quantities) -- TODO confirm against the
        statsmodels replacement.  If `g4` is None it defaults to
        ``3 * g2**2``.

    Returns
    -------
    thefunc : callable
        Function of one variable that evaluates the approximate pdf.
    """
    if g4 is None:
        g4 = 3 * g2**2
    # Scale and location implied by the coefficients.
    sigsq = 1.0 / g2
    sig = sqrt(sigsq)
    mu = g1 * sig**3.0
    # Hermite polynomials of degree 0..12, each rescaled by 1/sig**k.
    p12 = _hermnorm(13)
    for k in range(13):
        p12[k] /= sig**k

    # Add all of the terms to polynomial: a fixed linear combination of
    # the rescaled Hermite polynomials with coefficients built from
    # g1..g4 (do not reorder or "simplify" -- the exact coefficients are
    # the whole content of this expansion).
    totp = (p12[0] - g1/6.0*p12[3] +
            g2/24.0*p12[4] + g1**2/72.0 * p12[6] -
            g3/120.0*p12[5] - g1*g2/144.0*p12[7] - g1**3.0/1296.0*p12[9] +
            g4/720*p12[6] + (g2**2/1152.0 + g1*g3/720)*p12[8] +
            g1**2 * g2/1728.0*p12[10] + g1**4.0 / 31104.0*p12[12])
    # Final normalization
    totp = totp / sqrt(2*pi) / sig

    def thefunc(x):
        # Evaluate the expansion polynomial at the standardized point,
        # weighted by the standard normal kernel.
        xn = (x - mu) / sig
        return totp(xn) * exp(-xn**2 / 2.)

    return thefunc
def _circfuncs_common(samples, high, low):
samples = np.asarray(samples)
if samples.size == 0:
return np.nan, np.nan
ang = (samples - low)*2*pi / (high - low)
return samples, ang
def circmean(samples, high=2*pi, low=0, axis=None):
    """Compute the circular mean for samples in a range.

    Parameters
    ----------
    samples : array_like
        Input array.
    high : float or int, optional
        High boundary for the circular mean range.  Default is ``2*pi``.
    low : float or int, optional
        Low boundary for the circular mean range.  Default is 0.
    axis : int, optional
        Axis along which means are computed.  By default the mean of
        the flattened array is computed.

    Returns
    -------
    circmean : float
        Circular mean.
    """
    samples, ang = _circfuncs_common(samples, high, low)
    # Direction of the mean resultant vector of the unit phasors.
    mean_angle = angle(np.mean(exp(1j * ang), axis=axis))
    negative = mean_angle < 0
    # np.angle returns values in (-pi, pi]; shift negatives into [0, 2*pi).
    if negative.ndim > 0:
        mean_angle[negative] += 2*pi
    elif negative:
        mean_angle += 2*pi
    return mean_angle*(high - low)/2.0/pi + low
def circvar(samples, high=2*pi, low=0, axis=None):
    """Compute the circular variance for samples assumed to be in a range.

    Parameters
    ----------
    samples : array_like
        Input array.
    low : float or int, optional
        Low boundary for the circular variance range.  Default is 0.
    high : float or int, optional
        High boundary for the circular variance range.  Default is ``2*pi``.
    axis : int, optional
        Axis along which variances are computed.  By default the
        variance of the flattened array is computed.

    Returns
    -------
    circvar : float
        Circular variance.

    Notes
    -----
    This definition of circular variance approaches the 'linear'
    variance in the limit of small angles.
    """
    samples, ang = _circfuncs_common(samples, high, low)
    # Length of the mean resultant vector of the unit phasors.
    R = abs(np.mean(exp(1j * ang), axis=axis))
    return ((high - low)/2.0/pi)**2 * 2 * log(1/R)
def circstd(samples, high=2*pi, low=0, axis=None):
    """Compute the circular standard deviation for samples in [low, high].

    Parameters
    ----------
    samples : array_like
        Input array.
    low : float or int, optional
        Low boundary for the circular standard deviation range.
        Default is 0.
    high : float or int, optional
        High boundary for the circular standard deviation range.
        Default is ``2*pi``.
    axis : int, optional
        Axis along which standard deviations are computed.  By default
        the standard deviation of the flattened array is computed.

    Returns
    -------
    circstd : float
        Circular standard deviation.

    Notes
    -----
    This definition of circular standard deviation approaches the
    'linear' standard deviation in the limit of small angles.
    """
    samples, ang = _circfuncs_common(samples, high, low)
    # Length of the mean resultant vector of the unit phasors.
    R = abs(np.mean(exp(1j * ang), axis=axis))
    return ((high - low)/2.0/pi) * sqrt(-2*log(R))
# Tests to include (from R) -- some of these already in stats.
########
# X Ansari-Bradley
# X Bartlett (and Levene)
# X Binomial
# Y Pearson's Chi-squared (stats.chisquare)
# Y Association Between Paired samples (stats.pearsonr, stats.spearmanr)
# stats.kendalltau) -- these need work though
# Fisher's exact test
# X Fligner-Killeen Test
# Y Friedman Rank Sum (stats.friedmanchisquare?)
# Y Kruskal-Wallis
# Y Kolmogorov-Smirnov
# Cochran-Mantel-Haenszel Chi-Squared for Count
# McNemar's Chi-squared for Count
# X Mood Two-Sample
# X Test For Equal Means in One-Way Layout (see stats.ttest also)
# Pairwise Comparisons of proportions
# Pairwise t tests
# Tabulate p values for pairwise comparisons
# Pairwise Wilcoxon rank sum tests
# Power calculations two sample test of prop.
# Power calculations for one and two sample t tests
# Equal or Given Proportions
# Trend in Proportions
# Quade Test
# Y Student's T Test
# Y F Test to compare two variances
# XY Wilcoxon Rank Sum and Signed Rank Tests
| bsd-3-clause |
piyush8311/ns3-arp | src/core/examples/sample-rng-plot.py | 188 | 1246 | # -*- Mode:Python; -*-
# /*
# * This program is free software; you can redistribute it and/or modify
# * it under the terms of the GNU General Public License version 2 as
# * published by the Free Software Foundation
# *
# * This program is distributed in the hope that it will be useful,
# * but WITHOUT ANY WARRANTY; without even the implied warranty of
# * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# * GNU General Public License for more details.
# *
# * You should have received a copy of the GNU General Public License
# * along with this program; if not, write to the Free Software
# * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
# */
# Demonstrate use of ns-3 as a random number generator integrated with
# plotting tools; adapted from Gustavo Carneiro's ns-3 tutorial
import numpy as np
import matplotlib.pyplot as plt
import ns.core

# Draw 10000 samples from ns-3's normal distribution with
# mean mu = 100 and variance = 225 (i.e. sigma = 15).
rng = ns.core.NormalVariable(100.0, 225.0)
x = [rng.GetValue() for t in range(10000)]

# The histogram of the data, normalized to unit area.
# FIX: the `normed` keyword was removed in matplotlib 3.1;
# `density=True` (available since matplotlib 2.1) is the replacement.
n, bins, patches = plt.hist(x, 50, density=True, facecolor='g', alpha=0.75)

plt.title('ns-3 histogram')
plt.text(60, .025, r'$\mu=100,\ \sigma=15$')
plt.axis([40, 160, 0, 0.03])
plt.grid(True)
plt.show()
| gpl-2.0 |
microsoft/task_oriented_dialogue_as_dataflow_synthesis | src/dataflow/onmt_helpers/evaluate_onmt_predictions.py | 1 | 6406 | # Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
"""
Semantic Machines\N{TRADE MARK SIGN} software.
Evaluates 1best predictions.
Computes both turn-level and dialogue-level accuracy.
"""
import argparse
import csv
import json
from dataclasses import dataclass
from typing import List, Optional, Tuple
import jsons
import pandas as pd
from dataflow.core.dialogue import TurnId
from dataflow.core.io import load_jsonl_file
@dataclass
class EvaluationScores:
    """Accumulated turn- and dialogue-level evaluation counts."""

    # Turn-level counts.
    num_total_turns: int = 0
    num_correct_turns: int = 0
    num_turns_before_first_error: int = 0
    # Dialogue-level counts.
    num_total_dialogues: int = 0
    num_correct_dialogues: int = 0

    @property
    def accuracy(self) -> float:
        """Fraction of turns predicted correctly (0 when there are no turns)."""
        if not self.num_total_turns:
            return 0
        return self.num_correct_turns / self.num_total_turns

    @property
    def ave_num_turns_before_first_error(self) -> float:
        """Average length of the correct prefix per dialogue."""
        if not self.num_total_dialogues:
            return 0
        return self.num_turns_before_first_error / self.num_total_dialogues

    @property
    def pct_correct_dialogues(self) -> float:
        """Fraction of dialogues in which every turn is correct."""
        if not self.num_total_dialogues:
            return 0
        return self.num_correct_dialogues / self.num_total_dialogues

    def __iadd__(self, other: object) -> "EvaluationScores":
        if not isinstance(other, EvaluationScores):
            raise ValueError()
        self.num_total_turns += other.num_total_turns
        self.num_correct_turns += other.num_correct_turns
        self.num_turns_before_first_error += other.num_turns_before_first_error
        self.num_total_dialogues += other.num_total_dialogues
        self.num_correct_dialogues += other.num_correct_dialogues
        return self

    def __add__(self, other: object) -> "EvaluationScores":
        if not isinstance(other, EvaluationScores):
            raise ValueError()
        # Start from a zeroed result and accumulate both operands.
        total = EvaluationScores()
        total += self
        total += other
        return total
def evaluate_dialogue(turns: List[Tuple[int, bool]]) -> EvaluationScores:
    """Score a single dialogue.

    Args:
        turns: (turn_index, is_correct) pairs for one dialogue, in any order.

    Returns:
        Scores for this dialogue: turn counts, length of the correct prefix,
        and whether the whole dialogue was correct.
    """
    num_correct = 0
    prefix_correct = 0
    error_seen = False
    # Walk the turns in index order so the "correct prefix" is well defined.
    for _index, is_correct in sorted(turns, key=lambda pair: pair[0]):
        if is_correct:
            num_correct += 1
            if not error_seen:
                prefix_correct += 1
        else:
            error_seen = True
    return EvaluationScores(
        num_total_turns=len(turns),
        num_correct_turns=num_correct,
        num_turns_before_first_error=prefix_correct,
        num_total_dialogues=1,
        num_correct_dialogues=0 if error_seen else 1,
    )
def evaluate_dataset(
    prediction_report_df: pd.DataFrame, field_name: str,
) -> EvaluationScores:
    """Aggregate evaluation scores over every dialogue in a report.

    Args:
        prediction_report_df: prediction report with "dialogueId",
            "turnIndex" and the boolean correctness column `field_name`.
        field_name: name of the correctness column to score.
    """
    total = EvaluationScores()
    for _dialogue_id, dialogue_df in prediction_report_df.groupby("dialogueId"):
        turns = [
            (int(row.get("turnIndex")), row.get(field_name))
            for _, row in dialogue_df.iterrows()
        ]
        total += evaluate_dialogue(turns)
    return total
def main(
    prediction_report_tsv: str,
    datum_ids_jsonl: Optional[str],
    use_leaderboard_metric: bool,
    scores_json: str,
) -> None:
    """Score a prediction report and write the scores to `scores_json`.

    Args:
        prediction_report_tsv: TSV prediction report to evaluate.
        datum_ids_jsonl: optional JSONL file of TurnIds; when given, only
            those turns are evaluated.
        use_leaderboard_metric: score the "isCorrectLeaderboard*" columns
            instead of the "isCorrect*" columns.
        scores_json: path of the output scores JSON file.
    """
    report_df = pd.read_csv(
        prediction_report_tsv,
        sep="\t",
        encoding="utf-8",
        quoting=csv.QUOTE_ALL,
        na_values=None,
        keep_default_na=False,
    )
    # The report is expected to be fully populated.
    assert not report_df.isnull().any().any()

    if datum_ids_jsonl:
        # Restrict the report to the requested (dialogueId, turnIndex) pairs.
        datum_ids = set(
            load_jsonl_file(data_jsonl=datum_ids_jsonl, cls=TurnId, verbose=False)
        )
        keep = [
            TurnId(dialogue_id=row.get("dialogueId"), turn_index=row.get("turnIndex"))
            in datum_ids
            for _, row in report_df.iterrows()
        ]
        report_df = report_df.loc[keep]

    # Choose which pair of correctness columns to score.
    if use_leaderboard_metric:
        field_name = "isCorrectLeaderboard"
        field_name_ignoring_refer = "isCorrectLeaderboardIgnoringRefer"
    else:
        field_name = "isCorrect"
        field_name_ignoring_refer = "isCorrectIgnoringRefer"
    scores_not_ignoring_refer = evaluate_dataset(report_df, field_name)
    scores_ignoring_refer = evaluate_dataset(report_df, field_name_ignoring_refer)

    scores_dict = {
        "notIgnoringRefer": jsons.dump(scores_not_ignoring_refer),
        "ignoringRefer": jsons.dump(scores_ignoring_refer),
    }
    with open(scores_json, "w") as fp:
        fp.write(json.dumps(scores_dict, indent=2))
        fp.write("\n")
def add_arguments(argument_parser: argparse.ArgumentParser) -> None:
    """Register this script's command-line flags on `argument_parser`."""
    add = argument_parser.add_argument
    add("--prediction_report_tsv", help="the prediction report tsv file")
    add("--datum_ids_jsonl", default=None,
        help="if set, only evaluate on these turns")
    add("--use_leaderboard_metric", default=False, action="store_true",
        help="if set, use the isCorrectLeaderboard field instead of isCorrect field in the prediction report")
    add("--scores_json", help="output scores json file")
if __name__ == "__main__":
    # Build the CLI: the module docstring becomes the help text, and
    # RawTextHelpFormatter preserves its line breaks.
    cmdline_parser = argparse.ArgumentParser(
        description=__doc__, formatter_class=argparse.RawTextHelpFormatter
    )
    add_arguments(cmdline_parser)
    args = cmdline_parser.parse_args()
    print("Semantic Machines\N{TRADE MARK SIGN} software.")
    if not args.use_leaderboard_metric:
        # The TACL2020 numbers and the leaderboard metric differ in label
        # canonicalization, so warn when the non-leaderboard metric is used.
        print(
            "WARNING: The flag --use_leaderboard_metric is not set."
            " The reported results will be consistent with the numbers"
            " reported in the TACL2020 paper. To report on the leaderboard evaluation metric, please use"
            " --use_leaderboard_metric, which canonicalizes the labels and predictions."
        )
    main(
        prediction_report_tsv=args.prediction_report_tsv,
        datum_ids_jsonl=args.datum_ids_jsonl,
        use_leaderboard_metric=args.use_leaderboard_metric,
        scores_json=args.scores_json,
    )
| mit |
ningchi/scikit-learn | examples/text/hashing_vs_dict_vectorizer.py | 284 | 3265 | """
===========================================
FeatureHasher and DictVectorizer Comparison
===========================================
Compares FeatureHasher and DictVectorizer by using both to vectorize
text documents.
The example demonstrates syntax and speed only; it doesn't actually do
anything useful with the extracted vectors. See the example scripts
{document_classification_20newsgroups,clustering}.py for actual learning
on text documents.
A discrepancy between the number of terms reported for DictVectorizer and
for FeatureHasher is to be expected due to hash collisions.
"""
# Author: Lars Buitinck <L.J.Buitinck@uva.nl>
# License: BSD 3 clause
from __future__ import print_function
from collections import defaultdict
import re
import sys
from time import time
import numpy as np
from sklearn.datasets import fetch_20newsgroups
from sklearn.feature_extraction import DictVectorizer, FeatureHasher
def n_nonzero_columns(X):
    """Return how many columns of the CSR matrix X contain a non-zero entry."""
    _, col_indices = X.nonzero()
    return len(np.unique(col_indices))
def tokens(doc):
    """Lazily yield lower-cased word tokens extracted from doc.

    A simple ``\\w+`` regex stands in for real tokenization; see
    CountVectorizer or TfidfVectorizer for a more principled approach.
    """
    for word in re.findall(r"\w+", doc):
        yield word.lower()
def token_freqs(doc):
    """Return a dict mapping each token of doc to its occurrence count."""
    counts = defaultdict(int)
    for term in tokens(doc):
        counts[term] += 1
    return counts
# Newsgroup categories to load; a small subset keeps the demo fast.
categories = [
    'alt.atheism',
    'comp.graphics',
    'comp.sys.ibm.pc.hardware',
    'misc.forsale',
    'rec.autos',
    'sci.space',
    'talk.religion.misc',
]
# Uncomment the following line to use a larger set (11k+ documents)
#categories = None

print(__doc__)
print("Usage: %s [n_features_for_hashing]" % sys.argv[0])
print(" The default number of features is 2**18.")
print()

# Optional CLI argument: the hash-space size for FeatureHasher.
try:
    n_features = int(sys.argv[1])
except IndexError:
    n_features = 2 ** 18
except ValueError:
    print("not a valid number of features: %r" % sys.argv[1])
    sys.exit(1)

# Downloads the corpus on first use.
print("Loading 20 newsgroups training data")
raw_data = fetch_20newsgroups(subset='train', categories=categories).data
data_size_mb = sum(len(s.encode('utf-8')) for s in raw_data) / 1e6
print("%d documents - %0.3fMB" % (len(raw_data), data_size_mb))
print()

# Benchmark 1: DictVectorizer on token -> frequency dicts.
print("DictVectorizer")
t0 = time()
vectorizer = DictVectorizer()
vectorizer.fit_transform(token_freqs(d) for d in raw_data)
duration = time() - t0
print("done in %fs at %0.3fMB/s" % (duration, data_size_mb / duration))
# NOTE(review): get_feature_names() was removed in scikit-learn 1.2 in
# favour of get_feature_names_out(); fine for the sklearn version this
# example targets -- confirm before running on a modern install.
print("Found %d unique terms" % len(vectorizer.get_feature_names()))
print()

# Benchmark 2: FeatureHasher on the same frequency dicts.
print("FeatureHasher on frequency dicts")
t0 = time()
hasher = FeatureHasher(n_features=n_features)
X = hasher.transform(token_freqs(d) for d in raw_data)
duration = time() - t0
print("done in %fs at %0.3fMB/s" % (duration, data_size_mb / duration))
print("Found %d unique terms" % n_nonzero_columns(X))
print()

# Benchmark 3: FeatureHasher tokenizing the raw strings itself.
print("FeatureHasher on raw tokens")
t0 = time()
hasher = FeatureHasher(n_features=n_features, input_type="string")
X = hasher.transform(tokens(d) for d in raw_data)
duration = time() - t0
print("done in %fs at %0.3fMB/s" % (duration, data_size_mb / duration))
print("Found %d unique terms" % n_nonzero_columns(X))
| bsd-3-clause |
chatcannon/scipy | scipy/signal/windows.py | 20 | 54134 | """The suite of window functions."""
from __future__ import division, print_function, absolute_import
import warnings
import numpy as np
from scipy import fftpack, linalg, special
from scipy._lib.six import string_types
__all__ = ['boxcar', 'triang', 'parzen', 'bohman', 'blackman', 'nuttall',
'blackmanharris', 'flattop', 'bartlett', 'hanning', 'barthann',
'hamming', 'kaiser', 'gaussian', 'general_gaussian', 'chebwin',
'slepian', 'cosine', 'hann', 'exponential', 'tukey', 'get_window']
def boxcar(M, sym=True):
    """Return a boxcar or rectangular window.

    Included for completeness, this is equivalent to no window at all.

    Parameters
    ----------
    M : int
        Number of points in the output window. If zero or less, an empty
        array is returned.
    sym : bool, optional
        Whether the window is symmetric. (Has no effect for boxcar.)

    Returns
    -------
    w : ndarray
        The window, with the maximum value normalized to 1.

    Examples
    --------
    Plot the window and its frequency response:

    >>> from scipy import signal
    >>> from scipy.fftpack import fft, fftshift
    >>> import matplotlib.pyplot as plt

    >>> window = signal.boxcar(51)
    >>> plt.plot(window)
    >>> plt.title("Boxcar window")
    >>> plt.ylabel("Amplitude")
    >>> plt.xlabel("Sample")

    >>> plt.figure()
    >>> A = fft(window, 2048) / (len(window)/2.0)
    >>> freq = np.linspace(-0.5, 0.5, len(A))
    >>> response = 20 * np.log10(np.abs(fftshift(A / abs(A).max())))
    >>> plt.plot(freq, response)
    >>> plt.axis([-0.5, 0.5, -120, 0])
    >>> plt.title("Frequency response of the boxcar window")
    >>> plt.ylabel("Normalized magnitude [dB]")
    >>> plt.xlabel("Normalized frequency [cycles per sample]")

    """
    # Consistency fix: the other windows in this module (triang, parzen,
    # ...) return an empty array for M < 1, whereas np.ones(M) would raise
    # for negative M.  Apply the same guard here.
    if M < 1:
        return np.array([])
    return np.ones(M, float)
def triang(M, sym=True):
    """Return a triangular window.

    Parameters
    ----------
    M : int
        Number of points in the output window. If zero or less, an empty
        array is returned.
    sym : bool, optional
        When True (default), generates a symmetric window, for use in
        filter design.  When False, generates a periodic window, for use
        in spectral analysis.

    Returns
    -------
    w : ndarray
        The window, with the maximum value normalized to 1 (though the
        value 1 does not appear if `M` is even and `sym` is True).
    """
    if M < 1:
        return np.array([])
    if M == 1:
        return np.ones(1, 'd')

    odd = M % 2
    # A periodic window of even length is built from a symmetric window
    # one sample longer, whose last sample is dropped at the end.
    if not sym and not odd:
        M = M + 1

    n = np.arange(1, (M + 1) // 2 + 1)
    if M % 2:
        # Odd length: the peak value 1 sits in the middle.
        half = 2 * n / (M + 1.0)
        w = np.r_[half, half[-2::-1]]
    else:
        # Even length: the two middle samples share the maximum.
        half = (2 * n - 1.0) / M
        w = np.r_[half, half[::-1]]

    if not sym and not odd:
        w = w[:-1]
    return w
def parzen(M, sym=True):
    """Return a Parzen window.

    Parameters
    ----------
    M : int
        Number of points in the output window. If zero or less, an empty
        array is returned.
    sym : bool, optional
        When True (default), generates a symmetric window, for use in
        filter design.  When False, generates a periodic window, for use
        in spectral analysis.

    Returns
    -------
    w : ndarray
        The window, with the maximum value normalized to 1 (though the
        value 1 does not appear if `M` is even and `sym` is True).
    """
    if M < 1:
        return np.array([])
    if M == 1:
        return np.ones(1, 'd')
    # Periodic windows of even length: compute one extra sample, then
    # truncate at the end.
    extended = not sym and M % 2 == 0
    if extended:
        M += 1
    # Sample positions centred on zero: -(M-1)/2 ... (M-1)/2.
    n = np.arange(-(M - 1) / 2.0, (M - 1) / 2.0 + 0.5, 1.0)
    half_M = M / 2.0
    outer = np.extract(n < -(M - 1) / 4.0, n)
    inner = np.extract(np.abs(n) <= (M - 1) / 4.0, n)
    # Outer quarters: a cubic taper falling to the window edges.
    w_outer = 2 * (1 - np.abs(outer) / half_M) ** 3.0
    # Inner half: the flatter central polynomial piece.
    ratio = np.abs(inner) / half_M
    w_inner = 1 - 6 * ratio ** 2.0 + 6 * ratio ** 3.0
    w = np.r_[w_outer, w_inner, w_outer[::-1]]
    return w[:-1] if extended else w
def bohman(M, sym=True):
    """Return a Bohman window.

    Parameters
    ----------
    M : int
        Number of points in the output window. If zero or less, an empty
        array is returned.
    sym : bool, optional
        When True (default), generates a symmetric window, for use in
        filter design.  When False, generates a periodic window, for use
        in spectral analysis.

    Returns
    -------
    w : ndarray
        The window, with the maximum value normalized to 1 (though the
        value 1 does not appear if `M` is even and `sym` is True).
    """
    if M < 1:
        return np.array([])
    if M == 1:
        return np.ones(1, 'd')
    extended = not sym and M % 2 == 0
    if extended:
        M += 1
    # Interior sample positions mapped onto (0, 1); the Bohman window's
    # end points are exactly zero and are appended explicitly below.
    interior = np.abs(np.linspace(-1, 1, M)[1:-1])
    body = ((1 - interior) * np.cos(np.pi * interior) +
            np.sin(np.pi * interior) / np.pi)
    w = np.r_[0, body, 0]
    return w[:-1] if extended else w
def blackman(M, sym=True):
r"""
Return a Blackman window.
The Blackman window is a taper formed by using the first three terms of
a summation of cosines. It was designed to have close to the minimal
leakage possible. It is close to optimal, only slightly worse than a
Kaiser window.
Parameters
----------
M : int
Number of points in the output window. If zero or less, an empty
array is returned.
sym : bool, optional
When True (default), generates a symmetric window, for use in filter
design.
When False, generates a periodic window, for use in spectral analysis.
Returns
-------
w : ndarray
The window, with the maximum value normalized to 1 (though the value 1
does not appear if `M` is even and `sym` is True).
Notes
-----
The Blackman window is defined as
.. math:: w(n) = 0.42 - 0.5 \cos(2\pi n/M) + 0.08 \cos(4\pi n/M)
Most references to the Blackman window come from the signal processing
literature, where it is used as one of many windowing functions for
smoothing values. It is also known as an apodization (which means
"removing the foot", i.e. smoothing discontinuities at the beginning
and end of the sampled signal) or tapering function. It is known as a
"near optimal" tapering function, almost as good (by some measures)
as the Kaiser window.
References
----------
.. [1] Blackman, R.B. and Tukey, J.W., (1958) The measurement of power
spectra, Dover Publications, New York.
.. [2] Oppenheim, A.V., and R.W. Schafer. Discrete-Time Signal Processing.
Upper Saddle River, NJ: Prentice-Hall, 1999, pp. 468-471.
Examples
--------
Plot the window and its frequency response:
>>> from scipy import signal
>>> from scipy.fftpack import fft, fftshift
>>> import matplotlib.pyplot as plt
>>> window = signal.blackman(51)
>>> plt.plot(window)
>>> plt.title("Blackman window")
>>> plt.ylabel("Amplitude")
>>> plt.xlabel("Sample")
>>> plt.figure()
>>> A = fft(window, 2048) / (len(window)/2.0)
>>> freq = np.linspace(-0.5, 0.5, len(A))
>>> response = 20 * np.log10(np.abs(fftshift(A / abs(A).max())))
>>> plt.plot(freq, response)
>>> plt.axis([-0.5, 0.5, -120, 0])
>>> plt.title("Frequency response of the Blackman window")
>>> plt.ylabel("Normalized magnitude [dB]")
>>> plt.xlabel("Normalized frequency [cycles per sample]")
"""
# Docstring adapted from NumPy's blackman function
if M < 1:
return np.array([])
if M == 1:
return np.ones(1, 'd')
odd = M % 2
if not sym and not odd:
M = M + 1
n = np.arange(0, M)
w = (0.42 - 0.5 * np.cos(2.0 * np.pi * n / (M - 1)) +
0.08 * np.cos(4.0 * np.pi * n / (M - 1)))
if not sym and not odd:
w = w[:-1]
return w
def nuttall(M, sym=True):
    """Return a minimum 4-term Blackman-Harris window according to Nuttall.

    Parameters
    ----------
    M : int
        Number of points in the output window. If zero or less, an empty
        array is returned.
    sym : bool, optional
        When True (default), generates a symmetric window, for use in
        filter design.  When False, generates a periodic window, for use
        in spectral analysis.

    Returns
    -------
    w : ndarray
        The window, with the maximum value normalized to 1 (though the
        value 1 does not appear if `M` is even and `sym` is True).
    """
    if M < 1:
        return np.array([])
    if M == 1:
        return np.ones(1, 'd')
    extended = not sym and M % 2 == 0
    if extended:
        M += 1
    # Nuttall's minimum-sidelobe 4-term coefficients; terms alternate in
    # sign: a0 - a1*cos(x) + a2*cos(2*x) - a3*cos(3*x).
    coeffs = (0.3635819, 0.4891775, 0.1365995, 0.0106411)
    fac = np.arange(M) * 2 * np.pi / (M - 1.0)
    w = np.zeros(M)
    for k, a_k in enumerate(coeffs):
        w += (-1.0) ** k * a_k * np.cos(k * fac)
    return w[:-1] if extended else w
def blackmanharris(M, sym=True):
    """Return a minimum 4-term Blackman-Harris window.

    Parameters
    ----------
    M : int
        Number of points in the output window. If zero or less, an empty
        array is returned.
    sym : bool, optional
        When True (default), generates a symmetric window, for use in
        filter design.  When False, generates a periodic window, for use
        in spectral analysis.

    Returns
    -------
    w : ndarray
        The window, with the maximum value normalized to 1 (though the
        value 1 does not appear if `M` is even and `sym` is True).
    """
    if M < 1:
        return np.array([])
    if M == 1:
        return np.ones(1, 'd')
    extended = not sym and M % 2 == 0
    if extended:
        M += 1
    # Harris' minimum 4-term coefficients; terms alternate in sign:
    # a0 - a1*cos(x) + a2*cos(2*x) - a3*cos(3*x).
    coeffs = (0.35875, 0.48829, 0.14128, 0.01168)
    fac = np.arange(M) * 2 * np.pi / (M - 1.0)
    w = np.zeros(M)
    for k, a_k in enumerate(coeffs):
        w += (-1.0) ** k * a_k * np.cos(k * fac)
    return w[:-1] if extended else w
def flattop(M, sym=True):
    """Return a flat top window.

    Parameters
    ----------
    M : int
        Number of points in the output window. If zero or less, an empty
        array is returned.
    sym : bool, optional
        When True (default), generates a symmetric window, for use in
        filter design.  When False, generates a periodic window, for use
        in spectral analysis.

    Returns
    -------
    w : ndarray
        The window, with the maximum value normalized to 1 (though the
        value 1 does not appear if `M` is even and `sym` is True).
    """
    if M < 1:
        return np.array([])
    if M == 1:
        return np.ones(1, 'd')
    extended = not sym and M % 2 == 0
    if extended:
        M += 1
    # Five-term cosine sum with alternating signs:
    # a0 - a1*cos(x) + a2*cos(2*x) - a3*cos(3*x) + a4*cos(4*x).
    coeffs = (0.2156, 0.4160, 0.2781, 0.0836, 0.0069)
    fac = np.arange(M) * 2 * np.pi / (M - 1.0)
    w = np.zeros(M)
    for k, a_k in enumerate(coeffs):
        w += (-1.0) ** k * a_k * np.cos(k * fac)
    return w[:-1] if extended else w
def bartlett(M, sym=True):
    r"""Return a Bartlett window.

    Similar to a triangular window, except that the end points are at
    zero.  Defined as

    .. math:: w(n) = \frac{2}{M-1} \left(
              \frac{M-1}{2} - \left|n - \frac{M-1}{2}\right| \right)

    Convolution with this window produces linear interpolation; its
    Fourier transform is the product of two sinc functions.

    Parameters
    ----------
    M : int
        Number of points in the output window. If zero or less, an empty
        array is returned.
    sym : bool, optional
        When True (default), generates a symmetric window, for use in
        filter design.  When False, generates a periodic window, for use
        in spectral analysis.

    Returns
    -------
    w : ndarray
        The triangular window, with the first and last samples equal to
        zero and the maximum value normalized to 1 (though the value 1
        does not appear if `M` is even and `sym` is True).

    References
    ----------
    .. [1] M.S. Bartlett, "Periodogram Analysis and Continuous Spectra",
           Biometrika 37, 1-16, 1950.
    .. [2] A.V. Oppenheim and R.W. Schafer, "Discrete-Time Signal
           Processing", Prentice-Hall, 1999, pp. 468-471.
    """
    # Docstring adapted from NumPy's bartlett function.
    if M < 1:
        return np.array([])
    if M == 1:
        return np.ones(1, 'd')
    extended = not sym and M % 2 == 0
    if extended:
        M += 1
    n = np.arange(0, M)
    ramp = 2.0 * n / (M - 1)
    # Rise linearly to the midpoint, then mirror back down to zero.
    w = np.where(n <= (M - 1) / 2.0, ramp, 2.0 - ramp)
    return w[:-1] if extended else w
def hann(M, sym=True):
    r"""Return a Hann window.

    A raised-cosine taper with ends that touch zero:

    .. math::  w(n) = 0.5 - 0.5 \cos\left(\frac{2\pi{n}}{M-1}\right)
               \qquad 0 \leq n \leq M-1

    Named for Julius von Hann; sometimes erroneously called the
    "Hanning" window by confusion with the similar Hamming window.

    Parameters
    ----------
    M : int
        Number of points in the output window. If zero or less, an empty
        array is returned.
    sym : bool, optional
        When True (default), generates a symmetric window, for use in
        filter design.  When False, generates a periodic window, for use
        in spectral analysis.

    Returns
    -------
    w : ndarray
        The window, with the maximum value normalized to 1 (though the
        value 1 does not appear if `M` is even and `sym` is True).

    References
    ----------
    .. [1] Blackman, R.B. and Tukey, J.W., (1958) The measurement of power
           spectra, Dover Publications, New York.
    """
    # Docstring adapted from NumPy's hanning function.
    if M < 1:
        return np.array([])
    if M == 1:
        return np.ones(1, 'd')
    extended = not sym and M % 2 == 0
    if extended:
        M += 1
    theta = 2.0 * np.pi * np.arange(M) / (M - 1)
    w = 0.5 - 0.5 * np.cos(theta)
    return w[:-1] if extended else w


# Backwards-compatible alias for the common (mis)spelling.
hanning = hann
def tukey(M, alpha=0.5, sym=True):
    r"""Return a Tukey window, also known as a tapered cosine window.

    Parameters
    ----------
    M : int
        Number of points in the output window. If zero or less, an empty
        array is returned.
    alpha : float, optional
        Shape parameter of the Tukey window, representing the fraction of
        the window inside the cosine tapered region.  If zero, the Tukey
        window is equivalent to a rectangular window.  If one, it is
        equivalent to a Hann window.
    sym : bool, optional
        When True (default), generates a symmetric window, for use in
        filter design.  When False, generates a periodic window, for use
        in spectral analysis.

    Returns
    -------
    w : ndarray
        The window, with the maximum value normalized to 1 (though the
        value 1 does not appear if `M` is even and `sym` is True).

    References
    ----------
    .. [1] Harris, Fredric J. (Jan 1978). "On the use of Windows for
           Harmonic Analysis with the Discrete Fourier Transform".
           Proceedings of the IEEE 66 (1): 51-83.
    """
    if M < 1:
        return np.array([])
    if M == 1:
        return np.ones(1, 'd')
    # Degenerate shape parameters collapse to rectangular / Hann limits.
    if alpha <= 0:
        return np.ones(M, 'd')
    elif alpha >= 1.0:
        return hann(M, sym=sym)
    extended = not sym and M % 2 == 0
    if extended:
        M += 1
    samples = np.arange(0, M)
    # Number of samples in each cosine-tapered edge region.
    taper = int(np.floor(alpha * (M - 1) / 2.0))
    head = samples[0:taper + 1]
    flat = samples[taper + 1:M - taper - 1]
    tail = samples[M - taper - 1:]
    rising = 0.5 * (1 + np.cos(np.pi * (-1 + 2.0 * head / alpha / (M - 1))))
    middle = np.ones(flat.shape)
    falling = 0.5 * (1 + np.cos(np.pi * (-2.0 / alpha + 1 +
                                         2.0 * tail / alpha / (M - 1))))
    w = np.concatenate((rising, middle, falling))
    return w[:-1] if extended else w
def barthann(M, sym=True):
    """Return a modified Bartlett-Hann window.

    Parameters
    ----------
    M : int
        Number of points in the output window. If zero or less, an empty
        array is returned.
    sym : bool, optional
        When True (default), generates a symmetric window, for use in
        filter design.  When False, generates a periodic window, for use
        in spectral analysis.

    Returns
    -------
    w : ndarray
        The window, with the maximum value normalized to 1 (though the
        value 1 does not appear if `M` is even and `sym` is True).
    """
    if M < 1:
        return np.array([])
    if M == 1:
        return np.ones(1, 'd')
    extended = not sym and M % 2 == 0
    if extended:
        M += 1
    # Distance of each sample from the window centre, normalized to
    # [0, 0.5]; combines a linear (Bartlett) and a cosine (Hann) term.
    offset = np.abs(np.arange(0, M) / (M - 1.0) - 0.5)
    w = 0.62 - 0.48 * offset + 0.38 * np.cos(2 * np.pi * offset)
    return w[:-1] if extended else w
def hamming(M, sym=True):
r"""Return a Hamming window.
The Hamming window is a taper formed by using a raised cosine with
non-zero endpoints, optimized to minimize the nearest side lobe.
Parameters
----------
M : int
Number of points in the output window. If zero or less, an empty
array is returned.
sym : bool, optional
When True (default), generates a symmetric window, for use in filter
design.
When False, generates a periodic window, for use in spectral analysis.
Returns
-------
w : ndarray
The window, with the maximum value normalized to 1 (though the value 1
does not appear if `M` is even and `sym` is True).
Notes
-----
The Hamming window is defined as
.. math:: w(n) = 0.54 - 0.46 \cos\left(\frac{2\pi{n}}{M-1}\right)
\qquad 0 \leq n \leq M-1
The Hamming was named for R. W. Hamming, an associate of J. W. Tukey and
is described in Blackman and Tukey. It was recommended for smoothing the
truncated autocovariance function in the time domain.
Most references to the Hamming window come from the signal processing
literature, where it is used as one of many windowing functions for
smoothing values. It is also known as an apodization (which means
"removing the foot", i.e. smoothing discontinuities at the beginning
and end of the sampled signal) or tapering function.
References
----------
.. [1] Blackman, R.B. and Tukey, J.W., (1958) The measurement of power
spectra, Dover Publications, New York.
.. [2] E.R. Kanasewich, "Time Sequence Analysis in Geophysics", The
University of Alberta Press, 1975, pp. 109-110.
.. [3] Wikipedia, "Window function",
http://en.wikipedia.org/wiki/Window_function
.. [4] W.H. Press, B.P. Flannery, S.A. Teukolsky, and W.T. Vetterling,
"Numerical Recipes", Cambridge University Press, 1986, page 425.
Examples
--------
Plot the window and its frequency response:
>>> from scipy import signal
>>> from scipy.fftpack import fft, fftshift
>>> import matplotlib.pyplot as plt
>>> window = signal.hamming(51)
>>> plt.plot(window)
>>> plt.title("Hamming window")
>>> plt.ylabel("Amplitude")
>>> plt.xlabel("Sample")
>>> plt.figure()
>>> A = fft(window, 2048) / (len(window)/2.0)
>>> freq = np.linspace(-0.5, 0.5, len(A))
>>> response = 20 * np.log10(np.abs(fftshift(A / abs(A).max())))
>>> plt.plot(freq, response)
>>> plt.axis([-0.5, 0.5, -120, 0])
>>> plt.title("Frequency response of the Hamming window")
>>> plt.ylabel("Normalized magnitude [dB]")
>>> plt.xlabel("Normalized frequency [cycles per sample]")
"""
# Docstring adapted from NumPy's hamming function
if M < 1:
return np.array([])
if M == 1:
return np.ones(1, 'd')
odd = M % 2
if not sym and not odd:
M = M + 1
n = np.arange(0, M)
w = 0.54 - 0.46 * np.cos(2.0 * np.pi * n / (M - 1))
if not sym and not odd:
w = w[:-1]
return w
def kaiser(M, beta, sym=True):
    r"""Return a Kaiser window.

    A taper formed using a Bessel function:

    .. math::  w(n) = I_0\left( \beta \sqrt{1-\frac{4n^2}{(M-1)^2}}
               \right)/I_0(\beta)

    with :math:`-\frac{M-1}{2} \leq n \leq \frac{M-1}{2}`, where
    :math:`I_0` is the modified zeroth-order Bessel function.  The Kaiser
    window closely approximates the Digital Prolate Spheroidal Sequence
    (Slepian) window, and can approximate many other windows by varying
    `beta` (0: rectangular, 5: ~Hamming, 6: ~Hann, 8.6: ~Blackman).

    Parameters
    ----------
    M : int
        Number of points in the output window. If zero or less, an empty
        array is returned.
    beta : float
        Shape parameter, determines trade-off between main-lobe width and
        side lobe level. As beta gets large, the window narrows.
    sym : bool, optional
        When True (default), generates a symmetric window, for use in
        filter design.  When False, generates a periodic window, for use
        in spectral analysis.

    Returns
    -------
    w : ndarray
        The window, with the maximum value normalized to 1 (though the
        value 1 does not appear if `M` is even and `sym` is True).

    References
    ----------
    .. [1] J. F. Kaiser, "Digital Filters" - Ch 7 in "Systems analysis by
           digital computer", Editors: F.F. Kuo and J.F. Kaiser,
           p 218-285.  John Wiley and Sons, New York, (1966).
    """
    # Docstring adapted from NumPy's kaiser function.
    if M < 1:
        return np.array([])
    if M == 1:
        return np.ones(1, 'd')
    extended = not sym and M % 2 == 0
    if extended:
        M += 1
    # Normalized distance from the window centre, in [-1, 1].
    half = (M - 1) / 2.0
    ratio = (np.arange(0, M) - half) / half
    w = (special.i0(beta * np.sqrt(1 - ratio ** 2.0)) /
         special.i0(beta))
    return w[:-1] if extended else w
def gaussian(M, std, sym=True):
r"""Return a Gaussian window.
Parameters
----------
M : int
Number of points in the output window. If zero or less, an empty
array is returned.
std : float
The standard deviation, sigma.
sym : bool, optional
When True (default), generates a symmetric window, for use in filter
design.
When False, generates a periodic window, for use in spectral analysis.
Returns
-------
w : ndarray
The window, with the maximum value normalized to 1 (though the value 1
does not appear if `M` is even and `sym` is True).
Notes
-----
The Gaussian window is defined as
.. math:: w(n) = e^{ -\frac{1}{2}\left(\frac{n}{\sigma}\right)^2 }
Examples
--------
Plot the window and its frequency response:
>>> from scipy import signal
>>> from scipy.fftpack import fft, fftshift
>>> import matplotlib.pyplot as plt
>>> window = signal.gaussian(51, std=7)
>>> plt.plot(window)
>>> plt.title(r"Gaussian window ($\sigma$=7)")
>>> plt.ylabel("Amplitude")
>>> plt.xlabel("Sample")
>>> plt.figure()
>>> A = fft(window, 2048) / (len(window)/2.0)
>>> freq = np.linspace(-0.5, 0.5, len(A))
>>> response = 20 * np.log10(np.abs(fftshift(A / abs(A).max())))
>>> plt.plot(freq, response)
>>> plt.axis([-0.5, 0.5, -120, 0])
>>> plt.title(r"Frequency response of the Gaussian window ($\sigma$=7)")
>>> plt.ylabel("Normalized magnitude [dB]")
>>> plt.xlabel("Normalized frequency [cycles per sample]")
"""
if M < 1:
return np.array([])
if M == 1:
return np.ones(1, 'd')
odd = M % 2
if not sym and not odd:
M = M + 1
n = np.arange(0, M) - (M - 1.0) / 2.0
sig2 = 2 * std * std
w = np.exp(-n ** 2 / sig2)
if not sym and not odd:
w = w[:-1]
return w
def general_gaussian(M, p, sig, sym=True):
r"""Return a window with a generalized Gaussian shape.
Parameters
----------
M : int
Number of points in the output window. If zero or less, an empty
array is returned.
p : float
Shape parameter. p = 1 is identical to `gaussian`, p = 0.5 is
the same shape as the Laplace distribution.
sig : float
The standard deviation, sigma.
sym : bool, optional
When True (default), generates a symmetric window, for use in filter
design.
When False, generates a periodic window, for use in spectral analysis.
Returns
-------
w : ndarray
The window, with the maximum value normalized to 1 (though the value 1
does not appear if `M` is even and `sym` is True).
Notes
-----
The generalized Gaussian window is defined as
.. math:: w(n) = e^{ -\frac{1}{2}\left|\frac{n}{\sigma}\right|^{2p} }
the half-power point is at
.. math:: (2 \log(2))^{1/(2 p)} \sigma
Examples
--------
Plot the window and its frequency response:
>>> from scipy import signal
>>> from scipy.fftpack import fft, fftshift
>>> import matplotlib.pyplot as plt
>>> window = signal.general_gaussian(51, p=1.5, sig=7)
>>> plt.plot(window)
>>> plt.title(r"Generalized Gaussian window (p=1.5, $\sigma$=7)")
>>> plt.ylabel("Amplitude")
>>> plt.xlabel("Sample")
>>> plt.figure()
>>> A = fft(window, 2048) / (len(window)/2.0)
>>> freq = np.linspace(-0.5, 0.5, len(A))
>>> response = 20 * np.log10(np.abs(fftshift(A / abs(A).max())))
>>> plt.plot(freq, response)
>>> plt.axis([-0.5, 0.5, -120, 0])
>>> plt.title(r"Freq. resp. of the gen. Gaussian window (p=1.5, $\sigma$=7)")
>>> plt.ylabel("Normalized magnitude [dB]")
>>> plt.xlabel("Normalized frequency [cycles per sample]")
"""
if M < 1:
return np.array([])
if M == 1:
return np.ones(1, 'd')
odd = M % 2
if not sym and not odd:
M = M + 1
n = np.arange(0, M) - (M - 1.0) / 2.0
w = np.exp(-0.5 * np.abs(n / sig) ** (2 * p))
if not sym and not odd:
w = w[:-1]
return w
# `chebwin` contributed by Kumar Appaiah.
def chebwin(M, at, sym=True):
    r"""Return a Dolph-Chebyshev window.
    Parameters
    ----------
    M : int
        Number of points in the output window. If zero or less, an empty
        array is returned.
    at : float
        Attenuation (in dB).
    sym : bool, optional
        When True (default), generates a symmetric window, for use in filter
        design.
        When False, generates a periodic window, for use in spectral analysis.
    Returns
    -------
    w : ndarray
        The window, with the maximum value always normalized to 1
    Notes
    -----
    This window optimizes for the narrowest main lobe width for a given order
    `M` and sidelobe equiripple attenuation `at`, using Chebyshev
    polynomials. It was originally developed by Dolph to optimize the
    directionality of radio antenna arrays.
    Unlike most windows, the Dolph-Chebyshev is defined in terms of its
    frequency response:
    .. math:: W(k) = \frac
              {\cos\{M \cos^{-1}[\beta \cos(\frac{\pi k}{M})]\}}
              {\cosh[M \cosh^{-1}(\beta)]}
    where
    .. math:: \beta = \cosh \left [\frac{1}{M}
              \cosh^{-1}(10^\frac{A}{20}) \right ]
    and 0 <= abs(k) <= M-1. A is the attenuation in decibels (`at`).
    The time domain window is then generated using the IFFT, so
    power-of-two `M` are the fastest to generate, and prime number `M` are
    the slowest.
    The equiripple condition in the frequency domain creates impulses in the
    time domain, which appear at the ends of the window.
    References
    ----------
    .. [1] C. Dolph, "A current distribution for broadside arrays which
           optimizes the relationship between beam width and side-lobe level",
           Proceedings of the IEEE, Vol. 34, Issue 6
    .. [2] Peter Lynch, "The Dolph-Chebyshev Window: A Simple Optimal Filter",
           American Meteorological Society (April 1997)
           http://mathsci.ucd.ie/~plynch/Publications/Dolph.pdf
    .. [3] F. J. Harris, "On the use of windows for harmonic analysis with the
           discrete Fourier transforms", Proceedings of the IEEE, Vol. 66,
           No. 1, January 1978
    Examples
    --------
    Plot the window and its frequency response:
    >>> from scipy import signal
    >>> from scipy.fftpack import fft, fftshift
    >>> import matplotlib.pyplot as plt
    >>> window = signal.chebwin(51, at=100)
    >>> plt.plot(window)
    >>> plt.title("Dolph-Chebyshev window (100 dB)")
    >>> plt.ylabel("Amplitude")
    >>> plt.xlabel("Sample")
    >>> plt.figure()
    >>> A = fft(window, 2048) / (len(window)/2.0)
    >>> freq = np.linspace(-0.5, 0.5, len(A))
    >>> response = 20 * np.log10(np.abs(fftshift(A / abs(A).max())))
    >>> plt.plot(freq, response)
    >>> plt.axis([-0.5, 0.5, -120, 0])
    >>> plt.title("Frequency response of the Dolph-Chebyshev window (100 dB)")
    >>> plt.ylabel("Normalized magnitude [dB]")
    >>> plt.xlabel("Normalized frequency [cycles per sample]")
    """
    # Below ~45 dB the window's noise bandwidth behaves non-monotonically,
    # so warn (but still compute the window as requested).
    if np.abs(at) < 45:
        warnings.warn("This window is not suitable for spectral analysis "
                      "for attenuation values lower than about 45dB because "
                      "the equivalent noise bandwidth of a Chebyshev window "
                      "does not grow monotonically with increasing sidelobe "
                      "attenuation when the attenuation is smaller than "
                      "about 45 dB.")
    # Degenerate sizes: empty window or a single unit sample.
    if M < 1:
        return np.array([])
    if M == 1:
        return np.ones(1, 'd')
    odd = M % 2
    # Periodic (non-symmetric) even-length windows are computed one sample
    # longer and truncated at the end (see the final `if` below).
    if not sym and not odd:
        M = M + 1
    # compute the parameter beta
    order = M - 1.0
    beta = np.cosh(1.0 / order * np.arccosh(10 ** (np.abs(at) / 20.)))
    k = np.r_[0:M] * 1.0
    x = beta * np.cos(np.pi * k / M)
    # Find the window's DFT coefficients
    # Use analytic definition of Chebyshev polynomial instead of expansion
    # from scipy.special. Using the expansion in scipy.special leads to errors.
    # T_order(x) is evaluated piecewise: cosh-form for |x| > 1 (with a sign
    # correction for odd orders on x < -1), cos-form for |x| <= 1.
    p = np.zeros(x.shape)
    p[x > 1] = np.cosh(order * np.arccosh(x[x > 1]))
    p[x < -1] = (1 - 2 * (order % 2)) * np.cosh(order * np.arccosh(-x[x < -1]))
    p[np.abs(x) <= 1] = np.cos(order * np.arccos(x[np.abs(x) <= 1]))
    # Appropriate IDFT and filling up
    # depending on even/odd M
    if M % 2:
        # Odd M: take the real inverse transform and mirror the first half.
        w = np.real(fftpack.fft(p))
        n = (M + 1) // 2
        w = w[:n]
        w = np.concatenate((w[n - 1:0:-1], w))
    else:
        # Even M: apply a half-sample phase shift before transforming so the
        # samples land between the DFT grid points.
        p = p * np.exp(1.j * np.pi / M * np.r_[0:M])
        w = np.real(fftpack.fft(p))
        n = M // 2 + 1
        w = np.concatenate((w[n - 1:0:-1], w[1:n]))
    # Normalize so the peak value is exactly 1.
    w = w / max(w)
    if not sym and not odd:
        w = w[:-1]
    return w
def slepian(M, width, sym=True):
    """Return a digital Slepian (DPSS) window.

    Used to maximize the energy concentration in the main lobe.  Also
    called the digital prolate spheroidal sequence (DPSS).

    Parameters
    ----------
    M : int
        Number of points in the output window. If zero or less, an empty
        array is returned.
    width : float
        Bandwidth
    sym : bool, optional
        When True (default), generates a symmetric window, for use in
        filter design.
        When False, generates a periodic window, for use in spectral
        analysis.

    Returns
    -------
    w : ndarray
        The window, with the maximum value always normalized to 1
    """
    # Degenerate sizes first.
    if M < 1:
        return np.array([])
    if M == 1:
        return np.ones(1, 'd')
    # A periodic window of even length is built as a symmetric window of
    # length M+1 with the last sample dropped.
    needs_trunc = not sym and not (M % 2)
    if needs_trunc:
        M = M + 1
    # *width* is the full bandwidth; halve it twice (the second halving
    # matches the historical behavior of this routine).
    half_bw = (width / 2) / 2
    samples = np.arange(M, dtype='d')
    # Banded symmetric matrix whose top eigenvector is the DPSS window.
    bands = np.zeros((2, M))
    bands[0, 1:] = samples[1:] * (M - samples[1:]) / 2
    bands[1, :] = ((M - 1 - 2 * samples) / 2)**2 * np.cos(2 * np.pi * half_bw)
    # Only the largest-index eigenpair is needed.
    _, vec = linalg.eig_banded(bands, select='i', select_range=(M-1, M-1))
    win = vec.ravel() / vec.max()
    if needs_trunc:
        win = win[:-1]
    return win
def cosine(M, sym=True):
    """Return a window with a simple cosine shape.

    Parameters
    ----------
    M : int
        Number of points in the output window. If zero or less, an empty
        array is returned.
    sym : bool, optional
        When True (default), generates a symmetric window, for use in
        filter design.
        When False, generates a periodic window, for use in spectral
        analysis.

    Returns
    -------
    w : ndarray
        The window, with the maximum value normalized to 1 (though the
        value 1 does not appear if `M` is even and `sym` is True).

    Notes
    -----
    .. versionadded:: 0.13.0
    """
    # Trivial sizes.
    if M < 1:
        return np.array([])
    if M == 1:
        return np.ones(1, 'd')
    # Even-length periodic window = odd symmetric window minus last point.
    trunc = not sym and M % 2 == 0
    if trunc:
        M = M + 1
    # Half-period sine sampled at the midpoints of M equal intervals.
    w = np.sin(np.pi / M * (np.arange(0, M) + .5))
    return w[:-1] if trunc else w
def exponential(M, center=None, tau=1., sym=True):
    r"""Return an exponential (or Poisson) window.

    Parameters
    ----------
    M : int
        Number of points in the output window. If zero or less, an empty
        array is returned.
    center : float, optional
        Parameter defining the center location of the window function.
        The default value if not given is ``center = (M-1) / 2``.  This
        parameter must take its default value for symmetric windows.
    tau : float, optional
        Parameter defining the decay.  For ``center = 0`` use
        ``tau = -(M-1) / ln(x)`` if ``x`` is the fraction of the window
        remaining at the end.
    sym : bool, optional
        When True (default), generates a symmetric window, for use in
        filter design.
        When False, generates a periodic window, for use in spectral
        analysis.

    Returns
    -------
    w : ndarray
        The window, with the maximum value normalized to 1 (though the
        value 1 does not appear if `M` is even and `sym` is True).

    Raises
    ------
    ValueError
        If *center* is given together with ``sym=True``.

    Notes
    -----
    The Exponential window is defined as

    .. math::  w(n) = e^{-|n-center| / \tau}

    References
    ----------
    S. Gade and H. Herlufsen, "Windows to FFT analysis (Part I)",
    Technical Review 3, Bruel & Kjaer, 1987.
    """
    # A symmetric window is only well defined about the default center.
    if sym and center is not None:
        raise ValueError("If sym==True, center must be None.")
    if M < 1:
        return np.array([])
    if M == 1:
        return np.ones(1, 'd')
    # Even-length periodic window = odd symmetric window minus last point.
    trunc = not sym and M % 2 == 0
    if trunc:
        M = M + 1
    if center is None:
        center = (M-1) / 2
    idx = np.arange(0, M)
    w = np.exp(-np.abs(idx - center) / tau)
    return w[:-1] if trunc else w
# Master table of window functions: each key is the tuple of accepted
# name aliases, each value is (window function, needs_extra_parameters).
_win_equiv_raw = {
    ('barthann', 'brthan', 'bth'): (barthann, False),
    ('bartlett', 'bart', 'brt'): (bartlett, False),
    ('blackman', 'black', 'blk'): (blackman, False),
    ('blackmanharris', 'blackharr', 'bkh'): (blackmanharris, False),
    ('bohman', 'bman', 'bmn'): (bohman, False),
    ('boxcar', 'box', 'ones',
        'rect', 'rectangular'): (boxcar, False),
    ('chebwin', 'cheb'): (chebwin, True),
    ('cosine', 'halfcosine'): (cosine, False),
    ('exponential', 'poisson'): (exponential, True),
    ('flattop', 'flat', 'flt'): (flattop, False),
    ('gaussian', 'gauss', 'gss'): (gaussian, True),
    ('general gaussian', 'general_gaussian',
        'general gauss', 'general_gauss', 'ggs'): (general_gaussian, True),
    ('hamming', 'hamm', 'ham'): (hamming, False),
    ('hanning', 'hann', 'han'): (hann, False),
    ('kaiser', 'ksr'): (kaiser, True),
    ('nuttall', 'nutl', 'nut'): (nuttall, False),
    ('parzen', 'parz', 'par'): (parzen, False),
    ('slepian', 'slep', 'optimal', 'dpss', 'dss'): (slepian, True),
    ('triangle', 'triang', 'tri'): (triang, False),
    ('tukey', 'tuk'): (tukey, True),
    }
# Fill dict with all valid window name strings
# (flattened alias -> function map used by get_window below)
_win_equiv = {}
for k, v in _win_equiv_raw.items():
    for key in k:
        _win_equiv[key] = v[0]
# Keep track of which windows need additional parameters
# (their names must be passed to get_window as a tuple, not a string)
_needs_param = set()
for k, v in _win_equiv_raw.items():
    if v[1]:
        _needs_param.update(k)
def get_window(window, Nx, fftbins=True):
    """
    Return a window.

    Parameters
    ----------
    window : string, float, or tuple
        The type of window to create. See below for more details.
    Nx : int
        The number of samples in the window.
    fftbins : bool, optional
        If True (default), create a "periodic" window, ready to use with
        `ifftshift` and be multiplied by the result of an FFT (see also
        `fftpack.fftfreq`).
        If False, create a "symmetric" window, for use in filter design.

    Returns
    -------
    get_window : ndarray
        Returns a window of length `Nx` and type `window`

    Notes
    -----
    Window types:
        `boxcar`, `triang`, `blackman`, `hamming`, `hann`, `bartlett`,
        `flattop`, `parzen`, `bohman`, `blackmanharris`, `nuttall`,
        `barthann`, `kaiser` (needs beta), `gaussian` (needs standard
        deviation), `general_gaussian` (needs power, width), `slepian`
        (needs width), `chebwin` (needs attenuation), `exponential`
        (needs decay scale), `tukey` (needs taper fraction)

    If the window requires no parameters, then `window` can be a string.
    If the window requires parameters, then `window` must be a tuple
    with the first argument the string name of the window, and the next
    arguments the needed parameters.
    If `window` is a floating point number, it is interpreted as the beta
    parameter of the `kaiser` window.

    Each of the window types listed above is also the name of
    a function that can be called directly to create a window of
    that type.

    Examples
    --------
    >>> from scipy import signal
    >>> signal.get_window('triang', 7)
    array([ 0.25, 0.5 , 0.75, 1. , 0.75, 0.5 , 0.25])
    >>> signal.get_window(('kaiser', 4.0), 9)
    array([ 0.08848053, 0.32578323, 0.63343178, 0.89640418, 1. ,
    0.89640418, 0.63343178, 0.32578323, 0.08848053])
    >>> signal.get_window(4.0, 9)
    array([ 0.08848053, 0.32578323, 0.63343178, 0.89640418, 1. ,
    0.89640418, 0.63343178, 0.32578323, 0.08848053])
    """
    sym = not fftbins
    try:
        # A bare number is shorthand for a kaiser window with that beta.
        beta = float(window)
    except (TypeError, ValueError):
        args = ()
        if isinstance(window, tuple):
            # Tuple form: (name, param1, param2, ...)
            winstr = window[0]
            if len(window) > 1:
                args = window[1:]
        elif isinstance(window, string_types):
            # string_types comes from the py2/py3 compat layer imported
            # at module level.
            if window in _needs_param:
                raise ValueError("The '" + window + "' window needs one or "
                                 "more parameters -- pass a tuple.")
            else:
                winstr = window
        else:
            raise ValueError("%s as window type is not supported." %
                             str(type(window)))
        try:
            winfunc = _win_equiv[winstr]
        except KeyError:
            raise ValueError("Unknown window type.")
        params = (Nx,) + args + (sym,)
    else:
        winfunc = kaiser
        params = (Nx, beta, sym)
    return winfunc(*params)
| bsd-3-clause |
TheArbiter/Networks | lab4/lab4exercise1/util/helper.py | 6 | 3381 | '''
Helper module for the plot scripts.
'''
import re
import itertools
import matplotlib as m
import os
# Choose the matplotlib backend before pyplot is imported: the native
# backend on macOS ("Darwin"), headless Agg everywhere else.
if os.uname()[0] == "Darwin":
    m.use("MacOSX")
else:
    m.use("Agg")
import matplotlib.pyplot as plt
import argparse
import math
def read_list(fname, delim=','):
    """Read a delimited text file into a list of row lists.

    Empty fields and bare time-unit fields ('ms', 's') are replaced with
    the string '0' so later float conversion cannot fail.

    Parameters
    ----------
    fname : str
        Path of the file to read.
    delim : str, optional
        Field delimiter (default ',').

    Returns
    -------
    list of list of str
    """
    ret = []
    # 'with' guarantees the handle is closed (the old version leaked it);
    # iterating the file object replaces the long-removed xreadlines().
    with open(fname) as fh:
        for line in fh:
            fields = line.strip().split(delim)
            fields = [('0' if f.strip() in ('', 'ms', 's') else f)
                      for f in fields]
            ret.append(fields)
    return ret
def ewma(alpha, values):
    """Exponentially weighted moving average of *values*.

    *alpha* is the weight given to the running history; ``alpha == 0``
    returns the input sequence unchanged.
    """
    if alpha == 0:
        return values
    smoothed = []
    acc = 0
    for sample in values:
        acc = alpha * acc + (1 - alpha) * sample
        smoothed.append(acc)
    return smoothed
def col(n, obj=None, clean=lambda e: e):
    """A versatile column extractor.

    col(n, [1,2,3]) => returns the nth value in the list
    col(n, [ [...], [...], ... ] => returns the nth column in this matrix
    col('blah', { ... }) => returns the blah-th value in the dict
    col(n) => partial function, useful in maps

    Returns None (after printing a diagnostic) when the column is
    missing.  The previous version referenced the undefined name ``T``
    (termcolor was never imported) and used the Python-2 print
    statement, so the error path raised instead of reporting.
    """
    # No object given: build a partial extractor for use with map().
    if obj is None:
        def f(item):
            return clean(item[n])
        return f
    # A list of rows (lists or dicts): extract the column from each row.
    if isinstance(obj, list):
        if len(obj) > 0 and isinstance(obj[0], (list, dict)):
            return map(col(n, clean=clean), obj)
    if isinstance(obj, (list, dict)):
        try:
            return clean(obj[n])
        except (LookupError, TypeError):
            print('col(...): column "%s" not found!' % (n,))
            return None
    # We wouldn't know what to do here, so just return None
    print('col(...): column "%s" not found!' % (n,))
    return None
def transpose(l):
    # Matrix transpose via argument unpacking; under Python 2 zip()
    # returns a list of tuples (an iterator on Python 3).
    return zip(*l)
def avg(lst):
    """Arithmetic mean of *lst*, coercing every element to float."""
    total = sum(float(item) for item in lst)
    return total / len(lst)
def stdev(lst):
    """Population standard deviation of *lst* (mean computed inline)."""
    mean = sum(float(item) for item in lst) / len(lst)
    variance = sum((item - mean) ** 2 for item in lst) / len(lst)
    return math.sqrt(variance)
def xaxis(values, limit):
    """Scale sample indices onto [0, limit) and pair them with *values*.

    Returns ``zip(xs, ys)`` ready for plotting.  The previous version
    used a tuple-unpacking lambda (``lambda (x, y): ...``), which is
    Python-2-only syntax; enumerate unpacking works everywhere.
    """
    count = len(values)
    scaled = [(i * 1.0 * limit / count, v) for i, v in enumerate(values)]
    return zip(*scaled)
def grouper(n, iterable, fillvalue=None):
    "grouper(3, 'ABCDEFG', 'x') --> ABC DEF Gxx"
    # n references to ONE iterator: izip_longest then consumes n
    # consecutive items per output tuple, padding the last group.
    # NOTE(review): itertools.izip_longest exists only on Python 2; on
    # Python 3 this would need itertools.zip_longest.
    args = [iter(iterable)] * n
    return itertools.izip_longest(fillvalue=fillvalue, *args)
def cdf(values):
    """Empirical CDF of *values*.

    Returns (x, y): the sorted samples and their cumulative
    probabilities.  Note: *values* is sorted in place.
    """
    values.sort()
    total = len(values)
    running = 0
    probs = []
    for _ in values:
        running += 1.0 / total
        probs.append(running)
    return (list(values), probs)
def parse_cpu_usage(fname, nprocessors=8):
    """Returns (user,system,nice,iowait,hirq,sirq,steal) tuples
    aggregated over all processors. DOES NOT RETURN IDLE times."""
    # Each sample is a group of nprocessors consecutive per-CPU lines.
    # NOTE(review): the file handle is never closed, and map/xrange are
    # Python-2 usage (map must return an indexable list here).
    data = grouper(nprocessors, open(fname).readlines())
    """Typical line looks like:
    Cpu0 : 0.0%us, 1.0%sy, 0.0%ni, 97.0%id, 0.0%wa, 0.0%hi, 2.0%si, 0.0%st
    """
    ret = []
    for collection in data:
        # Accumulate the 8 usage fields over all CPUs in this sample.
        total = [0]*8
        for cpu in collection:
            usages = cpu.split(':')[1]
            usages = map(lambda e: e.split('%')[0],
                         usages.split(','))
            for i in xrange(len(usages)):
                total[i] += float(usages[i])
        # Average per processor.
        total = map(lambda t: t/nprocessors, total)
        # Skip idle time (field index 3)
        ret.append(total[0:3] + total[4:])
    return ret
def pc95(lst):
    """95th-percentile element of *lst* (nearest-rank, zero-based)."""
    return sorted(lst)[int(0.95 * len(lst))]
def pc99(lst):
    """99th-percentile element of *lst* (nearest-rank, zero-based)."""
    return sorted(lst)[int(0.99 * len(lst))]
def coeff_variation(lst):
    # Relative dispersion: population standard deviation divided by the
    # mean (raises ZeroDivisionError when the mean is zero).
    return stdev(lst) / avg(lst)
| gpl-3.0 |
jklenzing/pysat | pysat/ssnl/plot.py | 2 | 3420 | from __future__ import print_function
from __future__ import absolute_import
import matplotlib as mpl
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
import numpy as np
import warnings
def scatterplot(inst, labelx, labely, data_label, datalim, xlim=None,
                ylim=None):
    """Return scatterplot of data_label(s) as functions of labelx,y over a
    season.

    Parameters
    ----------
    inst : pysat Instrument
        Instrument object, iterated over the season, supplying the data.
    labelx : string
        data product for x-axis
    labely : string
        data product for y-axis
    data_label : string, array-like of strings
        data product(s) to be scatter plotted
    datalim : numpy array
        plot limits for data_label
    xlim, ylim : array-like or None, optional
        x- and y-axis limits applied to every subplot (default None)

    Returns
    -------
    Returns a list of scatter plots of data_label as a function
    of labelx and labely over the season delineated by start and
    stop datetime objects.

    """
    # Missing comma after "instead:" previously fused the URL onto the
    # preceding word in the joined warning text.
    warnings.warn(' '.join(["This function is deprecated here and will be",
                            "removed in pysat 3.0.0. Please use",
                            "pysatSeasons instead:",
                            "https://github.com/pysat/pysatSeasons"]),
                  DeprecationWarning, stacklevel=2)
    if mpl.is_interactive():
        interactive_mode = True
        # turn interactive plotting off
        plt.ioff()
    else:
        interactive_mode = False
    # create figures for plotting
    figs = []
    axs = []
    # Check for list-like behaviour of data_label
    if type(data_label) is str:
        data_label = [data_label]
    # One figure per data product: a 3D scatter on top, 2D below.
    for i in np.arange(len(data_label)):
        figs.append(plt.figure())
        ax1 = figs[i].add_subplot(211, projection='3d')
        ax2 = figs[i].add_subplot(212)
        axs.append((ax1, ax2))
        plt.suptitle(data_label[i])
        if xlim is not None:
            ax1.set_xlim(xlim)
            ax2.set_xlim(xlim)
        if ylim is not None:
            ax1.set_ylim(ylim)
            ax2.set_ylim(ylim)
    # norm method so that data may be scaled to colors appropriately
    norm = mpl.colors.Normalize(vmin=datalim[0], vmax=datalim[1])
    # Placeholders; replaced by the scatter artists when data is plotted.
    p = [i for i in np.arange(len(figs))]
    q = [i for i in np.arange(len(figs))]
    for i, inst in enumerate(inst):
        for j, (fig, ax) in enumerate(zip(figs, axs)):
            if not inst.empty:
                check1 = len(inst.data[labelx]) > 0
                check2 = len(inst.data[labely]) > 0
                check3 = len(inst.data[data_label[j]]) > 0
                if (check1 & check2 & check3):
                    p[j] = ax[0].scatter(inst.data[labelx], inst.data[labely],
                                         inst.data[data_label[j]], zdir='z',
                                         c=inst.data[data_label[j]], norm=norm,
                                         linewidth=0, edgecolors=None)
                    q[j] = ax[1].scatter(inst.data[labelx], inst.data[labely],
                                         c=inst.data[data_label[j]],
                                         norm=norm, alpha=0.5, edgecolor=None)
    for j, (fig, ax) in enumerate(zip(figs, axs)):
        try:
            plt.colorbar(p[j], ax=ax[0], label='Amplitude (m/s)')
        except Exception:
            # Best-effort: colorbar fails when no data was ever plotted
            # and p[j] is still the integer placeholder.  The former
            # bare ``except:`` also swallowed KeyboardInterrupt.
            print('Tried colorbar but failed, thus no colorbar.')
        ax[0].elev = 30.
    if interactive_mode:
        # turn interactive plotting back on
        plt.ion()
    return figs
| bsd-3-clause |
0x0all/nupic | external/linux32/lib/python2.6/site-packages/matplotlib/patches.py | 69 | 110325 | # -*- coding: utf-8 -*-
from __future__ import division
import math
import matplotlib as mpl
import numpy as np
import matplotlib.cbook as cbook
import matplotlib.artist as artist
import matplotlib.colors as colors
import matplotlib.transforms as transforms
from matplotlib.path import Path
# these are not available for the object inspector until after the
# class is built so we define an initial set here for the init
# function and they will be overridden after object definition
artist.kwdocd['Patch'] = """
================= ==============================================
Property Description
================= ==============================================
alpha float
animated [True | False]
antialiased or aa [True | False]
clip_box a matplotlib.transform.Bbox instance
clip_on [True | False]
edgecolor or ec any matplotlib color
facecolor or fc any matplotlib color
figure a matplotlib.figure.Figure instance
fill [True | False]
hatch unknown
label any string
linewidth or lw float
lod [True | False]
transform a matplotlib.transform transformation instance
visible [True | False]
zorder any number
================= ==============================================
"""
class Patch(artist.Artist):
    """
    A patch is a 2D thingy with a face color and an edge color.
    If any of *edgecolor*, *facecolor*, *linewidth*, or *antialiased*
    are *None*, they default to their rc params setting.
    """
    # Default drawing order for patches relative to other artists.
    zorder = 1
    def __str__(self):
        return str(self.__class__).split('.')[-1]
    def get_verts(self):
        """
        Return a copy of the vertices used in this patch
        If the patch contains Bézier curves, the curves will be
        interpolated by line segments. To access the curves as
        curves, use :meth:`get_path`.
        """
        trans = self.get_transform()
        path = self.get_path()
        polygons = path.to_polygons(trans)
        if len(polygons):
            return polygons[0]
        return []
    def contains(self, mouseevent):
        """Test whether the mouse event occurred in the patch.
        Returns T/F, {}
        """
        # This is a general version of contains that should work on any
        # patch with a path. However, patches that have a faster
        # algebraic solution to hit-testing should override this
        # method.
        if callable(self._contains): return self._contains(self,mouseevent)
        inside = self.get_path().contains_point(
            (mouseevent.x, mouseevent.y), self.get_transform())
        return inside, {}
    def update_from(self, other):
        """
        Updates this :class:`Patch` from the properties of *other*.
        """
        artist.Artist.update_from(self, other)
        self.set_edgecolor(other.get_edgecolor())
        self.set_facecolor(other.get_facecolor())
        self.set_fill(other.get_fill())
        self.set_hatch(other.get_hatch())
        self.set_linewidth(other.get_linewidth())
        self.set_linestyle(other.get_linestyle())
        self.set_transform(other.get_data_transform())
        self.set_figure(other.get_figure())
        self.set_alpha(other.get_alpha())
    def get_extents(self):
        """
        Return a :class:`~matplotlib.transforms.Bbox` object defining
        the axis-aligned extents of the :class:`Patch`.
        """
        return self.get_path().get_extents(self.get_transform())
    def get_transform(self):
        """
        Return the :class:`~matplotlib.transforms.Transform` applied
        to the :class:`Patch`.
        """
        # Patch-local transform composed with the artist transform.
        return self.get_patch_transform() + artist.Artist.get_transform(self)
    def get_data_transform(self):
        # The artist (data-space) transform without the patch-local part.
        return artist.Artist.get_transform(self)
    def get_patch_transform(self):
        # Subclasses override this to map their unit geometry into data
        # space; the base patch has no local transform.
        return transforms.IdentityTransform()
    def get_antialiased(self):
        """
        Returns True if the :class:`Patch` is to be drawn with antialiasing.
        """
        return self._antialiased
    get_aa = get_antialiased
    def get_edgecolor(self):
        """
        Return the edge color of the :class:`Patch`.
        """
        return self._edgecolor
    get_ec = get_edgecolor
    def get_facecolor(self):
        """
        Return the face color of the :class:`Patch`.
        """
        return self._facecolor
    get_fc = get_facecolor
    def get_linewidth(self):
        """
        Return the line width in points.
        """
        return self._linewidth
    get_lw = get_linewidth
    def get_linestyle(self):
        """
        Return the linestyle.  Will be one of ['solid' | 'dashed' |
        'dashdot' | 'dotted']
        """
        return self._linestyle
    get_ls = get_linestyle
    def set_antialiased(self, aa):
        """
        Set whether to use antialiased rendering
        ACCEPTS: [True | False]  or None for default
        """
        if aa is None: aa = mpl.rcParams['patch.antialiased']
        self._antialiased = aa
    def set_aa(self, aa):
        """alias for set_antialiased"""
        return self.set_antialiased(aa)
    def set_edgecolor(self, color):
        """
        Set the patch edge color
        ACCEPTS: mpl color spec, or None for default, or 'none' for no color
        """
        if color is None: color = mpl.rcParams['patch.edgecolor']
        self._edgecolor = color
    def set_ec(self, color):
        """alias for set_edgecolor"""
        return self.set_edgecolor(color)
    def set_facecolor(self, color):
        """
        Set the patch face color
        ACCEPTS: mpl color spec, or None for default, or 'none' for no color
        """
        if color is None: color = mpl.rcParams['patch.facecolor']
        self._facecolor = color
    def set_fc(self, color):
        """alias for set_facecolor"""
        return self.set_facecolor(color)
    def set_linewidth(self, w):
        """
        Set the patch linewidth in points
        ACCEPTS: float or None for default
        """
        if w is None: w = mpl.rcParams['patch.linewidth']
        self._linewidth = w
    def set_lw(self, lw):
        """alias for set_linewidth"""
        return self.set_linewidth(lw)
    def set_linestyle(self, ls):
        """
        Set the patch linestyle
        ACCEPTS: ['solid' | 'dashed' | 'dashdot' | 'dotted']
        """
        if ls is None: ls = "solid"
        self._linestyle = ls
    def set_ls(self, ls):
        """alias for set_linestyle"""
        return self.set_linestyle(ls)
    def set_fill(self, b):
        """
        Set whether to fill the patch
        ACCEPTS: [True | False]
        """
        self.fill = b
    def get_fill(self):
        'return whether fill is set'
        return self.fill
    def set_hatch(self, h):
        """
        Set the hatching pattern
        hatch can be one of::
          /   - diagonal hatching
          \ - back diagonal
          |   - vertical
          -   - horizontal
          #   - crossed
          x   - crossed diagonal
        Letters can be combined, in which case all the specified
        hatchings are done.  If same letter repeats, it increases the
        density of hatching in that direction.
        CURRENT LIMITATIONS:
        1. Hatching is supported in the PostScript backend only.
        2. Hatching is done with solid black lines of width 0.
        ACCEPTS: [ '/' | '\\' | '|' | '-' | '#' | 'x' ]
        """
        self._hatch = h
    def get_hatch(self):
        'Return the current hatching pattern'
        return self._hatch
    def draw(self, renderer):
        'Draw the :class:`Patch` to the given *renderer*.'
        if not self.get_visible(): return
        #renderer.open_group('patch')
        gc = renderer.new_gc()
        # Edge color 'none' is emulated by a zero line width.
        if cbook.is_string_like(self._edgecolor) and self._edgecolor.lower()=='none':
            gc.set_linewidth(0)
        else:
            gc.set_foreground(self._edgecolor)
            gc.set_linewidth(self._linewidth)
            gc.set_linestyle(self._linestyle)
        gc.set_antialiased(self._antialiased)
        self._set_gc_clip(gc)
        gc.set_capstyle('projecting')
        gc.set_url(self._url)
        gc.set_snap(self._snap)
        # Alpha is carried through the face color; an unfilled patch
        # draws its edge fully opaque.
        if (not self.fill or self._facecolor is None or
            (cbook.is_string_like(self._facecolor) and self._facecolor.lower()=='none')):
            rgbFace = None
            gc.set_alpha(1.0)
        else:
            r, g, b, a = colors.colorConverter.to_rgba(self._facecolor, self._alpha)
            rgbFace = (r, g, b)
            gc.set_alpha(a)
        if self._hatch:
            gc.set_hatch(self._hatch )
        path = self.get_path()
        transform = self.get_transform()
        # Split the transform: non-affine part is applied to the path,
        # the remaining affine part is handed to the renderer.
        tpath = transform.transform_path_non_affine(path)
        affine = transform.get_affine()
        renderer.draw_path(gc, tpath, affine, rgbFace)
        #renderer.close_group('patch')
    def get_path(self):
        """
        Return the path of this patch
        """
        raise NotImplementedError('Derived must override')
    def get_window_extent(self, renderer=None):
        return self.get_path().get_extents(self.get_transform())
# Regenerate the Patch kwarg table now that the class exists, and share
# the same table with every patch subclass docstring listed below.
artist.kwdocd['Patch'] = patchdoc = artist.kwdoc(Patch)
for k in ('Rectangle', 'Circle', 'RegularPolygon', 'Polygon', 'Wedge', 'Arrow',
          'FancyArrow', 'YAArrow', 'CirclePolygon', 'Ellipse', 'Arc',
          'FancyBboxPatch'):
    artist.kwdocd[k] = patchdoc
# define Patch.__init__ after the class so that the docstring can be
# auto-generated.
def __patch__init__(self,
                    edgecolor=None,
                    facecolor=None,
                    linewidth=None,
                    linestyle=None,
                    antialiased = None,
                    hatch = None,
                    fill=True,
                    **kwargs
                    ):
    """
    The following kwarg properties are supported
    %(Patch)s
    """
    artist.Artist.__init__(self)
    # None means "use the rc defaults"; note linestyle always falls back
    # to "solid" rather than an rc parameter.
    if linewidth is None: linewidth = mpl.rcParams['patch.linewidth']
    if linestyle is None: linestyle = "solid"
    if antialiased is None: antialiased = mpl.rcParams['patch.antialiased']
    self.set_edgecolor(edgecolor)
    self.set_facecolor(facecolor)
    self.set_linewidth(linewidth)
    self.set_linestyle(linestyle)
    self.set_antialiased(antialiased)
    self.set_hatch(hatch)
    self.fill = fill
    self._combined_transform = transforms.IdentityTransform()
    # Remaining keyword arguments are applied as artist properties.
    if len(kwargs): artist.setp(self, **kwargs)
__patch__init__.__doc__ = cbook.dedent(__patch__init__.__doc__) % artist.kwdocd
Patch.__init__ = __patch__init__
class Shadow(Patch):
    def __str__(self):
        return "Shadow(%s)"%(str(self.patch))
    def __init__(self, patch, ox, oy, props=None, **kwargs):
        """
        Create a shadow of the given *patch* offset by *ox*, *oy*.
        *props*, if not *None*, is a patch property update dictionary.
        If *None*, the shadow will have the same color as the face,
        but darkened.
        kwargs are
        %(Patch)s
        """
        # NOTE(review): **kwargs is accepted but never applied to the
        # shadow -- confirm whether callers rely on it.
        Patch.__init__(self)
        self.patch = patch
        self.props = props
        self._ox, self._oy = ox, oy
        self._update_transform()
        self._update()
    __init__.__doc__ = cbook.dedent(__init__.__doc__) % artist.kwdocd
    def _update(self):
        # Copy all drawing properties from the shadowed patch, then
        # either apply the explicit props dict or darken the face color.
        self.update_from(self.patch)
        if self.props is not None:
            self.update(self.props)
        else:
            r,g,b,a = colors.colorConverter.to_rgba(self.patch.get_facecolor())
            # rho scales the RGB channels toward black; alpha fixed at 0.5.
            rho = 0.3
            r = rho*r
            g = rho*g
            b = rho*b
            self.set_facecolor((r,g,b,0.5))
            self.set_edgecolor((r,g,b,0.5))
    def _update_transform(self):
        # Pure translation by the (ox, oy) offset.
        self._shadow_transform = transforms.Affine2D().translate(self._ox, self._oy)
    def _get_ox(self):
        return self._ox
    def _set_ox(self, ox):
        self._ox = ox
        self._update_transform()
    def _get_oy(self):
        return self._oy
    def _set_oy(self, oy):
        self._oy = oy
        self._update_transform()
    def get_path(self):
        # The shadow shares the shadowed patch's path...
        return self.patch.get_path()
    def get_patch_transform(self):
        # ...and its transform, composed with the offset translation.
        return self.patch.get_patch_transform() + self._shadow_transform
class Rectangle(Patch):
    """
    Draw a rectangle with lower left at *xy* = (*x*, *y*) with
    specified *width* and *height*.
    """
    def __str__(self):
        return self.__class__.__name__ \
            + "(%g,%g;%gx%g)" % (self._x, self._y, self._width, self._height)
    def __init__(self, xy, width, height, **kwargs):
        """
        *fill* is a boolean indicating whether to fill the rectangle
        Valid kwargs are:
        %(Patch)s
        """
        Patch.__init__(self, **kwargs)
        self._x = xy[0]
        self._y = xy[1]
        self._width = width
        self._height = height
        # Note: This cannot be calculated until this is added to an Axes
        self._rect_transform = transforms.IdentityTransform()
    __init__.__doc__ = cbook.dedent(__init__.__doc__) % artist.kwdocd
    def get_path(self):
        """
        Return the vertices of the rectangle
        """
        # The geometry lives in the patch transform; the path is always
        # the unit square.
        return Path.unit_rectangle()
    def _update_patch_transform(self):
        """NOTE: This cannot be called until after this has been added
        to an Axes, otherwise unit conversion will fail. This
        makes it very important to call the accessor method and
        not directly access the transformation member variable.
        """
        x = self.convert_xunits(self._x)
        y = self.convert_yunits(self._y)
        width = self.convert_xunits(self._width)
        height = self.convert_yunits(self._height)
        bbox = transforms.Bbox.from_bounds(x, y, width, height)
        self._rect_transform = transforms.BboxTransformTo(bbox)
    def get_patch_transform(self):
        self._update_patch_transform()
        return self._rect_transform
    def contains(self, mouseevent):
        # special case the degenerate rectangle
        if self._width == 0 or self._height == 0:
            return False, {}
        # Hit-test in unit-square coordinates.
        x, y = self.get_transform().inverted().transform_point(
            (mouseevent.x, mouseevent.y))
        return (x >= 0.0 and x <= 1.0 and y >= 0.0 and y <= 1.0), {}
    def get_x(self):
        "Return the left coord of the rectangle"
        return self._x
    def get_y(self):
        "Return the bottom coord of the rectangle"
        return self._y
    def get_xy(self):
        "Return the left and bottom coords of the rectangle"
        return self._x, self._y
    def get_width(self):
        "Return the width of the rectangle"
        return self._width
    def get_height(self):
        "Return the height of the rectangle"
        return self._height
    def set_x(self, x):
        """
        Set the left coord of the rectangle
        ACCEPTS: float
        """
        self._x = x
    def set_y(self, y):
        """
        Set the bottom coord of the rectangle
        ACCEPTS: float
        """
        self._y = y
    def set_xy(self, xy):
        """
        Set the left and bottom coords of the rectangle
        ACCEPTS: 2-item sequence
        """
        self._x, self._y = xy
    def set_width(self, w):
        """
        Set the width of the rectangle
        ACCEPTS: float
        """
        self._width = w
    def set_height(self, h):
        """
        Set the height of the rectangle
        ACCEPTS: float
        """
        self._height = h
    def set_bounds(self, *args):
        """
        Set the bounds of the rectangle: l,b,w,h
        ACCEPTS: (left, bottom, width, height)
        """
        # Accept either a single (l, b, w, h) sequence or four scalars.
        # BUG FIX: the old test was ``len(args)==0``, which indexed
        # args[0] on an empty tuple and sent the documented one-tuple
        # form into the four-scalar branch where unpacking failed.
        if len(args) == 1:
            l, b, w, h = args[0]
        else:
            l, b, w, h = args
        self._x = l
        self._y = b
        self._width = w
        self._height = h
    def get_bbox(self):
        return transforms.Bbox.from_bounds(self._x, self._y, self._width, self._height)
    xy = property(get_xy, set_xy)
class RegularPolygon(Patch):
    """
    A regular polygon patch.
    """
    def __str__(self):
        return "Poly%d(%g,%g)"%(self._numVertices,self._xy[0],self._xy[1])
    def __init__(self, xy, numVertices, radius=5, orientation=0,
                 **kwargs):
        """
        Constructor arguments:
        *xy*
          A length 2 tuple (*x*, *y*) of the center.
        *numVertices*
          the number of vertices.
        *radius*
          The distance from the center to each of the vertices.
        *orientation*
          rotates the polygon (in radians).
        Valid kwargs are:
        %(Patch)s
        """
        self._xy = xy
        self._numVertices = numVertices
        self._orientation = orientation
        self._radius = radius
        self._path = Path.unit_regular_polygon(numVertices)
        self._poly_transform = transforms.Affine2D()
        self._update_transform()
        Patch.__init__(self, **kwargs)
    __init__.__doc__ = cbook.dedent(__init__.__doc__) % artist.kwdocd
    def _update_transform(self):
        # Map the unit polygon into data space: scale, rotate, translate.
        self._poly_transform.clear() \
            .scale(self.radius) \
            .rotate(self.orientation) \
            .translate(*self.xy)
    def _get_xy(self):
        return self._xy
    def _set_xy(self, xy):
        # BUG FIX: the old setter discarded the new value (it only
        # refreshed the transform), so ``poly.xy = ...`` was a no-op.
        self._xy = xy
        self._update_transform()
    xy = property(_get_xy, _set_xy)
    def _get_orientation(self):
        return self._orientation
    def _set_orientation(self, orientation):
        self._orientation = orientation
    orientation = property(_get_orientation, _set_orientation)
    def _get_radius(self):
        return self._radius
    def _set_radius(self, radius):
        self._radius = radius
    radius = property(_get_radius, _set_radius)
    def _get_numvertices(self):
        return self._numVertices
    def _set_numvertices(self, numVertices):
        self._numVertices = numVertices
    numvertices = property(_get_numvertices, _set_numvertices)
    def get_path(self):
        return self._path
    def get_patch_transform(self):
        # Recompute so radius/orientation changes take effect at draw time.
        self._update_transform()
        return self._poly_transform
class PathPatch(Patch):
    """
    A general polycurve path patch.
    """
    def __str__(self):
        return "Poly((%g, %g) ...)" % tuple(self._path.vertices[0])
    def __init__(self, path, **kwargs):
        """
        *path* is a :class:`matplotlib.path.Path` object.
        Valid kwargs are:
        %(Patch)s
        .. seealso::
            :class:`Patch`:
                For additional kwargs
        """
        Patch.__init__(self, **kwargs)
        # The supplied path is used as-is; no copy is made.
        self._path = path
    __init__.__doc__ = cbook.dedent(__init__.__doc__) % artist.kwdocd
    def get_path(self):
        return self._path
class Polygon(Patch):
    """
    A general polygon patch.
    """
    def __str__(self):
        return "Poly((%g, %g) ...)" % tuple(self._path.vertices[0])
    def __init__(self, xy, closed=True, **kwargs):
        """
        *xy* is a numpy array with shape Nx2.
        If *closed* is *True*, the polygon will be closed so the
        starting and ending points are the same.
        Valid kwargs are:
        %(Patch)s
        .. seealso::
            :class:`Patch`:
                For additional kwargs
        """
        Patch.__init__(self, **kwargs)
        xy = np.asarray(xy, np.float_)
        self._path = Path(xy)
        # set_closed appends/strips the duplicate closing vertex.
        self.set_closed(closed)
    __init__.__doc__ = cbook.dedent(__init__.__doc__) % artist.kwdocd
    def get_path(self):
        return self._path
    def get_closed(self):
        return self._closed
    def set_closed(self, closed):
        # Keep the vertex list consistent with the closed flag: closed
        # polygons carry an explicit duplicate of the first vertex.
        self._closed = closed
        xy = self._get_xy()
        if closed:
            if len(xy) and (xy[0] != xy[-1]).any():
                xy = np.concatenate([xy, [xy[0]]])
        else:
            if len(xy)>2 and (xy[0]==xy[-1]).all():
                xy = xy[0:-1]
        self._set_xy(xy)
    def get_xy(self):
        return self._path.vertices
    def set_xy(self, vertices):
        # NOTE(review): this replaces the path without re-applying the
        # closed/open fix-up -- confirm callers go through set_closed
        # when they need the closing vertex maintained.
        self._path = Path(vertices)
    _get_xy = get_xy
    _set_xy = set_xy
    xy = property(
        get_xy, set_xy, None,
        """Set/get the vertices of the polygon.  This property is
           provided for backward compatibility with matplotlib 0.91.x
           only.  New code should use
           :meth:`~matplotlib.patches.Polygon.get_xy` and
           :meth:`~matplotlib.patches.Polygon.set_xy` instead.""")
class Wedge(Patch):
    """
    Wedge shaped patch.
    """
    def __str__(self):
        return "Wedge(%g,%g)"%(self.theta1,self.theta2)
    def __init__(self, center, r, theta1, theta2, width=None, **kwargs):
        """
        Draw a wedge centered at *x*, *y* center with radius *r* that
        sweeps *theta1* to *theta2* (in degrees).  If *width* is given,
        then a partial wedge is drawn from inner radius *r* - *width*
        to outer radius *r*.
        Valid kwargs are:
        %(Patch)s
        """
        Patch.__init__(self, **kwargs)
        self.center = center
        self.r,self.width = r,width
        self.theta1,self.theta2 = theta1,theta2
        # Inner and outer rings are connected unless the annulus is complete
        delta=theta2-theta1
        if abs((theta2-theta1) - 360) <= 1e-12:
            theta1,theta2 = 0,360
            connector = Path.MOVETO
        else:
            connector = Path.LINETO
        # Form the outer ring
        arc = Path.arc(theta1,theta2)
        if width is not None:
            # Partial annulus needs to draw the outer ring
            # followed by a reversed and scaled inner ring
            v1 = arc.vertices
            v2 = arc.vertices[::-1]*float(r-width)/r
            v = np.vstack([v1,v2,v1[0,:],(0,0)])
            c = np.hstack([arc.codes,arc.codes,connector,Path.CLOSEPOLY])
            c[len(arc.codes)]=connector
        else:
            # Wedge doesn't need an inner ring
            v = np.vstack([arc.vertices,[(0,0),arc.vertices[0,:],(0,0)]])
            c = np.hstack([arc.codes,[connector,connector,Path.CLOSEPOLY]])
        # Shift and scale the wedge to the final location.
        v *= r
        v += np.asarray(center)
        self._path = Path(v,c)
        # Wedge geometry is baked into the path; no patch-local transform.
        self._patch_transform = transforms.IdentityTransform()
    __init__.__doc__ = cbook.dedent(__init__.__doc__) % artist.kwdocd
    def get_path(self):
        return self._path
# COVERAGE NOTE: Not used internally or from examples
class Arrow(Patch):
    """
    An arrow patch.
    """
    def __str__(self):
        return "Arrow()"
    # Unit arrow pointing along +x with its tail at the origin; it is
    # scaled/rotated/translated into place by the patch transform.
    _path = Path( [
        [ 0.0, 0.1 ], [ 0.0, -0.1],
        [ 0.8, -0.1 ], [ 0.8, -0.3],
        [ 1.0, 0.0 ], [ 0.8, 0.3],
        [ 0.8, 0.1 ], [ 0.0, 0.1] ] )
    def __init__(self, x, y, dx, dy, width=1.0, **kwargs):
        """
        Draws an arrow, starting at (*x*, *y*), direction and length
        given by (*dx*, *dy*) the width of the arrow is scaled by *width*.
        Valid kwargs are:
        %(Patch)s
        """
        Patch.__init__(self, **kwargs)
        hyp = np.sqrt(dx**2 + dy**2) or 1  # guard against a zero-length vector
        cos_t = float(dx) / hyp
        sin_t = float(dy) / hyp
        # Stretch the unit arrow, rotate it onto (dx, dy), then move the
        # tail to (x, y); freeze the composition since it never changes.
        stretch = transforms.Affine2D().scale(hyp, width)
        rotate = transforms.Affine2D.from_values(cos_t, sin_t, -sin_t, cos_t,
                                                 0.0, 0.0)
        shift = transforms.Affine2D().translate(x, y)
        self._patch_transform = (stretch + rotate + shift).frozen()
    __init__.__doc__ = cbook.dedent(__init__.__doc__) % artist.kwdocd
    def get_path(self):
        return self._path
    def get_patch_transform(self):
        return self._patch_transform
class FancyArrow(Polygon):
    """
    Like Arrow, but lets you set head width and head height independently.
    """
    def __str__(self):
        return "FancyArrow()"
    def __init__(self, x, y, dx, dy, width=0.001, length_includes_head=False,
                 head_width=None, head_length=None, shape='full', overhang=0,
                 head_starts_at_zero=False, **kwargs):
        """
        Constructor arguments
        *length_includes_head*:
           *True* if head is counted in calculating the length.
        *shape*: ['full', 'left', 'right']
        *overhang*:
          distance that the arrow is swept back (0 overhang means
          triangular shape).
        *head_starts_at_zero*:
          If *True*, the head starts being drawn at coordinate 0
          instead of ending at coordinate 0.
        Valid kwargs are:
        %(Patch)s
        """
        if head_width is None:
            head_width = 3 * width
        if head_length is None:
            head_length = 1.5 * head_width
        distance = np.sqrt(dx**2 + dy**2)
        if length_includes_head:
            length = distance
        else:
            length = distance + head_length
        if not length:
            verts = []  # display nothing if empty
        else:
            # start by drawing horizontal arrow, point at (0,0)
            hw, hl, hs, lw = head_width, head_length, overhang, width
            left_half_arrow = np.array([
                [0.0, 0.0],              # tip
                [-hl, -hw/2.0],          # leftmost
                [-hl*(1-hs), -lw/2.0],   # meets stem
                [-length, -lw/2.0],      # bottom left
                [-length, 0],
            ])
            # if we're not including the head, shift up by head length
            if not length_includes_head:
                left_half_arrow += [head_length, 0]
            # if the head starts at 0, shift up by another head length
            if head_starts_at_zero:
                left_half_arrow += [head_length/2.0, 0]
            # figure out the shape, and complete accordingly
            if shape == 'left':
                coords = left_half_arrow
            else:
                right_half_arrow = left_half_arrow*[1, -1]
                if shape == 'right':
                    coords = right_half_arrow
                elif shape == 'full':
                    # The half-arrows contain the midpoint of the stem,
                    # which we can omit from the full arrow. Including it
                    # twice caused a problem with xpdf.
                    coords = np.concatenate([left_half_arrow[:-1],
                                             right_half_arrow[-2::-1]])
                else:
                    # FIX: call-style raise; the old `raise E, msg`
                    # statement form is a syntax error on Python 3.
                    raise ValueError("Got unknown shape: %s" % shape)
            # NOTE(review): if (dx, dy) == (0, 0) but length is nonzero
            # (the head alone contributes), this division raises
            # ZeroDivisionError -- confirm callers always pass a nonzero
            # direction vector.
            cx = float(dx)/distance
            sx = float(dy)/distance
            M = np.array([[cx, sx], [-sx, cx]])
            verts = np.dot(coords, M) + (x+dx, y+dy)
        # FIX: materialize the map -- Polygon expects a sequence and
        # Python 3's map() is a lazy iterator.
        Polygon.__init__(self, list(map(tuple, verts)), **kwargs)
    __init__.__doc__ = cbook.dedent(__init__.__doc__) % artist.kwdocd
class YAArrow(Patch):
    """
    Yet another arrow class.
    This is an arrow that is defined in display space and has a tip at
    *x1*, *y1* and a base at *x2*, *y2*.
    """
    def __str__(self):
        return "YAArrow()"
    def __init__(self, figure, xytip, xybase, width=4, frac=0.1, headwidth=12, **kwargs):
        """
        Constructor arguments:
        *xytip*
          (*x*, *y*) location of arrow tip
        *xybase*
          (*x*, *y*) location the arrow base mid point
        *figure*
          The :class:`~matplotlib.figure.Figure` instance
          (fig.dpi)
        *width*
          The width of the arrow in points
        *frac*
          The fraction of the arrow length occupied by the head
        *headwidth*
          The width of the base of the arrow head in points
        Valid kwargs are:
        %(Patch)s
        """
        self.figure = figure
        self.xytip = xytip
        self.xybase = xybase
        self.width = width
        self.frac = frac
        self.headwidth = headwidth
        Patch.__init__(self, **kwargs)
    __init__.__doc__ = cbook.dedent(__init__.__doc__) % artist.kwdocd
    def get_path(self):
        # Since this is dpi dependent, we need to recompute the path
        # every time.
        # the base vertices
        x1, y1 = self.xytip
        x2, y2 = self.xybase
        # Half-widths of the stem (k1) and head base (k2), in pixels.
        k1 = self.width*self.figure.dpi/72./2.
        k2 = self.headwidth*self.figure.dpi/72./2.
        xb1, yb1, xb2, yb2 = self.getpoints(x1, y1, x2, y2, k1)
        # a point on the segment *frac* of the distance from tip to base
        theta = math.atan2(y2-y1, x2-x1)
        r = math.sqrt((y2-y1)**2. + (x2-x1)**2.)
        xm = x1 + self.frac * r * math.cos(theta)
        ym = y1 + self.frac * r * math.sin(theta)
        xc1, yc1, xc2, yc2 = self.getpoints(x1, y1, xm, ym, k1)
        xd1, yd1, xd2, yd2 = self.getpoints(x1, y1, xm, ym, k2)
        xs = self.convert_xunits([xb1, xb2, xc2, xd2, x1, xd1, xc1, xb1])
        ys = self.convert_yunits([yb1, yb2, yc2, yd2, y1, yd1, yc1, yb1])
        # FIX: materialize the pairs -- Path needs a sequence, and on
        # Python 3 zip() returns a lazy iterator.
        return Path(list(zip(xs, ys)))
    def get_patch_transform(self):
        return transforms.IdentityTransform()
    def getpoints(self, x1, y1, x2, y2, k):
        """
        For line segment defined by (*x1*, *y1*) and (*x2*, *y2*)
        return the points on the line that is perpendicular to the
        line and intersects (*x2*, *y2*) and the distance from (*x2*,
        *y2*) of the returned points is *k*.
        """
        x1, y1, x2, y2, k = map(float, (x1, y1, x2, y2, k))
        # Horizontal/vertical segments are special-cased to avoid a zero
        # or infinite slope in the general formula below.
        if y2-y1 == 0:
            return x2, y2+k, x2, y2-k
        elif x2-x1 == 0:
            return x2+k, y2, x2-k, y2
        m = (y2-y1)/(x2-x1)
        pm = -1./m   # slope of the perpendicular
        # Solve |P - (x2, y2)| = k along the perpendicular via the
        # quadratic in y.
        a = 1
        b = -2*y2
        c = y2**2. - k**2.*pm**2./(1. + pm**2.)
        y3a = (-b + math.sqrt(b**2.-4*a*c))/(2.*a)
        x3a = (y3a - y2)/pm + x2
        y3b = (-b - math.sqrt(b**2.-4*a*c))/(2.*a)
        x3b = (y3b - y2)/pm + x2
        return x3a, y3a, x3b, y3b
class CirclePolygon(RegularPolygon):
    """
    A polygon-approximation of a circle patch.
    """
    def __str__(self):
        return "CirclePolygon(%d,%d)"%self.center
    def __init__(self, xy, radius=5,
                 resolution=20,  # the number of vertices
                 **kwargs):
        """
        Create a circle at *xy* = (*x*, *y*) with given *radius*.
        This circle is approximated by a regular polygon with
        *resolution* sides. For a smoother circle drawn with splines,
        see :class:`~matplotlib.patches.Circle`.
        Valid kwargs are:
        %(Patch)s
        """
        # A circle is simply a many-sided regular polygon with no
        # rotation applied; delegate everything to RegularPolygon.
        RegularPolygon.__init__(self, xy, resolution, radius,
                                orientation=0, **kwargs)
    __init__.__doc__ = cbook.dedent(__init__.__doc__) % artist.kwdocd
class Ellipse(Patch):
    """
    A scale-free ellipse.
    """
    def __str__(self):
        return "Ellipse(%s,%s;%sx%s)"%(self.center[0],self.center[1],self.width,self.height)
    def __init__(self, xy, width, height, angle=0.0, **kwargs):
        """
        *xy*
          center of ellipse
        *width*
          length of horizontal axis
        *height*
          length of vertical axis
        *angle*
          rotation in degrees (anti-clockwise)
        Valid kwargs are:
        %(Patch)s
        """
        Patch.__init__(self, **kwargs)
        self.center = xy
        self.width, self.height = width, height
        self.angle = angle
        # The geometry is always the unit circle; size/rotation/position
        # are applied through the patch transform.
        self._path = Path.unit_circle()
        # Note: This cannot be calculated until this is added to an Axes
        self._patch_transform = transforms.IdentityTransform()
    __init__.__doc__ = cbook.dedent(__init__.__doc__) % artist.kwdocd
    def _recompute_transform(self):
        """NOTE: This cannot be called until after this has been added
           to an Axes, otherwise unit conversion will fail. This
           makes it very important to call the accessor method and
           not directly access the transformation member variable.
        """
        center = (self.convert_xunits(self.center[0]),
                  self.convert_yunits(self.center[1]))
        width = self.convert_xunits(self.width)
        height = self.convert_yunits(self.height)
        # Map the unit circle onto the requested ellipse: scale by the
        # semi-axes, rotate, then translate to the center.
        self._patch_transform = transforms.Affine2D() \
            .scale(width * 0.5, height * 0.5) \
            .rotate_deg(self.angle) \
            .translate(*center)
    def get_path(self):
        """
        Return the vertices of the rectangle
        """
        return self._path
    def get_patch_transform(self):
        self._recompute_transform()
        return self._patch_transform
    def contains(self,ev):
        # Hit-test in unit-circle space: invert the transform and check
        # the point against x^2 + y^2 <= 1.
        if ev.x is None or ev.y is None: return False,{}
        x, y = self.get_transform().inverted().transform_point((ev.x, ev.y))
        return (x*x + y*y) <= 1.0, {}
class Circle(Ellipse):
    """
    A circle patch.
    """
    def __str__(self):
        return "Circle((%g,%g),r=%g)"%(self.center[0],self.center[1],self.radius)
    def __init__(self, xy, radius=5, **kwargs):
        """
        Create true circle at center *xy* = (*x*, *y*) with given
        *radius*. Unlike :class:`~matplotlib.patches.CirclePolygon`
        which is a polygonal approximation, this uses Bézier splines
        and is much closer to a scale-free circle.
        Valid kwargs are:
        %(Patch)s
        """
        # 'resolution' only made sense for the polygonal approximation;
        # accept it for backward compatibility but warn and discard it.
        try:
            kwargs.pop('resolution')
        except KeyError:
            pass
        else:
            import warnings
            warnings.warn('Circle is now scale free. Use CirclePolygon instead!', DeprecationWarning)
        self.radius = radius
        # A circle is an ellipse whose two axes are both the diameter.
        Ellipse.__init__(self, xy, radius*2, radius*2, **kwargs)
    __init__.__doc__ = cbook.dedent(__init__.__doc__) % artist.kwdocd
class Arc(Ellipse):
    """
    An elliptical arc. Because it performs various optimizations, it
    can not be filled.
    The arc must be used in an :class:`~matplotlib.axes.Axes`
    instance---it can not be added directly to a
    :class:`~matplotlib.figure.Figure`---because it is optimized to
    only render the segments that are inside the axes bounding box
    with high resolution.
    """
    def __str__(self):
        return "Arc(%s,%s;%sx%s)"%(self.center[0],self.center[1],self.width,self.height)
    def __init__(self, xy, width, height, angle=0.0, theta1=0.0, theta2=360.0, **kwargs):
        """
        The following args are supported:
        *xy*
          center of ellipse
        *width*
          length of horizontal axis
        *height*
          length of vertical axis
        *angle*
          rotation in degrees (anti-clockwise)
        *theta1*
          starting angle of the arc in degrees
        *theta2*
          ending angle of the arc in degrees
        If *theta1* and *theta2* are not provided, the arc will form a
        complete ellipse.
        Valid kwargs are:
        %(Patch)s
        """
        # FIX: pop with a default -- previously a missing 'fill' kwarg
        # raised KeyError even though Arcs are simply never filled.
        fill = kwargs.pop('fill', False)
        if fill:
            raise ValueError("Arc objects can not be filled")
        kwargs['fill'] = False
        Ellipse.__init__(self, xy, width, height, angle, **kwargs)
        self.theta1 = theta1
        self.theta2 = theta2
    __init__.__doc__ = cbook.dedent(__init__.__doc__) % artist.kwdocd
    def draw(self, renderer):
        """
        Ellipses are normally drawn using an approximation that uses
        eight cubic bezier splines. The error of this approximation
        is 1.89818e-6, according to this unverified source:
          Lancaster, Don. Approximating a Circle or an Ellipse Using
          Four Bezier Cubic Splines.
          http://www.tinaja.com/glib/ellipse4.pdf
        There is a use case where very large ellipses must be drawn
        with very high accuracy, and it is too expensive to render the
        entire ellipse with enough segments (either splines or line
        segments). Therefore, in the case where either radius of the
        ellipse is large enough that the error of the spline
        approximation will be visible (greater than one pixel offset
        from the ideal), a different technique is used.
        In that case, only the visible parts of the ellipse are drawn,
        with each visible arc using a fixed number of spline segments
        (8). The algorithm proceeds as follows:
          1. The points where the ellipse intersects the axes bounding
             box are located. (This is done be performing an inverse
             transformation on the axes bbox such that it is relative
             to the unit circle -- this makes the intersection
             calculation much easier than doing rotated ellipse
             intersection directly).
             This uses the "line intersecting a circle" algorithm
             from:
               Vince, John. Geometry for Computer Graphics: Formulae,
               Examples & Proofs. London: Springer-Verlag, 2005.
          2. The angles of each of the intersection points are
             calculated.
          3. Proceeding counterclockwise starting in the positive
             x-direction, each of the visible arc-segments between the
             pairs of vertices are drawn using the bezier arc
             approximation technique implemented in
             :meth:`matplotlib.path.Path.arc`.
        """
        if not hasattr(self, 'axes'):
            raise RuntimeError('Arcs can only be used in Axes instances')
        self._recompute_transform()
        # Get the width and height in pixels
        width = self.convert_xunits(self.width)
        height = self.convert_yunits(self.height)
        width, height = self.get_transform().transform_point(
            (width, height))
        inv_error = (1.0 / 1.89818e-6) * 0.5
        if width < inv_error and height < inv_error:
            # Small enough that the 8-spline approximation error is
            # sub-pixel: draw the whole arc the cheap way.
            self._path = Path.arc(self.theta1, self.theta2)
            return Patch.draw(self, renderer)
        def iter_circle_intersect_on_line(x0, y0, x1, y1):
            # Yield the intersection point(s) of the unit circle with the
            # infinite line through (x0, y0) and (x1, y1).
            dx = x1 - x0
            dy = y1 - y0
            dr2 = dx*dx + dy*dy
            D = x0*y1 - x1*y0
            D2 = D*D
            discrim = dr2 - D2
            # Single (tangential) intersection
            if discrim == 0.0:
                x = (D*dy) / dr2
                y = (-D*dx) / dr2
                yield x, y
            elif discrim > 0.0:
                # The definition of "sign" here is different from
                # np.sign: we never want to get 0.0
                if dy < 0.0:
                    sign_dy = -1.0
                else:
                    sign_dy = 1.0
                sqrt_discrim = np.sqrt(discrim)
                for sign in (1., -1.):
                    x = (D*dy + sign * sign_dy * dx * sqrt_discrim) / dr2
                    y = (-D*dx + sign * np.abs(dy) * sqrt_discrim) / dr2
                    yield x, y
        def iter_circle_intersect_on_line_seg(x0, y0, x1, y1):
            # Same as above, restricted to the segment, with a small
            # epsilon tolerance at both ends.
            epsilon = 1e-9
            if x1 < x0:
                x0e, x1e = x1, x0
            else:
                x0e, x1e = x0, x1
            if y1 < y0:
                y0e, y1e = y1, y0
            else:
                y0e, y1e = y0, y1
            x0e -= epsilon
            y0e -= epsilon
            x1e += epsilon
            y1e += epsilon
            for x, y in iter_circle_intersect_on_line(x0, y0, x1, y1):
                if x >= x0e and x <= x1e and y >= y0e and y <= y1e:
                    yield x, y
        # Transforms the axes box_path so that it is relative to the unit
        # circle in the same way that it is relative to the desired
        # ellipse.
        box_path = Path.unit_rectangle()
        box_path_transform = transforms.BboxTransformTo(self.axes.bbox) + \
            self.get_transform().inverted()
        box_path = box_path.transformed(box_path_transform)
        PI = np.pi
        TWOPI = PI * 2.0
        RAD2DEG = 180.0 / PI
        DEG2RAD = PI / 180.0
        theta1 = self.theta1
        theta2 = self.theta2
        thetas = {}
        # For each of the point pairs, there is a line segment
        for p0, p1 in zip(box_path.vertices[:-1], box_path.vertices[1:]):
            x0, y0 = p0
            x1, y1 = p1
            for x, y in iter_circle_intersect_on_line_seg(x0, y0, x1, y1):
                theta = np.arccos(x)
                if y < 0:
                    theta = TWOPI - theta
                # Convert radians to angles
                theta *= RAD2DEG
                if theta > theta1 and theta < theta2:
                    thetas[theta] = None
        # FIX: sorted() works on both a Python 2 key list and a Python 3
        # dict view; dict.keys().sort() does not exist on Python 3.
        thetas = sorted(thetas)
        thetas.append(theta2)
        last_theta = theta1
        theta1_rad = theta1 * DEG2RAD
        inside = box_path.contains_point((np.cos(theta1_rad), np.sin(theta1_rad)))
        # Walk the crossing angles, drawing only the sub-arcs that lie
        # inside the axes bounding box.
        for theta in thetas:
            if inside:
                self._path = Path.arc(last_theta, theta, 8)
                Patch.draw(self, renderer)
                inside = False
            else:
                inside = True
            last_theta = theta
def bbox_artist(artist, renderer, props=None, fill=True):
    """
    This is a debug function to draw a rectangle around the bounding
    box returned by
    :meth:`~matplotlib.artist.Artist.get_window_extent` of an artist,
    to test whether the artist is returning the correct bbox.
    *props* is a dict of rectangle props with the additional property
    'pad' that sets the padding around the bbox in points.
    """
    # Work on a copy so the caller's dict is never mutated.
    props = {} if props is None else props.copy()
    pad = renderer.points_to_pixels(props.pop('pad', 4))
    left, bottom, width, height = artist.get_window_extent(renderer).bounds
    # Grow the box by half a pad on every side.
    rect = Rectangle(xy=(left - pad/2., bottom - pad/2.),
                     width=width + pad,
                     height=height + pad,
                     fill=fill,
                     )
    rect.set_transform(transforms.IdentityTransform())
    rect.set_clip_on(False)
    rect.update(props)
    rect.draw(renderer)
def draw_bbox(bbox, renderer, color='k', trans=None):
    """
    This is a debug function to draw a rectangle around the bounding
    box returned by
    :meth:`~matplotlib.artist.Artist.get_window_extent` of an artist,
    to test whether the artist is returning the correct bbox.
    """
    left, bottom, width, height = bbox.get_bounds()
    rect = Rectangle(xy=(left, bottom),
                     width=width,
                     height=height,
                     edgecolor=color,
                     fill=False,
                     )
    if trans is not None:
        rect.set_transform(trans)
    rect.set_clip_on(False)
    rect.draw(renderer)
def _pprint_table(_table, leadingspace=2):
"""
Given the list of list of strings, return a string of REST table format.
"""
if leadingspace:
pad = ' '*leadingspace
else:
pad = ''
columns = [[] for cell in _table[0]]
for row in _table:
for column, cell in zip(columns, row):
column.append(cell)
col_len = [max([len(cell) for cell in column]) for column in columns]
lines = []
table_formatstr = pad + ' '.join([('=' * cl) for cl in col_len])
lines.append('')
lines.append(table_formatstr)
lines.append(pad + ' '.join([cell.ljust(cl) for cell, cl in zip(_table[0], col_len)]))
lines.append(table_formatstr)
lines.extend([(pad + ' '.join([cell.ljust(cl) for cell, cl in zip(row, col_len)]))
for row in _table[1:]])
lines.append(table_formatstr)
lines.append('')
return "\n".join(lines)
def _pprint_styles(_styles, leadingspace=2):
    """
    A helper function for the _Style class. Given the dictionary of
    (stylename : styleclass), return a formatted string listing all the
    styles. Used to update the documentation.
    """
    import inspect
    _table = [["Class", "Name", "Attrs"]]
    for name, cls in sorted(_styles.items()):
        # NOTE(review): inspect.getargspec was removed in Python 3.11;
        # this file otherwise targets Python 2, where it is the API.
        args, varargs, varkw, defaults = inspect.getargspec(cls.__init__)
        if defaults:
            # Pair each defaulted argument (skipping 'self') with its default.
            args = [(argname, argdefault)
                    for argname, argdefault in zip(args[1:], defaults)]
        else:
            args = None
        if args is None:
            argstr = 'None'
        else:
            argstr = ",".join([("%s=%s" % (an, av)) for an, av in args])
        #adding quotes for now to work around tex bug treating '-' as itemize
        _table.append([cls.__name__, "'%s'" % name, argstr])
    # FIX: forward *leadingspace* (previously computed into an unused
    # local and silently ignored); also dropped the unused locals
    # `names, attrss, clss`.  Default value matches the old behavior.
    return _pprint_table(_table, leadingspace)
class _Style(object):
"""
A base class for the Styles. It is meant to be a container class,
where actual styles are declared as subclass of it, and it
provides some helper functions.
"""
def __new__(self, stylename, **kw):
"""
return the instance of the subclass with the given style name.
"""
# the "class" should have the _style_list attribute, which is
# a dictionary of stylname, style class paie.
_list = stylename.replace(" ","").split(",")
_name = _list[0].lower()
try:
_cls = self._style_list[_name]
except KeyError:
raise ValueError("Unknown style : %s" % stylename)
try:
_args_pair = [cs.split("=") for cs in _list[1:]]
_args = dict([(k, float(v)) for k, v in _args_pair])
except ValueError:
raise ValueError("Incorrect style argument : %s" % stylename)
_args.update(kw)
return _cls(**_args)
@classmethod
def get_styles(klass):
"""
A class method which returns a dictionary of available styles.
"""
return klass._style_list
@classmethod
def pprint_styles(klass):
"""
A class method which returns a string of the available styles.
"""
return _pprint_styles(klass._style_list)
class BoxStyle(_Style):
    """
    :class:`BoxStyle` is a container class which defines several
    boxstyle classes, which are used for :class:`FancyBoxPatch`.
    A style object can be created as::
        BoxStyle.Round(pad=0.2)
    or::
        BoxStyle("Round", pad=0.2)
    or::
        BoxStyle("Round, pad=0.2")
    Following boxstyle classes are defined.
    %(AvailableBoxstyles)s
    An instance of any boxstyle class is an callable object,
    whose call signature is::
        __call__(self, x0, y0, width, height, mutation_size, aspect_ratio=1.)
    and returns a :class:`Path` instance. *x0*, *y0*, *width* and
    *height* specify the location and size of the box to be
    drawn. *mutation_scale* determines the overall size of the
    mutation (by which I mean the transformation of the rectangle to
    the fancy box). *mutation_aspect* determines the aspect-ratio of
    the mutation.
    .. plot:: mpl_examples/pylab_examples/fancybox_demo2.py
    """
    # Registry of style name -> style class; populated below as each
    # nested style class is defined.  _Style.__new__ looks names up here.
    _style_list = {}
    class _Base(object):
        """
        :class:`BBoxTransmuterBase` and its derivatives are used to make a
        fancy box around a given rectangle. The :meth:`__call__` method
        returns the :class:`~matplotlib.path.Path` of the fancy box. This
        class is not an artist and actual drawing of the fancy box is done
        by the :class:`FancyBboxPatch` class.
        """
        # The derived classes are required to be able to be initialized
        # w/o arguments, i.e., all its argument (except self) must have
        # the default values.
        def __init__(self):
            """
            Initialization.
            """
            super(BoxStyle._Base, self).__init__()
        def transmute(self, x0, y0, width, height, mutation_size):
            """
            The transmute method is a very core of the
            :class:`BboxTransmuter` class and must be overriden in the
            subclasses. It receives the location and size of the
            rectangle, and the mutation_size, with which the amount of
            padding and etc. will be scaled. It returns a
            :class:`~matplotlib.path.Path` instance.
            """
            raise NotImplementedError('Derived must override')
        def __call__(self, x0, y0, width, height, mutation_size,
                     aspect_ratio=1.):
            """
            Given the location and size of the box, return the path of
            the box around it.
              - *x0*, *y0*, *width*, *height* : location and size of the box
              - *mutation_size* : a reference scale for the mutation.
              - *aspect_ratio* : aspect-ration for the mutation.
            """
            # The __call__ method is a thin wrapper around the transmute method
            # and take care of the aspect.
            if aspect_ratio is not None:
                # Squeeze the given height by the aspect_ratio
                y0, height = y0/aspect_ratio, height/aspect_ratio
                # call transmute method with squeezed height.
                path = self.transmute(x0, y0, width, height, mutation_size)
                vertices, codes = path.vertices, path.codes
                # Restore the height
                vertices[:,1] = vertices[:,1] * aspect_ratio
                return Path(vertices, codes)
            else:
                return self.transmute(x0, y0, width, height, mutation_size)
    class Square(_Base):
        """
        A simple square box.
        """
        def __init__(self, pad=0.3):
            """
            *pad*
              amount of padding
            """
            self.pad = pad
            super(BoxStyle.Square, self).__init__()
        def transmute(self, x0, y0, width, height, mutation_size):
            # padding
            pad = mutation_size * self.pad
            # width and height with padding added.
            width, height = width + 2.*pad, \
                            height + 2.*pad,
            # boundary of the padded box
            x0, y0 = x0-pad, y0-pad,
            x1, y1 = x0+width, y0 + height
            cp = [(x0, y0), (x1, y0), (x1, y1), (x0, y1),
                  (x0, y0), (x0, y0)]
            com = [Path.MOVETO,
                   Path.LINETO,
                   Path.LINETO,
                   Path.LINETO,
                   Path.LINETO,
                   Path.CLOSEPOLY]
            path = Path(cp, com)
            return path
    _style_list["square"] = Square
    class LArrow(_Base):
        """
        (left) Arrow Box
        """
        def __init__(self, pad=0.3):
            self.pad = pad
            super(BoxStyle.LArrow, self).__init__()
        def transmute(self, x0, y0, width, height, mutation_size):
            # padding
            pad = mutation_size * self.pad
            # width and height with padding added.
            width, height = width + 2.*pad, \
                            height + 2.*pad,
            # boundary of the padded box
            x0, y0 = x0-pad, y0-pad,
            x1, y1 = x0+width, y0 + height
            # The arrow head spans the full (padded) height of the box.
            dx = (y1-y0)/2.
            dxx = dx*.5
            # adjust x0. 1.4 <- sqrt(2)
            x0 = x0 + pad / 1.4
            cp = [(x0+dxx, y0), (x1, y0), (x1, y1), (x0+dxx, y1),
                  (x0+dxx, y1+dxx), (x0-dx, y0+dx), (x0+dxx, y0-dxx), # arrow
                  (x0+dxx, y0), (x0+dxx, y0)]
            com = [Path.MOVETO, Path.LINETO, Path.LINETO, Path.LINETO,
                   Path.LINETO, Path.LINETO, Path.LINETO,
                   Path.LINETO, Path.CLOSEPOLY]
            path = Path(cp, com)
            return path
    _style_list["larrow"] = LArrow
    class RArrow(LArrow):
        """
        (right) Arrow Box
        """
        def __init__(self, pad=0.3):
            self.pad = pad
            super(BoxStyle.RArrow, self).__init__()
        def transmute(self, x0, y0, width, height, mutation_size):
            # Build the left-arrow path, then mirror its x coordinates
            # about the box's vertical center line.
            p = BoxStyle.LArrow.transmute(self, x0, y0,
                                          width, height, mutation_size)
            p.vertices[:,0] = 2*x0 + width - p.vertices[:,0]
            return p
    _style_list["rarrow"] = RArrow
    class Round(_Base):
        """
        A box with round corners.
        """
        def __init__(self, pad=0.3, rounding_size=None):
            """
            *pad*
              amount of padding
            *rounding_size*
              rounding radius of corners. *pad* if None
            """
            self.pad = pad
            self.rounding_size = rounding_size
            super(BoxStyle.Round, self).__init__()
        def transmute(self, x0, y0, width, height, mutation_size):
            # padding
            pad = mutation_size * self.pad
            # size of the roudning corner
            if self.rounding_size:
                dr = mutation_size * self.rounding_size
            else:
                dr = pad
            width, height = width + 2.*pad, \
                            height + 2.*pad,
            x0, y0 = x0-pad, y0-pad,
            x1, y1 = x0+width, y0 + height
            # Round corners are implemented as quadratic bezier. eg.
            # [(x0, y0-dr), (x0, y0), (x0+dr, y0)] for lower left corner.
            cp = [(x0+dr, y0),
                  (x1-dr, y0),
                  (x1, y0), (x1, y0+dr),
                  (x1, y1-dr),
                  (x1, y1), (x1-dr, y1),
                  (x0+dr, y1),
                  (x0, y1), (x0, y1-dr),
                  (x0, y0+dr),
                  (x0, y0), (x0+dr, y0),
                  (x0+dr, y0)]
            com = [Path.MOVETO,
                   Path.LINETO,
                   Path.CURVE3, Path.CURVE3,
                   Path.LINETO,
                   Path.CURVE3, Path.CURVE3,
                   Path.LINETO,
                   Path.CURVE3, Path.CURVE3,
                   Path.LINETO,
                   Path.CURVE3, Path.CURVE3,
                   Path.CLOSEPOLY]
            path = Path(cp, com)
            return path
    _style_list["round"] = Round
    class Round4(_Base):
        """
        Another box with round edges.
        """
        def __init__(self, pad=0.3, rounding_size=None):
            """
            *pad*
              amount of padding
            *rounding_size*
              rounding size of edges. *pad* if None
            """
            self.pad = pad
            self.rounding_size = rounding_size
            super(BoxStyle.Round4, self).__init__()
        def transmute(self, x0, y0, width, height, mutation_size):
            # padding
            pad = mutation_size * self.pad
            # roudning size. Use a half of the pad if not set.
            if self.rounding_size:
                dr = mutation_size * self.rounding_size
            else:
                dr = pad / 2.
            width, height = width + 2.*pad - 2*dr, \
                            height + 2.*pad - 2*dr,
            x0, y0 = x0-pad+dr, y0-pad+dr,
            x1, y1 = x0+width, y0 + height
            # Each side is a single cubic bezier bulging outward by dr.
            cp = [(x0, y0),
                  (x0+dr, y0-dr), (x1-dr, y0-dr), (x1, y0),
                  (x1+dr, y0+dr), (x1+dr, y1-dr), (x1, y1),
                  (x1-dr, y1+dr), (x0+dr, y1+dr), (x0, y1),
                  (x0-dr, y1-dr), (x0-dr, y0+dr), (x0, y0),
                  (x0, y0)]
            com = [Path.MOVETO,
                   Path.CURVE4, Path.CURVE4, Path.CURVE4,
                   Path.CURVE4, Path.CURVE4, Path.CURVE4,
                   Path.CURVE4, Path.CURVE4, Path.CURVE4,
                   Path.CURVE4, Path.CURVE4, Path.CURVE4,
                   Path.CLOSEPOLY]
            path = Path(cp, com)
            return path
    _style_list["round4"] = Round4
    class Sawtooth(_Base):
        """
        A sawtooth box.
        """
        def __init__(self, pad=0.3, tooth_size=None):
            """
            *pad*
              amount of padding
            *tooth_size*
              size of the sawtooth. pad* if None
            """
            self.pad = pad
            self.tooth_size = tooth_size
            super(BoxStyle.Sawtooth, self).__init__()
        def _get_sawtooth_vertices(self, x0, y0, width, height, mutation_size):
            # padding
            pad = mutation_size * self.pad
            # size of sawtooth
            if self.tooth_size is None:
                tooth_size = self.pad * .5 * mutation_size
            else:
                tooth_size = self.tooth_size * mutation_size
            tooth_size2 = tooth_size / 2.
            width, height = width + 2.*pad - tooth_size, \
                            height + 2.*pad - tooth_size,
            # the sizes of the vertical and horizontal sawtooth are
            # separately adjusted to fit the given box size.
            dsx_n = int(round((width - tooth_size) / (tooth_size * 2))) * 2
            dsx = (width - tooth_size) / dsx_n
            dsy_n = int(round((height - tooth_size) / (tooth_size * 2))) * 2
            dsy = (height - tooth_size) / dsy_n
            x0, y0 = x0-pad+tooth_size2, y0-pad+tooth_size2
            x1, y1 = x0+width, y0 + height
            # NOTE(review): the paired x and y lists below have different
            # lengths (e.g. bottom x has 2*dsx_n+2 entries vs 4*dsx_n+2 y
            # entries), and right_saw_x repeats its pattern dsx_n (not
            # dsy_n) times; the zip() at the end silently truncates to the
            # shorter list -- confirm before changing.
            bottom_saw_x = [x0] + \
                           [x0 + tooth_size2 + dsx*.5* i for i in range(dsx_n*2)] + \
                           [x1 - tooth_size2]
            bottom_saw_y = [y0] + \
                           [y0 - tooth_size2, y0, y0 + tooth_size2, y0] * dsx_n + \
                           [y0 - tooth_size2]
            right_saw_x = [x1] + \
                          [x1 + tooth_size2, x1, x1 - tooth_size2, x1] * dsx_n + \
                          [x1 + tooth_size2]
            right_saw_y = [y0] + \
                          [y0 + tooth_size2 + dsy*.5* i for i in range(dsy_n*2)] + \
                          [y1 - tooth_size2]
            top_saw_x = [x1] + \
                        [x1 - tooth_size2 - dsx*.5* i for i in range(dsx_n*2)] + \
                        [x0 + tooth_size2]
            top_saw_y = [y1] + \
                        [y1 + tooth_size2, y1, y1 - tooth_size2, y1] * dsx_n + \
                        [y1 + tooth_size2]
            left_saw_x = [x0] + \
                         [x0 - tooth_size2, x0, x0 + tooth_size2, x0] * dsy_n + \
                         [x0 - tooth_size2]
            left_saw_y = [y1] + \
                         [y1 - tooth_size2 - dsy*.5* i for i in range(dsy_n*2)] + \
                         [y0 + tooth_size2]
            # NOTE(review): relies on Python 2 zip() returning a list
            # (list concatenation with +); breaks on Python 3.
            saw_vertices = zip(bottom_saw_x, bottom_saw_y) + \
                           zip(right_saw_x, right_saw_y) + \
                           zip(top_saw_x, top_saw_y) + \
                           zip(left_saw_x, left_saw_y) + \
                           [(bottom_saw_x[0], bottom_saw_y[0])]
            return saw_vertices
        def transmute(self, x0, y0, width, height, mutation_size):
            saw_vertices = self._get_sawtooth_vertices(x0, y0, width, height, mutation_size)
            path = Path(saw_vertices)
            return path
    _style_list["sawtooth"] = Sawtooth
    class Roundtooth(Sawtooth):
        """
        A roundtooth(?) box.
        """
        def __init__(self, pad=0.3, tooth_size=None):
            """
            *pad*
              amount of padding
            *tooth_size*
              size of the sawtooth. pad* if None
            """
            super(BoxStyle.Roundtooth, self).__init__(pad, tooth_size)
        def transmute(self, x0, y0, width, height, mutation_size):
            # Same vertices as Sawtooth, but rendered as a chain of
            # quadratic beziers so the teeth come out rounded.
            saw_vertices = self._get_sawtooth_vertices(x0, y0, width, height, mutation_size)
            cp = [Path.MOVETO] + ([Path.CURVE3, Path.CURVE3] * ((len(saw_vertices)-1)//2))
            path = Path(saw_vertices, cp)
            return path
    _style_list["roundtooth"] = Roundtooth
    # Interpolate the accumulated style table into the class docstring.
    __doc__ = cbook.dedent(__doc__) % \
           {"AvailableBoxstyles": _pprint_styles(_style_list)}
class FancyBboxPatch(Patch):
"""
Draw a fancy box around a rectangle with lower left at *xy*=(*x*,
*y*) with specified width and height.
:class:`FancyBboxPatch` class is similar to :class:`Rectangle`
class, but it draws a fancy box around the rectangle. The
transformation of the rectangle box to the fancy box is delegated
to the :class:`BoxTransmuterBase` and its derived classes.
"""
def __str__(self):
return self.__class__.__name__ \
+ "FancyBboxPatch(%g,%g;%gx%g)" % (self._x, self._y, self._width, self._height)
def __init__(self, xy, width, height,
boxstyle="round",
bbox_transmuter=None,
mutation_scale=1.,
mutation_aspect=None,
**kwargs):
"""
*xy* = lower left corner
*width*, *height*
*boxstyle* determines what kind of fancy box will be drawn. It
can be a string of the style name with a comma separated
attribute, or an instance of :class:`BoxStyle`. Following box
styles are available.
%(AvailableBoxstyles)s
*mutation_scale* : a value with which attributes of boxstyle
(e.g., pad) will be scaled. default=1.
*mutation_aspect* : The height of the rectangle will be
squeezed by this value before the mutation and the mutated
box will be stretched by the inverse of it. default=None.
Valid kwargs are:
%(Patch)s
"""
Patch.__init__(self, **kwargs)
self._x = xy[0]
self._y = xy[1]
self._width = width
self._height = height
if boxstyle == "custom":
if bbox_transmuter is None:
raise ValueError("bbox_transmuter argument is needed with custom boxstyle")
self._bbox_transmuter = bbox_transmuter
else:
self.set_boxstyle(boxstyle)
self._mutation_scale=mutation_scale
self._mutation_aspect=mutation_aspect
kwdoc = dict()
kwdoc["AvailableBoxstyles"]=_pprint_styles(BoxStyle._style_list)
kwdoc.update(artist.kwdocd)
__init__.__doc__ = cbook.dedent(__init__.__doc__) % kwdoc
del kwdoc
def set_boxstyle(self, boxstyle=None, **kw):
"""
Set the box style.
*boxstyle* can be a string with boxstyle name with optional
comma-separated attributes. Alternatively, the attrs can
be provided as keywords::
set_boxstyle("round,pad=0.2")
set_boxstyle("round", pad=0.2)
Old attrs simply are forgotten.
Without argument (or with *boxstyle* = None), it returns
available box styles.
ACCEPTS: [ %(AvailableBoxstyles)s ]
"""
if boxstyle==None:
return BoxStyle.pprint_styles()
if isinstance(boxstyle, BoxStyle._Base):
self._bbox_transmuter = boxstyle
elif callable(boxstyle):
self._bbox_transmuter = boxstyle
else:
self._bbox_transmuter = BoxStyle(boxstyle, **kw)
kwdoc = dict()
kwdoc["AvailableBoxstyles"]=_pprint_styles(BoxStyle._style_list)
kwdoc.update(artist.kwdocd)
set_boxstyle.__doc__ = cbook.dedent(set_boxstyle.__doc__) % kwdoc
del kwdoc
def set_mutation_scale(self, scale):
"""
Set the mutation scale.
ACCEPTS: float
"""
self._mutation_scale=scale
def get_mutation_scale(self):
"""
Return the mutation scale.
"""
return self._mutation_scale
def set_mutation_aspect(self, aspect):
"""
Set the aspect ratio of the bbox mutation.
ACCEPTS: float
"""
self._mutation_aspect=aspect
def get_mutation_aspect(self):
"""
Return the aspect ratio of the bbox mutation.
"""
return self._mutation_aspect
def get_boxstyle(self):
"Return the boxstyle object"
return self._bbox_transmuter
def get_path(self):
"""
Return the mutated path of the rectangle
"""
_path = self.get_boxstyle()(self._x, self._y,
self._width, self._height,
self.get_mutation_scale(),
self.get_mutation_aspect())
return _path
# Following methods are borrowed from the Rectangle class.
def get_x(self):
"Return the left coord of the rectangle"
return self._x
def get_y(self):
"Return the bottom coord of the rectangle"
return self._y
def get_width(self):
"Return the width of the rectangle"
return self._width
def get_height(self):
"Return the height of the rectangle"
return self._height
def set_x(self, x):
"""
Set the left coord of the rectangle
ACCEPTS: float
"""
self._x = x
def set_y(self, y):
"""
Set the bottom coord of the rectangle
ACCEPTS: float
"""
self._y = y
def set_width(self, w):
"""
Set the width rectangle
ACCEPTS: float
"""
self._width = w
def set_height(self, h):
"""
Set the width rectangle
ACCEPTS: float
"""
self._height = h
def set_bounds(self, *args):
"""
Set the bounds of the rectangle: l,b,w,h
ACCEPTS: (left, bottom, width, height)
"""
if len(args)==0:
l,b,w,h = args[0]
else:
l,b,w,h = args
self._x = l
self._y = b
self._width = w
self._height = h
    def get_bbox(self):
        # Return the *unmutated* rectangle bounds as a Bbox (the mutated
        # outline is available via get_path()).
        return transforms.Bbox.from_bounds(self._x, self._y, self._width, self._height)
from matplotlib.bezier import split_bezier_intersecting_with_closedpath
from matplotlib.bezier import get_intersection, inside_circle, get_parallels
from matplotlib.bezier import make_wedged_bezier2
from matplotlib.bezier import split_path_inout, get_cos_sin
class ConnectionStyle(_Style):
"""
:class:`ConnectionStyle` is a container class which defines
several connectionstyle classes, which is used to create a path
between two points. These are mainly used with
:class:`FancyArrowPatch`.
A connectionstyle object can be either created as::
ConnectionStyle.Arc3(rad=0.2)
or::
ConnectionStyle("Arc3", rad=0.2)
or::
ConnectionStyle("Arc3, rad=0.2")
The following classes are defined
%(AvailableConnectorstyles)s
An instance of any connection style class is an callable object,
whose call signature is::
__call__(self, posA, posB, patchA=None, patchB=None, shrinkA=2., shrinkB=2.)
and it returns a :class:`Path` instance. *posA* and *posB* are
tuples of x,y coordinates of the two points to be
connected. *patchA* (or *patchB*) is given, the returned path is
clipped so that it start (or end) from the boundary of the
patch. The path is further shrunk by *shrinkA* (or *shrinkB*)
which is given in points.
"""
_style_list = {}
    class _Base(object):
        """
        A base class for connectionstyle classes. The derived class needs
        to implement a *connect* method whose call signature is::

            connect(posA, posB)

        where posA and posB are tuples of x, y coordinates to be
        connected. The method needs to return a path connecting two
        points. This base class defines a __call__ method, and a few
        helper methods.
        """

        class SimpleEvent:
            # Minimal stand-in for a mouse event: Patch.contains()
            # expects an object with .x and .y attributes.
            def __init__(self, xy):
                self.x, self.y = xy

        def _clip(self, path, patchA, patchB):
            """
            Clip the path to the boundary of the patchA and patchB.
            The starting point of the path needed to be inside of the
            patchA and the end point inside the patch B. The *contains*
            methods of each patch object is utilized to test if the point
            is inside the path.
            """
            if patchA:
                def insideA(xy_display):
                    #xy_display = patchA.get_data_transform().transform_point(xy_data)
                    xy_event = ConnectionStyle._Base.SimpleEvent(xy_display)
                    return patchA.contains(xy_event)[0]

                try:
                    left, right = split_path_inout(path, insideA)
                except ValueError:
                    # Path never leaves patchA; keep it unchanged.
                    right = path

                path = right

            if patchB:
                def insideB(xy_display):
                    #xy_display = patchB.get_data_transform().transform_point(xy_data)
                    xy_event = ConnectionStyle._Base.SimpleEvent(xy_display)
                    return patchB.contains(xy_event)[0]

                try:
                    left, right = split_path_inout(path, insideB)
                except ValueError:
                    # Path never enters patchB; keep it unchanged.
                    left = path

                path = left

            return path

        def _shrink(self, path, shrinkA, shrinkB):
            """
            Shrink the path by fixed size (in points) with shrinkA and shrinkB
            """
            if shrinkA:
                # Cut away the part of the path within shrinkA of the start.
                x, y = path.vertices[0]
                insideA = inside_circle(x, y, shrinkA)

                left, right = split_path_inout(path, insideA)
                path = right

            if shrinkB:
                # Cut away the part of the path within shrinkB of the end.
                x, y = path.vertices[-1]
                insideB = inside_circle(x, y, shrinkB)

                left, right = split_path_inout(path, insideB)
                path = left

            return path

        def __call__(self, posA, posB,
                     shrinkA=2., shrinkB=2., patchA=None, patchB=None):
            """
            Calls the *connect* method to create a path between *posA*
            and *posB*. The path is then clipped and shrunken.
            """
            path = self.connect(posA, posB)

            clipped_path = self._clip(path, patchA, patchB)
            shrinked_path = self._shrink(clipped_path, shrinkA, shrinkB)

            return shrinked_path
    class Arc3(_Base):
        """
        Creates a simple quadratic bezier curve between two
        points. The curve is created so that the middle control point
        (C1) is located at the same distance from the start (C0) and
        end points (C2) and the distance of the C1 to the line
        connecting C0-C2 is *rad* times the distance of C0-C2.
        """

        def __init__(self, rad=0.):
            """
            *rad*
              curvature of the curve.
            """
            self.rad = rad

        def connect(self, posA, posB):
            x1, y1 = posA
            x2, y2 = posB
            # Midpoint of the chord C0-C2.
            x12, y12 = (x1 + x2)/2., (y1 + y2)/2.
            dx, dy = x2 - x1, y2 - y1

            f = self.rad

            # Offset the midpoint perpendicular to the chord by
            # rad * |C0-C2| to obtain the bezier control point.
            cx, cy = x12 + f*dy, y12 - f*dx

            vertices = [(x1, y1),
                        (cx, cy),
                        (x2, y2)]
            codes = [Path.MOVETO,
                     Path.CURVE3,
                     Path.CURVE3]

            return Path(vertices, codes)

    _style_list["arc3"] = Arc3
    class Angle3(_Base):
        """
        Creates a simple quadratic bezier curve between two
        points. The middle control point is placed at the
        intersecting point of two lines which cross the start (or
        end) point and have an angle of angleA (or angleB).
        """

        def __init__(self, angleA=90, angleB=0):
            """
            *angleA*
              starting angle of the path

            *angleB*
              ending angle of the path
            """
            self.angleA = angleA
            self.angleB = angleB

        def connect(self, posA, posB):
            x1, y1 = posA
            x2, y2 = posB

            # Direction vectors of the two lines (degrees -> radians).
            # The trailing commas make the RHS a 2-tuple; harmless.
            cosA, sinA = math.cos(self.angleA/180.*math.pi),\
                         math.sin(self.angleA/180.*math.pi),
            cosB, sinB = math.cos(self.angleB/180.*math.pi),\
                         math.sin(self.angleB/180.*math.pi),

            # The control point is the intersection of the two lines.
            cx, cy = get_intersection(x1, y1, cosA, sinA,
                                      x2, y2, cosB, sinB)

            vertices = [(x1, y1), (cx, cy), (x2, y2)]
            codes = [Path.MOVETO, Path.CURVE3, Path.CURVE3]

            return Path(vertices, codes)

    _style_list["angle3"] = Angle3
    class Angle(_Base):
        """
        Creates a piecewise continuous quadratic bezier path between
        two points. The path has one passing-through point placed at
        the intersecting point of two lines which cross the start
        (or end) point and have an angle of angleA (or angleB). The
        connecting edges are rounded with *rad*.
        """

        def __init__(self, angleA=90, angleB=0, rad=0.):
            """
            *angleA*
              starting angle of the path

            *angleB*
              ending angle of the path

            *rad*
              rounding radius of the edge
            """
            self.angleA = angleA
            self.angleB = angleB

            self.rad = rad

        def connect(self, posA, posB):
            x1, y1 = posA
            x2, y2 = posB

            cosA, sinA = math.cos(self.angleA/180.*math.pi),\
                         math.sin(self.angleA/180.*math.pi),
            # NOTE(review): angleB uses -sin here, unlike Angle3 above --
            # presumably so the end direction points back along the path;
            # confirm against matplotlib history before changing.
            cosB, sinB = math.cos(self.angleB/180.*math.pi),\
                         -math.sin(self.angleB/180.*math.pi),

            # Corner point shared by the two straight arms.
            cx, cy = get_intersection(x1, y1, cosA, sinA,
                                      x2, y2, cosB, sinB)

            vertices = [(x1, y1)]
            codes = [Path.MOVETO]

            if self.rad == 0.:
                # Sharp corner: a straight segment to the corner point.
                vertices.append((cx, cy))
                codes.append(Path.LINETO)
            else:
                # Rounded corner: stop *rad* short of the corner on each
                # arm and join with a quadratic bezier through the corner.
                vertices.extend([(cx - self.rad * cosA, cy - self.rad * sinA),
                                 (cx, cy),
                                 (cx + self.rad * cosB, cy + self.rad * sinB)])
                codes.extend([Path.LINETO, Path.CURVE3, Path.CURVE3])

            vertices.append((x2, y2))
            codes.append(Path.LINETO)

            return Path(vertices, codes)

    _style_list["angle"] = Angle
    class Arc(_Base):
        """
        Creates a piecewise continuous quadratic bezier path between
        two points. The path can have two passing-through points, a
        point placed at the distance of armA and angle of angleA from
        point A, another point with respect to point B. The edges are
        rounded with *rad*.
        """

        def __init__(self, angleA=0, angleB=0, armA=None, armB=None, rad=0.):
            """
            *angleA* :
              starting angle of the path

            *angleB* :
              ending angle of the path

            *armA* :
              length of the starting arm

            *armB* :
              length of the ending arm

            *rad* :
              rounding radius of the edges
            """
            self.angleA = angleA
            self.angleB = angleB
            self.armA = armA
            self.armB = armB

            self.rad = rad

        def connect(self, posA, posB):
            x1, y1 = posA
            x2, y2 = posB

            vertices = [(x1, y1)]
            # Corner points accumulated but not yet emitted into *vertices*.
            rounded = []
            codes = [Path.MOVETO]

            if self.armA:
                cosA = math.cos(self.angleA/180.*math.pi)
                sinA = math.sin(self.angleA/180.*math.pi)
                #x_armA, y_armB
                # Point *rad* short of the end of arm A (corner start).
                d = self.armA - self.rad
                rounded.append((x1 + d*cosA, y1 + d*sinA))
                # The end of arm A itself (the corner point).
                d = self.armA
                rounded.append((x1 + d*cosA, y1 + d*sinA))

            if self.armB:
                cosB = math.cos(self.angleB/180.*math.pi)
                sinB = math.sin(self.angleB/180.*math.pi)
                x_armB, y_armB = x2 + self.armB*cosB, y2 + self.armB*sinB

                if rounded:
                    # Join arm A's corner toward arm B with a rounded bezier.
                    xp, yp = rounded[-1]
                    dx, dy = x_armB - xp, y_armB - yp
                    dd = (dx*dx + dy*dy)**.5

                    rounded.append((xp + self.rad*dx/dd, yp + self.rad*dy/dd))
                    vertices.extend(rounded)
                    codes.extend([Path.LINETO,
                                  Path.CURVE3,
                                  Path.CURVE3])
                else:
                    # No arm A: start the corner *rad* short of arm B's end.
                    xp, yp = vertices[-1]
                    dx, dy = x_armB - xp, y_armB - yp
                    dd = (dx*dx + dy*dy)**.5
                    d = dd - self.rad
                    rounded = [(xp + d*dx/dd, yp + d*dy/dd),
                               (x_armB, y_armB)]

            # NOTE(review): when both armA and armB are set, *rounded* is
            # not reset after being flushed above, so this block re-emits
            # those points into *vertices* -- looks suspicious; confirm
            # against matplotlib history before changing.
            if rounded:
                xp, yp = rounded[-1]
                dx, dy = x2 - xp, y2 - yp
                dd = (dx*dx + dy*dy)**.5

                rounded.append((xp + self.rad*dx/dd, yp + self.rad*dy/dd))
                vertices.extend(rounded)
                codes.extend([Path.LINETO,
                              Path.CURVE3,
                              Path.CURVE3])

            vertices.append((x2, y2))
            codes.append(Path.LINETO)

            return Path(vertices, codes)

    _style_list["arc"] = Arc
__doc__ = cbook.dedent(__doc__) % \
{"AvailableConnectorstyles": _pprint_styles(_style_list)}
class ArrowStyle(_Style):
"""
:class:`ArrowStyle` is a container class which defines several
arrowstyle classes, which is used to create an arrow path along a
given path. These are mainly used with :class:`FancyArrowPatch`.
A arrowstyle object can be either created as::
ArrowStyle.Fancy(head_length=.4, head_width=.4, tail_width=.4)
or::
ArrowStyle("Fancy", head_length=.4, head_width=.4, tail_width=.4)
or::
ArrowStyle("Fancy, head_length=.4, head_width=.4, tail_width=.4")
The following classes are defined
%(AvailableArrowstyles)s
An instance of any arrow style class is an callable object,
whose call signature is::
__call__(self, path, mutation_size, linewidth, aspect_ratio=1.)
and it returns a tuple of a :class:`Path` instance and a boolean
value. *path* is a :class:`Path` instance along witch the arrow
will be drawn. *mutation_size* and *aspect_ratio* has a same
meaning as in :class:`BoxStyle`. *linewidth* is a line width to be
stroked. This is meant to be used to correct the location of the
head so that it does not overshoot the destination point, but not all
classes support it.
.. plot:: mpl_examples/pylab_examples/fancyarrow_demo.py
"""
_style_list = {}
class _Base(object):
"""
Arrow Transmuter Base class
ArrowTransmuterBase and its derivatives are used to make a fancy
arrow around a given path. The __call__ method returns a path
(which will be used to create a PathPatch instance) and a boolean
value indicating the path is open therefore is not fillable. This
class is not an artist and actual drawing of the fancy arrow is
done by the FancyArrowPatch class.
"""
# The derived classes are required to be able to be initialized
# w/o arguments, i.e., all its argument (except self) must have
# the default values.
def __init__(self):
super(ArrowStyle._Base, self).__init__()
@staticmethod
def ensure_quadratic_bezier(path):
""" Some ArrowStyle class only wokrs with a simple
quaratic bezier curve (created with Arc3Connetion or
Angle3Connector). This static method is to check if the
provided path is a simple quadratic bezier curve and returns
its control points if true.
"""
segments = list(path.iter_segments())
assert len(segments) == 2
assert segments[0][1] == Path.MOVETO
assert segments[1][1] == Path.CURVE3
return list(segments[0][0]) + list(segments[1][0])
def transmute(self, path, mutation_size, linewidth):
"""
The transmute method is a very core of the ArrowStyle
class and must be overriden in the subclasses. It receives the
path object along which the arrow will be drawn, and the
mutation_size, with which the amount arrow head and etc. will
be scaled. It returns a Path instance. The linewidth may be
used to adjust the the path so that it does not pass beyond
the given points.
"""
raise NotImplementedError('Derived must override')
def __call__(self, path, mutation_size, linewidth,
aspect_ratio=1.):
"""
The __call__ method is a thin wrapper around the transmute method
and take care of the aspect ratio.
"""
if aspect_ratio is not None:
# Squeeze the given height by the aspect_ratio
vertices, codes = path.vertices[:], path.codes[:]
# Squeeze the height
vertices[:,1] = vertices[:,1] / aspect_ratio
path_shrinked = Path(vertices, codes)
# call transmute method with squeezed height.
path_mutated, closed = self.transmute(path_shrinked, linewidth,
mutation_size)
vertices, codes = path_mutated.vertices, path_mutated.codes
# Restore the height
vertices[:,1] = vertices[:,1] * aspect_ratio
return Path(vertices, codes), closed
else:
return self.transmute(path, mutation_size, linewidth)
class _Curve(_Base):
"""
A simple arrow which will work with any path instance. The
returned path is simply concatenation of the original path + at
most two paths representing the arrow at the begin point and the
at the end point. The returned path is not closed and only meant
to be stroked.
"""
def __init__(self, beginarrow=None, endarrow=None,
head_length=.2, head_width=.1):
"""
The arrows are drawn if *beginarrow* and/or *endarrow* are
true. *head_length* and *head_width* determines the size of
the arrow relative to the *mutation scale*.
"""
self.beginarrow, self.endarrow = beginarrow, endarrow
self.head_length, self.head_width = \
head_length, head_width
super(ArrowStyle._Curve, self).__init__()
def _get_pad_projected(self, x0, y0, x1, y1, linewidth):
# when no arrow head is drawn
dx, dy = x0 - x1, y0 - y1
cp_distance = math.sqrt(dx**2 + dy**2)
# padx_projected, pady_projected : amount of pad to account
# projection of the wedge
padx_projected = (.5*linewidth)
pady_projected = (.5*linewidth)
# apply pad for projected edge
ddx = padx_projected * dx / cp_distance
ddy = pady_projected * dy / cp_distance
return ddx, ddy
def _get_arrow_wedge(self, x0, y0, x1, y1,
head_dist, cos_t, sin_t, linewidth
):
"""
Return the paths for arrow heads. Since arrow lines are
drawn with capstyle=projected, The arrow is goes beyond the
desired point. This method also returns the amount of the path
to be shrinked so that it does not overshoot.
"""
# arrow from x0, y0 to x1, y1
dx, dy = x0 - x1, y0 - y1
cp_distance = math.sqrt(dx**2 + dy**2)
# padx_projected, pady_projected : amount of pad for account
# the overshooting of the projection of the wedge
padx_projected = (.5*linewidth / cos_t)
pady_projected = (.5*linewidth / sin_t)
# apply pad for projected edge
ddx = padx_projected * dx / cp_distance
ddy = pady_projected * dy / cp_distance
# offset for arrow wedge
dx, dy = dx / cp_distance * head_dist, dy / cp_distance * head_dist
dx1, dy1 = cos_t * dx + sin_t * dy, -sin_t * dx + cos_t * dy
dx2, dy2 = cos_t * dx - sin_t * dy, sin_t * dx + cos_t * dy
vertices_arrow = [(x1+ddx+dx1, y1+ddy+dy1),
(x1+ddx, y1++ddy),
(x1+ddx+dx2, y1+ddy+dy2)]
codes_arrow = [Path.MOVETO,
Path.LINETO,
Path.LINETO]
return vertices_arrow, codes_arrow, ddx, ddy
def transmute(self, path, mutation_size, linewidth):
head_length, head_width = self.head_length * mutation_size, \
self.head_width * mutation_size
head_dist = math.sqrt(head_length**2 + head_width**2)
cos_t, sin_t = head_length / head_dist, head_width / head_dist
# begin arrow
x0, y0 = path.vertices[0]
x1, y1 = path.vertices[1]
if self.beginarrow:
verticesA, codesA, ddxA, ddyA = \
self._get_arrow_wedge(x1, y1, x0, y0,
head_dist, cos_t, sin_t,
linewidth)
else:
verticesA, codesA = [], []
#ddxA, ddyA = self._get_pad_projected(x1, y1, x0, y0, linewidth)
ddxA, ddyA = 0., 0., #self._get_pad_projected(x1, y1, x0, y0, linewidth)
# end arrow
x2, y2 = path.vertices[-2]
x3, y3 = path.vertices[-1]
if self.endarrow:
verticesB, codesB, ddxB, ddyB = \
self._get_arrow_wedge(x2, y2, x3, y3,
head_dist, cos_t, sin_t,
linewidth)
else:
verticesB, codesB = [], []
ddxB, ddyB = 0., 0. #self._get_pad_projected(x2, y2, x3, y3, linewidth)
# this simple code will not work if ddx, ddy is greater than
# separation bettern vertices.
vertices = np.concatenate([verticesA + [(x0+ddxA, y0+ddyA)],
path.vertices[1:-1],
[(x3+ddxB, y3+ddyB)] + verticesB])
codes = np.concatenate([codesA,
path.codes,
codesB])
p = Path(vertices, codes)
return p, False
class Curve(_Curve):
"""
A simple curve without any arrow head.
"""
def __init__(self):
super(ArrowStyle.Curve, self).__init__( \
beginarrow=False, endarrow=False)
_style_list["-"] = Curve
class CurveA(_Curve):
"""
An arrow with a head at its begin point.
"""
def __init__(self, head_length=.4, head_width=.2):
"""
*head_length*
length of the arrow head
*head_width*
width of the arrow head
"""
super(ArrowStyle.CurveA, self).__init__( \
beginarrow=True, endarrow=False,
head_length=head_length, head_width=head_width )
_style_list["<-"] = CurveA
class CurveB(_Curve):
"""
An arrow with a head at its end point.
"""
def __init__(self, head_length=.4, head_width=.2):
"""
*head_length*
length of the arrow head
*head_width*
width of the arrow head
"""
super(ArrowStyle.CurveB, self).__init__( \
beginarrow=False, endarrow=True,
head_length=head_length, head_width=head_width )
#_style_list["->"] = CurveB
_style_list["->"] = CurveB
class CurveAB(_Curve):
"""
An arrow with heads both at the begin and the end point.
"""
def __init__(self, head_length=.4, head_width=.2):
"""
*head_length*
length of the arrow head
*head_width*
width of the arrow head
"""
super(ArrowStyle.CurveAB, self).__init__( \
beginarrow=True, endarrow=True,
head_length=head_length, head_width=head_width )
#_style_list["<->"] = CurveAB
_style_list["<->"] = CurveAB
class _Bracket(_Base):
def __init__(self, bracketA=None, bracketB=None,
widthA=1., widthB=1.,
lengthA=0.2, lengthB=0.2,
angleA=None, angleB=None,
scaleA=None, scaleB=None
):
self.bracketA, self.bracketB = bracketA, bracketB
self.widthA, self.widthB = widthA, widthB
self.lengthA, self.lengthB = lengthA, lengthB
self.angleA, self.angleB = angleA, angleB
self.scaleA, self.scaleB= scaleA, scaleB
def _get_bracket(self, x0, y0,
cos_t, sin_t, width, length,
):
# arrow from x0, y0 to x1, y1
from matplotlib.bezier import get_normal_points
x1, y1, x2, y2 = get_normal_points(x0, y0, cos_t, sin_t, width)
dx, dy = length * cos_t, length * sin_t
vertices_arrow = [(x1+dx, y1+dy),
(x1, y1),
(x2, y2),
(x2+dx, y2+dy)]
codes_arrow = [Path.MOVETO,
Path.LINETO,
Path.LINETO,
Path.LINETO]
return vertices_arrow, codes_arrow
def transmute(self, path, mutation_size, linewidth):
if self.scaleA is None:
scaleA = mutation_size
else:
scaleA = self.scaleA
if self.scaleB is None:
scaleB = mutation_size
else:
scaleB = self.scaleB
vertices_list, codes_list = [], []
if self.bracketA:
x0, y0 = path.vertices[0]
x1, y1 = path.vertices[1]
cos_t, sin_t = get_cos_sin(x1, y1, x0, y0)
verticesA, codesA = self._get_bracket(x0, y0, cos_t, sin_t,
self.widthA*scaleA,
self.legnthA*scaleA)
vertices_list.append(verticesA)
codes_list.append(codesA)
vertices_list.append(path.vertices)
codes_list.append(path.codes)
if self.bracketB:
x0, y0 = path.vertices[-1]
x1, y1 = path.vertices[-2]
cos_t, sin_t = get_cos_sin(x1, y1, x0, y0)
verticesB, codesB = self._get_bracket(x0, y0, cos_t, sin_t,
self.widthB*scaleB,
self.lengthB*scaleB)
vertices_list.append(verticesB)
codes_list.append(codesB)
vertices = np.concatenate(vertices_list)
codes = np.concatenate(codes_list)
p = Path(vertices, codes)
return p, False
class BracketB(_Bracket):
"""
An arrow with a bracket([) at its end.
"""
def __init__(self, widthB=1., lengthB=0.2, angleB=None):
"""
*widthB*
width of the bracket
*lengthB*
length of the bracket
*angleB*
angle between the bracket and the line
"""
super(ArrowStyle.BracketB, self).__init__(None, True,
widthB=widthB, lengthB=lengthB, angleB=None )
#_style_list["-["] = BracketB
_style_list["-["] = BracketB
    class Simple(_Base):
        """
        A simple arrow. Only works with a quadratic bezier curve.
        """

        def __init__(self, head_length=.5, head_width=.5, tail_width=.2):
            """
            *head_length*
              length of the arrow head

            *head_width*
              width of the arrow head

            *tail_width*
              width of the arrow tail
            """

            self.head_length, self.head_width, self.tail_width = \
                head_length, head_width, tail_width
            super(ArrowStyle.Simple, self).__init__()

        def transmute(self, path, mutation_size, linewidth):

            x0, y0, x1, y1, x2, y2 = self.ensure_quadratic_bezier(path)

            # divide the path into a head and a tail
            head_length = self.head_length * mutation_size
            in_f = inside_circle(x2, y2, head_length)
            arrow_path = [(x0, y0), (x1, y1), (x2, y2)]

            # Split the bezier where it enters the head circle.
            # (``tolerence`` is the actual keyword spelling in
            # matplotlib.bezier of this era.)
            arrow_out, arrow_in = \
                       split_bezier_intersecting_with_closedpath(arrow_path,
                                                                 in_f,
                                                                 tolerence=0.01)

            # head
            head_width = self.head_width * mutation_size
            head_l, head_r = make_wedged_bezier2(arrow_in, head_width/2.,
                                                 wm=.5)

            # tail
            tail_width = self.tail_width * mutation_size
            tail_left, tail_right = get_parallels(arrow_out, tail_width/2.)

            # Outline: right tail edge -> right head edge -> left head
            # edge -> left tail edge, closed at the tail start.
            head_right, head_left = head_r, head_l
            patch_path = [(Path.MOVETO, tail_right[0]),
                          (Path.CURVE3, tail_right[1]),
                          (Path.CURVE3, tail_right[2]),
                          (Path.LINETO, head_right[0]),
                          (Path.CURVE3, head_right[1]),
                          (Path.CURVE3, head_right[2]),
                          (Path.CURVE3, head_left[1]),
                          (Path.CURVE3, head_left[0]),
                          (Path.LINETO, tail_left[2]),
                          (Path.CURVE3, tail_left[1]),
                          (Path.CURVE3, tail_left[0]),
                          (Path.LINETO, tail_right[0]),
                          (Path.CLOSEPOLY, tail_right[0]),
                          ]

            path = Path([p for c, p in patch_path], [c for c, p in patch_path])

            return path, True

    _style_list["simple"] = Simple
class Fancy(_Base):
"""
A fancy arrow. Only works with a quadratic bezier curve.
"""
def __init__(self, head_length=.4, head_width=.4, tail_width=.4):
"""
*head_length*
length of the arrow head
*head_with*
width of the arrow head
*tail_width*
width of the arrow tail
"""
self.head_length, self.head_width, self.tail_width = \
head_length, head_width, tail_width
super(ArrowStyle.Fancy, self).__init__()
def transmute(self, path, mutation_size, linewidth):
x0, y0, x1, y1, x2, y2 = self.ensure_quadratic_bezier(path)
# divide the path into a head and a tail
head_length = self.head_length * mutation_size
arrow_path = [(x0, y0), (x1, y1), (x2, y2)]
# path for head
in_f = inside_circle(x2, y2, head_length)
path_out, path_in = \
split_bezier_intersecting_with_closedpath(arrow_path,
in_f,
tolerence=0.01)
path_head = path_in
# path for head
in_f = inside_circle(x2, y2, head_length*.8)
path_out, path_in = \
split_bezier_intersecting_with_closedpath(arrow_path,
in_f,
tolerence=0.01)
path_tail = path_out
# head
head_width = self.head_width * mutation_size
head_l, head_r = make_wedged_bezier2(path_head, head_width/2.,
wm=.6)
# tail
tail_width = self.tail_width * mutation_size
tail_left, tail_right = make_wedged_bezier2(path_tail,
tail_width*.5,
w1=1., wm=0.6, w2=0.3)
# path for head
in_f = inside_circle(x0, y0, tail_width*.3)
path_in, path_out = \
split_bezier_intersecting_with_closedpath(arrow_path,
in_f,
tolerence=0.01)
tail_start = path_in[-1]
head_right, head_left = head_r, head_l
patch_path = [(Path.MOVETO, tail_start),
(Path.LINETO, tail_right[0]),
(Path.CURVE3, tail_right[1]),
(Path.CURVE3, tail_right[2]),
(Path.LINETO, head_right[0]),
(Path.CURVE3, head_right[1]),
(Path.CURVE3, head_right[2]),
(Path.CURVE3, head_left[1]),
(Path.CURVE3, head_left[0]),
(Path.LINETO, tail_left[2]),
(Path.CURVE3, tail_left[1]),
(Path.CURVE3, tail_left[0]),
(Path.LINETO, tail_start),
(Path.CLOSEPOLY, tail_start),
]
patch_path2 = [(Path.MOVETO, tail_right[0]),
(Path.CURVE3, tail_right[1]),
(Path.CURVE3, tail_right[2]),
(Path.LINETO, head_right[0]),
(Path.CURVE3, head_right[1]),
(Path.CURVE3, head_right[2]),
(Path.CURVE3, head_left[1]),
(Path.CURVE3, head_left[0]),
(Path.LINETO, tail_left[2]),
(Path.CURVE3, tail_left[1]),
(Path.CURVE3, tail_left[0]),
(Path.CURVE3, tail_start),
(Path.CURVE3, tail_right[0]),
(Path.CLOSEPOLY, tail_right[0]),
]
path = Path([p for c, p in patch_path], [c for c, p in patch_path])
return path, True
_style_list["fancy"] = Fancy
    class Wedge(_Base):
        """
        Wedge(?) shape. Only works with a quadratic bezier curve. The
        begin point has a width of the tail_width and the end point has a
        width of 0. At the middle, the width is shrink_factor*tail_width.
        """

        def __init__(self, tail_width=.3, shrink_factor=0.5):
            """
            *tail_width*
              width of the tail

            *shrink_factor*
              fraction of the arrow width at the middle point
            """

            self.tail_width = tail_width
            self.shrink_factor = shrink_factor
            super(ArrowStyle.Wedge, self).__init__()

        def transmute(self, path, mutation_size, linewidth):

            x0, y0, x1, y1, x2, y2 = self.ensure_quadratic_bezier(path)

            arrow_path = [(x0, y0), (x1, y1), (x2, y2)]
            # Offset the bezier to both sides with a width tapering from
            # tail_width at the start to zero at the tip.
            b_plus, b_minus = make_wedged_bezier2(arrow_path,
                                                  self.tail_width * mutation_size / 2.,
                                                  wm=self.shrink_factor)

            # Closed outline: along one offset curve, straight across the
            # tip, and back along the other.
            patch_path = [(Path.MOVETO, b_plus[0]),
                          (Path.CURVE3, b_plus[1]),
                          (Path.CURVE3, b_plus[2]),
                          (Path.LINETO, b_minus[2]),
                          (Path.CURVE3, b_minus[1]),
                          (Path.CURVE3, b_minus[0]),
                          (Path.CLOSEPOLY, b_minus[0]),
                          ]

            path = Path([p for c, p in patch_path], [c for c, p in patch_path])

            return path, True

    _style_list["wedge"] = Wedge
__doc__ = cbook.dedent(__doc__) % \
{"AvailableArrowstyles": _pprint_styles(_style_list)}
class FancyArrowPatch(Patch):
"""
A fancy arrow patch. It draws an arrow using the :class:ArrowStyle.
"""
    def __str__(self):
        # NOTE(review): self._q_bezier is never assigned anywhere in this
        # class, so str() on a FancyArrowPatch will raise AttributeError;
        # confirm and fix upstream.
        return self.__class__.__name__ \
               + "FancyArrowPatch(%g,%g,%g,%g,%g,%g)" % tuple(self._q_bezier)
def __init__(self, posA=None, posB=None,
path=None,
arrowstyle="simple",
arrow_transmuter=None,
connectionstyle="arc3",
connector=None,
patchA=None,
patchB=None,
shrinkA=2.,
shrinkB=2.,
mutation_scale=1.,
mutation_aspect=None,
**kwargs):
"""
If *posA* and *posB* is given, a path connecting two point are
created according to the connectionstyle. The path will be
clipped with *patchA* and *patchB* and further shirnked by
*shrinkA* and *shrinkB*. An arrow is drawn along this
resulting path using the *arrowstyle* parameter. If *path*
provided, an arrow is drawn along this path and *patchA*,
*patchB*, *shrinkA*, and *shrinkB* are ignored.
The *connectionstyle* describes how *posA* and *posB* are
connected. It can be an instance of the ConnectionStyle class
(matplotlib.patches.ConnectionStlye) or a string of the
connectionstyle name, with optional comma-separated
attributes. The following connection styles are available.
%(AvailableConnectorstyles)s
The *arrowstyle* describes how the fancy arrow will be
drawn. It can be string of the available arrowstyle names,
with optional comma-separated attributes, or one of the
ArrowStyle instance. The optional attributes are meant to be
scaled with the *mutation_scale*. The following arrow styles are
available.
%(AvailableArrowstyles)s
*mutation_scale* : a value with which attributes of arrowstyle
(e.g., head_length) will be scaled. default=1.
*mutation_aspect* : The height of the rectangle will be
squeezed by this value before the mutation and the mutated
box will be stretched by the inverse of it. default=None.
Valid kwargs are:
%(Patch)s
"""
if posA is not None and posB is not None and path is None:
self._posA_posB = [posA, posB]
if connectionstyle is None:
connectionstyle = "arc3"
self.set_connectionstyle(connectionstyle)
elif posA is None and posB is None and path is not None:
self._posA_posB = None
self._connetors = None
else:
raise ValueError("either posA and posB, or path need to provided")
self.patchA = patchA
self.patchB = patchB
self.shrinkA = shrinkA
self.shrinkB = shrinkB
Patch.__init__(self, **kwargs)
self._path_original = path
self.set_arrowstyle(arrowstyle)
self._mutation_scale=mutation_scale
self._mutation_aspect=mutation_aspect
#self._draw_in_display_coordinate = True
kwdoc = dict()
kwdoc["AvailableArrowstyles"]=_pprint_styles(ArrowStyle._style_list)
kwdoc["AvailableConnectorstyles"]=_pprint_styles(ConnectionStyle._style_list)
kwdoc.update(artist.kwdocd)
__init__.__doc__ = cbook.dedent(__init__.__doc__) % kwdoc
del kwdoc
def set_positions(self, posA, posB):
""" set the begin end end positions of the connecting
path. Use current vlaue if None.
"""
if posA is not None: self._posA_posB[0] = posA
if posB is not None: self._posA_posB[1] = posB
def set_patchA(self, patchA):
""" set the begin patch.
"""
self.patchA = patchA
def set_patchB(self, patchB):
""" set the begin patch
"""
self.patchB = patchB
def set_connectionstyle(self, connectionstyle, **kw):
"""
Set the connection style.
*connectionstyle* can be a string with connectionstyle name with optional
comma-separated attributes. Alternatively, the attrs can
be probided as keywords.
set_connectionstyle("arc,angleA=0,armA=30,rad=10")
set_connectionstyle("arc", angleA=0,armA=30,rad=10)
Old attrs simply are forgotten.
Without argument (or with connectionstyle=None), return
available styles as a list of strings.
"""
if connectionstyle==None:
return ConnectionStyle.pprint_styles()
if isinstance(connectionstyle, ConnectionStyle._Base):
self._connector = connectionstyle
elif callable(connectionstyle):
# we may need check the calling convention of the given function
self._connector = connectionstyle
else:
self._connector = ConnectionStyle(connectionstyle, **kw)
def get_connectionstyle(self):
"""
Return the ConnectionStyle instance
"""
return self._connector
def set_arrowstyle(self, arrowstyle=None, **kw):
"""
Set the arrow style.
*arrowstyle* can be a string with arrowstyle name with optional
comma-separated attributes. Alternatively, the attrs can
be provided as keywords.
set_arrowstyle("Fancy,head_length=0.2")
set_arrowstyle("fancy", head_length=0.2)
Old attrs simply are forgotten.
Without argument (or with arrowstyle=None), return
available box styles as a list of strings.
"""
if arrowstyle==None:
return ArrowStyle.pprint_styles()
if isinstance(arrowstyle, ConnectionStyle._Base):
self._arrow_transmuter = arrowstyle
else:
self._arrow_transmuter = ArrowStyle(arrowstyle, **kw)
def get_arrowstyle(self):
"""
Return the arrowstyle object
"""
return self._arrow_transmuter
def set_mutation_scale(self, scale):
"""
Set the mutation scale.
ACCEPTS: float
"""
self._mutation_scale=scale
def get_mutation_scale(self):
"""
Return the mutation scale.
"""
return self._mutation_scale
def set_mutation_aspect(self, aspect):
"""
Set the aspect ratio of the bbox mutation.
ACCEPTS: float
"""
self._mutation_aspect=aspect
def get_mutation_aspect(self):
"""
Return the aspect ratio of the bbox mutation.
"""
return self._mutation_aspect
    def get_path(self):
        """
        Return the path of the arrow in the data coordinates. Use the
        get_path_in_displaycoord() method to retrieve the arrow path
        in the display coordinates.
        """
        # The arrow is constructed in display space; map it back through
        # the inverse of the patch transform.
        _path = self.get_path_in_displaycoord()
        return self.get_transform().inverted().transform_path(_path)
    def get_path_in_displaycoord(self):
        """
        Return the mutated path of the arrow in the display coord
        """
        if self._posA_posB is not None:
            # Endpoint mode: build the connecting path in display space
            # with the connectionstyle (clipped by patchA/patchB and
            # shrunk by shrinkA/shrinkB).
            posA = self.get_transform().transform_point(self._posA_posB[0])
            posB = self.get_transform().transform_point(self._posA_posB[1])
            _path = self.get_connectionstyle()(posA, posB,
                                               patchA=self.patchA,
                                               patchB=self.patchB,
                                               shrinkA=self.shrinkA,
                                               shrinkB=self.shrinkB
                                               )
        else:
            # Explicit-path mode: just transform the given path.
            _path = self.get_transform().transform_path(self._path_original)

        # Mutate the path with the arrow style; *closed* is False for
        # open (stroke-only) arrow paths.
        _path, closed = self.get_arrowstyle()(_path,
                                              self.get_mutation_scale(),
                                              self.get_linewidth(),
                                              self.get_mutation_aspect()
                                              )

        if not closed:
            # An open path must not be filled.
            self.fill = False

        return _path
    def draw(self, renderer):
        # Render the arrow.  The path is computed in display coordinates
        # (see get_path_in_displaycoord), so an identity transform is
        # handed to the renderer.
        if not self.get_visible(): return
        #renderer.open_group('patch')
        gc = renderer.new_gc()

        # get_path_in_displaycoord may clear self.fill for open paths;
        # remember the caller-visible state to restore it afterwards.
        fill_orig = self.fill

        path = self.get_path_in_displaycoord()
        affine = transforms.IdentityTransform()

        if cbook.is_string_like(self._edgecolor) and self._edgecolor.lower()=='none':
            gc.set_linewidth(0)
        else:
            gc.set_foreground(self._edgecolor)
            gc.set_linewidth(self._linewidth)
            gc.set_linestyle(self._linestyle)

        gc.set_antialiased(self._antialiased)
        self._set_gc_clip(gc)
        gc.set_capstyle('round')

        # Determine the face color; None means "stroke only".
        if (not self.fill or self._facecolor is None or
            (cbook.is_string_like(self._facecolor) and self._facecolor.lower()=='none')):
            rgbFace = None
            gc.set_alpha(1.0)
        else:
            r, g, b, a = colors.colorConverter.to_rgba(self._facecolor, self._alpha)
            rgbFace = (r, g, b)
            gc.set_alpha(a)

        if self._hatch:
            gc.set_hatch(self._hatch )

        renderer.draw_path(gc, path, affine, rgbFace)

        self.fill = fill_orig

        #renderer.close_group('patch')
| gpl-3.0 |
larsoner/mne-python | examples/visualization/plot_3d_to_2d.py | 15 | 4941 | """
.. _ex-electrode-pos-2d:
====================================================
How to convert 3D electrode positions to a 2D image.
====================================================
Sometimes we want to convert a 3D representation of electrodes into a 2D
image. For example, if we are using electrocorticography it is common to
create scatterplots on top of a brain, with each point representing an
electrode.
In this example, we'll show two ways of doing this in MNE-Python. First,
if we have the 3D locations of each electrode then we can use Mayavi to
take a snapshot of a view of the brain. If we do not have these 3D locations,
and only have a 2D image of the electrodes on the brain, we can use the
:class:`mne.viz.ClickableImage` class to choose our own electrode positions
on the image.
"""
# Authors: Christopher Holdgraf <choldgraf@berkeley.edu>
#
# License: BSD (3-clause)

from scipy.io import loadmat
import numpy as np
from matplotlib import pyplot as plt
from os import path as op

import mne
from mne.viz import ClickableImage  # noqa
from mne.viz import (plot_alignment, snapshot_brain_montage,
                     set_3d_view)

print(__doc__)

# Paths to the sample subject MRIs and the miscellaneous ECoG dataset.
subjects_dir = mne.datasets.sample.data_path() + '/subjects'
path_data = mne.datasets.misc.data_path() + '/ecog/sample_ecog.mat'

# We've already clicked and exported
layout_path = op.join(op.dirname(mne.__file__), 'data', 'image')
layout_name = 'custom_layout.lout'

###############################################################################
# Load data
# ---------
#
# First we will load a sample ECoG dataset which we'll use for generating
# a 2D snapshot.

mat = loadmat(path_data)
ch_names = mat['ch_names'].tolist()
elec = mat['elec']  # electrode coordinates in meters

# Now we make a montage stating that the sEEG contacts are in head
# coordinate system (although they are in MRI). This is compensated
# by the fact that below we do not specify a trans file so the Head<->MRI
# transform is the identity.
montage = mne.channels.make_dig_montage(ch_pos=dict(zip(ch_names, elec)),
                                        coord_frame='head')
info = mne.create_info(ch_names, 1000., 'ecog').set_montage(montage)
print('Created %s channel positions' % len(ch_names))

###############################################################################
# Project 3D electrodes to a 2D snapshot
# --------------------------------------
#
# Because we have the 3D location of each electrode, we can use the
# :func:`mne.viz.snapshot_brain_montage` function to return a 2D image along
# with the electrode positions on that image. We use this in conjunction with
# :func:`mne.viz.plot_alignment`, which visualizes electrode positions.

fig = plot_alignment(info, subject='sample', subjects_dir=subjects_dir,
                     surfaces=['pial'], meg=False)
set_3d_view(figure=fig, azimuth=200, elevation=70)
xy, im = snapshot_brain_montage(fig, montage)

# Convert from a dictionary to array to plot
xy_pts = np.vstack([xy[ch] for ch in info['ch_names']])

# Define an arbitrary "activity" pattern for viz
activity = np.linspace(100, 200, xy_pts.shape[0])

# This allows us to use matplotlib to create arbitrary 2d scatterplots
fig2, ax = plt.subplots(figsize=(10, 10))
ax.imshow(im)
ax.scatter(*xy_pts.T, c=activity, s=200, cmap='coolwarm')
ax.set_axis_off()
# fig2.savefig('./brain.png', bbox_inches='tight')  # For ClickableImage

###############################################################################
# Manually creating 2D electrode positions
# ----------------------------------------
#
# If we don't have the 3D electrode positions then we can still create a
# 2D representation of the electrodes. Assuming that you can see the electrodes
# on the 2D image, we can use :class:`mne.viz.ClickableImage` to open the image
# interactively. You can click points on the image and the x/y coordinate will
# be stored.
#
# We'll open an image file, then use ClickableImage to
# return 2D locations of mouse clicks (or load a file already created).
# Then, we'll return these xy positions as a layout for use with plotting topo
# maps.

# This code opens the image so you can click on it. Commented out
# because we've stored the clicks as a layout file already.
# # The click coordinates are stored as a list of tuples
# im = plt.imread('./brain.png')
# click = ClickableImage(im)
# click.plot_clicks()
# # Generate a layout from our clicks and normalize by the image
# print('Generating and saving layout...')
# lt = click.to_layout()
# lt.save(op.join(layout_path, layout_name))  # To save if we want
# # We've already got the layout, load it

lt = mne.channels.read_layout(layout_name, path=layout_path, scale=False)
x = lt.pos[:, 0] * float(im.shape[1])
y = (1 - lt.pos[:, 1]) * float(im.shape[0])  # Flip the y-position
fig, ax = plt.subplots()
ax.imshow(im)
ax.scatter(x, y, s=120, color='r')
plt.autoscale(tight=True)
ax.set_axis_off()
plt.show()
| bsd-3-clause |
PmagPy/PmagPy | programs/conversion_scripts2/bgc_magic2.py | 3 | 8598 | #!/usr/bin/env python
from __future__ import division
from __future__ import print_function
from builtins import str
from past.utils import old_div
import pandas as pd
import sys
import os
import numpy as np
import pmagpy.pmag as pmag
def main(command_line=True, **kwargs):
    """
    NAME
        bgc_magic.py

    DESCRIPTION
        converts Berkeley Geochronology Center (BGC) format files to magic_measurements format files

    SYNTAX
        bgc_magic.py [command line options]

    OPTIONS
        -h: prints the help message and quits.
        -f FILE: specify input file, or
        -F FILE: specify output file, default is magic_measurements.txt
        -Fsa: specify er_samples format file for appending, default is new er_samples.txt (Not working yet)
        -loc LOCNAME : specify location/study name
        -site SITENAME : specify site name
        -A: don't average replicate measurements
        -mcd [SO-MAG,SO-SUN,SO-SIGHT...] supply how these samples were oriented
        -v NUM : specify the volume in cc of the sample, default 2.5^3cc. Will use vol in data file if volume!=0 in file.

    INPUT
        BGC paleomag format file
    """
    # initialize some stuff
    noave = 0
    volume = 0.025**3 #default volume is a 2.5cm cube
    #inst=""
    #samp_con,Z='1',""
    #missing=1
    #demag="N"
    er_location_name = "unknown"
    er_site_name = "unknown"
    #citation='This study'
    args = sys.argv
    meth_code = "LP-NO"
    #specnum=1
    version_num = pmag.get_version()
    mag_file = ""
    dir_path = '.'
    MagRecs = []       # accumulates magic_measurements rows
    SampOuts = []      # accumulates er_samples rows
    samp_file = 'er_samples.txt'
    meas_file = 'magic_measurements.txt'
    meth_code = ""
    #
    # get command line arguments
    #
    if command_line:
        if '-WD' in sys.argv:
            ind = sys.argv.index('-WD')
            dir_path = sys.argv[ind+1]
        if '-ID' in sys.argv:
            ind = sys.argv.index('-ID')
            input_dir_path = sys.argv[ind+1]
        else:
            input_dir_path = dir_path
        output_dir_path = dir_path
        if "-h" in args:
            print(main.__doc__)
            return False
        if '-F' in args:
            ind = args.index("-F")
            meas_file = args[ind+1]
        if '-Fsa' in args:
            ind = args.index("-Fsa")
            samp_file = args[ind+1]
            #try:
            #    open(samp_file,'r')
            #    ErSamps,file_type=pmag.magic_read(samp_file)
            #    print 'sample information will be appended to ', samp_file
            #except:
            #    print samp_file,' not found: sample information will be stored in new er_samples.txt file'
            #    samp_file = output_dir_path+'/er_samples.txt'
        if '-f' in args:
            ind = args.index("-f")
            mag_file = args[ind+1]
        if "-loc" in args:
            ind = args.index("-loc")
            er_location_name = args[ind+1]
        if "-site" in args:
            ind = args.index("-site")
            er_site_name = args[ind+1]
        if "-A" in args:
            noave = 1
        if "-mcd" in args:
            ind = args.index("-mcd")
            meth_code = args[ind+1]
            #samp_con='5'
        if "-v" in args:
            ind = args.index("-v")
            volume = float(args[ind+1]) * 1e-6 # enter volume in cc, convert to m^3
    if not command_line:
        # keyword-driven invocation (e.g. from the Pmag GUI)
        dir_path = kwargs.get('dir_path', '.')
        input_dir_path = kwargs.get('input_dir_path', dir_path)
        output_dir_path = dir_path
        meas_file = kwargs.get('meas_file', 'magic_measurements.txt')
        mag_file = kwargs.get('mag_file')
        samp_file = kwargs.get('samp_file', 'er_samples.txt')
        er_location_name = kwargs.get('er_location_name', '')
        er_site_name = kwargs.get('er_site_name', '')
        noave = kwargs.get('noave', 0) # default (0) means DO average
        meth_code = kwargs.get('meth_code', "LP-NO")
        volume = float(kwargs.get('volume', 0))
        if not volume:
            volume = 0.025**3 #default volume is a 2.5 cm cube, translated to meters cubed
        else:
            #convert cm^3 to m^3
            volume *= 1e-6

    # format variables
    if not mag_file:
        return False, 'You must provide a BCG format file'
    mag_file = os.path.join(input_dir_path, mag_file)
    meas_file = os.path.join(output_dir_path, meas_file)
    samp_file = os.path.join(output_dir_path, samp_file)

    ErSampRec = {}

    # parse data
    # Open up the BGC file and read the header information
    print('mag_file in bgc_magic', mag_file)
    pre_data = open(mag_file, 'r')
    line = pre_data.readline()
    line_items = line.split(' ')
    sample_name = line_items[2]
    sample_name = sample_name.replace('\n', '')
    line = pre_data.readline()
    line = pre_data.readline()
    line_items = line.split('\t')
    sample_azimuth = float(line_items[1])
    sample_dip = float(line_items[2])
    sample_bed_dip = line_items[3]
    sample_bed_azimuth = line_items[4]
    sample_lon = line_items[5]
    sample_lat = line_items[6]
    tmp_volume = line_items[7]
    # NOTE(review): tmp_volume is still a *str* here, so this comparison with
    # the float 0.0 is always True and the file volume always wins — verify
    # whether `float(tmp_volume) != 0.0` was intended.
    if tmp_volume != 0.0:
        volume = float(tmp_volume) * 1e-6
    pre_data.close()

    # Body of the file: tab-separated measurements, 3 header lines skipped.
    data = pd.read_csv(mag_file, sep='\t', header=3, index_col=False)

    cart = np.array([data['X'], data['Y'], data['Z']]).transpose()
    direction = pmag.cart2dir(cart).transpose()

    data['measurement_dec'] = direction[0]
    data['measurement_inc'] = direction[1]
    data['measurement_magn_moment'] = old_div(direction[2], 1000)  # the data are in EMU - this converts to Am^2
    data['measurement_magn_volume'] = old_div((old_div(direction[2], 1000)), volume)  # EMU - data converted to A/m

    # Configure the er_sample table
    ErSampRec['er_sample_name'] = sample_name
    ErSampRec['sample_azimuth'] = sample_azimuth
    ErSampRec['sample_dip'] = sample_dip
    ErSampRec['sample_bed_dip_direction'] = sample_bed_azimuth
    ErSampRec['sample_bed_dip'] = sample_bed_dip
    ErSampRec['sample_lat'] = sample_lat
    ErSampRec['sample_lon'] = sample_lon
    ErSampRec['magic_method_codes'] = meth_code
    ErSampRec['er_location_name'] = er_location_name
    ErSampRec['er_site_name'] = er_site_name
    ErSampRec['er_citation_names'] = 'This study'
    SampOuts.append(ErSampRec.copy())

    # Configure the magic_measurements table: one output row per data row.
    for rowNum, row in data.iterrows():
        MagRec = {}
        MagRec['measurement_description'] = 'Date: ' + str(row['Date']) + ' Time: ' + str(row['Time'])
        MagRec["er_citation_names"] = "This study"
        MagRec['er_location_name'] = er_location_name
        MagRec['er_site_name'] = er_site_name
        MagRec['er_sample_name'] = sample_name
        MagRec['magic_software_packages'] = version_num
        MagRec["treatment_temp"] = '%8.3e' % (273) # room temp in kelvin
        MagRec["measurement_temp"] = '%8.3e' % (273) # room temp in kelvin
        MagRec["measurement_flag"] = 'g'
        MagRec["measurement_standard"] = 'u'
        MagRec["measurement_number"] = rowNum
        MagRec["er_specimen_name"] = sample_name
        MagRec["treatment_ac_field"] = '0'
        # Decode the demag (DM) columns into MagIC method codes.
        if row['DM Val'] == '0':
            meas_type = "LT-NO"
        elif int(row['DM Type']) > 0.0:
            meas_type = "LT-AF-Z"
            treat = float(row['DM Val'])
            MagRec["treatment_ac_field"] = '%8.3e' %(treat*1e-3) # convert from mT to tesla
        elif int(row['DM Type']) == -1:
            meas_type = "LT-T-Z"
            treat = float(row['DM Val'])
            MagRec["treatment_temp"] = '%8.3e' % (treat+273.) # temp in kelvin
        else:
            print("measurement type unknown:", row['DM Type'], " in row ", rowNum)
        MagRec["measurement_magn_moment"] = str(row['measurement_magn_moment'])
        MagRec["measurement_magn_volume"] = str(row['measurement_magn_volume'])
        MagRec["measurement_dec"] = str(row['measurement_dec'])
        MagRec["measurement_inc"] = str(row['measurement_inc'])
        MagRec['magic_method_codes'] = meas_type
        MagRec['measurement_csd'] = '0.0' # added due to magic.write error
        MagRec['measurement_positions'] = '1' # added due to magic.write error
        MagRecs.append(MagRec.copy())

    pmag.magic_write(samp_file, SampOuts, 'er_samples')
    print("sample orientations put in ", samp_file)
    MagOuts = pmag.measurements_methods(MagRecs, noave)
    pmag.magic_write(meas_file, MagOuts, 'magic_measurements')
    print("results put in ", meas_file)
    return True, meas_file
def do_help():
    """Return the usage/help text for this program (main's docstring)."""
    help_text = main.__doc__
    return help_text
# Entry point when run as a command-line script.
if __name__ == "__main__":
    main()
| bsd-3-clause |
jpinedaf/pyspeckit | pyspeckit/spectrum/plotters.py | 4 | 35855 | """
=======
Plotter
=======
.. moduleauthor:: Adam Ginsburg <adam.g.ginsburg@gmail.com>
"""
from __future__ import print_function
import matplotlib
import matplotlib.figure
import numpy as np
import astropy.units as u
import copy
import inspect
from astropy import log
# this mess is to handle a nested hell of different versions of matplotlib
# (>=1.3 has BoundMethodProxy somewhere, >=3 gets rid of it) and python
# (python >=3.4 has WeakMethod, earlier versions don't)
try:
from matplotlib.cbook import BoundMethodProxy
except ImportError:
try:
from matplotlib.cbook import _BoundMethodProxy as BoundMethodProxy
except ImportError:
try:
from matplotlib.cbook import WeakMethod
except ImportError:
try:
from weakref import WeakMethod
except ImportError:
try:
from weakrefmethod import WeakMethod
except ImportError:
raise ImportError("Could not import WeakMethod from "
"anywhere. Try installing the "
"weakrefmethod package or use a more "
"recent version of python or matplotlib")
class BoundMethodProxy(WeakMethod):
@property
def func(self):
return self()
from . import widgets
from ..specwarnings import warn
# Help text printed to the terminal when the user presses '?' in an
# interactive plot window (see Plotter.parse_keys).
interactive_help_message = """
Interactive key commands for plotter. An additional help message may appear if
you have initiated the fitter.
'?' - bring up this message
'f' - initiate the /f/itter
'b' - initiate the /b/aseliner
'B' - initiate the /b/aseliner (reset the selection too)
'r' - re-attach matplotlib keys
'R' - redraw the plot cleanly
'i' : individual components / show each fitted component
"""

# Maps an astropy physical type (e.g. 'speed') to a nicer axis-label word.
xlabel_table = {'speed': 'Velocity'}
class Plotter(object):
"""
Class to plot a spectrum
"""
def __init__(self, Spectrum, autorefresh=True, title="", xlabel=None,
             silent=True, plotscale=1.0, **kwargs):
    """
    Create a plotter bound to *Spectrum*.

    Parameters
    ----------
    Spectrum : pyspeckit Spectrum
        The spectrum this plotter will display.
    autorefresh : bool
        Redraw the canvas automatically after plot operations?
    title : str
        Plot title.
    xlabel : str or None
        X-axis label; derived from the spectral axis unit if None.
    silent : bool
        Suppress informational warnings?
    plotscale : float
        Multiplicative scale applied to the data when plotting.
    kwargs :
        Stored as default keyword arguments for later `plot` calls.
    """
    # imported here (not at module level) so the backend can be chosen first
    import matplotlib.pyplot
    self._pyplot = matplotlib.pyplot
    # figure/axis are attached on the first __call__
    self.figure = None
    self.axis = None
    self.Spectrum = Spectrum
    # plot parameters
    self.offset = 0.0 # vertical offset
    self.autorefresh = autorefresh
    self.xlabel = xlabel
    self.title = title
    self.errorplot = None
    self.plotkwargs = kwargs
    # stored [min, max] limits backing the xmin/xmax/ymin/ymax properties
    self._xlim = [None,None]
    self._ylim = [None,None]
    self.debug = False
    # matplotlib connection id for the key-press callback
    self.keyclick = None
    self.silent = silent
    self.plotscale = plotscale
    # click positions recorded by get_two_clicks
    self._xclick1 = None
    self._xclick2 = None
    self.automake_fitter_tool = False
    # currently-active interactive tool (specfit/baseline), if any
    self._active_gui = None
@property
def _xunit(self):
    """Unit of the associated spectrum's spectral axis."""
    spectral_axis = self.Spectrum.xarr
    return spectral_axis.unit
def _get_prop(xy, minmax):
    # Class-body-level factory (note: no *self*): builds the getter closure
    # for one of the xmin/xmax/ymin/ymax properties defined below.
    def getprop(self):
        if xy == 'x':
            if minmax == 'min':
                # Re-coerce the stored limit to the current x unit, since
                # the spectrum's xarr unit may have changed since it was set.
                if self._xlim[0] is not None and self._xunit:
                    try:
                        self._xlim[0]._unit = self._xunit
                    except AttributeError:
                        self._xlim[0] = u.Quantity(self._xlim[0], self._xunit)
                return self._xlim[0]
            elif minmax == 'max':
                if self._xlim[1] is not None and self._xunit:
                    try:
                        self._xlim[1]._unit = self._xunit
                    except AttributeError:
                        self._xlim[1] = u.Quantity(self._xlim[1], self._xunit)
                return self._xlim[1]
        elif xy == 'y':
            # y limits are stored as plain values (no unit handling)
            if minmax == 'min':
                return self._ylim[0]
            elif minmax == 'max':
                return self._ylim[1]
    return getprop

def _set_prop(xy, minmax):
    # Companion factory building the setter closures for the limit properties.
    def setprop(self, value):
        if self.debug:
            # report who set the limit (stack frame) when debugging
            frm = inspect.stack()
            print(frm[1],"Setting %s%s to %s" % (xy,minmax,value))
        if xy == 'x':
            if minmax == 'min':
                self._xlim[0] = value
            elif minmax == 'max':
                self._xlim[1] = value
        elif xy == 'y':
            if minmax == 'min':
                self._ylim[0] = value
            elif minmax == 'max':
                self._ylim[1] = value
    return setprop

# Plot-limit accessors built from the factories above.
xmin = property(fget=_get_prop('x','min'),fset=_set_prop('x','min'))
xmax = property(fget=_get_prop('x','max'),fset=_set_prop('x','max'))
ymin = property(fget=_get_prop('y','min'),fset=_set_prop('y','min'))
ymax = property(fget=_get_prop('y','max'),fset=_set_prop('y','max'))
def _disconnect_matplotlib_keys(self):
    """
    Disconnect the default matplotlib key-press callbacks, stashing them in
    ``self._mpl_key_callbacks`` so they can be re-attached later.
    """
    if self.figure is not None:
        cbs = self.figure.canvas.callbacks.callbacks
        # this may cause problems since the dict of key press events is a
        # dict, i.e. not ordered, and we want to pop the first one...
        mpl_keypress_handler = self.figure.canvas.manager.key_press_handler_id
        try:
            self._mpl_key_callbacks = {mpl_keypress_handler:
                                       cbs['key_press_event'].pop(mpl_keypress_handler)}
        except KeyError:
            # Handler id not present in the callback registry (matplotlib
            # version difference); fall back to a weak proxy of the
            # manager's key_press method instead.
            bmp = BoundMethodProxy(self.figure.canvas.manager.key_press)
            self._mpl_key_callbacks = {mpl_keypress_handler:
                                       bmp}
def _reconnect_matplotlib_keys(self):
    """
    Reconnect the previously disconnected matplotlib keys
    """
    if self.figure is not None and hasattr(self,'_mpl_key_callbacks'):
        # restore the callbacks stashed by _disconnect_matplotlib_keys
        self.figure.canvas.callbacks.callbacks['key_press_event'].update(self._mpl_key_callbacks)
    elif self.figure is not None:
        # nothing was stashed; rebuild the default handler via a weak proxy
        mpl_keypress_handler = self.figure.canvas.manager.key_press_handler_id
        bmp = BoundMethodProxy(self.figure.canvas.manager.key_press)
        self.figure.canvas.callbacks.callbacks['key_press_event'].update({mpl_keypress_handler:
                                                                          bmp})
def __call__(self, figure=None, axis=None, clear=True, autorefresh=None,
             plotscale=1.0, override_plotkwargs=False, **kwargs):
    """
    Plot a spectrum

    Keywords:
      figure - either a matplotlib figure instance or a figure number
        to pass into pyplot.figure.
      axis - Alternative to figure, can pass an axis instance and use
        it as the plotting canvas
      clear - Clear the axis before plotting?
    """
    # figure out where to put the plot
    if isinstance(figure,matplotlib.figure.Figure):
        self.figure = figure
        self.axis = self.figure.gca()
    elif type(figure) is int:
        self.figure = self._pyplot.figure(figure)
        self.axis = self.figure.gca()
    elif self.figure is None:
        if isinstance(axis,matplotlib.axes.Axes):
            self.axis = axis
            self.figure = axis.figure
        else:
            self.figure = self._pyplot.figure()

    # if the stored figure was closed, re-open/recreate it
    if hasattr(self.figure, 'number') and not self._pyplot.fignum_exists(self.figure.number):
        self.figure = self._pyplot.figure(self.figure.number)

    # always re-connect the interactive keys to avoid frustration...
    self._mpl_reconnect()

    if axis is not None:
        #self._mpl_disconnect()
        self.axis = axis
        self.figure = axis.figure
        #self._mpl_connect()
    elif len(self.figure.axes) > 0 and self.axis is None:
        self.axis = self.figure.axes[0] # default to first axis
    elif self.axis is None:
        self.axis = self.figure.gca()

    # A check to deal with issue #117: if you close the figure, the axis
    # still exists, but it cannot be reattached to a figure
    if (hasattr(self.axis.get_figure(), 'number') and
            not (self.axis.get_figure() is self._pyplot.figure(self.axis.get_figure().number))):
        self.axis = self.figure.gca()

    if self.axis is not None and self.axis not in self.figure.axes:
        # if you've cleared the axis, but the figure is still open, you
        # need a new axis
        self.figure.add_axes(self.axis)

    if clear and self.axis is not None:
        self.axis.clear()
        # Need to empty the stored model plots
        if hasattr(self.Spectrum, 'fitter'):
            self.Spectrum.fitter.clear()

    if autorefresh is not None:
        self.autorefresh = autorefresh

    self.plotscale = plotscale

    # merge (or replace) the stored default plot kwargs
    if self.plotkwargs and not override_plotkwargs:
        self.plotkwargs.update(kwargs)
    else:
        self.plotkwargs = kwargs

    self.plot(**kwargs)
def _mpl_connect(self):
    """Attach the key-press parser to the canvas if not already attached."""
    if self.keyclick is not None:
        return
    self.keyclick = self.figure.canvas.mpl_connect('key_press_event',
                                                   self.parse_keys)
def _mpl_disconnect(self):
    """Detach the key-press parser from the canvas and forget its id."""
    canvas = self.figure.canvas
    canvas.mpl_disconnect(self.keyclick)
    self.keyclick = None
def disconnect(self):
    """
    Disconnect the matplotlib interactivity of this pyspeckit plotter.
    """
    # public alias for the private disconnect helper
    self._mpl_disconnect()
def connect(self):
    """
    Connect to the matplotlib key-parsing interactivity
    """
    # public alias for the private connect helper
    self._mpl_connect()
def _mpl_reconnect(self):
    # Cleanly (re-)attach our key handler: disconnect any existing one first.
    self._mpl_disconnect()
    self._mpl_connect()
    # disable fullscreen & grid: remap matplotlib's 'f' and 'g' shortcuts so
    # they don't collide with the plotter's own key commands.
    # NOTE: this mutates the *global* matplotlib rcParams.
    self._pyplot.rcParams['keymap.fullscreen'] = 'ctrl+f'
    self._pyplot.rcParams['keymap.grid'] = 'ctrl+g'
def plot(self, offset=0.0, xoffset=0.0, color='k', drawstyle='steps-mid',
         linewidth=0.5, errstyle=None, erralpha=0.2, errcolor=None,
         silent=None, reset=True, refresh=True, use_window_limits=None,
         useOffset=False, **kwargs):
    """
    Plot the spectrum!

    Tries to automatically find a reasonable plotting range if one is not
    set.

    Parameters
    ----------
    offset : float
        vertical offset to add to the spectrum before plotting. Useful if
        you want to overlay multiple spectra on a single plot
    xoffset: float
        An x-axis shift. I don't know why you'd want this...
    color : str
        default to plotting spectrum in black
    drawstyle : 'steps-mid' or str
        'steps-mid' for histogram-style plotting. See matplotlib's plot
        for more information
    linewidth : float
        Line width in pixels. Narrow lines are helpful when histo-plotting
    errstyle : 'fill', 'bars', or None
        can be "fill", which draws partially transparent boxes around the
        data to show the error region, or "bars" which draws standard
        errorbars. ``None`` will display no errorbars
    useOffset : bool
        Use offset-style X/Y coordinates (e.g., 1 + 1.483e10)? Defaults to
        False because these are usually quite annoying.
    xmin/xmax/ymin/ymax : float
        override defaults for plot range. Once set, these parameters are
        sticky (i.e., replotting will use the same ranges). Passed to
        `reset_limits`
    reset_[xy]limits : bool
        Reset the limits to "sensible defaults". Passed to `reset_limits`
    ypeakscale : float
        Scale up the Y maximum value. Useful to keep the annotations away
        from the data. Passed to `reset_limits`
    reset : bool
        Reset the x/y axis limits? If set, `reset_limits` will be called.
    """
    if self.axis is None:
        raise Exception("You must call the Plotter class to initiate the canvas before plotting.")

    self.offset = offset

    # there is a bug where this only seems to update the second time it is called
    self.label(**kwargs)
    self.label(**kwargs)
    # label-only kwargs must not be forwarded to matplotlib's plot()
    for arg in ['title','xlabel','ylabel']:
        if arg in kwargs:
            kwargs.pop(arg)

    # extract the limit-related kwargs and forward them to reset_limits
    reset_kwargs = {}
    for arg in ['xmin', 'xmax', 'ymin', 'ymax', 'reset_xlimits',
                'reset_ylimits', 'ypeakscale']:
        if arg in kwargs:
            reset_kwargs[arg] = kwargs.pop(arg)

    # explicit x limits imply we should NOT keep the current window
    if (use_window_limits is None and any(k in reset_kwargs for k in
                                          ('xmin','xmax','reset_xlimits'))):
        use_window_limits = False
    if use_window_limits:
        self._stash_window_limits()

    # for filled errorbars, order matters.
    inds = np.argsort(self.Spectrum.xarr)

    if errstyle is not None:
        if errcolor is None:
            errcolor = color
        if errstyle == 'fill':
            # shade between data-error and data+error, steppified to match
            # the steps-mid draw style
            self.errorplot = [self.axis.fill_between(steppify(self.Spectrum.xarr.value[inds]+xoffset, isX=True),
                                                     steppify((self.Spectrum.data*self.plotscale+self.offset-self.Spectrum.error*self.plotscale)[inds]),
                                                     steppify((self.Spectrum.data*self.plotscale+self.offset+self.Spectrum.error*self.plotscale)[inds]),
                                                     facecolor=errcolor, edgecolor=errcolor, alpha=erralpha, **kwargs)]
        elif errstyle == 'bars':
            self.errorplot = self.axis.errorbar(self.Spectrum.xarr[inds].value+xoffset,
                                                self.Spectrum.data[inds]*self.plotscale+self.offset,
                                                yerr=self.Spectrum.error[inds]*self.plotscale,
                                                ecolor=errcolor, fmt='none',
                                                **kwargs)

    self._spectrumplot = self.axis.plot(self.Spectrum.xarr.value[inds]+xoffset,
                                        self.Spectrum.data[inds]*self.plotscale+self.offset,
                                        color=color,
                                        drawstyle=drawstyle,
                                        linewidth=linewidth, **kwargs)

    self.axis.ticklabel_format(useOffset=useOffset)

    if use_window_limits:
        self._reset_to_stashed_limits()

    if silent is not None:
        self.silent = silent

    if reset:
        self.reset_limits(use_window_limits=use_window_limits, **reset_kwargs)

    if self.autorefresh and refresh:
        self.refresh()

    # Maybe it's OK to call 'plot' when there is an active gui tool
    # (e.g., baseline or specfit)?
    #if self._active_gui:
    #    self._active_gui = None
    #    warn("An active GUI was found while initializing the "
    #         "plot.  This is somewhat dangerous and may result "
    #         "in broken interactivity.")
def _stash_window_limits(self):
    """Remember the axis's current x/y view limits for later restoration."""
    current_xlim = self.axis.get_xlim()
    current_ylim = self.axis.get_ylim()
    self._window_limits = (current_xlim, current_ylim)
    if self.debug:
        print("Stashed window limits: ",self._window_limits)
def _reset_to_stashed_limits(self):
    """Restore the view limits saved by `_stash_window_limits`."""
    stashed_xlim, stashed_ylim = self._window_limits
    self.axis.set_xlim(*stashed_xlim)
    self.axis.set_ylim(*stashed_ylim)
    # also update the sticky limit properties
    self.xmin, self.xmax = stashed_xlim
    self.ymin, self.ymax = stashed_ylim
    if self.debug:
        print("Recovered window limits: ",self._window_limits)
def reset_limits(self, xmin=None, xmax=None, ymin=None, ymax=None,
                 reset_xlimits=True, reset_ylimits=True, ypeakscale=1.2,
                 silent=None, use_window_limits=False, **kwargs):
    """
    Automatically or manually reset the plot limits
    """
    # if not use_window_limits: use_window_limits = False
    if self.debug:
        frame = inspect.currentframe()
        args, _, _, values = inspect.getargvalues(frame)
        print(zip(args,values))

    if use_window_limits:
        # this means DO NOT reset!
        # it simply sets self.[xy][min/max] = current value
        self.set_limits_from_visible_window()
    else:
        if silent is not None:
            self.silent = silent

        # if self.xmin and self.xmax:
        # discard stale x limits when asked to, or when the data fall
        # outside them
        if (reset_xlimits or self.Spectrum.xarr.min().value < self.xmin or self.Spectrum.xarr.max().value > self.xmax):
            if not self.silent:
                warn("Resetting X-axis min/max because the plot is out of bounds.")
            self.xmin = None
            self.xmax = None
        if xmin is not None:
            self.xmin = u.Quantity(xmin, self._xunit)
        elif self.xmin is None:
            self.xmin = u.Quantity(self.Spectrum.xarr.min().value, self._xunit)
        if xmax is not None:
            self.xmax = u.Quantity(xmax, self._xunit)
        elif self.xmax is None:
            self.xmax = u.Quantity(self.Spectrum.xarr.max().value, self._xunit)

        # pixel indices closest to the x limits (used for y autoscaling)
        xpixmin = np.argmin(np.abs(self.Spectrum.xarr.value-self.xmin.value))
        xpixmax = np.argmin(np.abs(self.Spectrum.xarr.value-self.xmax.value))
        if xpixmin>xpixmax:
            xpixmin,xpixmax = xpixmax,xpixmin
        elif xpixmin == xpixmax:
            # degenerate x range: retry once with a forced x reset
            if reset_xlimits:
                raise Exception("Infinite recursion error.  Maybe there are no valid data?")
            if not self.silent:
                warn("ERROR: the X axis limits specified were invalid.  Resetting.")
            self.reset_limits(reset_xlimits=True, ymin=ymin, ymax=ymax,
                              reset_ylimits=reset_ylimits,
                              ypeakscale=ypeakscale, **kwargs)
            return

        if self.ymin is not None and self.ymax is not None:
            # this is utter nonsense....
            if (np.nanmax(self.Spectrum.data) < self.ymin or np.nanmin(self.Spectrum.data) > self.ymax
                    or reset_ylimits):
                if not self.silent and not reset_ylimits:
                    warn("Resetting Y-axis min/max because the plot is out of bounds.")
                self.ymin = None
                self.ymax = None

        if ymin is not None:
            self.ymin = ymin
        elif self.ymin is None:
            yminval = np.nanmin(self.Spectrum.data[xpixmin:xpixmax])
            # Increase the range fractionally.  This means dividing a positive #, multiplying a negative #
            if yminval < 0:
                self.ymin = float(yminval)*float(ypeakscale)
            else:
                self.ymin = float(yminval)/float(ypeakscale)

        if ymax is not None:
            self.ymax = ymax
        elif self.ymax is None:
            # ymax is autoscaled relative to ymin by ypeakscale
            ymaxval = (np.nanmax(self.Spectrum.data[xpixmin:xpixmax])-self.ymin)
            if ymaxval > 0:
                self.ymax = float(ymaxval) * float(ypeakscale) + self.ymin
            else:
                self.ymax = float(ymaxval) / float(ypeakscale) + self.ymin

        # apply the vertical plotting offset to the y limits
        self.ymin += self.offset
        self.ymax += self.offset

    self.axis.set_xlim(self.xmin.value if hasattr(self.xmin, 'value') else self.xmin,
                       self.xmax.value if hasattr(self.xmax, 'value') else self.xmax)
    self.axis.set_ylim(self.ymin, self.ymax)
def label(self, title=None, xlabel=None, ylabel=None, verbose_label=False,
          **kwargs):
    """
    Label the plot, with an attempt to parse standard units into nice latex
    labels.

    Parameters
    ----------
    title : str
        Plot title; defaults to the spectrum's ``specname`` if present.
    xlabel : str
        X-axis label; derived from the x unit's physical type if not given.
    ylabel : str
        Y-axis label; derived from the spectrum's flux unit if not given.
    verbose_label : bool
        Prepend the velocity convention (e.g. "Radio") to the x label.
    """
    if title is not None:
        self.title = title
    elif hasattr(self.Spectrum,'specname'):
        self.title = self.Spectrum.specname
    # BUGFIX: was `self.title is not ""` -- identity comparison with a str
    # literal is implementation-dependent (SyntaxWarning on Python >= 3.8);
    # equality is the intended check.
    if self.title != "":
        self.axis.set_title(self.title)

    if xlabel is not None:
        log.debug("setting xlabel={0}".format(xlabel))
        self.xlabel = xlabel
    elif self._xunit:
        # map the unit's physical type to a friendlier word when we know one
        try:
            self.xlabel = xlabel_table[self._xunit.physical_type.lower()]
        except KeyError:
            self.xlabel = self._xunit.physical_type.title()
        # WAS: self.xlabel += " ("+u.Unit(self._xunit).to_string()+")"
        self.xlabel += " ({0})".format(self._xunit.to_string())
        log.debug("xunit is {1}. set xlabel={0}".format(self.xlabel,
                                                        self._xunit))
        if verbose_label:
            self.xlabel = "%s %s" % (self.Spectrum.xarr.velocity_convention.title(),
                                     self.xlabel)
    else:
        # log.warn is deprecated in favor of log.warning
        log.warning("Plotter: xlabel was not set")
    if self.xlabel is not None:
        self.axis.set_xlabel(self.xlabel)

    if ylabel is not None:
        self.axis.set_ylabel(ylabel)
    elif self.Spectrum.unit in ['Ta*','Tastar']:
        self.axis.set_ylabel("$T_A^*$ (K)")
    elif self.Spectrum.unit in ['K']:
        self.axis.set_ylabel("Brightness Temperature $T$ (K)")
    elif self.Spectrum.unit == 'mJy':
        self.axis.set_ylabel("$S_\\nu$ (mJy)")
    elif self.Spectrum.unit == 'Jy':
        self.axis.set_ylabel("$S_\\nu$ (Jy)")
    else:
        if isinstance(self.Spectrum.unit, str) and "$" in self.Spectrum.unit:
            # assume LaTeX already
            self.axis.set_ylabel(self.Spectrum.unit)
        elif isinstance(self.Spectrum.unit, str):
            self.axis.set_ylabel(self.Spectrum.unit)
        else:
            # an astropy unit: render to LaTeX, then clean up constructs
            # matplotlib's mathtext cannot handle
            label_units = self.Spectrum.unit.to_string(format='latex')
            if 'mathring{A}' in label_units:
                label_units = label_units.replace(r'\mathring{A}', 'A')
            if r'\overset' in label_units:
                label_units = label_units.replace(r'\overset', '^')
            self.axis.set_ylabel(label_units)
@property
def ylabel(self):
    """The y-axis label currently shown on the plot axis."""
    current_axis = self.axis
    return current_axis.get_ylabel()
def refresh(self):
    """Redraw the figure canvas, if an axis is attached."""
    if self.axis is None:
        return
    self.axis.figure.canvas.draw()
def savefig(self, fname, bbox_inches='tight', **kwargs):
    """
    simple wrapper of maplotlib's savefig.
    """
    parent_figure = self.axis.figure
    parent_figure.savefig(fname, bbox_inches=bbox_inches, **kwargs)
def parse_keys(self,event):
    """
    Parse key commands entered from the keyboard
    """
    if hasattr(event,'key'):
        if event.key == '?':
            print(interactive_help_message)
        elif event.key == 'f':
            print("\n\nFitter initiated from the interactive plotter.")
            # extra optional text:
            # Matplotlib shortcut keys ('g','l','p',etc.) are disabled. Re-enable with 'r'"
            if self._active_gui == self.Spectrum.specfit and self._active_gui._check_connections(verbose=False):
                print("Fitter is already active.  Use 'q' to quit the fitter.")
            elif self._active_gui == self.Spectrum.specfit and not self._active_gui._check_connections(verbose=False):
                # the fitter gui exists but lost its canvas connections:
                # forcibly clear connections
                self._active_gui.clear_all_connections()
                # the 'clear_all_connections' code *explicitly* makes the
                # following line correct, except in the case that there is
                # no canvas...
                assert self._active_gui is None
                self.activate_interactive_fitter()
            else:
                self.activate_interactive_fitter()

            # sanity check: the fitter gui must now be active and connected
            assert self._active_gui == self.Spectrum.specfit
            assert self._active_gui._check_connections(verbose=False)

            # (re)create the fitter tool window if configured to do so
            if not hasattr(self,'FitterTool') and self.automake_fitter_tool:
                self.FitterTool = widgets.FitterTools(self.Spectrum.specfit, self.figure)
            elif hasattr(self,'FitterTool') and self.FitterTool.toolfig.number not in self._pyplot.get_fignums():
                self.FitterTool = widgets.FitterTools(self.Spectrum.specfit, self.figure)
        elif event.key is not None and event.key.lower() == 'b':
            # 'b' starts the baseliner; 'B' also resets the selection
            if event.key == 'b':
                print("\n\nBaseline initiated from the interactive plotter")
            elif event.key == 'B':
                print("\n\nBaseline initiated from the interactive plotter (with reset)")
            print("Matplotlib shortcut keys ('g','l','p',etc.) are disabled. Re-enable with 'r'")
            self.activate_interactive_baseline_fitter(reset_selection=(event.key=='B'))
            if not hasattr(self,'FitterTool') and self.automake_fitter_tool:
                self.FitterTool = widgets.FitterTools(self.Spectrum.specfit, self.figure)
            elif hasattr(self,'FitterTool') and self.FitterTool.toolfig.number not in self._pyplot.get_fignums():
                self.FitterTool = widgets.FitterTools(self.Spectrum.specfit, self.figure)
        elif event.key == 'r':
            # print("\n\nReconnected matplotlib shortcut keys.")
            self._reconnect_matplotlib_keys()
        elif event.key == 'R':
            # full clean redraw
            self()
        elif event.key == 'i':
            self.Spectrum.specfit.plot_fit(show_components=True)
def get_two_clicks(self, event):
    """
    Record the x position of a mouse click: the first click fills
    ``_xclick1``, the second fills ``_xclick2``; any later clicks are
    ignored.
    """
    clicked_x = event.xdata
    if self._xclick1 is None:
        self._xclick1 = clicked_x
    elif self._xclick2 is None:
        self._xclick2 = clicked_x
def set_limits_from_visible_window(self, debug=False):
    """ Hopefully self-descriptive: set the x and y limits from the
    currently visible window (use this if you use the pan/zoom tools or
    manually change the limits) """
    if debug:
        print("Changing x limits from {},{} to {},{}".format(self.xmin,self.xmax,self.axis.get_xlim()[0],self.axis.get_xlim()[1]))
        print("Changing y limits from {},{} to {},{}".format(self.ymin,self.ymax,self.axis.get_ylim()[0],self.axis.get_ylim()[1]))
    # adopt whatever limits the interactive window currently shows
    self.xmin, self.xmax = self.axis.get_xlim()
    self.ymin, self.ymax = self.axis.get_ylim()
    if debug:
        print("New x limits {},{} == {},{}".format(self.xmin,self.xmax,self.axis.get_xlim()[0],self.axis.get_xlim()[1]))
        print("New y limits {},{} == {},{}".format(self.ymin,self.ymax,self.axis.get_ylim()[0],self.axis.get_ylim()[1]))
def copy(self, parent=None):
    """
    Create a copy of the plotter with blank (uninitialized) axis & figure

    [ parent ]
        A spectroscopic axis instance that is the parent of the specfit
        instance.  This needs to be specified at some point, but defaults
        to None to prevent overwriting a previous plot.
    """
    # shallow-copy the plotter, then detach it from the current display
    # so the copy can be attached to a fresh figure/axis later
    newplotter = copy.copy(self)
    newplotter.Spectrum = parent
    newplotter.axis = None
    newplotter.figure = None
    return newplotter
def line_ids(self, line_names, line_xvals, xval_units=None, auto_yloc=True,
             velocity_offset=None, velocity_convention='radio',
             auto_yloc_fraction=0.9, **kwargs):
    """
    Add line ID labels to a plot using lineid_plot
    http://oneau.wordpress.com/2011/10/01/line-id-plot/
    https://github.com/phn/lineid_plot
    http://packages.python.org/lineid_plot/

    Parameters
    ----------
    line_names : list
        A list of strings to label the specified x-axis values
    line_xvals : list
        List of x-axis values (e.g., wavelengths) at which to label the lines.
        Can be a list of quantities.
    xval_units : string
        The unit of the line_xvals if they are not given as quantities
    velocity_offset : quantity
        A velocity offset to apply to the inputs if they are in frequency
        or wavelength units
    velocity_convention : 'radio' or 'optical' or 'doppler'
        Used if the velocity offset is given
    auto_yloc : bool
        If set, overrides box_loc and arrow_tip (the vertical position of
        the lineid labels) in kwargs to be `auto_yloc_fraction` of the plot
        range
    auto_yloc_fraction: float in range [0,1]
        The fraction of the plot (vertically) at which to place labels

    Examples
    --------
    >>> import numpy as np
    >>> import pyspeckit
    >>> sp = pyspeckit.Spectrum(
            xarr=pyspeckit.units.SpectroscopicAxis(np.linspace(-50,50,101),
            unit='km/s', refX=6562.8, refX_unit='angstrom'),
            data=np.random.randn(101), error=np.ones(101))
    >>> sp.plotter()
    >>> sp.plotter.line_ids(['H$\\alpha$'],[6562.8],xval_units='angstrom')
    """
    import lineid_plot

    if velocity_offset is not None:
        # offsets must be velocities (e.g. km/s)
        assert velocity_offset.unit.is_equivalent(u.km/u.s)
    # doppler equivalency anchored at the reference frequency when
    # available, otherwise at the first channel (converted to GHz)
    doppler = getattr(u, 'doppler_{0}'.format(velocity_convention))
    if self.Spectrum.xarr.refX is not None:
        equivalency = doppler(self.Spectrum.xarr.refX)
    else:
        equivalency = doppler(self.Spectrum.xarr.as_unit(u.GHz)[0])

    # keep only the lines that fall inside the plotted x-range
    xvals = []
    linenames_toplot = []
    for xv,ln in zip(line_xvals, line_names):
        if hasattr(xv, 'unit'):
            pass
        else:
            # bare numbers are promoted to quantities using xval_units
            xv = u.Quantity(xv, xval_units)
        # round-trip through velocity space so an offset can be applied
        xv = xv.to(u.km/u.s,
                   equivalencies=equivalency)
        if velocity_offset is not None:
            xv = xv + velocity_offset
        xv = xv.to(self.Spectrum.xarr.unit, equivalencies=equivalency)
        if self.Spectrum.xarr.in_range(xv):
            xvals.append(xv.value)
            linenames_toplot.append(ln)

    if len(xvals) != len(line_xvals):
        log.warn("Skipped {0} out-of-bounds lines when plotting line IDs."
                 .format(len(line_xvals)-len(xvals)))

    if auto_yloc:
        # place label boxes / arrow tips at a fixed fraction of the
        # visible y-range
        yr = self.axis.get_ylim()
        kwargs['box_loc'] = (yr[1]-yr[0])*auto_yloc_fraction + yr[0]
        kwargs['arrow_tip'] = (yr[1]-yr[0])*(auto_yloc_fraction*0.9) + yr[0]

    lineid_plot.plot_line_ids(self.Spectrum.xarr,
                              self.Spectrum.data,
                              xvals,
                              linenames_toplot,
                              ax=self.axis,
                              **kwargs)
def line_ids_from_measurements(self, auto_yloc=True,
                               auto_yloc_fraction=0.9, **kwargs):
    """
    Add line ID labels to a plot using lineid_plot
    http://oneau.wordpress.com/2011/10/01/line-id-plot/
    https://github.com/phn/lineid_plot
    http://packages.python.org/lineid_plot/

    Parameters
    ----------
    auto_yloc : bool
        If set, overrides box_loc and arrow_tip (the vertical position of
        the lineid labels) in kwargs to be `auto_yloc_fraction` of the plot
        range
    auto_yloc_fraction: float in range [0,1]
        The fraction of the plot (vertically) at which to place labels

    Examples
    --------
    >>> import numpy as np
    >>> import pyspeckit
    >>> sp = pyspeckit.Spectrum(
            xarr=pyspeckit.units.SpectroscopicAxis(np.linspace(-50,50,101),
            units='km/s', refX=6562.8, refX_unit='angstroms'),
            data=np.random.randn(101), error=np.ones(101))
    >>> sp.plotter()
    >>> sp.specfit(multifit=None, fittype='gaussian', guesses=[1,0,1]) # fitting noise....
    >>> sp.measure()
    >>> sp.plotter.line_ids_from_measurements()
    """
    import lineid_plot

    if hasattr(self.Spectrum,'measurements'):
        measurements = self.Spectrum.measurements

        if auto_yloc:
            # place label boxes / arrow tips at a fixed fraction of the
            # visible y-range
            yr = self.axis.get_ylim()
            kwargs['box_loc'] = (yr[1]-yr[0])*auto_yloc_fraction + yr[0]
            kwargs['arrow_tip'] = (yr[1]-yr[0])*(auto_yloc_fraction*0.9) + yr[0]

        # label each measured line at its fitted position
        lineid_plot.plot_line_ids(self.Spectrum.xarr, self.Spectrum.data,
                                  [v['pos'] for v in
                                   measurements.lines.values()],
                                  measurements.lines.keys(), ax=self.axis,
                                  **kwargs)
    else:
        # NOTE(review): `warn` is presumably warnings.warn imported at
        # module level -- not visible in this chunk, confirm
        warn("Cannot add line IDs from measurements unless measurements have been made!")
def activate_interactive_fitter(self):
    """
    Attempt to activate the interactive fitter
    """
    gui = self._active_gui
    if gui is not None:
        # Defensive: this point should never be reached.  Drop any stale
        # event connections, then fail loudly rather than continue with
        # two GUIs attached to the same canvas.
        gui.clear_all_connections()
        raise ValueError("GUI was active when 'f' key pressed")
    self._activate_interactive(self.Spectrum.specfit, interactive=True)
def activate_interactive_baseline_fitter(self, **kwargs):
    """
    Attempt to activate the interactive baseline fitter
    """
    gui_was = self._active_gui
    if gui_was is not None:
        # Defensive: should be unreachable.  Clear stale connections and
        # raise rather than silently stacking interactive GUIs.
        gui_was.clear_all_connections()
        raise ValueError("GUI {0} was active when 'b' key pressed"
                         .format(gui_was))
    self._activate_interactive(self.Spectrum.baseline, interactive=True,
                               **kwargs)
def _activate_interactive(self, object_to_activate, **kwargs):
    """Activate an interactive GUI (specfit or baseline), managing state.

    Disconnects matplotlib's default shortcut keys first so they do not
    interfere with the GUI's own key handling.
    """
    self._disconnect_matplotlib_keys()
    self._active_gui = object_to_activate
    # activating the gui calls clear_all_connections, which disconnects the
    # gui
    try:
        self._active_gui(**kwargs)
        # re-assign because the call above cleared _active_gui as a
        # side effect (see comment above)
        self._active_gui = object_to_activate
        assert self._active_gui is not None
    except Exception as ex:
        # never leave a half-activated GUI behind on failure
        self._active_gui = None
        raise ex
def parse_units(labelstring):
    """
    Convert unit abbreviations in *labelstring* to TeX-formatted text.

    ``um`` becomes ``$\\mu$m``, occurrences of ``-1``/``-2``/``-3`` become
    superscripts, and the concatenation ``ergss`` is split into ``ergs s``.

    Parameters
    ----------
    labelstring : str
        The raw unit/axis label.

    Returns
    -------
    str
        The TeX-ified label.
    """
    import re
    # NB: re.sub() interprets backslashes in the *replacement* string, and
    # unknown escapes such as "\m" raise re.error on Python >= 3.7, so the
    # backslash in "\mu" must itself be escaped ("\\m" -> literal "\m").
    # Substitution order is preserved from the original implementation.
    substitutions = [
        ("um", r"$\\mu$m"),
        ("-1", "$^{-1}$"),
        ("-2", "$^{-2}$"),
        ("-3", "$^{-3}$"),
        ("ergss", "ergs s"),
    ]
    for pattern, replacement in substitutions:
        labelstring = re.sub(pattern, replacement, labelstring)
    return labelstring
def parse_norm(norm):
    """
    Convert a scientific-notation string such as ``10E15`` into a TeX
    exponent string such as ``10^{15}``.

    Expected format: norm = 10E15 (upper- or lower-case ``e``).
    A mantissa of exactly 1 is rendered as a bare power of ten.
    """
    try:
        base, exp = norm.split('E')
    except ValueError:
        base, exp = norm.split('e')
    mantissa = '10' if float(base) == 1.0 else base
    return mantissa + '^{%s}' % exp
def steppify(arr, isX=False):
    """
    *support function*
    Double the length of an array so it can be drawn as a step plot.

    With ``isX=False`` each value is simply repeated twice.  With
    ``isX=True`` each x value is expanded to the midpoints between
    neighbours (half-interval to either side), with the final edge
    duplicated to close the last step.
    """
    if not isX:
        return np.repeat(arr, 2)
    half = abs(arr[1:] - arr[:-1]) / 2.0
    left_edges = arr[:-1] - half
    right_edges = arr[:-1] + half
    doubled = np.column_stack((left_edges, right_edges)).ravel()
    tail = doubled[-1] + half[-1]
    return np.concatenate([doubled, [tail, tail]])
| mit |
rishikksh20/scikit-learn | examples/ensemble/plot_bias_variance.py | 357 | 7324 | """
============================================================
Single estimator versus bagging: bias-variance decomposition
============================================================
This example illustrates and compares the bias-variance decomposition of the
expected mean squared error of a single estimator against a bagging ensemble.
In regression, the expected mean squared error of an estimator can be
decomposed in terms of bias, variance and noise. On average over datasets of
the regression problem, the bias term measures the average amount by which the
predictions of the estimator differ from the predictions of the best possible
estimator for the problem (i.e., the Bayes model). The variance term measures
the variability of the predictions of the estimator when fit over different
instances LS of the problem. Finally, the noise measures the irreducible part
of the error, which is due to the variability in the data.
The upper left figure illustrates the predictions (in dark red) of a single
decision tree trained over a random dataset LS (the blue dots) of a toy 1d
regression problem. It also illustrates the predictions (in light red) of other
single decision trees trained over other (and different) randomly drawn
instances LS of the problem. Intuitively, the variance term here corresponds to
the width of the beam of predictions (in light red) of the individual
estimators. The larger the variance, the more sensitive are the predictions for
`x` to small changes in the training set. The bias term corresponds to the
difference between the average prediction of the estimator (in cyan) and the
best possible model (in dark blue). On this problem, we can thus observe that
the bias is quite low (both the cyan and the blue curves are close to each
other) while the variance is large (the red beam is rather wide).
The lower left figure plots the pointwise decomposition of the expected mean
squared error of a single decision tree. It confirms that the bias term (in
blue) is low while the variance is large (in green). It also illustrates the
noise part of the error which, as expected, appears to be constant and around
`0.01`.
The right figures correspond to the same plots but using instead a bagging
ensemble of decision trees. In both figures, we can observe that the bias term
is larger than in the previous case. In the upper right figure, the difference
between the average prediction (in cyan) and the best possible model is larger
(e.g., notice the offset around `x=2`). In the lower right figure, the bias
curve is also slightly higher than in the lower left figure. In terms of
variance however, the beam of predictions is narrower, which suggests that the
variance is lower. Indeed, as the lower right figure confirms, the variance
term (in green) is lower than for single decision trees. Overall, the bias-
variance decomposition is therefore no longer the same. The tradeoff is better
for bagging: averaging several decision trees fit on bootstrap copies of the
dataset slightly increases the bias term but allows for a larger reduction of
the variance, which results in a lower overall mean squared error (compare the
red curves in the lower figures). The script output also confirms this
intuition. The total error of the bagging ensemble is lower than the total
error of a single decision tree, and this difference indeed mainly stems from a
reduced variance.
For further details on bias-variance decomposition, see section 7.3 of [1]_.
References
----------
.. [1] T. Hastie, R. Tibshirani and J. Friedman,
"Elements of Statistical Learning", Springer, 2009.
"""
print(__doc__)
# Author: Gilles Louppe <g.louppe@gmail.com>
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn.ensemble import BaggingRegressor
from sklearn.tree import DecisionTreeRegressor
# Settings
n_repeat = 50 # Number of iterations for computing expectations
n_train = 50 # Size of the training set
n_test = 1000 # Size of the test set
noise = 0.1 # Standard deviation of the noise
np.random.seed(0)
# Change this for exploring the bias-variance decomposition of other
# estimators. This should work well for estimators with high variance (e.g.,
# decision trees or KNN), but poorly for estimators with low variance (e.g.,
# linear models).
estimators = [("Tree", DecisionTreeRegressor()),
("Bagging(Tree)", BaggingRegressor(DecisionTreeRegressor()))]
n_estimators = len(estimators)
# Generate data
def f(x):
    """Ground-truth target: the sum of two Gaussian bumps (at x=0 and x=2)."""
    flat = x.ravel()
    bump_at_zero = np.exp(-flat ** 2)
    bump_at_two = 1.5 * np.exp(-(flat - 2) ** 2)
    return bump_at_zero + bump_at_two
def generate(n_samples, noise, n_repeat=1):
    """
    Draw a random regression set: X ~ Uniform(-5, 5), sorted ascending,
    with targets y = f(X) + Gaussian noise.

    When ``n_repeat > 1``, y has one column per independent noise draw
    (same X for every column).
    """
    X = np.sort(np.random.rand(n_samples) * 10 - 5)

    if n_repeat == 1:
        y = f(X) + np.random.normal(0.0, noise, n_samples)
    else:
        y = np.empty((n_samples, n_repeat))
        for rep in range(n_repeat):
            y[:, rep] = f(X) + np.random.normal(0.0, noise, n_samples)

    return X.reshape((n_samples, 1)), y
# Build n_repeat independent training sets so expectations over the random
# learning sample LS can be estimated by averaging over fits.
X_train = []
y_train = []

for i in range(n_repeat):
    X, y = generate(n_samples=n_train, noise=noise)
    X_train.append(X)
    y_train.append(y)

# One fixed test grid with n_repeat independent noisy realizations of y per
# test point (needed below to estimate the noise term).
X_test, y_test = generate(n_samples=n_test, noise=noise, n_repeat=n_repeat)

# Loop over estimators to compare
for n, (name, estimator) in enumerate(estimators):
    # Compute predictions: one column per training-set draw
    y_predict = np.zeros((n_test, n_repeat))

    for i in range(n_repeat):
        estimator.fit(X_train[i], y_train[i])
        y_predict[:, i] = estimator.predict(X_test)

    # Bias^2 + Variance + Noise decomposition of the mean squared error
    y_error = np.zeros(n_test)

    for i in range(n_repeat):
        for j in range(n_repeat):
            y_error += (y_test[:, j] - y_predict[:, i]) ** 2

    y_error /= (n_repeat * n_repeat)  # average over all (noise, fit) pairs

    y_noise = np.var(y_test, axis=1)  # irreducible noise at each x
    y_bias = (f(X_test) - np.mean(y_predict, axis=1)) ** 2  # squared bias
    y_var = np.var(y_predict, axis=1)  # variance of the predictions

    print("{0}: {1:.4f} (error) = {2:.4f} (bias^2) "
          " + {3:.4f} (var) + {4:.4f} (noise)".format(name,
                                                      np.mean(y_error),
                                                      np.mean(y_bias),
                                                      np.mean(y_var),
                                                      np.mean(y_noise)))

    # Plot figures: top row = predictions, bottom row = error decomposition
    plt.subplot(2, n_estimators, n + 1)
    plt.plot(X_test, f(X_test), "b", label="$f(x)$")
    plt.plot(X_train[0], y_train[0], ".b", label="LS ~ $y = f(x)+noise$")

    for i in range(n_repeat):
        if i == 0:
            plt.plot(X_test, y_predict[:, i], "r", label="$\^y(x)$")
        else:
            # light overplot of the remaining fits shows the variance "beam"
            plt.plot(X_test, y_predict[:, i], "r", alpha=0.05)

    plt.plot(X_test, np.mean(y_predict, axis=1), "c",
             label="$\mathbb{E}_{LS} \^y(x)$")

    plt.xlim([-5, 5])
    plt.title(name)

    if n == 0:
        plt.legend(loc="upper left", prop={"size": 11})

    plt.subplot(2, n_estimators, n_estimators + n + 1)
    plt.plot(X_test, y_error, "r", label="$error(x)$")
    plt.plot(X_test, y_bias, "b", label="$bias^2(x)$"),
    plt.plot(X_test, y_var, "g", label="$variance(x)$"),
    plt.plot(X_test, y_noise, "c", label="$noise(x)$")

    plt.xlim([-5, 5])
    plt.ylim([0, 0.1])

    if n == 0:
        plt.legend(loc="upper left", prop={"size": 11})

plt.show()
| bsd-3-clause |
saebrahimi/Emotion-Recognition-EmotiW2015 | common/disptools.py | 2 | 11942 | #!/usr/bin/env python
#-*- coding: utf-8 -*-
import os
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
def pred_visualization(fname, arrays, picks, img_shape, tile_spacing=(0,0),
                       scale_rows_to_unit_interval=True,
                       output_pixel_vals=True):
    """Used for visualization of predictions

    Args:
        fname: filename for saving the image
        arrays: list of arrays containing the frames, first array is assumed to be
            ground truth (all of shape Nxnframesxframesize**2)
        picks: list containing indices of cases that should be used
        img_shape: shape of a frame
        tile_spacing: spacing between the tiles
        scale_rows_to_unit_interval: see tile_raster_images
        output_pixel_vals: see tile_raster_images
    """
    ncases = len(picks)
    narrays = len(arrays)
    if narrays > 1:
        # predictions may cover fewer frames than the ground truth; pad the
        # missing leading frames ("presteps") with ones so all rows align
        horizon = arrays[1].shape[1]
        horizon_gt = arrays[0].shape[1]
        n_presteps = horizon_gt - horizon
        if n_presteps > 0:
            visdata = np.ones((ncases, horizon_gt * narrays, np.prod(img_shape)))
            visdata[:,:horizon_gt] = arrays[0][picks]
            for i in range(1, narrays):
                visdata[:, i*horizon_gt:(i+1)*horizon_gt] = \
                    np.hstack((
                        (np.ones((ncases, n_presteps, np.prod(img_shape)))),
                        arrays[i][picks]))
        else:
            visdata = np.hstack([arrays[i][picks] for i in range(narrays)])
    else:
        horizon = arrays[0].shape[1]
        horizon_gt = horizon
        visdata = np.hstack([arrays[i][picks] for i in range(narrays)])

    # flatten to one row per (case, array) pair, one column per frame
    visdata = visdata.reshape(ncases*narrays*horizon_gt,-1)
    im = tile_raster_images(visdata, img_shape, (ncases*narrays, horizon_gt),
                            tile_spacing,
                            scale_rows_to_unit_interval, output_pixel_vals)

    for i in range(len(picks)*len(arrays)):
        #insert white patches for n_presteps
        for j in range(horizon_gt-horizon):
            if i % len(arrays) != 0:
                im[i*img_shape[0] + i*tile_spacing[0]:(i+1)*img_shape[0] + i*tile_spacing[0],
                   j*img_shape[1] + j*tile_spacing[1]:(j+1)*img_shape[1] + j*tile_spacing[1]] = 255
    #np.insert(im, [i * len(arrays) * img_shape[0] + i * (len(arrays)-1) * tile_spacing[0] for i in range(len(picks))], 0)

    # save as a frameless figure sized to match the pixel dimensions
    h,w = im.shape
    fig = plt.figure(frameon=False)
    #fig.set_size_inches(1,h/np.float(w))
    fig.set_size_inches(w/24.,h/24.)
    ax = plt.Axes(fig, [0.,0.,1.,1.])
    ax.set_axis_off()
    fig.add_axes(ax)
    ax.imshow(im, aspect='normal', interpolation='nearest')
    fig.savefig(fname, dpi=24)
    return im
def scale_to_unit_interval(ndar, eps=1e-8):
    """Return a copy of *ndar* linearly rescaled to span [0, 1].

    ``eps`` guards against division by zero for constant arrays.
    """
    low = ndar.min()
    span = (ndar - low).max() + eps
    return (ndar - low) * (1.0 / span)
def tile_raster_images(X, img_shape, tile_shape, tile_spacing=(0, 0),
                       scale_rows_to_unit_interval=True,
                       output_pixel_vals=True):
    """
    Transform an array with one flattened image per row, into an array in
    which images are reshaped and layed out like tiles on a floor.

    This function is useful for visualizing datasets whose rows are images,
    and also columns of matrices for transforming those rows
    (such as the first layer of a neural net).

    :type X: a 2-D ndarray or a tuple of 4 channels, elements of which can
      be 2-D ndarrays or None;
    :param X: a 2-D array in which every row is a flattened image.

    :type img_shape: tuple; (height, width)
    :param img_shape: the original shape of each image

    :type tile_shape: tuple; (rows, cols)
    :param tile_shape: the number of images to tile (rows, cols)

    :param output_pixel_vals: if output should be pixel values (i.e. int8
      values) or floats

    :param scale_rows_to_unit_interval: if the values need to be scaled before
      being plotted to [0, 1] or not

    :returns: array suitable for viewing as an image.
      (See:`PIL.Image.fromarray`.)
    :rtype: a 2-d array with same dtype as X.
    """
    assert len(img_shape) == 2
    assert len(tile_shape) == 2
    assert len(tile_spacing) == 2

    # Canvas size: each tile occupies (image + spacing), minus the trailing
    # gap after the last row/column.
    out_shape = [(ishp + tsp) * tshp - tsp for ishp, tshp, tsp
                 in zip(img_shape, tile_shape, tile_spacing)]

    if isinstance(X, tuple):
        # RGBA input: one entry per channel, each tiled recursively.
        assert len(X) == 4
        # Create an output numpy ndarray to store the image
        if output_pixel_vals:
            out_array = np.zeros((out_shape[0], out_shape[1], 4), dtype='uint8')
        else:
            # NOTE(review): X is a tuple here, so X.dtype would raise an
            # AttributeError -- presumably this path is unused; confirm.
            out_array = np.zeros((out_shape[0], out_shape[1], 4), dtype=X.dtype)

        # colors default to 0, alpha defaults to 1 (opaque)
        if output_pixel_vals:
            channel_defaults = [0, 0, 0, 255]
        else:
            channel_defaults = [0., 0., 0., 1.]

        # `range` instead of Python-2-only `xrange` (works on both)
        for i in range(4):
            if X[i] is None:
                # if channel is None, fill it with zeros of the correct
                # dtype
                dt = out_array.dtype
                if output_pixel_vals:
                    dt = 'uint8'
                out_array[:, :, i] = np.zeros(out_shape,
                                              dtype=dt) + channel_defaults[i]
            else:
                # use a recurrent call to compute the channel and store it
                # in the output
                out_array[:, :, i] = tile_raster_images(
                    X[i], img_shape, tile_shape, tile_spacing,
                    scale_rows_to_unit_interval, output_pixel_vals)
        return out_array

    else:
        # if we are dealing with only one channel
        H, W = img_shape
        Hs, Ws = tile_spacing

        # generate a matrix to store the output
        dt = X.dtype
        if output_pixel_vals:
            dt = 'uint8'
        out_array = np.zeros(out_shape, dtype=dt)

        for tile_row in range(tile_shape[0]):
            for tile_col in range(tile_shape[1]):
                if tile_row * tile_shape[1] + tile_col < X.shape[0]:
                    if scale_rows_to_unit_interval:
                        # if we should scale values to be between 0 and 1
                        # do this by calling the `scale_to_unit_interval`
                        # function
                        this_img = scale_to_unit_interval(
                            X[tile_row * tile_shape[1] +
                              tile_col].reshape(img_shape))
                    else:
                        this_img = X[tile_row * tile_shape[1] +
                                     tile_col].reshape(img_shape)
                    # add the slice to the corresponding position in the
                    # output array
                    c = 1
                    if output_pixel_vals:
                        c = 255
                    out_array[
                        tile_row * (H + Hs):tile_row * (H + Hs) + H,
                        tile_col * (W + Ws):tile_col * (W + Ws) + W
                    ] \
                        = this_img * c
        return out_array
def dispims_white(invwhitening, M, height, width, border=0, bordercolor=0.0,
                  layout=None, **kwargs):
    """ Display a whole stack (colunmwise) of vectorized matrices. Useful
        eg. to display the weights of a neural network layer.

    Args:
        invwhitening: matrix applied to M first (un-whitening transform)
        M: matrix whose columns are the vectorized images to display
        height, width: shape each column is reshaped to
        border: width in pixels of the gap drawn between images
        bordercolor: gray value used for the gaps
        layout: optional (rows, cols); defaults to a near-square grid
        **kwargs: forwarded to plt.imshow
    """
    numimages = M.shape[1]
    # undo the whitening so images are shown in pixel space
    M = np.dot(invwhitening, M)
    if layout is None:
        n0 = int(np.ceil(np.sqrt(numimages)))
        n1 = int(np.ceil(np.sqrt(numimages)))
    else:
        n0, n1 = layout
    # pre-fill the whole canvas with the border color
    im = bordercolor * np.ones(((height+border)*n0+border,
                                (width+border)*n1+border), dtype='<f8')
    for i in range(n0):
        for j in range(n1):
            if i*n1+j < M.shape[1]:
                # paste image (i, j) plus its right/bottom border strips
                im[i*(height+border)+border:(i+1)*(height+border)+border,
                   j*(width+border)+border:(j+1)*(width+border)+border] =\
                    np.vstack((
                        np.hstack((
                            np.reshape(M[:, i*n1+j],
                                       (height, width)),
                            bordercolor*np.ones((height, border),
                                                dtype=float))),
                        bordercolor*np.ones((border, width+border),
                                            dtype=float)))
    plt.imshow(im, cmap=matplotlib.cm.gray, interpolation='nearest', **kwargs)
def CreateMovie(filename, plotter, numberOfFrames, fps):
    """Render frames via `plotter(i)` and assemble them into an animated GIF.

    Each frame is saved to a numbered temporary PNG, then ImageMagick's
    `convert` stitches them together and the temporaries are removed.
    """
    for i in range(numberOfFrames):
        plotter(i)
        fname = '_tmp%05d.png' % i
        plt.savefig(fname)
        plt.clf()
    #os.system("rm %s.mp4" % filename)
    #os.system("ffmpeg -r "+str(fps)+" -b 1800 -i _tmp%05d.png "+filename+".mp4")
    # NOTE(review): `fps` is unused on this ImageMagick path (delay is
    # hard-coded), and `filename` is interpolated into a shell command --
    # unsafe for names containing spaces/metacharacters; confirm callers
    # only pass simple filenames.
    os.system("convert -delay 20 -loop 0 _tmp*.png " +filename+".gif")
    os.system("rm _tmp*.png")
def dispimsmovie_patchwise(filename, M, inv, patchsize, fps=5, *args,
                           **kwargs):
    """Animate blocks of M (un-whitened via `inv`) as tiled patch images.

    M is assumed to stack `numframes` blocks of `inv.shape[1]` rows; each
    block is un-whitened, tiled into a 10x10 grid of
    (patchsize x patchsize) patches, and rendered as one movie frame.

    Args:
        filename: output name passed to CreateMovie (without extension)
        M: stacked filter matrix, shape (numframes * inv.shape[1], n)
        inv: inverse-whitening matrix applied to each block
        patchsize: side length of each square patch
        fps: frames per second, forwarded to CreateMovie
    """
    # floor division: under Python 3, "/" would yield a float and break
    # the range() inside CreateMovie
    numframes = M.shape[0] // inv.shape[1]
    n = M.shape[0] // numframes

    def plotter(i):
        # un-whiten the i-th block and tile its columns as image patches
        M_ = np.dot(inv, M[i * n:n * (i + 1)])
        image = tile_raster_images(
            M_.T, img_shape=(patchsize, patchsize),
            tile_shape=(10, 10), tile_spacing=(1, 1),
            scale_rows_to_unit_interval=True, output_pixel_vals=True)
        plt.imshow(image, cmap=matplotlib.cm.gray, interpolation='nearest')
        plt.axis('off')

    CreateMovie(filename, plotter, numframes, fps)
def dispimsmovie(filename, W, filters, nframes, fps=5):
    """Animate `filters` (stacked in blocks of W.shape[1] rows) as a GIF,
    un-whitening each block through W via dispims_white."""
    # patches are assumed square: W.shape[0] == patchsize**2
    patchsize = np.uint8(np.sqrt(W.shape[0]))
    def plotter(i):
        dispims_white(W, filters[i*W.shape[1]:(i+1)*W.shape[1], :], patchsize,
                      patchsize, 1, bordercolor=filters.mean(),
                      vmin=filters.min(), vmax=filters.max()*0.8)
        plt.axis('off')
    CreateMovie(filename, plotter, nframes, fps)
def visualizefacenet(fname, imgs, patches_left, patches_right,
                     true_label, predicted_label):
    """Builds a plot of facenet with attention per RNN step and
    classification result

    Args:
        fname: path the final figure is saved to
        imgs: array of full input images, one per sample
        patches_left, patches_right: per-sample, per-step attention patches
            (shape: nsamples x nsteps x H x W)
        true_label, predicted_label: label arrays; titles are colored green
            when they match, red otherwise
    """
    nsamples = imgs.shape[0]
    nsteps = patches_left.shape[1]
    is_correct = true_label == predicted_label
    # grid: full image on the left, then one column per step (two patch
    # rows per sample: left patches above, right patches below)
    w = nsteps + 2 + (nsteps % 2)
    h = nsamples * 2
    plt.clf()
    plt.gray()
    for i in range(nsamples):
        plt.subplot(nsamples, w//2, i*w//2 + 1)
        plt.imshow(imgs[i])
        msg = ('Prediction: ' + predicted_label[i] + ' TrueLabel: ' +
               true_label[i])
        if is_correct[i]:
            plt.title(msg,color='green')
        else:
            plt.title(msg,color='red')
        plt.axis('off')
        for j in range(nsteps):
            plt.subplot(h, w, i*2*w + 2 + 1 + j)
            plt.imshow(patches_left[i, j])
            plt.axis('off')
            plt.subplot(h, w, i*2*w + 2 + 1 + j + w)
            plt.imshow(patches_right[i, j])
            plt.axis('off')
    plt.show()
    plt.savefig(fname)
if __name__ == '__main__':
    # Smoke-test visualizefacenet with the classic "lena" test image,
    # repeated to fake 3 samples with 5 attention steps each.
    # NOTE(review): scipy.misc.lena was removed from modern scipy; this
    # demo presumably requires an old scipy version -- confirm.
    from scipy.misc import lena
    imgs = lena()[None, ...].repeat(3, axis=0)
    patches_left = lena()[None, None, :256].repeat(3, axis=0).repeat(5, axis=1)
    patches_right = lena()[None, None, 256:].repeat(3, axis=0).repeat(5, axis=1)
    true_label = np.array(['angry', 'angry', 'sad'])
    predicted_label = np.array(['sad'] * 3)
    visualizefacenet('lena.pdf', imgs, patches_left, patches_right,
                     true_label, predicted_label)

# vim: set ts=4 sw=4 sts=4 expandtab:
| mit |
JackKelly/neuralnilm_prototype | scripts/e334.py | 2 | 5407 | from __future__ import print_function, division
import matplotlib
import logging
from sys import stdout
matplotlib.use('Agg') # Must be before importing matplotlib.pyplot or pylab!
from neuralnilm import (Net, RealApplianceSource,
BLSTMLayer, DimshuffleLayer,
BidirectionalRecurrentLayer)
from neuralnilm.source import standardise, discretize, fdiff, power_and_fdiff
from neuralnilm.experiment import run_experiment, init_experiment
from neuralnilm.net import TrainingError
from neuralnilm.layers import MixtureDensityLayer
from neuralnilm.objectives import (scaled_cost, mdn_nll,
scaled_cost_ignore_inactive, ignore_inactive,
scaled_cost3)
from neuralnilm.plot import MDNPlotter
from lasagne.nonlinearities import sigmoid, rectify, tanh
from lasagne.objectives import mse
from lasagne.init import Uniform, Normal
from lasagne.layers import (LSTMLayer, DenseLayer, Conv1DLayer,
ReshapeLayer, FeaturePoolLayer, RecurrentLayer)
from lasagne.updates import nesterov_momentum, momentum
from functools import partial
import os
import __main__
from copy import deepcopy
from math import sqrt
import numpy as np
import theano.tensor as T
NAME = os.path.splitext(os.path.split(__main__.__file__)[1])[0]
PATH = "/homes/dk3810/workspace/python/neuralnilm/figures"
SAVE_PLOT_INTERVAL = 1000
GRADIENT_STEPS = 100
source_dict = dict(
filename='/data/dk3810/ukdale.h5',
appliances=[
['fridge freezer', 'fridge', 'freezer'],
# 'hair straighteners',
# 'television',
'dish washer',
['washer dryer', 'washing machine']
],
max_appliance_powers=[300, 2500, 2400],
on_power_thresholds=[5] * 5,
max_input_power=5900,
min_on_durations=[60, 1800, 1800],
min_off_durations=[12, 1800, 600],
window=("2013-06-01", "2014-07-01"),
seq_length=512,
output_one_appliance=False,
boolean_targets=False,
train_buildings=[1],
validation_buildings=[1],
# skip_probability=0.5,
one_target_per_seq=True,
n_seq_per_batch=16,
# subsample_target=8,
include_diff=False,
clip_appliance_power=True,
target_is_prediction=False,
# independently_center_inputs = True,
standardise_input=True,
unit_variance_targets=True,
# input_padding=32 + 16 + 8,
lag=0
# reshape_target_to_2D=True,
# input_stats={'mean': np.array([ 0.05526326], dtype=np.float32),
# 'std': np.array([ 0.12636775], dtype=np.float32)},
# target_stats={
# 'mean': np.array([ 0.04066789, 0.01881946,
# 0.24639061, 0.17608672, 0.10273963],
# dtype=np.float32),
# 'std': np.array([ 0.11449792, 0.07338708,
# 0.26608968, 0.33463112, 0.21250485],
# dtype=np.float32)}
)
N = 50
net_dict = dict(
save_plot_interval=SAVE_PLOT_INTERVAL,
# loss_function=partial(ignore_inactive, loss_func=mdn_nll, seq_length=SEQ_LENGTH),
# loss_function=lambda x, t: mdn_nll(x, t).mean(),
# loss_function=lambda x, t: mse(x, t).mean(),
# loss_function=partial(scaled_cost, loss_func=mse),
# loss_function=ignore_inactive,
loss_function=partial(scaled_cost3, ignore_inactive=False),
updates_func=momentum,
learning_rate=1e-2,
learning_rate_changes_by_iteration={
250: 1e-3
# 500: 1e-3
# 4000: 1e-03,
# 6000: 5e-06,
# 7000: 1e-06
# 2000: 5e-06
# 3000: 1e-05
# 7000: 5e-06,
# 10000: 1e-06,
# 15000: 5e-07,
# 50000: 1e-07
},
do_save_activations=True
# plotter=MDNPlotter
)
"""
||||||||||
||||||||||
||||||||||
||||||||||
||||||||||
||||||||||
12345678901234567890
"""
def exp_a(name):
    """Build experiment 'a': a 3-layer dense network (tanh -> relu -> softplus).

    Uses the module-level `net_dict` template and the module-level `source`.
    NOTE(review): `source` is declared global but its assignment is commented
    out below -- presumably it is created elsewhere before this runs; confirm.
    """
    global source
    # source_dict_copy = deepcopy(source_dict)
    # source = RealApplianceSource(**source_dict_copy)
    net_dict_copy = deepcopy(net_dict)
    net_dict_copy.update(dict(
        experiment_name=name,
        source=source
    ))
    N = 512  # hidden layer width
    net_dict_copy['layers_config'] = [
        {
            'type': DenseLayer,
            'num_units': N,
            # weight init scaled by 1/sqrt(fan-in)
            'W': Normal(std=1/sqrt(N)),
            'nonlinearity': tanh
        },
        {
            'type': DenseLayer,
            'num_units': N,
            'W': Normal(std=1/sqrt(N)),
            'nonlinearity': rectify
        },
        {
            'type': DenseLayer,
            'num_units': source.n_outputs,
            'W': Normal(std=1/sqrt(N)),
            # softplus output is non-negative
            'nonlinearity': T.nnet.softplus
        }
    ]
    net = Net(**net_dict_copy)
    return net
def main():
    """Run each configured experiment in sequence, logging per experiment."""
    # EXPERIMENTS = list('abcdefghijklmnopqrstuvwxyz')
    EXPERIMENTS = list('a')
    for experiment in EXPERIMENTS:
        full_exp_name = NAME + experiment
        func_call = init_experiment(PATH, experiment, full_exp_name)
        logger = logging.getLogger(full_exp_name)
        try:
            # NOTE(review): init_experiment presumably returns a string such
            # as "exp_a('e334a')" which is eval'd here -- eval on anything
            # not fully controlled by this script is unsafe; confirm.
            net = eval(func_call)
            run_experiment(net, epochs=None)
        except KeyboardInterrupt:
            # stop the whole experiment sweep on Ctrl-C
            logger.info("KeyboardInterrupt")
            break
        except Exception as exception:
            logger.exception("Exception")
            raise
        finally:
            logging.shutdown()


if __name__ == "__main__":
    main()
| mit |
jreback/pandas | pandas/io/pytables.py | 1 | 167728 | """
High level interface to PyTables for reading and writing pandas data structures
to disk
"""
from contextlib import suppress
import copy
from datetime import date, tzinfo
import itertools
import os
import re
from textwrap import dedent
from typing import (
TYPE_CHECKING,
Any,
Callable,
Dict,
List,
Optional,
Sequence,
Tuple,
Type,
Union,
)
import warnings
import numpy as np
from pandas._config import config, get_option
from pandas._libs import lib, writers as libwriters
from pandas._libs.tslibs import timezones
from pandas._typing import (
ArrayLike,
DtypeArg,
FrameOrSeries,
FrameOrSeriesUnion,
Label,
Shape,
)
from pandas.compat._optional import import_optional_dependency
from pandas.compat.pickle_compat import patch_pickle
from pandas.errors import PerformanceWarning
from pandas.util._decorators import cache_readonly
from pandas.core.dtypes.common import (
ensure_object,
is_categorical_dtype,
is_complex_dtype,
is_datetime64_dtype,
is_datetime64tz_dtype,
is_extension_array_dtype,
is_list_like,
is_string_dtype,
is_timedelta64_dtype,
needs_i8_conversion,
)
from pandas.core.dtypes.missing import array_equivalent
from pandas import (
DataFrame,
DatetimeIndex,
Index,
Int64Index,
MultiIndex,
PeriodIndex,
Series,
TimedeltaIndex,
concat,
isna,
)
from pandas.core.arrays import Categorical, DatetimeArray, PeriodArray
import pandas.core.common as com
from pandas.core.computation.pytables import PyTablesExpr, maybe_expression
from pandas.core.construction import extract_array
from pandas.core.indexes.api import ensure_index
from pandas.io.common import stringify_path
from pandas.io.formats.printing import adjoin, pprint_thing
if TYPE_CHECKING:
from tables import Col, File, Node
# versioning attribute
_version = "0.15.2"
# encoding
_default_encoding = "UTF-8"
def _ensure_decoded(s):
""" if we have bytes, decode them to unicode """
if isinstance(s, np.bytes_):
s = s.decode("UTF-8")
return s
def _ensure_encoding(encoding):
    """Return *encoding*, falling back to the module default when None."""
    if encoding is None:
        return _default_encoding
    return encoding
def _ensure_str(name):
"""
Ensure that an index / column name is a str (python 3); otherwise they
may be np.string dtype. Non-string dtypes are passed through unchanged.
https://github.com/pandas-dev/pandas/issues/13492
"""
if isinstance(name, str):
name = str(name)
return name
Term = PyTablesExpr
def _ensure_term(where, scope_level: int):
    """
    Ensure that the where is a Term or a list of Term.

    This makes sure that we are capturing the scope of variables that are
    passed create the terms here with a frame_level=2 (we are 2 levels down)
    """
    # only consider list/tuple here as an ndarray is automatically a coordinate
    # list
    level = scope_level + 1
    if isinstance(where, (list, tuple)):
        # wrap each expression-like element in a Term; drop Nones
        where = [
            Term(term, scope_level=level + 1) if maybe_expression(term) else term
            for term in where
            if term is not None
        ]
    elif maybe_expression(where):
        where = Term(where, scope_level=level)
    # normalize empty containers to None so callers can test `where is None`
    return where if where is None or len(where) else None
# Exception / warning types raised by the HDFStore machinery below.
class PossibleDataLossError(Exception):
    pass


class ClosedFileError(Exception):
    pass


class IncompatibilityWarning(Warning):
    pass


# %-format template for IncompatibilityWarning messages
incompatibility_doc = """
where criteria is being ignored as this version [%s] is too old (or
not-defined), read the file in and write it out to a new file to upgrade (with
the copy_to method)
"""


class AttributeConflictWarning(Warning):
    pass


# %-format template for AttributeConflictWarning messages
attribute_conflict_doc = """
the [%s] attribute of the existing index is [%s] which conflicts with the new
[%s], resetting the attribute to None
"""


class DuplicateWarning(Warning):
    pass


duplicate_doc = """
duplicate entries in table, taking most recently appended
"""

# %-format template for the PerformanceWarning about pickled object columns
performance_doc = """
your performance may suffer as PyTables will pickle object types that it cannot
map directly to c-types [inferred_type->%s,key->%s] [items->%s]
"""

# formats: short aliases normalized to the canonical "fixed"/"table" names
_FORMAT_MAP = {"f": "fixed", "fixed": "fixed", "t": "table", "table": "table"}

# axes map
_AXES_MAP = {DataFrame: [0]}

# register our configuration options
dropna_doc = """
: boolean
    drop ALL nan rows when appending to a table
"""
format_doc = """
: format
    default format writing format, if None, then
    put will default to 'fixed' and append will default to 'table'
"""

with config.config_prefix("io.hdf"):
    config.register_option("dropna_table", False, dropna_doc, validator=config.is_bool)
    config.register_option(
        "default_format",
        None,
        format_doc,
        validator=config.is_one_of_factory(["fixed", "table", None]),
    )
# oh the troubles to reduce import time
_table_mod = None
_table_file_open_policy_is_strict = False
def _tables():
    """Lazily import PyTables once, caching the module and its file-open
    policy in module-level globals, and return the module."""
    global _table_mod
    global _table_file_open_policy_is_strict
    if _table_mod is None:
        import tables

        _table_mod = tables

        # set the file open policy
        # return the file open policy; this changes as of pytables 3.1
        # depending on the HDF5 version
        with suppress(AttributeError):
            _table_file_open_policy_is_strict = (
                tables.file._FILE_OPEN_POLICY == "strict"
            )

    return _table_mod
# interface to/from ###
def to_hdf(
    path_or_buf,
    key: str,
    value: FrameOrSeries,
    mode: str = "a",
    complevel: Optional[int] = None,
    complib: Optional[str] = None,
    append: bool = False,
    format: Optional[str] = None,
    index: bool = True,
    min_itemsize: Optional[Union[int, Dict[str, int]]] = None,
    nan_rep=None,
    dropna: Optional[bool] = None,
    data_columns: Optional[Union[bool, List[str]]] = None,
    errors: str = "strict",
    encoding: str = "UTF-8",
):
    """
    Store ``value`` under ``key``; if given a path rather than an open
    HDFStore, open the store and close it when done.
    """
    # Pick the write strategy up front so the open/close handling below is
    # shared between the append and put paths.
    if append:

        def writer(store):
            store.append(
                key,
                value,
                format=format,
                index=index,
                min_itemsize=min_itemsize,
                nan_rep=nan_rep,
                dropna=dropna,
                data_columns=data_columns,
                errors=errors,
                encoding=encoding,
            )

    else:

        def writer(store):
            # put also receives dropna here (unlike older versions)
            store.put(
                key,
                value,
                format=format,
                index=index,
                min_itemsize=min_itemsize,
                nan_rep=nan_rep,
                data_columns=data_columns,
                errors=errors,
                encoding=encoding,
                dropna=dropna,
            )

    path_or_buf = stringify_path(path_or_buf)
    if isinstance(path_or_buf, str):
        # we own the store: open, write, and close it
        with HDFStore(
            path_or_buf, mode=mode, complevel=complevel, complib=complib
        ) as store:
            writer(store)
    else:
        # caller passed an open HDFStore; leave its lifecycle alone
        writer(path_or_buf)
def read_hdf(
    path_or_buf,
    key=None,
    mode: str = "r",
    errors: str = "strict",
    where=None,
    start: Optional[int] = None,
    stop: Optional[int] = None,
    columns=None,
    iterator=False,
    chunksize: Optional[int] = None,
    **kwargs,
):
    """
    Read from the store, close it if we opened it.

    Retrieve pandas object stored in file, optionally based on where
    criteria.

    .. warning::

       Pandas uses PyTables for reading and writing HDF5 files, which allows
       serializing object-dtype data with pickle when using the "fixed" format.
       Loading pickled data received from untrusted sources can be unsafe.

       See: https://docs.python.org/3/library/pickle.html for more.

    Parameters
    ----------
    path_or_buf : str, path object, pandas.HDFStore or file-like object
        Any valid string path is acceptable. The string could be a URL. Valid
        URL schemes include http, ftp, s3, and file. For file URLs, a host is
        expected. A local file could be: ``file://localhost/path/to/table.h5``.

        If you want to pass in a path object, pandas accepts any
        ``os.PathLike``.

        Alternatively, pandas accepts an open :class:`pandas.HDFStore` object.

        By file-like object, we refer to objects with a ``read()`` method,
        such as a file handle (e.g. via builtin ``open`` function)
        or ``StringIO``.
    key : object, optional
        The group identifier in the store. Can be omitted if the HDF file
        contains a single pandas object.
    mode : {'r', 'r+', 'a'}, default 'r'
        Mode to use when opening the file. Ignored if path_or_buf is a
        :class:`pandas.HDFStore`. Default is 'r'.
    errors : str, default 'strict'
        Specifies how encoding and decoding errors are to be handled.
        See the errors argument for :func:`open` for a full list
        of options.
    where : list, optional
        A list of Term (or convertible) objects.
    start : int, optional
        Row number to start selection.
    stop : int, optional
        Row number to stop selection.
    columns : list, optional
        A list of columns names to return.
    iterator : bool, optional
        Return an iterator object.
    chunksize : int, optional
        Number of rows to include in an iteration when using an iterator.
    **kwargs
        Additional keyword arguments passed to HDFStore.

    Returns
    -------
    item : object
        The selected object. Return type depends on the object stored.

    See Also
    --------
    DataFrame.to_hdf : Write a HDF file from a DataFrame.
    HDFStore : Low-level access to HDF files.

    Examples
    --------
    >>> df = pd.DataFrame([[1, 1.0, 'a']], columns=['x', 'y', 'z'])
    >>> df.to_hdf('./store.h5', 'data')
    >>> reread = pd.read_hdf('./store.h5')
    """
    if mode not in ["r", "r+", "a"]:
        raise ValueError(
            f"mode {mode} is not allowed while performing a read. "
            f"Allowed modes are r, r+ and a."
        )
    # grab the scope
    if where is not None:
        where = _ensure_term(where, scope_level=1)

    if isinstance(path_or_buf, HDFStore):
        if not path_or_buf.is_open:
            raise OSError("The HDFStore must be open for reading.")

        store = path_or_buf
        # the caller owns this store; we must not close it below
        auto_close = False
    else:
        path_or_buf = stringify_path(path_or_buf)
        if not isinstance(path_or_buf, str):
            raise NotImplementedError(
                "Support for generic buffers has not been implemented."
            )
        try:
            exists = os.path.exists(path_or_buf)

        # if filepath is too long
        except (TypeError, ValueError):
            exists = False

        if not exists:
            raise FileNotFoundError(f"File {path_or_buf} does not exist")

        store = HDFStore(path_or_buf, mode=mode, errors=errors, **kwargs)
        # can't auto open/close if we are using an iterator
        # so delegate to the iterator
        auto_close = True

    try:
        if key is None:
            # no key given: only succeed if the file contains exactly one
            # pandas dataset (plus any of its metadata groups)
            groups = store.groups()
            if len(groups) == 0:
                raise ValueError(
                    "Dataset(s) incompatible with Pandas data types, "
                    "not table, or no datasets found in HDF5 file."
                )
            candidate_only_group = groups[0]

            # For the HDF file to have only one dataset, all other groups
            # should then be metadata groups for that candidate group. (This
            # assumes that the groups() method enumerates parent groups
            # before their children.)
            for group_to_check in groups[1:]:
                if not _is_metadata_of(group_to_check, candidate_only_group):
                    raise ValueError(
                        "key must be provided when HDF5 "
                        "file contains multiple datasets."
                    )
            key = candidate_only_group._v_pathname
        return store.select(
            key,
            where=where,
            start=start,
            stop=stop,
            columns=columns,
            iterator=iterator,
            chunksize=chunksize,
            auto_close=auto_close,
        )
    except (ValueError, TypeError, KeyError):
        if not isinstance(path_or_buf, HDFStore):
            # if there is an error, close the store if we opened it.
            with suppress(AttributeError):
                store.close()

        raise
def _is_metadata_of(group: "Node", parent_group: "Node") -> bool:
    """Return True when ``group`` is a metadata group belonging to ``parent_group``.

    A metadata group is any node named ``"meta"`` whose parent is
    ``parent_group``, or a descendant of such a node.
    """
    # a metadata group always sits strictly below its owner
    if group._v_depth <= parent_group._v_depth:
        return False

    node = group
    # walk up towards the root, looking for a "meta" child of parent_group
    while node._v_depth > 1:
        if node._v_name == "meta" and node._v_parent == parent_group:
            return True
        node = node._v_parent
    return False
class HDFStore:
"""
Dict-like IO interface for storing pandas objects in PyTables.
Either Fixed or Table format.
.. warning::
Pandas uses PyTables for reading and writing HDF5 files, which allows
serializing object-dtype data with pickle when using the "fixed" format.
Loading pickled data received from untrusted sources can be unsafe.
See: https://docs.python.org/3/library/pickle.html for more.
Parameters
----------
path : str
File path to HDF5 file.
mode : {'a', 'w', 'r', 'r+'}, default 'a'
``'r'``
Read-only; no data can be modified.
``'w'``
Write; a new file is created (an existing file with the same
name would be deleted).
``'a'``
Append; an existing file is opened for reading and writing,
and if the file does not exist it is created.
``'r+'``
It is similar to ``'a'``, but the file must already exist.
complevel : int, 0-9, default None
Specifies a compression level for data.
A value of 0 or None disables compression.
complib : {'zlib', 'lzo', 'bzip2', 'blosc'}, default 'zlib'
Specifies the compression library to be used.
As of v0.20.2 these additional compressors for Blosc are supported
(default if no compressor specified: 'blosc:blosclz'):
{'blosc:blosclz', 'blosc:lz4', 'blosc:lz4hc', 'blosc:snappy',
'blosc:zlib', 'blosc:zstd'}.
Specifying a compression library which is not available issues
a ValueError.
fletcher32 : bool, default False
If applying compression use the fletcher32 checksum.
**kwargs
These parameters will be passed to the PyTables open_file method.
Examples
--------
>>> bar = pd.DataFrame(np.random.randn(10, 4))
>>> store = pd.HDFStore('test.h5')
>>> store['foo'] = bar # write to HDF5
>>> bar = store['foo'] # retrieve
>>> store.close()
**Create or load HDF5 file in-memory**
When passing the `driver` option to the PyTables open_file method through
**kwargs, the HDF5 file is loaded or created in-memory and will only be
written when closed:
>>> bar = pd.DataFrame(np.random.randn(10, 4))
>>> store = pd.HDFStore('test.h5', driver='H5FD_CORE')
>>> store['foo'] = bar
>>> store.close() # only now, data is written to disk
"""
_handle: Optional["File"]
_mode: str
_complevel: int
_fletcher32: bool
    def __init__(
        self,
        path,
        mode: str = "a",
        complevel: Optional[int] = None,
        complib=None,
        fletcher32: bool = False,
        **kwargs,
    ):
        # "format" belongs to put/append, not the constructor
        if "format" in kwargs:
            raise ValueError("format is not a defined argument for HDFStore")

        tables = import_optional_dependency("tables")

        if complib is not None and complib not in tables.filters.all_complibs:
            raise ValueError(
                f"complib only supports {tables.filters.all_complibs} compression."
            )

        # a compression level without an explicit library selects PyTables'
        # default compression library
        if complib is None and complevel is not None:
            complib = tables.filters.default_complib

        self._path = stringify_path(path)
        if mode is None:
            mode = "a"
        self._mode = mode
        self._handle = None
        # complevel of None (or 0) means "no compression"
        self._complevel = complevel if complevel else 0
        self._complib = complib
        self._fletcher32 = fletcher32
        self._filters = None
        # open the file immediately; kwargs go to tables.open_file
        self.open(mode=mode, **kwargs)
    def __fspath__(self):
        # os.PathLike support: the store can be used wherever a path is expected
        return self._path

    @property
    def root(self):
        """ return the root node """
        self._check_if_open()
        assert self._handle is not None  # for mypy
        return self._handle.root

    @property
    def filename(self):
        # path of the underlying HDF5 file
        return self._path

    def __getitem__(self, key: str):
        # dict-like read access: store["key"]
        return self.get(key)

    def __setitem__(self, key: str, value):
        # dict-like write access: store["key"] = value
        self.put(key, value)

    def __delitem__(self, key: str):
        # dict-like deletion: del store["key"]
        return self.remove(key)
    def __getattr__(self, name: str):
        """ allow attribute access to get stores """
        try:
            return self.get(name)
        except (KeyError, ClosedFileError):
            # fall through so the AttributeError is raised outside the
            # handler and does not chain the KeyError/ClosedFileError
            pass
        raise AttributeError(
            f"'{type(self).__name__}' object has no attribute '{name}'"
        )
def __contains__(self, key: str) -> bool:
"""
check for existence of this key
can match the exact pathname or the pathnm w/o the leading '/'
"""
node = self.get_node(key)
if node is not None:
name = node._v_pathname
if name == key or name[1:] == key:
return True
return False
    def __len__(self) -> int:
        # number of top-level pandas groups in the file
        return len(self.groups())

    def __repr__(self) -> str:
        pstr = pprint_thing(self._path)
        return f"{type(self)}\nFile path: {pstr}\n"

    def __enter__(self):
        # context-manager support: ``with HDFStore(path) as store: ...``
        return self

    def __exit__(self, exc_type, exc_value, traceback):
        # always close the handle on exit, even when an exception occurred
        self.close()
def keys(self, include: str = "pandas") -> List[str]:
"""
Return a list of keys corresponding to objects stored in HDFStore.
Parameters
----------
include : str, default 'pandas'
When kind equals 'pandas' return pandas objects.
When kind equals 'native' return native HDF5 Table objects.
.. versionadded:: 1.1.0
Returns
-------
list
List of ABSOLUTE path-names (e.g. have the leading '/').
Raises
------
raises ValueError if kind has an illegal value
"""
if include == "pandas":
return [n._v_pathname for n in self.groups()]
elif include == "native":
assert self._handle is not None # mypy
return [
n._v_pathname for n in self._handle.walk_nodes("/", classname="Table")
]
raise ValueError(
f"`include` should be either 'pandas' or 'native' but is '{include}'"
)
    def __iter__(self):
        # iterating a store yields its keys, like a dict
        return iter(self.keys())

    def items(self):
        """
        iterate on key->group
        """
        for g in self.groups():
            yield g._v_pathname, g

    # legacy alias kept for backwards compatibility
    iteritems = items
    def open(self, mode: str = "a", **kwargs):
        """
        Open the file in the specified mode

        Parameters
        ----------
        mode : {'a', 'w', 'r', 'r+'}, default 'a'
            See HDFStore docstring or tables.open_file for info about modes
        **kwargs
            These parameters will be passed to the PyTables open_file method.
        """
        tables = _tables()

        if self._mode != mode:
            # if we are changing a write mode to read, ok
            if self._mode in ["a", "w"] and mode in ["r", "r+"]:
                pass
            elif mode in ["w"]:
                # this would truncate, raise here
                if self.is_open:
                    raise PossibleDataLossError(
                        f"Re-opening the file [{self._path}] with mode [{self._mode}] "
                        "will delete the current file!"
                    )

            self._mode = mode

        # close and reopen the handle
        if self.is_open:
            self.close()

        # (re)build the compression filters only when compression is enabled
        if self._complevel and self._complevel > 0:
            self._filters = _tables().Filters(
                self._complevel, self._complib, fletcher32=self._fletcher32
            )

        # NOTE(review): after the close() above, is_open should be False, so
        # this guard looks unreachable — confirm whether it can ever trigger
        if _table_file_open_policy_is_strict and self.is_open:
            msg = (
                "Cannot open HDF5 file, which is already opened, "
                "even in read-only mode."
            )
            raise ValueError(msg)

        self._handle = tables.open_file(self._path, self._mode, **kwargs)
def close(self):
"""
Close the PyTables file handle
"""
if self._handle is not None:
self._handle.close()
self._handle = None
@property
def is_open(self) -> bool:
"""
return a boolean indicating whether the file is open
"""
if self._handle is None:
return False
return bool(self._handle.isopen)
def flush(self, fsync: bool = False):
"""
Force all buffered modifications to be written to disk.
Parameters
----------
fsync : bool (default False)
call ``os.fsync()`` on the file handle to force writing to disk.
Notes
-----
Without ``fsync=True``, flushing may not guarantee that the OS writes
to disk. With fsync, the operation will block until the OS claims the
file has been written; however, other caching layers may still
interfere.
"""
if self._handle is not None:
self._handle.flush()
if fsync:
with suppress(OSError):
os.fsync(self._handle.fileno())
def get(self, key: str):
"""
Retrieve pandas object stored in file.
Parameters
----------
key : str
Returns
-------
object
Same type as object stored in file.
"""
with patch_pickle():
# GH#31167 Without this patch, pickle doesn't know how to unpickle
# old DateOffset objects now that they are cdef classes.
group = self.get_node(key)
if group is None:
raise KeyError(f"No object named {key} in the file")
return self._read_group(group)
    def select(
        self,
        key: str,
        where=None,
        start=None,
        stop=None,
        columns=None,
        iterator=False,
        chunksize=None,
        auto_close: bool = False,
    ):
        """
        Retrieve pandas object stored in file, optionally based on where criteria.

        .. warning::

           Pandas uses PyTables for reading and writing HDF5 files, which allows
           serializing object-dtype data with pickle when using the "fixed" format.
           Loading pickled data received from untrusted sources can be unsafe.

           See: https://docs.python.org/3/library/pickle.html for more.

        Parameters
        ----------
        key : str
            Object being retrieved from file.
        where : list or None
            List of Term (or convertible) objects, optional.
        start : int or None
            Row number to start selection.
        stop : int, default None
            Row number to stop selection.
        columns : list or None
            A list of columns that if not None, will limit the return columns.
        iterator : bool or False
            Returns an iterator.
        chunksize : int or None
            Number or rows to include in iteration, return an iterator.
        auto_close : bool or False
            Should automatically close the store when finished.

        Returns
        -------
        object
            Retrieved object from file.
        """
        group = self.get_node(key)
        if group is None:
            raise KeyError(f"No object named {key} in the file")

        # create the storer and axes
        where = _ensure_term(where, scope_level=1)
        s = self._create_storer(group)
        s.infer_axes()

        # function to call on iteration
        def func(_start, _stop, _where):
            return s.read(start=_start, stop=_stop, where=_where, columns=columns)

        # create the iterator
        # TableIterator handles chunked iteration and, when auto_close is
        # set, closing this store once the result is consumed
        it = TableIterator(
            self,
            s,
            func,
            where=where,
            nrows=s.nrows,
            start=start,
            stop=stop,
            iterator=iterator,
            chunksize=chunksize,
            auto_close=auto_close,
        )

        return it.get_result()
def select_as_coordinates(
self,
key: str,
where=None,
start: Optional[int] = None,
stop: Optional[int] = None,
):
"""
return the selection as an Index
.. warning::
Pandas uses PyTables for reading and writing HDF5 files, which allows
serializing object-dtype data with pickle when using the "fixed" format.
Loading pickled data received from untrusted sources can be unsafe.
See: https://docs.python.org/3/library/pickle.html for more.
Parameters
----------
key : str
where : list of Term (or convertible) objects, optional
start : integer (defaults to None), row number to start selection
stop : integer (defaults to None), row number to stop selection
"""
where = _ensure_term(where, scope_level=1)
tbl = self.get_storer(key)
if not isinstance(tbl, Table):
raise TypeError("can only read_coordinates with a table")
return tbl.read_coordinates(where=where, start=start, stop=stop)
def select_column(
self,
key: str,
column: str,
start: Optional[int] = None,
stop: Optional[int] = None,
):
"""
return a single column from the table. This is generally only useful to
select an indexable
.. warning::
Pandas uses PyTables for reading and writing HDF5 files, which allows
serializing object-dtype data with pickle when using the "fixed" format.
Loading pickled data received from untrusted sources can be unsafe.
See: https://docs.python.org/3/library/pickle.html for more.
Parameters
----------
key : str
column : str
The column of interest.
start : int or None, default None
stop : int or None, default None
Raises
------
raises KeyError if the column is not found (or key is not a valid
store)
raises ValueError if the column can not be extracted individually (it
is part of a data block)
"""
tbl = self.get_storer(key)
if not isinstance(tbl, Table):
raise TypeError("can only read_column with a table")
return tbl.read_column(column=column, start=start, stop=stop)
    def select_as_multiple(
        self,
        keys,
        where=None,
        selector=None,
        columns=None,
        start=None,
        stop=None,
        iterator=False,
        chunksize=None,
        auto_close: bool = False,
    ):
        """
        Retrieve pandas objects from multiple tables.

        .. warning::

           Pandas uses PyTables for reading and writing HDF5 files, which allows
           serializing object-dtype data with pickle when using the "fixed" format.
           Loading pickled data received from untrusted sources can be unsafe.

           See: https://docs.python.org/3/library/pickle.html for more.

        Parameters
        ----------
        keys : a list of the tables
        selector : the table to apply the where criteria (defaults to keys[0]
            if not supplied)
        columns : the columns I want back
        start : integer (defaults to None), row number to start selection
        stop : integer (defaults to None), row number to stop selection
        iterator : boolean, return an iterator, default False
        chunksize : nrows to include in iteration, return an iterator
        auto_close : bool, default False
            Should automatically close the store when finished.

        Raises
        ------
        raises KeyError if keys or selector is not found or keys is empty
        raises TypeError if keys is not a list or tuple
        raises ValueError if the tables are not ALL THE SAME DIMENSIONS
        """
        # default to single select
        where = _ensure_term(where, scope_level=1)
        if isinstance(keys, (list, tuple)) and len(keys) == 1:
            keys = keys[0]
        if isinstance(keys, str):
            # a single key degenerates to a plain select
            return self.select(
                key=keys,
                where=where,
                columns=columns,
                start=start,
                stop=stop,
                iterator=iterator,
                chunksize=chunksize,
                auto_close=auto_close,
            )

        if not isinstance(keys, (list, tuple)):
            raise TypeError("keys must be a list/tuple")

        if not len(keys):
            raise ValueError("keys must have a non-zero length")

        if selector is None:
            selector = keys[0]

        # collect the tables
        tbls = [self.get_storer(k) for k in keys]
        s = self.get_storer(selector)

        # validate rows
        # every table (including the selector) must exist, be a table, and
        # have the same number of rows
        nrows = None
        for t, k in itertools.chain([(s, selector)], zip(tbls, keys)):
            if t is None:
                raise KeyError(f"Invalid table [{k}]")
            if not t.is_table:
                raise TypeError(
                    f"object [{t.pathname}] is not a table, and cannot be used in all "
                    "select as multiple"
                )

            if nrows is None:
                nrows = t.nrows
            elif t.nrows != nrows:
                raise ValueError("all tables must have exactly the same nrows!")

        # The isinstance checks here are redundant with the check above,
        # but necessary for mypy; see GH#29757
        _tbls = [x for x in tbls if isinstance(x, Table)]

        # axis is the concentration axes
        axis = list({t.non_index_axes[0][0] for t in _tbls})[0]

        def func(_start, _stop, _where):
            # retrieve the objs, _where is always passed as a set of
            # coordinates here
            objs = [
                t.read(where=_where, columns=columns, start=_start, stop=_stop)
                for t in tbls
            ]

            # concat and return
            return concat(objs, axis=axis, verify_integrity=False)._consolidate()

        # create the iterator
        it = TableIterator(
            self,
            s,
            func,
            where=where,
            nrows=nrows,
            start=start,
            stop=stop,
            iterator=iterator,
            chunksize=chunksize,
            auto_close=auto_close,
        )

        return it.get_result(coordinates=True)
    def put(
        self,
        key: str,
        value: FrameOrSeries,
        format=None,
        index=True,
        append=False,
        complib=None,
        complevel: Optional[int] = None,
        min_itemsize: Optional[Union[int, Dict[str, int]]] = None,
        nan_rep=None,
        data_columns: Optional[List[str]] = None,
        encoding=None,
        errors: str = "strict",
        track_times: bool = True,
        dropna: bool = False,
    ):
        """
        Store object in HDFStore.

        Parameters
        ----------
        key : str
        value : {Series, DataFrame}
        format : 'fixed(f)|table(t)', default is 'fixed'
            Format to use when storing object in HDFStore. Value can be one of:

            ``'fixed'``
                Fixed format.  Fast writing/reading. Not-appendable, nor searchable.
            ``'table'``
                Table format.  Write as a PyTables Table structure which may perform
                worse but allow more flexible operations like searching / selecting
                subsets of the data.
        append : bool, default False
            This will force Table format, append the input data to the existing.
        data_columns : list, default None
            List of columns to create as data columns, or True to use all columns.
            See `here
            <https://pandas.pydata.org/pandas-docs/stable/user_guide/io.html#query-via-data-columns>`__.
        encoding : str, default None
            Provide an encoding for strings.
        track_times : bool, default True
            Parameter is propagated to 'create_table' method of 'PyTables'.
            If set to False it enables to have the same h5 files (same hashes)
            independent on creation time.

            .. versionadded:: 1.1.0
        dropna : bool, default False
            Forwarded to the writer along with the other options.
        """
        # a missing format falls back to the io.hdf.default_format option,
        # then to 'fixed'
        if format is None:
            format = get_option("io.hdf.default_format") or "fixed"
        format = self._validate_format(format)
        self._write_to_group(
            key,
            value,
            format=format,
            index=index,
            append=append,
            complib=complib,
            complevel=complevel,
            min_itemsize=min_itemsize,
            nan_rep=nan_rep,
            data_columns=data_columns,
            encoding=encoding,
            errors=errors,
            track_times=track_times,
            dropna=dropna,
        )
    def remove(self, key: str, where=None, start=None, stop=None):
        """
        Remove pandas object partially by specifying the where condition

        Parameters
        ----------
        key : string
            Node to remove or delete rows from
        where : list of Term (or convertible) objects, optional
        start : integer (defaults to None), row number to start selection
        stop : integer (defaults to None), row number to stop selection

        Returns
        -------
        number of rows removed (or None if not a Table)

        Raises
        ------
        raises KeyError if key is not a valid store
        """
        where = _ensure_term(where, scope_level=1)
        try:
            s = self.get_storer(key)
        except KeyError:
            # the key is not a valid store, re-raising KeyError
            raise
        except AssertionError:
            # surface any assertion errors for e.g. debugging
            raise
        except Exception as err:
            # In tests we get here with ClosedFileError, TypeError, and
            # _table_mod.NoSuchNodeError. TODO: Catch only these?

            if where is not None:
                raise ValueError(
                    "trying to remove a node with a non-None where clause!"
                ) from err

            # we are actually trying to remove a node (with children)
            # fall back to removing the raw node when no storer can be made
            node = self.get_node(key)
            if node is not None:
                node._f_remove(recursive=True)
                return None

        # remove the node
        if com.all_none(where, start, stop):
            s.group._f_remove(recursive=True)

        # delete from the table
        else:
            if not s.is_table:
                raise ValueError(
                    "can only remove with where on objects written as tables"
                )
            return s.delete(where=where, start=start, stop=stop)
    def append(
        self,
        key: str,
        value: FrameOrSeries,
        format=None,
        axes=None,
        index=True,
        append=True,
        complib=None,
        complevel: Optional[int] = None,
        columns=None,
        min_itemsize: Optional[Union[int, Dict[str, int]]] = None,
        nan_rep=None,
        chunksize=None,
        expectedrows=None,
        dropna: Optional[bool] = None,
        data_columns: Optional[List[str]] = None,
        encoding=None,
        errors: str = "strict",
    ):
        """
        Append to Table in file. Node must already exist and be Table
        format.

        Parameters
        ----------
        key : str
        value : {Series, DataFrame}
        format : 'table' is the default
            Format to use when storing object in HDFStore.  Value can be one of:

            ``'table'``
                Table format. Write as a PyTables Table structure which may perform
                worse but allow more flexible operations like searching / selecting
                subsets of the data.
        append : bool, default True
            Append the input data to the existing.
        data_columns : list of columns, or True, default None
            List of columns to create as indexed data columns for on-disk
            queries, or True to use all columns. By default only the axes
            of the object are indexed. See `here
            <https://pandas.pydata.org/pandas-docs/stable/user_guide/io.html#query-via-data-columns>`__.
        min_itemsize : dict of columns that specify minimum str sizes
        nan_rep : str to use as str nan representation
        chunksize : size to chunk the writing
        expectedrows : expected TOTAL row size of this table
        encoding : default None, provide an encoding for str
        dropna : bool, default False
            Do not write an ALL nan row to the store settable
            by the option 'io.hdf.dropna_table'.

        Notes
        -----
        Does *not* check if data being appended overlaps with existing
        data in the table, so be careful
        """
        # 'columns' is rejected to steer users towards data_columns
        if columns is not None:
            raise TypeError(
                "columns is not a supported keyword in append, try data_columns"
            )

        # unset dropna falls back to the io.hdf.dropna_table option
        if dropna is None:
            dropna = get_option("io.hdf.dropna_table")
        # a missing format falls back to io.hdf.default_format, then 'table'
        if format is None:
            format = get_option("io.hdf.default_format") or "table"
        format = self._validate_format(format)
        self._write_to_group(
            key,
            value,
            format=format,
            axes=axes,
            index=index,
            append=append,
            complib=complib,
            complevel=complevel,
            min_itemsize=min_itemsize,
            nan_rep=nan_rep,
            chunksize=chunksize,
            expectedrows=expectedrows,
            dropna=dropna,
            data_columns=data_columns,
            encoding=encoding,
            errors=errors,
        )
    def append_to_multiple(
        self,
        d: Dict,
        value,
        selector,
        data_columns=None,
        axes=None,
        dropna=False,
        **kwargs,
    ):
        """
        Append to multiple tables

        Parameters
        ----------
        d : a dict of table_name to table_columns, None is acceptable as the
            values of one node (this will get all the remaining columns)
        value : a pandas object
        selector : a string that designates the indexable table; all of its
            columns will be designed as data_columns, unless data_columns is
            passed, in which case these are used
        data_columns : list of columns to create as data columns, or True to
            use all columns
        dropna : if evaluates to True, drop rows from all tables if any single
            row in each table has all NaN. Default False.

        Notes
        -----
        axes parameter is currently not accepted
        """
        if axes is not None:
            raise TypeError(
                "axes is currently not accepted as a parameter to append_to_multiple; "
                "you can create the tables independently instead"
            )

        if not isinstance(d, dict):
            raise ValueError(
                "append_to_multiple must have a dictionary specified as the "
                "way to split the value"
            )

        if selector not in d:
            raise ValueError(
                "append_to_multiple requires a selector that is in passed dict"
            )

        # figure out the splitting axis (the non_index_axis)
        axis = list(set(range(value.ndim)) - set(_AXES_MAP[type(value)]))[0]

        # figure out how to split the value
        # at most one entry may map to None; it receives all columns not
        # claimed by the other entries
        remain_key = None
        remain_values: List = []
        for k, v in d.items():
            if v is None:
                if remain_key is not None:
                    raise ValueError(
                        "append_to_multiple can only have one value in d that is None"
                    )
                remain_key = k
            else:
                remain_values.extend(v)
        if remain_key is not None:
            ordered = value.axes[axis]
            ordd = ordered.difference(Index(remain_values))
            ordd = sorted(ordered.get_indexer(ordd))
            d[remain_key] = ordered.take(ordd)

        # data_columns
        if data_columns is None:
            data_columns = d[selector]

        # ensure rows are synchronized across the tables
        # keep only rows that survive an all-NaN drop in every sub-table
        if dropna:
            idxs = (value[cols].dropna(how="all").index for cols in d.values())
            valid_index = next(idxs)
            for index in idxs:
                valid_index = valid_index.intersection(index)
            value = value.loc[valid_index]

        min_itemsize = kwargs.pop("min_itemsize", None)

        # append
        for k, v in d.items():
            dc = data_columns if k == selector else None

            # compute the val
            val = value.reindex(v, axis=axis)

            # restrict min_itemsize to the columns present in this sub-table
            filtered = (
                {key: value for (key, value) in min_itemsize.items() if key in v}
                if min_itemsize is not None
                else None
            )
            self.append(k, val, data_columns=dc, min_itemsize=filtered, **kwargs)
def create_table_index(
self,
key: str,
columns=None,
optlevel: Optional[int] = None,
kind: Optional[str] = None,
):
"""
Create a pytables index on the table.
Parameters
----------
key : str
columns : None, bool, or listlike[str]
Indicate which columns to create an index on.
* False : Do not create any indexes.
* True : Create indexes on all columns.
* None : Create indexes on all columns.
* listlike : Create indexes on the given columns.
optlevel : int or None, default None
Optimization level, if None, pytables defaults to 6.
kind : str or None, default None
Kind of index, if None, pytables defaults to "medium".
Raises
------
TypeError: raises if the node is not a table
"""
# version requirements
_tables()
s = self.get_storer(key)
if s is None:
return
if not isinstance(s, Table):
raise TypeError("cannot create table index on a Fixed format store")
s.create_index(columns=columns, optlevel=optlevel, kind=kind)
    def groups(self):
        """
        Return a list of all the top-level nodes.

        Each node returned is not a pandas storage object.

        Returns
        -------
        list
            List of objects.
        """
        _tables()
        self._check_if_open()
        assert self._handle is not None  # for mypy
        assert _table_mod is not None  # for mypy

        # keep non-link groups that either carry a pandas_type attribute,
        # hold a "table" child, or are themselves a PyTables Table not
        # named "table" (a native table)
        return [
            g
            for g in self._handle.walk_groups()
            if (
                not isinstance(g, _table_mod.link.Link)
                and (
                    getattr(g._v_attrs, "pandas_type", None)
                    or getattr(g, "table", None)
                    or (isinstance(g, _table_mod.table.Table) and g._v_name != "table")
                )
            )
        ]
    def walk(self, where="/"):
        """
        Walk the pytables group hierarchy for pandas objects.

        This generator will yield the group path, subgroups and pandas object
        names for each group.

        Any non-pandas PyTables objects that are not a group will be ignored.

        The `where` group itself is listed first (preorder), then each of its
        child groups (following an alphanumerical order) is also traversed,
        following the same procedure.

        .. versionadded:: 0.24.0

        Parameters
        ----------
        where : str, default "/"
            Group where to start walking.

        Yields
        ------
        path : str
            Full path to a group (without trailing '/').
        groups : list
            Names (strings) of the groups contained in `path`.
        leaves : list
            Names (strings) of the pandas objects contained in `path`.
        """
        _tables()
        self._check_if_open()
        assert self._handle is not None  # for mypy
        assert _table_mod is not None  # for mypy

        for g in self._handle.walk_groups(where):
            # groups that carry a pandas_type are pandas objects themselves,
            # not containers to descend into
            if getattr(g._v_attrs, "pandas_type", None) is not None:
                continue

            groups = []
            leaves = []
            for child in g._v_children.values():
                pandas_type = getattr(child._v_attrs, "pandas_type", None)
                if pandas_type is None:
                    if isinstance(child, _table_mod.group.Group):
                        groups.append(child._v_name)
                else:
                    leaves.append(child._v_name)

            yield (g._v_pathname.rstrip("/"), groups, leaves)
def get_node(self, key: str) -> Optional["Node"]:
""" return the node with the key or None if it does not exist """
self._check_if_open()
if not key.startswith("/"):
key = "/" + key
assert self._handle is not None
assert _table_mod is not None # for mypy
try:
node = self._handle.get_node(self.root, key)
except _table_mod.exceptions.NoSuchNodeError:
return None
assert isinstance(node, _table_mod.Node), type(node)
return node
def get_storer(self, key: str) -> Union["GenericFixed", "Table"]:
""" return the storer object for a key, raise if not in the file """
group = self.get_node(key)
if group is None:
raise KeyError(f"No object named {key} in the file")
s = self._create_storer(group)
s.infer_axes()
return s
    def copy(
        self,
        file,
        mode="w",
        propindexes: bool = True,
        keys=None,
        complib=None,
        complevel: Optional[int] = None,
        fletcher32: bool = False,
        overwrite=True,
    ):
        """
        Copy the existing store to a new file, updating in place.

        Parameters
        ----------
        propindexes : bool, default True
            Restore indexes in copied file.
        keys : list, optional
            List of keys to include in the copy (defaults to all).
        overwrite : bool, default True
            Whether to overwrite (remove and replace) existing nodes in the new store.
        mode, complib, complevel, fletcher32 same as in HDFStore.__init__

        Returns
        -------
        open file handle of the new store
        """
        new_store = HDFStore(
            file, mode=mode, complib=complib, complevel=complevel, fletcher32=fletcher32
        )
        if keys is None:
            keys = list(self.keys())
        if not isinstance(keys, (tuple, list)):
            keys = [keys]
        for k in keys:
            s = self.get_storer(k)
            if s is not None:
                # NOTE(review): with overwrite=False an existing node is NOT
                # removed, yet the copy below still proceeds — confirm the
                # intended semantics for pre-existing keys
                if k in new_store:
                    if overwrite:
                        new_store.remove(k)

                data = self.select(k)
                if isinstance(s, Table):
                    # for tables, optionally re-create the indexes on the copy
                    index: Union[bool, List[str]] = False
                    if propindexes:
                        index = [a.name for a in s.axes if a.is_indexed]
                    new_store.append(
                        k,
                        data,
                        index=index,
                        data_columns=getattr(s, "data_columns", None),
                        encoding=s.encoding,
                    )
                else:
                    new_store.put(k, data, encoding=s.encoding)

        return new_store
def info(self) -> str:
"""
Print detailed information on the store.
Returns
-------
str
"""
path = pprint_thing(self._path)
output = f"{type(self)}\nFile path: {path}\n"
if self.is_open:
lkeys = sorted(self.keys())
if len(lkeys):
keys = []
values = []
for k in lkeys:
try:
s = self.get_storer(k)
if s is not None:
keys.append(pprint_thing(s.pathname or k))
values.append(pprint_thing(s or "invalid_HDFStore node"))
except AssertionError:
# surface any assertion errors for e.g. debugging
raise
except Exception as detail:
keys.append(k)
dstr = pprint_thing(detail)
values.append(f"[invalid_HDFStore node: {dstr}]")
output += adjoin(12, keys, values)
else:
output += "Empty"
else:
output += "File is CLOSED"
return output
# ------------------------------------------------------------------------
# private methods
def _check_if_open(self):
if not self.is_open:
raise ClosedFileError(f"{self._path} file is not open!")
def _validate_format(self, format: str) -> str:
""" validate / deprecate formats """
# validate
try:
format = _FORMAT_MAP[format.lower()]
except KeyError as err:
raise TypeError(f"invalid HDFStore format specified [{format}]") from err
return format
    def _create_storer(
        self,
        group,
        format=None,
        value: Optional[FrameOrSeries] = None,
        encoding: str = "UTF-8",
        errors: str = "strict",
    ) -> Union["GenericFixed", "Table"]:
        """
        Return a suitable storer instance for ``group``.

        The storer class is inferred from the group's stored ``pandas_type``
        / ``table_type`` attrs when reading, or from ``value`` and ``format``
        when writing.
        """
        cls: Union[Type["GenericFixed"], Type["Table"]]
        if value is not None and not isinstance(value, (Series, DataFrame)):
            raise TypeError("value must be None, Series, or DataFrame")
        def error(t):
            # return instead of raising so mypy can tell where we are raising
            return TypeError(
                f"cannot properly create the storer for: [{t}] [group->"
                f"{group},value->{type(value)},format->{format}"
            )
        # attrs written by a previous pandas save, if any
        pt = _ensure_decoded(getattr(group._v_attrs, "pandas_type", None))
        tt = _ensure_decoded(getattr(group._v_attrs, "table_type", None))
        # infer the pt from the passed value
        if pt is None:
            if value is None:
                _tables()
                assert _table_mod is not None  # for mypy
                # a raw (non-pandas) pytables Table is treated generically
                if getattr(group, "table", None) or isinstance(
                    group, _table_mod.table.Table
                ):
                    pt = "frame_table"
                    tt = "generic_table"
                else:
                    raise TypeError(
                        "cannot create a storer if the object is not existing "
                        "nor a value are passed"
                    )
            else:
                if isinstance(value, Series):
                    pt = "series"
                else:
                    pt = "frame"
                # we are actually a table
                if format == "table":
                    pt += "_table"
        # a storer node
        if "table" not in pt:
            _STORER_MAP = {"series": SeriesFixed, "frame": FrameFixed}
            try:
                cls = _STORER_MAP[pt]
            except KeyError as err:
                raise error("_STORER_MAP") from err
            return cls(self, group, encoding=encoding, errors=errors)
        # existing node (and must be a table)
        if tt is None:
            # if we are a writer, determine the tt
            if value is not None:
                # the table subtype depends on the number of index levels
                if pt == "series_table":
                    index = getattr(value, "index", None)
                    if index is not None:
                        if index.nlevels == 1:
                            tt = "appendable_series"
                        elif index.nlevels > 1:
                            tt = "appendable_multiseries"
                elif pt == "frame_table":
                    index = getattr(value, "index", None)
                    if index is not None:
                        if index.nlevels == 1:
                            tt = "appendable_frame"
                        elif index.nlevels > 1:
                            tt = "appendable_multiframe"
        _TABLE_MAP = {
            "generic_table": GenericTable,
            "appendable_series": AppendableSeriesTable,
            "appendable_multiseries": AppendableMultiSeriesTable,
            "appendable_frame": AppendableFrameTable,
            "appendable_multiframe": AppendableMultiFrameTable,
            "worm": WORMTable,
        }
        try:
            cls = _TABLE_MAP[tt]
        except KeyError as err:
            raise error("_TABLE_MAP") from err
        return cls(self, group, encoding=encoding, errors=errors)
    def _write_to_group(
        self,
        key: str,
        value: FrameOrSeries,
        format,
        axes=None,
        index=True,
        append=False,
        complib=None,
        complevel: Optional[int] = None,
        fletcher32=None,
        min_itemsize: Optional[Union[int, Dict[str, int]]] = None,
        chunksize=None,
        expectedrows=None,
        dropna=False,
        nan_rep=None,
        data_columns=None,
        encoding=None,
        errors: str = "strict",
        track_times: bool = True,
    ):
        """
        Resolve the target group for ``key`` and write ``value`` into it,
        dispatching to the appropriate storer's ``write``.
        """
        # we don't want to store a table node at all if our object is 0-len
        # as there are not dtypes
        if getattr(value, "empty", None) and (format == "table" or append):
            return
        # removes any pre-existing node unless we are appending
        group = self._identify_group(key, append)
        s = self._create_storer(group, format, value, encoding=encoding, errors=errors)
        if append:
            # raise if we are trying to append to a Fixed format,
            # or a table that exists (and we are putting)
            if not s.is_table or (s.is_table and format == "fixed" and s.is_exists):
                raise ValueError("Can only append to Tables")
            if not s.is_exists:
                s.set_object_info()
        else:
            s.set_object_info()
        if not s.is_table and complib:
            raise ValueError("Compression not supported on Fixed format stores")
        # write the object
        s.write(
            obj=value,
            axes=axes,
            append=append,
            complib=complib,
            complevel=complevel,
            fletcher32=fletcher32,
            min_itemsize=min_itemsize,
            chunksize=chunksize,
            expectedrows=expectedrows,
            dropna=dropna,
            nan_rep=nan_rep,
            data_columns=data_columns,
            track_times=track_times,
        )
        # for table formats, optionally create the on-disk column indexes
        if isinstance(s, Table) and index:
            s.create_index(columns=index)
def _read_group(self, group: "Node"):
s = self._create_storer(group)
s.infer_axes()
return s.read()
def _identify_group(self, key: str, append: bool) -> "Node":
"""Identify HDF5 group based on key, delete/create group if needed."""
group = self.get_node(key)
# we make this assertion for mypy; the get_node call will already
# have raised if this is incorrect
assert self._handle is not None
# remove the node if we are not appending
if group is not None and not append:
self._handle.remove_node(group, recursive=True)
group = None
if group is None:
group = self._create_nodes_and_group(key)
return group
def _create_nodes_and_group(self, key: str) -> "Node":
"""Create nodes from key and return group name."""
# assertion for mypy
assert self._handle is not None
paths = key.split("/")
# recursively create the groups
path = "/"
for p in paths:
if not len(p):
continue
new_path = path
if not path.endswith("/"):
new_path += "/"
new_path += p
group = self.get_node(new_path)
if group is None:
group = self._handle.create_group(path, p)
path = new_path
return group
class TableIterator:
    """
    Define the iteration interface on a table
    Parameters
    ----------
    store : HDFStore
    s : the referred storer
    func : the function to execute the query
    where : the where of the query
    nrows : the rows to iterate on
    start : the passed start value (default is None)
    stop : the passed stop value (default is None)
    iterator : bool, default False
        Whether to use the default iterator.
    chunksize : the passed chunking value (default is 100000)
    auto_close : bool, default False
        Whether to automatically close the store at the end of iteration.
    """
    # None means "not iterating in chunks"; see get_result
    chunksize: Optional[int]
    store: HDFStore
    s: Union["GenericFixed", "Table"]
    def __init__(
        self,
        store: HDFStore,
        s: Union["GenericFixed", "Table"],
        func,
        where,
        nrows,
        start=None,
        stop=None,
        iterator: bool = False,
        chunksize: Optional[int] = None,
        auto_close: bool = False,
    ):
        self.store = store
        self.s = s
        self.func = func
        self.where = where
        # set start/stop if they are not set if we are a table
        if self.s.is_table:
            if nrows is None:
                nrows = 0
            if start is None:
                start = 0
            if stop is None:
                stop = nrows
            # clamp stop so slicing never runs past the table
            stop = min(nrows, stop)
        self.nrows = nrows
        self.start = start
        self.stop = stop
        # filled in by get_result when chunked iteration is requested
        self.coordinates = None
        if iterator or chunksize is not None:
            if chunksize is None:
                chunksize = 100000
            self.chunksize = int(chunksize)
        else:
            self.chunksize = None
        self.auto_close = auto_close
    def __iter__(self):
        # iterate
        current = self.start
        if self.coordinates is None:
            raise ValueError("Cannot iterate until get_result is called.")
        while current < self.stop:
            # read one chunk of rows by coordinate slice
            stop = min(current + self.chunksize, self.stop)
            value = self.func(None, None, self.coordinates[current:stop])
            current = stop
            # skip empty chunks rather than yielding them
            if value is None or not len(value):
                continue
            yield value
        self.close()
    def close(self):
        # only closes the store when the iterator owns it (auto_close)
        if self.auto_close:
            self.store.close()
    def get_result(self, coordinates: bool = False):
        # return the actual iterator
        if self.chunksize is not None:
            if not isinstance(self.s, Table):
                raise TypeError("can only use an iterator or chunksize on a table")
            # resolve the selection once; __iter__ then slices these coords
            self.coordinates = self.s.read_coordinates(where=self.where)
            return self
        # if specified read via coordinates (necessary for multiple selections
        if coordinates:
            if not isinstance(self.s, Table):
                raise TypeError("can only read_coordinates on a table")
            where = self.s.read_coordinates(
                where=self.where, start=self.start, stop=self.stop
            )
        else:
            where = self.where
        # directly return the result
        results = self.func(self.start, self.stop, where)
        self.close()
        return results
class IndexCol:
    """
    an index column description class
    Parameters
    ----------
    axis : axis which I reference
    values : the ndarray like converted values
    kind : a string description of this type
    typ : the pytables type
    pos : the position in the pytables
    """
    is_an_indexable = True
    is_data_indexable = True
    # per-column metadata keys mirrored into the storer's ``info`` dict
    _info_fields = ["freq", "tz", "index_name"]
    name: str
    cname: str
    def __init__(
        self,
        name: str,
        values=None,
        kind=None,
        typ=None,
        cname: Optional[str] = None,
        axis=None,
        pos=None,
        freq=None,
        tz=None,
        index_name=None,
        ordered=None,
        table=None,
        meta=None,
        metadata=None,
    ):
        if not isinstance(name, str):
            raise ValueError("`name` must be a str.")
        self.values = values
        self.kind = kind
        self.typ = typ
        self.name = name
        # the column name inside the pytables table defaults to the pandas name
        self.cname = cname or name
        self.axis = axis
        self.pos = pos
        self.freq = freq
        self.tz = tz
        self.index_name = index_name
        self.ordered = ordered
        self.table = table
        self.meta = meta
        self.metadata = metadata
        if pos is not None:
            self.set_pos(pos)
        # These are ensured as long as the passed arguments match the
        # constructor annotations.
        assert isinstance(self.name, str)
        assert isinstance(self.cname, str)
    @property
    def itemsize(self) -> int:
        # Assumes self.typ has already been initialized
        return self.typ.itemsize
    @property
    def kind_attr(self) -> str:
        # name of the node attribute holding this column's kind
        return f"{self.name}_kind"
    def set_pos(self, pos: int):
        """ set the position of this column in the Table """
        self.pos = pos
        # keep the pytables column descriptor in sync with our position
        if pos is not None and self.typ is not None:
            self.typ._v_pos = pos
    def __repr__(self) -> str:
        temp = tuple(
            map(pprint_thing, (self.name, self.cname, self.axis, self.pos, self.kind))
        )
        return ",".join(
            (
                f"{key}->{value}"
                for key, value in zip(["name", "cname", "axis", "pos", "kind"], temp)
            )
        )
    def __eq__(self, other: Any) -> bool:
        """ compare 2 col items """
        return all(
            getattr(self, a, None) == getattr(other, a, None)
            for a in ["name", "cname", "axis", "pos"]
        )
    def __ne__(self, other) -> bool:
        return not self.__eq__(other)
    @property
    def is_indexed(self) -> bool:
        """ return whether I am an indexed column """
        if not hasattr(self.table, "cols"):
            # e.g. if infer hasn't been called yet, self.table will be None.
            return False
        return getattr(self.table.cols, self.cname).is_indexed
    def convert(self, values: np.ndarray, nan_rep, encoding: str, errors: str):
        """
        Convert the data from this selection to the appropriate pandas type.
        """
        assert isinstance(values, np.ndarray), type(values)
        # values is a recarray
        if values.dtype.fields is not None:
            values = values[self.cname]
        val_kind = _ensure_decoded(self.kind)
        values = _maybe_convert(values, val_kind, encoding, errors)
        kwargs = {}
        kwargs["name"] = _ensure_decoded(self.index_name)
        if self.freq is not None:
            kwargs["freq"] = _ensure_decoded(self.freq)
        factory: Union[Type[Index], Type[DatetimeIndex]] = Index
        if is_datetime64_dtype(values.dtype) or is_datetime64tz_dtype(values.dtype):
            factory = DatetimeIndex
        # making an Index instance could throw a number of different errors
        try:
            new_pd_index = factory(values, **kwargs)
        except ValueError:
            # if the output freq is different that what we recorded,
            # it should be None (see also 'doc example part 2')
            if "freq" in kwargs:
                kwargs["freq"] = None
            new_pd_index = factory(values, **kwargs)
        # re-attach the stored timezone, if any
        new_pd_index = _set_tz(new_pd_index, self.tz)
        return new_pd_index, new_pd_index
    def take_data(self):
        """ return the values"""
        return self.values
    @property
    def attrs(self):
        # the pytables node attributes for my table
        return self.table._v_attrs
    @property
    def description(self):
        return self.table.description
    @property
    def col(self):
        """ return my current col description """
        return getattr(self.description, self.cname, None)
    @property
    def cvalues(self):
        """ return my cython values """
        return self.values
    def __iter__(self):
        return iter(self.values)
    def maybe_set_size(self, min_itemsize=None):
        """
        maybe set a string col itemsize:
        min_itemsize can be an integer or a dict with this columns name
        with an integer size
        """
        if _ensure_decoded(self.kind) == "string":
            if isinstance(min_itemsize, dict):
                min_itemsize = min_itemsize.get(self.name)
            # only grow the column, never shrink it
            if min_itemsize is not None and self.typ.itemsize < min_itemsize:
                self.typ = _tables().StringCol(itemsize=min_itemsize, pos=self.pos)
    def validate_names(self):
        # base index columns accept any name; see DataIndexableCol override
        pass
    def validate_and_set(self, handler: "AppendableTable", append: bool):
        """Bind to the handler's table, then validate and persist my attrs."""
        self.table = handler.table
        self.validate_col()
        self.validate_attr(append)
        self.validate_metadata(handler)
        self.write_metadata(handler)
        self.set_attr()
    def validate_col(self, itemsize=None):
        """ validate this column: return the compared against itemsize """
        # validate this column for string truncation (or reset to the max size)
        if _ensure_decoded(self.kind) == "string":
            c = self.col
            if c is not None:
                if itemsize is None:
                    itemsize = self.itemsize
                if c.itemsize < itemsize:
                    raise ValueError(
                        f"Trying to store a string with len [{itemsize}] in "
                        f"[{self.cname}] column but\nthis column has a limit of "
                        f"[{c.itemsize}]!\nConsider using min_itemsize to "
                        "preset the sizes on these columns"
                    )
                return c.itemsize
        return None
    def validate_attr(self, append: bool):
        # check for backwards incompatibility
        if append:
            existing_kind = getattr(self.attrs, self.kind_attr, None)
            if existing_kind is not None and existing_kind != self.kind:
                raise TypeError(
                    f"incompatible kind in col [{existing_kind} - {self.kind}]"
                )
    def update_info(self, info):
        """
        set/update the info for this indexable with the key/value
        if there is a conflict raise/warn as needed
        """
        for key in self._info_fields:
            value = getattr(self, key, None)
            idx = info.setdefault(self.name, {})
            existing_value = idx.get(key)
            if key in idx and value is not None and existing_value != value:
                # frequency/name just warn
                if key in ["freq", "index_name"]:
                    ws = attribute_conflict_doc % (key, existing_value, value)
                    warnings.warn(ws, AttributeConflictWarning, stacklevel=6)
                    # reset
                    idx[key] = None
                    setattr(self, key, None)
                else:
                    raise ValueError(
                        f"invalid info for [{self.name}] for [{key}], "
                        f"existing_value [{existing_value}] conflicts with "
                        f"new value [{value}]"
                    )
            else:
                if value is not None or existing_value is not None:
                    idx[key] = value
    def set_info(self, info):
        """ set my state from the passed info """
        idx = info.get(self.name)
        if idx is not None:
            self.__dict__.update(idx)
    def set_attr(self):
        """ set the kind for this column """
        setattr(self.attrs, self.kind_attr, self.kind)
    def validate_metadata(self, handler: "AppendableTable"):
        """ validate that kind=category does not change the categories """
        if self.meta == "category":
            new_metadata = self.metadata
            cur_metadata = handler.read_metadata(self.cname)
            if (
                new_metadata is not None
                and cur_metadata is not None
                and not array_equivalent(new_metadata, cur_metadata)
            ):
                raise ValueError(
                    "cannot append a categorical with "
                    "different categories to the existing"
                )
    def write_metadata(self, handler: "AppendableTable"):
        """ set the meta data """
        if self.metadata is not None:
            handler.write_metadata(self.cname, self.metadata)
class GenericIndexCol(IndexCol):
    """An index column that has no physical representation in the table's data."""

    @property
    def is_indexed(self) -> bool:
        # nothing is stored on disk, so nothing can ever be indexed
        return False

    def convert(self, values: np.ndarray, nan_rep, encoding: str, errors: str):
        """
        Convert the data from this selection to the appropriate pandas type.
        Parameters
        ----------
        values : np.ndarray
        nan_rep : str
        encoding : str
        errors : str
        """
        assert isinstance(values, np.ndarray), type(values)
        # synthesize a default integer index of matching length
        index = Int64Index(np.arange(len(values)))
        return index, index

    def set_attr(self):
        # no per-column state to persist for a generic index
        pass
class DataCol(IndexCol):
    """
    a data holding column, by definition this is not indexable
    Parameters
    ----------
    data : the actual data
    cname : the column name in the table to hold the data (typically
        values)
    meta : a string description of the metadata
    metadata : the actual metadata
    """
    is_an_indexable = False
    is_data_indexable = False
    # data columns persist tz/ordered instead of the index-style fields
    _info_fields = ["tz", "ordered"]
    def __init__(
        self,
        name: str,
        values=None,
        kind=None,
        typ=None,
        cname=None,
        pos=None,
        tz=None,
        ordered=None,
        table=None,
        meta=None,
        metadata=None,
        dtype: Optional[DtypeArg] = None,
        data=None,
    ):
        super().__init__(
            name=name,
            values=values,
            kind=kind,
            typ=typ,
            pos=pos,
            cname=cname,
            tz=tz,
            ordered=ordered,
            table=table,
            meta=meta,
            metadata=metadata,
        )
        self.dtype = dtype
        self.data = data
    @property
    def dtype_attr(self) -> str:
        # node-attr name under which this column's dtype is stored
        return f"{self.name}_dtype"
    @property
    def meta_attr(self) -> str:
        # node-attr name under which this column's meta string is stored
        return f"{self.name}_meta"
    def __repr__(self) -> str:
        temp = tuple(
            map(
                pprint_thing, (self.name, self.cname, self.dtype, self.kind, self.shape)
            )
        )
        return ",".join(
            (
                f"{key}->{value}"
                for key, value in zip(["name", "cname", "dtype", "kind", "shape"], temp)
            )
        )
    def __eq__(self, other: Any) -> bool:
        """ compare 2 col items """
        return all(
            getattr(self, a, None) == getattr(other, a, None)
            for a in ["name", "cname", "dtype", "pos"]
        )
    def set_data(self, data: ArrayLike):
        """Attach ``data`` and derive dtype/kind from it (only valid once)."""
        assert data is not None
        assert self.dtype is None
        data, dtype_name = _get_data_and_dtype_name(data)
        self.data = data
        self.dtype = dtype_name
        self.kind = _dtype_to_kind(dtype_name)
    def take_data(self):
        """ return the data """
        return self.data
    @classmethod
    def _get_atom(cls, values: ArrayLike) -> "Col":
        """
        Get an appropriately typed and shaped pytables.Col object for values.
        """
        dtype = values.dtype
        # error: "ExtensionDtype" has no attribute "itemsize"
        itemsize = dtype.itemsize  # type: ignore[attr-defined]
        shape = values.shape
        if values.ndim == 1:
            # EA, use block shape pretending it is 2D
            # TODO(EA2D): not necessary with 2D EAs
            shape = (1, values.size)
        if isinstance(values, Categorical):
            # categoricals are stored via their integer codes
            codes = values.codes
            atom = cls.get_atom_data(shape, kind=codes.dtype.name)
        elif is_datetime64_dtype(dtype) or is_datetime64tz_dtype(dtype):
            atom = cls.get_atom_datetime64(shape)
        elif is_timedelta64_dtype(dtype):
            atom = cls.get_atom_timedelta64(shape)
        elif is_complex_dtype(dtype):
            atom = _tables().ComplexCol(itemsize=itemsize, shape=shape[0])
        elif is_string_dtype(dtype):
            atom = cls.get_atom_string(shape, itemsize)
        else:
            atom = cls.get_atom_data(shape, kind=dtype.name)
        return atom
    @classmethod
    def get_atom_string(cls, shape, itemsize):
        return _tables().StringCol(itemsize=itemsize, shape=shape[0])
    @classmethod
    def get_atom_coltype(cls, kind: str) -> Type["Col"]:
        """ return the PyTables column class for this column """
        if kind.startswith("uint"):
            k4 = kind[4:]
            col_name = f"UInt{k4}Col"
        elif kind.startswith("period"):
            # we store as integer
            col_name = "Int64Col"
        else:
            kcap = kind.capitalize()
            col_name = f"{kcap}Col"
        return getattr(_tables(), col_name)
    @classmethod
    def get_atom_data(cls, shape, kind: str) -> "Col":
        return cls.get_atom_coltype(kind=kind)(shape=shape[0])
    @classmethod
    def get_atom_datetime64(cls, shape):
        # datetimes are stored as i8 nanoseconds
        return _tables().Int64Col(shape=shape[0])
    @classmethod
    def get_atom_timedelta64(cls, shape):
        # timedeltas are stored as i8 nanoseconds
        return _tables().Int64Col(shape=shape[0])
    @property
    def shape(self):
        return getattr(self.data, "shape", None)
    @property
    def cvalues(self):
        """ return my cython values """
        return self.data
    def validate_attr(self, append):
        """validate that we have the same order as the existing & same dtype"""
        if append:
            existing_fields = getattr(self.attrs, self.kind_attr, None)
            if existing_fields is not None and existing_fields != list(self.values):
                raise ValueError("appended items do not match existing items in table!")
            existing_dtype = getattr(self.attrs, self.dtype_attr, None)
            if existing_dtype is not None and existing_dtype != self.dtype:
                raise ValueError(
                    "appended items dtype do not match existing items dtype in table!"
                )
    def convert(self, values: np.ndarray, nan_rep, encoding: str, errors: str):
        """
        Convert the data from this selection to the appropriate pandas type.
        Parameters
        ----------
        values : np.ndarray
        nan_rep :
        encoding : str
        errors : str
        Returns
        -------
        index : listlike to become an Index
        data : ndarraylike to become a column
        """
        assert isinstance(values, np.ndarray), type(values)
        # values is a recarray
        if values.dtype.fields is not None:
            values = values[self.cname]
        assert self.typ is not None
        if self.dtype is None:
            # Note: in tests we never have timedelta64 or datetime64,
            # so the _get_data_and_dtype_name may be unnecessary
            converted, dtype_name = _get_data_and_dtype_name(values)
            kind = _dtype_to_kind(dtype_name)
        else:
            converted = values
            dtype_name = self.dtype
            kind = self.kind
        assert isinstance(converted, np.ndarray)  # for mypy
        # use the meta if needed
        meta = _ensure_decoded(self.meta)
        metadata = self.metadata
        ordered = self.ordered
        tz = self.tz
        assert dtype_name is not None
        # convert to the correct dtype
        dtype = _ensure_decoded(dtype_name)
        # reverse converts
        if dtype == "datetime64":
            # recreate with tz if indicated
            converted = _set_tz(converted, tz, coerce=True)
        elif dtype == "timedelta64":
            converted = np.asarray(converted, dtype="m8[ns]")
        elif dtype == "date":
            # stored as ordinals (older files may hold timestamps instead)
            try:
                converted = np.asarray(
                    [date.fromordinal(v) for v in converted], dtype=object
                )
            except ValueError:
                converted = np.asarray(
                    [date.fromtimestamp(v) for v in converted], dtype=object
                )
        elif meta == "category":
            # we have a categorical
            categories = metadata
            codes = converted.ravel()
            # if we have stored a NaN in the categories
            # then strip it; in theory we could have BOTH
            # -1s in the codes and nulls :<
            if categories is None:
                # Handle case of NaN-only categorical columns in which case
                # the categories are an empty array; when this is stored,
                # pytables cannot write a zero-len array, so on readback
                # the categories would be None and `read_hdf()` would fail.
                categories = Index([], dtype=np.float64)
            else:
                mask = isna(categories)
                if mask.any():
                    categories = categories[~mask]
                    # shift codes down to account for the removed categories
                    codes[codes != -1] -= mask.astype(int).cumsum()._values
            converted = Categorical.from_codes(
                codes, categories=categories, ordered=ordered
            )
        else:
            try:
                converted = converted.astype(dtype, copy=False)
            except TypeError:
                converted = converted.astype("O", copy=False)
        # convert nans / decode
        if _ensure_decoded(kind) == "string":
            converted = _unconvert_string_array(
                converted, nan_rep=nan_rep, encoding=encoding, errors=errors
            )
        return self.values, converted
    def set_attr(self):
        """ set the data for this column """
        setattr(self.attrs, self.kind_attr, self.values)
        setattr(self.attrs, self.meta_attr, self.meta)
        assert self.dtype is not None
        setattr(self.attrs, self.dtype_attr, self.dtype)
class DataIndexableCol(DataCol):
    """A data column that may additionally serve as a query index."""

    is_data_indexable = True

    def validate_names(self):
        # only object-dtype (string-like) labels are accepted for indexables
        if Index(self.values).is_object():
            return
        # TODO: should the message here be more specifically non-str?
        raise ValueError("cannot have non-object label DataIndexableCol")

    @classmethod
    def get_atom_string(cls, shape, itemsize):
        # scalar (unshaped) string column for an indexable
        return _tables().StringCol(itemsize=itemsize)

    @classmethod
    def get_atom_data(cls, shape, kind: str) -> "Col":
        # scalar column of the kind-appropriate PyTables type
        return cls.get_atom_coltype(kind=kind)()

    @classmethod
    def get_atom_datetime64(cls, shape):
        # datetimes are stored as i8 nanoseconds
        return _tables().Int64Col()

    @classmethod
    def get_atom_timedelta64(cls, shape):
        # timedeltas are stored as i8 nanoseconds
        return _tables().Int64Col()
class GenericDataIndexableCol(DataIndexableCol):
    """ represent a generic pytables data column """
    # marker subclass: behaves exactly like DataIndexableCol
    pass
class Fixed:
    """
    represent an object in my store
    facilitate read/write of various types of objects
    this is an abstract base class
    Parameters
    ----------
    parent : HDFStore
    group : Node
        The group node where the table resides.
    """
    pandas_kind: str
    format_type: str = "fixed"  # GH#30962 needed by dask
    obj_type: Type[FrameOrSeriesUnion]
    ndim: int
    encoding: str
    parent: HDFStore
    group: "Node"
    errors: str
    is_table = False
    def __init__(
        self,
        parent: HDFStore,
        group: "Node",
        encoding: str = "UTF-8",
        errors: str = "strict",
    ):
        assert isinstance(parent, HDFStore), type(parent)
        assert _table_mod is not None  # needed for mypy
        assert isinstance(group, _table_mod.Node), type(group)
        self.parent = parent
        self.group = group
        self.encoding = _ensure_encoding(encoding)
        self.errors = errors
    @property
    def is_old_version(self) -> bool:
        # versions strictly before 0.10.1 (compared component-wise)
        return self.version[0] <= 0 and self.version[1] <= 10 and self.version[2] < 1
    @property
    def version(self) -> Tuple[int, int, int]:
        """ compute and set our version """
        version = _ensure_decoded(getattr(self.group._v_attrs, "pandas_version", None))
        try:
            version = tuple(int(x) for x in version.split("."))
            if len(version) == 2:
                version = version + (0,)
        except AttributeError:
            # no/invalid pandas_version attr -> treat as unversioned
            version = (0, 0, 0)
        return version
    @property
    def pandas_type(self):
        return _ensure_decoded(getattr(self.group._v_attrs, "pandas_type", None))
    def __repr__(self) -> str:
        """ return a pretty representation of myself """
        self.infer_axes()
        s = self.shape
        if s is not None:
            if isinstance(s, (list, tuple)):
                jshape = ",".join(pprint_thing(x) for x in s)
                s = f"[{jshape}]"
            return f"{self.pandas_type:12.12} (shape->{s})"
        return self.pandas_type
    def set_object_info(self):
        """ set my pandas type & version """
        self.attrs.pandas_type = str(self.pandas_kind)
        self.attrs.pandas_version = str(_version)
    def copy(self):
        # shallow copy; the group/parent references are shared
        new_self = copy.copy(self)
        return new_self
    @property
    def shape(self):
        return self.nrows
    @property
    def pathname(self):
        return self.group._v_pathname
    @property
    def _handle(self):
        return self.parent._handle
    @property
    def _filters(self):
        return self.parent._filters
    @property
    def _complevel(self) -> int:
        return self.parent._complevel
    @property
    def _fletcher32(self) -> bool:
        return self.parent._fletcher32
    @property
    def attrs(self):
        return self.group._v_attrs
    def set_attrs(self):
        """ set our object attributes """
        pass
    def get_attrs(self):
        """ get our object attributes """
        pass
    @property
    def storable(self):
        """ return my storable """
        return self.group
    @property
    def is_exists(self) -> bool:
        # overridden by subclasses that can tell whether data exists
        return False
    @property
    def nrows(self):
        return getattr(self.storable, "nrows", None)
    def validate(self, other):
        """ validate against an existing storable """
        if other is None:
            return
        return True
    def validate_version(self, where=None):
        """ are we trying to operate on an old version? """
        return True
    def infer_axes(self):
        """
        infer the axes of my storer
        return a boolean indicating if we have a valid storer or not
        """
        s = self.storable
        if s is None:
            return False
        self.get_attrs()
        return True
    def read(
        self,
        where=None,
        columns=None,
        start: Optional[int] = None,
        stop: Optional[int] = None,
    ):
        raise NotImplementedError(
            "cannot read on an abstract storer: subclasses should implement"
        )
    def write(self, **kwargs):
        raise NotImplementedError(
            "cannot write on an abstract storer: subclasses should implement"
        )
    def delete(
        self, where=None, start: Optional[int] = None, stop: Optional[int] = None
    ):
        """
        support fully deleting the node in its entirety (only) - where
        specification must be None
        """
        if com.all_none(where, start, stop):
            self._handle.remove_node(self.group, recursive=True)
            return None
        raise TypeError("cannot delete on an abstract storer")
class GenericFixed(Fixed):
""" a generified fixed version """
_index_type_map = {DatetimeIndex: "datetime", PeriodIndex: "period"}
_reverse_index_map = {v: k for k, v in _index_type_map.items()}
attributes: List[str] = []
# indexer helpers
    def _class_to_alias(self, cls) -> str:
        """Map an index class to the string alias stored in the file ("" if unmapped)."""
        return self._index_type_map.get(cls, "")
    def _alias_to_class(self, alias):
        """Map a stored alias back to its index class (falls back to Index)."""
        if isinstance(alias, type):  # pragma: no cover
            # compat: for a short period of time master stored types
            return alias
        return self._reverse_index_map.get(alias, Index)
    def _get_index_factory(self, attrs):
        """
        Return a (callable, kwargs) pair that reconstructs the stored index
        from raw values, based on the ``index_class``/``freq``/``tz`` attrs.
        """
        index_class = self._alias_to_class(
            _ensure_decoded(getattr(attrs, "index_class", ""))
        )
        factory: Callable
        if index_class == DatetimeIndex:
            def f(values, freq=None, tz=None):
                # data are already in UTC, localize and convert if tz present
                dta = DatetimeArray._simple_new(values.values, freq=freq)
                result = DatetimeIndex._simple_new(dta, name=None)
                if tz is not None:
                    result = result.tz_localize("UTC").tz_convert(tz)
                return result
            factory = f
        elif index_class == PeriodIndex:
            def f(values, freq=None, tz=None):
                parr = PeriodArray._simple_new(values, freq=freq)
                return PeriodIndex._simple_new(parr, name=None)
            factory = f
        else:
            factory = index_class
        kwargs = {}
        if "freq" in attrs:
            kwargs["freq"] = attrs["freq"]
            if index_class is Index:
                # DTI/PI would be gotten by _alias_to_class
                factory = TimedeltaIndex
        if "tz" in attrs:
            if isinstance(attrs["tz"], bytes):
                # created by python2
                kwargs["tz"] = attrs["tz"].decode("utf-8")
            else:
                # created by python3
                kwargs["tz"] = attrs["tz"]
            assert index_class is DatetimeIndex  # just checking
        return factory, kwargs
    def validate_read(self, columns, where):
        """
        raise if any keywords are passed which are not-None

        Fixed-format stores can only be read in their entirety, so neither a
        column subset nor a where clause may be supplied.
        """
        if columns is not None:
            raise TypeError(
                "cannot pass a column specification when reading "
                "a Fixed format store. this store must be selected in its entirety"
            )
        if where is not None:
            raise TypeError(
                "cannot pass a where specification when reading "
                "from a Fixed format store. this store must be selected in its entirety"
            )
    @property
    def is_exists(self) -> bool:
        # a GenericFixed storer always refers to an existing node
        return True
    def set_attrs(self):
        """ set our object attributes """
        self.attrs.encoding = self.encoding
        self.attrs.errors = self.errors
    def get_attrs(self):
        """ retrieve our attributes """
        self.encoding = _ensure_encoding(getattr(self.attrs, "encoding", None))
        self.errors = _ensure_decoded(getattr(self.attrs, "errors", "strict"))
        # subclasses list extra persisted attribute names in ``attributes``
        for n in self.attributes:
            setattr(self, n, _ensure_decoded(getattr(self.attrs, n, None)))
    def write(self, obj, **kwargs):
        # base implementation only persists attrs; subclasses extend this
        self.set_attrs()
    def read_array(
        self, key: str, start: Optional[int] = None, stop: Optional[int] = None
    ):
        """Read the array stored under ``key`` off of ``self.group``."""
        import tables
        node = getattr(self.group, key)
        attrs = node._v_attrs
        transposed = getattr(attrs, "transposed", False)
        if isinstance(node, tables.VLArray):
            # variable-length array: the payload is the single row 0
            ret = node[0][start:stop]
        else:
            dtype = _ensure_decoded(getattr(attrs, "value_type", None))
            shape = getattr(attrs, "shape", None)
            if shape is not None:
                # length 0 axis
                ret = np.empty(shape, dtype=dtype)
            else:
                ret = node[start:stop]
            if dtype == "datetime64":
                # reconstruct a timezone if indicated
                tz = getattr(attrs, "tz", None)
                ret = _set_tz(ret, tz, coerce=True)
            elif dtype == "timedelta64":
                ret = np.asarray(ret, dtype="m8[ns]")
        if transposed:
            return ret.T
        else:
            return ret
def read_index(
self, key: str, start: Optional[int] = None, stop: Optional[int] = None
) -> Index:
variety = _ensure_decoded(getattr(self.attrs, f"{key}_variety"))
if variety == "multi":
return self.read_multi_index(key, start=start, stop=stop)
elif variety == "regular":
node = getattr(self.group, key)
index = self.read_index_node(node, start=start, stop=stop)
return index
else: # pragma: no cover
raise TypeError(f"unrecognized index variety: {variety}")
    def write_index(self, key: str, index: Index):
        """Write ``index`` under ``key``, recording kind/name/class/freq/tz attrs."""
        if isinstance(index, MultiIndex):
            setattr(self.attrs, f"{key}_variety", "multi")
            self.write_multi_index(key, index)
        else:
            setattr(self.attrs, f"{key}_variety", "regular")
            converted = _convert_index("index", index, self.encoding, self.errors)
            self.write_array(key, converted.values)
            node = getattr(self.group, key)
            node._v_attrs.kind = converted.kind
            node._v_attrs.name = index.name
            # record enough metadata to rebuild the exact index type on read
            if isinstance(index, (DatetimeIndex, PeriodIndex)):
                node._v_attrs.index_class = self._class_to_alias(type(index))
            if isinstance(index, (DatetimeIndex, PeriodIndex, TimedeltaIndex)):
                node._v_attrs.freq = index.freq
            if isinstance(index, DatetimeIndex) and index.tz is not None:
                node._v_attrs.tz = _get_tz(index.tz)
    def write_multi_index(self, key: str, index: MultiIndex):
        """Write a MultiIndex as one (level, codes) node pair per level."""
        setattr(self.attrs, f"{key}_nlevels", index.nlevels)
        for i, (lev, level_codes, name) in enumerate(
            zip(index.levels, index.codes, index.names)
        ):
            # write the level
            if is_extension_array_dtype(lev):
                raise NotImplementedError(
                    "Saving a MultiIndex with an extension dtype is not supported."
                )
            level_key = f"{key}_level{i}"
            conv_level = _convert_index(level_key, lev, self.encoding, self.errors)
            self.write_array(level_key, conv_level.values)
            node = getattr(self.group, level_key)
            node._v_attrs.kind = conv_level.kind
            node._v_attrs.name = name
            # write the name
            # NOTE(review): the attr name concatenates key and level name
            # verbatim; read_multi_index recovers names from the level node's
            # own ``name`` attr instead — confirm this attr is still needed
            setattr(node._v_attrs, f"{key}_name{name}", name)
            # write the labels
            label_key = f"{key}_label{i}"
            self.write_array(label_key, level_codes)
def read_multi_index(
    self, key: str, start: Optional[int] = None, stop: Optional[int] = None
) -> MultiIndex:
    """Reassemble a MultiIndex previously written by ``write_multi_index``."""
    nlevels = getattr(self.attrs, f"{key}_nlevels")

    levels = []
    codes = []
    names: List[Label] = []
    for lvl in range(nlevels):
        # each level was stored as a regular index node plus a codes array
        node = getattr(self.group, f"{key}_level{lvl}")
        level_index = self.read_index_node(node, start=start, stop=stop)
        levels.append(level_index)
        names.append(level_index.name)

        level_codes = self.read_array(f"{key}_label{lvl}", start=start, stop=stop)
        codes.append(level_codes)

    return MultiIndex(
        levels=levels, codes=codes, names=names, verify_integrity=True
    )
def read_index_node(
    self, node: "Node", start: Optional[int] = None, stop: Optional[int] = None
) -> Index:
    """Rebuild an Index from a pytables node written by ``write_index``."""
    data = node[start:stop]
    # If the index was an empty array write_array_empty() will
    # have written a sentinel. Here we replace it with the original.
    if "shape" in node._v_attrs and np.prod(node._v_attrs.shape) == 0:
        data = np.empty(node._v_attrs.shape, dtype=node._v_attrs.value_type)
    kind = _ensure_decoded(node._v_attrs.kind)
    name = None

    if "name" in node._v_attrs:
        name = _ensure_str(node._v_attrs.name)
        name = _ensure_decoded(name)

    attrs = node._v_attrs
    # factory is the Index subclass (or callable) matching the stored metadata
    factory, kwargs = self._get_index_factory(attrs)

    if kind == "date":
        # "date" kinds round-trip through object dtype
        index = factory(
            _unconvert_index(
                data, kind, encoding=self.encoding, errors=self.errors
            ),
            dtype=object,
            **kwargs,
        )
    else:
        index = factory(
            _unconvert_index(
                data, kind, encoding=self.encoding, errors=self.errors
            ),
            **kwargs,
        )

    index.name = name

    return index
def write_array_empty(self, key: str, value: ArrayLike):
    """Write a sentinel for a zero-length array, recording dtype and shape."""
    # pytables cannot store truly empty arrays, so persist a one-element
    # placeholder and stash the real shape/dtype in the node attributes;
    # read_index_node / read_array recognize the sentinel and reverse this.
    placeholder = np.empty((1,) * value.ndim)
    self._handle.create_array(self.group, key, placeholder)
    node = getattr(self.group, key)
    node._v_attrs.value_type = str(value.dtype)
    node._v_attrs.shape = value.shape
def write_array(self, key: str, obj: FrameOrSeries, items: Optional[Index] = None):
    """
    Write obj's values under *key*, choosing a pytables container by dtype.

    ``items`` is only used to enrich the PerformanceWarning message for
    object-dtype data.
    """
    # TODO: we only have a few tests that get here, the only EA
    # that gets passed is DatetimeArray, and we never have
    # both self._filters and EA
    value = extract_array(obj, extract_numpy=True)

    if key in self.group:
        # overwrite any previously stored node of the same name
        self._handle.remove_node(self.group, key)

    # Transform needed to interface with pytables row/col notation
    empty_array = value.size == 0
    transposed = False

    if is_categorical_dtype(value.dtype):
        raise NotImplementedError(
            "Cannot store a category dtype in a HDF5 dataset that uses format="
            '"fixed". Use format="table".'
        )
    if not empty_array:
        if hasattr(value, "T"):
            # ExtensionArrays (1d) may not have transpose.
            value = value.T
            transposed = True

    atom = None
    if self._filters is not None:
        with suppress(ValueError):
            # get the atom for this datatype
            atom = _tables().Atom.from_dtype(value.dtype)

    if atom is not None:
        # We only get here if self._filters is non-None and
        #  the Atom.from_dtype call succeeded

        # create an empty chunked array and fill it from value
        if not empty_array:
            ca = self._handle.create_carray(
                self.group, key, atom, value.shape, filters=self._filters
            )
            ca[:] = value

        else:
            self.write_array_empty(key, value)

    elif value.dtype.type == np.object_:
        # infer the type, warn if we have a non-string type here (for
        # performance)
        inferred_type = lib.infer_dtype(value, skipna=False)
        if empty_array:
            pass
        elif inferred_type == "string":
            pass
        else:
            # non-string object arrays are stored pickled; warn the user
            ws = performance_doc % (inferred_type, key, items)
            warnings.warn(ws, PerformanceWarning, stacklevel=7)

        vlarr = self._handle.create_vlarray(self.group, key, _tables().ObjectAtom())
        vlarr.append(value)

    elif is_datetime64_dtype(value.dtype):
        # datetimes stored as raw i8 with a value_type tag for round-trip
        self._handle.create_array(self.group, key, value.view("i8"))
        getattr(self.group, key)._v_attrs.value_type = "datetime64"
    elif is_datetime64tz_dtype(value.dtype):
        # store as UTC
        # with a zone
        self._handle.create_array(self.group, key, value.asi8)

        node = getattr(self.group, key)
        node._v_attrs.tz = _get_tz(value.tz)
        node._v_attrs.value_type = "datetime64"
    elif is_timedelta64_dtype(value.dtype):
        self._handle.create_array(self.group, key, value.view("i8"))
        getattr(self.group, key)._v_attrs.value_type = "timedelta64"
    elif empty_array:
        self.write_array_empty(key, value)
    else:
        self._handle.create_array(self.group, key, value)

    getattr(self.group, key)._v_attrs.transposed = transposed
class SeriesFixed(GenericFixed):
    """Fixed-format storer for a Series: index + values arrays plus a name."""

    pandas_kind = "series"
    attributes = ["name"]

    # name of the stored Series (restored on read via the attrs round-trip)
    name: Label

    @property
    def shape(self):
        # (nrows,) of the stored values, or None if the node is absent/unusual
        try:
            return (len(self.group.values),)
        except (TypeError, AttributeError):
            return None

    def read(
        self,
        where=None,
        columns=None,
        start: Optional[int] = None,
        stop: Optional[int] = None,
    ):
        """Reconstruct the Series from its stored index and values arrays."""
        self.validate_read(columns, where)
        index = self.read_index("index", start=start, stop=stop)
        values = self.read_array("values", start=start, stop=stop)
        return Series(values, index=index, name=self.name)

    def write(self, obj, **kwargs):
        """Write the Series' index, values, and name."""
        super().write(obj, **kwargs)
        self.write_index("index", obj.index)
        self.write_array("values", obj)
        self.attrs.name = obj.name
class BlockManagerFixed(GenericFixed):
    """Fixed-format storer for block-manager-backed objects (DataFrame)."""

    attributes = ["ndim", "nblocks"]

    # number of blocks written (persisted via the attrs round-trip)
    nblocks: int

    @property
    def shape(self) -> Optional[Shape]:
        # reconstruct the stored shape from block nodes; None if unreadable
        try:
            ndim = self.ndim

            # items: total columns across all block-item nodes
            items = 0
            for i in range(self.nblocks):
                node = getattr(self.group, f"block{i}_items")
                shape = getattr(node, "shape", None)
                if shape is not None:
                    items += shape[0]

            # data shape
            node = self.group.block0_values
            shape = getattr(node, "shape", None)
            if shape is not None:
                shape = list(shape[0 : (ndim - 1)])
            else:
                shape = []

            shape.append(items)

            return shape
        except AttributeError:
            return None

    def read(
        self,
        where=None,
        columns=None,
        start: Optional[int] = None,
        stop: Optional[int] = None,
    ):
        """Reconstruct the DataFrame, applying start/stop to the row axis."""
        # start, stop applied to rows, so 0th axis only
        self.validate_read(columns, where)
        select_axis = self.obj_type()._get_block_manager_axis(0)

        axes = []
        for i in range(self.ndim):
            _start, _stop = (start, stop) if i == select_axis else (None, None)
            ax = self.read_index(f"axis{i}", start=_start, stop=_stop)
            axes.append(ax)

        items = axes[0]
        dfs = []

        for i in range(self.nblocks):
            blk_items = self.read_index(f"block{i}_items")
            # NOTE(review): _start/_stop here are the leftovers from the last
            # iteration of the axis loop above; for a DataFrame the selected
            # (row) axis is read last, so they equal (start, stop) — confirm
            # before refactoring the loop.
            values = self.read_array(f"block{i}_values", start=_start, stop=_stop)

            columns = items[items.get_indexer(blk_items)]
            df = DataFrame(values.T, columns=columns, index=axes[1])
            dfs.append(df)

        if len(dfs) > 0:
            out = concat(dfs, axis=1)
            out = out.reindex(columns=items, copy=False)
            return out

        return DataFrame(columns=axes[0], index=axes[1])

    def write(self, obj, **kwargs):
        """Write obj block-by-block; columns must be unique for fixed format."""
        super().write(obj, **kwargs)

        data = obj._mgr
        if not data.is_consolidated():
            data = data.consolidate()

        self.attrs.ndim = data.ndim
        for i, ax in enumerate(data.axes):
            if i == 0 and (not ax.is_unique):
                raise ValueError("Columns index has to be unique for fixed format")
            self.write_index(f"axis{i}", ax)

        # Supporting mixed-type DataFrame objects...nontrivial
        self.attrs.nblocks = len(data.blocks)
        for i, blk in enumerate(data.blocks):
            # I have no idea why, but writing values before items fixed #2299
            blk_items = data.items.take(blk.mgr_locs)
            self.write_array(f"block{i}_values", blk.values, items=blk_items)
            self.write_index(f"block{i}_items", blk_items)
class FrameFixed(BlockManagerFixed):
    """Fixed-format storer specialized for DataFrame objects."""

    pandas_kind = "frame"
    obj_type = DataFrame
class Table(Fixed):
"""
represent a table:
facilitate read/write of various types of tables
Attrs in Table Node
-------------------
These are attributes that are store in the main table node, they are
necessary to recreate these tables when read back in.
index_axes : a list of tuples of the (original indexing axis and
index column)
non_index_axes: a list of tuples of the (original index axis and
columns on a non-indexing axis)
values_axes : a list of the columns which comprise the data of this
table
data_columns : a list of the columns that we are allowing indexing
(these become single columns in values_axes), or True to force all
columns
nan_rep : the string to use for nan representations for string
objects
levels : the names of levels
metadata : the names of the metadata columns
"""
pandas_kind = "wide_table"
format_type: str = "table" # GH#30962 needed by dask
table_type: str
levels: Union[int, List[Label]] = 1
is_table = True
index_axes: List[IndexCol]
non_index_axes: List[Tuple[int, Any]]
values_axes: List[DataCol]
data_columns: List
metadata: List
info: Dict
def __init__(
    self,
    parent: HDFStore,
    group: "Node",
    encoding=None,
    errors: str = "strict",
    index_axes=None,
    non_index_axes=None,
    values_axes=None,
    data_columns=None,
    info=None,
    nan_rep=None,
):
    """Set up table state; falsy axis/column metadata defaults to empty containers."""
    super().__init__(parent, group, encoding=encoding, errors=errors)
    # normalize falsy inputs (None or empty) to fresh containers
    self.index_axes = [] if not index_axes else index_axes
    self.non_index_axes = [] if not non_index_axes else non_index_axes
    self.values_axes = [] if not values_axes else values_axes
    self.data_columns = [] if not data_columns else data_columns
    self.info = {} if not info else info
    self.nan_rep = nan_rep
@property
def table_type_short(self) -> str:
    """Leading token of ``table_type``, e.g. "appendable" for "appendable_frame"."""
    short, _, _ = self.table_type.partition("_")
    return short
def __repr__(self) -> str:
    """ return a pretty representation of myself """
    # populate axes from on-disk attrs if not already inferred
    self.infer_axes()
    jdc = ",".join(self.data_columns) if len(self.data_columns) else ""
    dc = f",dc->[{jdc}]"

    ver = ""
    if self.is_old_version:
        jver = ".".join(str(x) for x in self.version)
        ver = f"[{jver}]"

    jindex_axes = ",".join(a.name for a in self.index_axes)
    return (
        f"{self.pandas_type:12.12}{ver} "
        f"(typ->{self.table_type_short},nrows->{self.nrows},"
        f"ncols->{self.ncols},indexers->[{jindex_axes}]{dc})"
    )
def __getitem__(self, c: str):
    """Return the axis named ``c``, or None if no axis matches."""
    matches = (a for a in self.axes if a.name == c)
    return next(matches, None)
def validate(self, other):
    """ validate against an existing table """
    if other is None:
        return

    if other.table_type != self.table_type:
        raise TypeError(
            "incompatible table_type with existing "
            f"[{other.table_type} - {self.table_type}]"
        )

    for c in ["index_axes", "non_index_axes", "values_axes"]:
        sv = getattr(self, c, None)
        ov = getattr(other, c, None)
        if sv != ov:
            # show the error for the specific axes
            # (compare element-wise so the message points at the mismatch)
            for i, sax in enumerate(sv):
                oax = ov[i]
                if sax != oax:
                    raise ValueError(
                        f"invalid combination of [{c}] on appending data "
                        f"[{sax}] vs current table [{oax}]"
                    )

            # should never get here
            raise Exception(
                f"invalid combination of [{c}] on appending data [{sv}] vs "
                f"current table [{ov}]"
            )
@property
def is_multi_index(self) -> bool:
    """True when a MultiIndex was stored: ``levels`` is then a list of names."""
    return isinstance(self.levels, list)
def validate_multiindex(
    self, obj: FrameOrSeriesUnion
) -> Tuple[DataFrame, List[Label]]:
    """
    Check that obj's MultiIndex can be stored; return the object with its
    index reset along with the level names used for round-tripping.
    """
    levels = []
    for i, name in enumerate(obj.index.names):
        # unnamed levels get a positional placeholder name
        levels.append(name if name is not None else f"level_{i}")

    try:
        reset_obj = obj.reset_index()
    except ValueError as err:
        raise ValueError(
            "duplicate names/columns in the multi-index when storing as a table"
        ) from err
    assert isinstance(reset_obj, DataFrame)  # for mypy
    return reset_obj, levels
@property
def nrows_expected(self) -> int:
    """Expected row count: the product of the index-axis lengths."""
    lengths = [ax.cvalues.shape[0] for ax in self.index_axes]
    return np.prod(lengths)
@property
def is_exists(self) -> bool:
    """ has this table been created """
    # the pytables "table" child node only exists after a write
    return "table" in self.group
@property
def storable(self):
    # the underlying pytables "table" node, or None if not yet created
    return getattr(self.group, "table", None)
@property
def table(self):
    """ return the table group (this is my storable) """
    return self.storable
@property
def dtype(self):
    # structured dtype of the underlying pytables table
    return self.table.dtype
@property
def description(self):
    # pytables column description object for the underlying table
    return self.table.description
@property
def axes(self):
    # iterator over all axes: index axes first, then values axes
    return itertools.chain(self.index_axes, self.values_axes)
@property
def ncols(self) -> int:
    """Total number of columns across all values axes."""
    total = 0
    for axis in self.values_axes:
        total += len(axis.values)
    return total
@property
def is_transposed(self) -> bool:
    # base Table never stores transposed data; subclasses may override
    return False
@property
def data_orientation(self):
    """Tuple of permuted axis numbers, non-indexable axes first."""
    non_indexable = [int(a[0]) for a in self.non_index_axes]
    indexable = [int(a.axis) for a in self.index_axes]
    return tuple(non_indexable + indexable)
def queryables(self) -> Dict[str, Any]:
    """ return a dict of the kinds allowable columns for this object """
    # mypy doesn't recognize DataFrame._AXIS_NAMES, so we re-write it here
    axis_names = {0: "index", 1: "columns"}

    # compute the values_axes queryables
    # d1: index axes; d2: whole non-index axes (no column object);
    # d3: values axes that were declared data_columns (individually queryable)
    d1 = [(a.cname, a) for a in self.index_axes]
    d2 = [(axis_names[axis], None) for axis, values in self.non_index_axes]
    d3 = [
        (v.cname, v) for v in self.values_axes if v.name in set(self.data_columns)
    ]

    # error: Unsupported operand types for + ("List[Tuple[str, IndexCol]]"
    # and "List[Tuple[str, None]]")
    return dict(d1 + d2 + d3)  # type: ignore[operator]
def index_cols(self):
    """Return (axis, cname) pairs for the index columns."""
    # Note: each cname is assured to be a str.
    pairs = []
    for idx_col in self.index_axes:
        pairs.append((idx_col.axis, idx_col.cname))
    return pairs
def values_cols(self) -> List[str]:
    """Return the cnames of the values columns."""
    return [col.cname for col in self.values_axes]
def _get_metadata_path(self, key: str) -> str:
    """Return the pathname under which metadata for ``key`` is stored."""
    return "/".join([self.group._v_pathname, "meta", key, "meta"])
def write_metadata(self, key: str, values: np.ndarray):
    """
    Write out a metadata array to the key as a fixed-format Series.

    Parameters
    ----------
    key : str
    values : ndarray
    """
    values = Series(values)
    # stored via the parent HDFStore so it round-trips like any other object
    self.parent.put(
        self._get_metadata_path(key),
        values,
        format="table",
        encoding=self.encoding,
        errors=self.errors,
        nan_rep=self.nan_rep,
    )
def read_metadata(self, key: str):
    """Return the metadata array for ``key``, or None when absent."""
    meta_group = getattr(self.group, "meta", None)
    if getattr(meta_group, key, None) is None:
        return None
    return self.parent.select(self._get_metadata_path(key))
def set_attrs(self):
    """ set our table type & indexables """
    # persist everything get_attrs() needs to rebuild this table on read
    self.attrs.table_type = str(self.table_type)
    self.attrs.index_cols = self.index_cols()
    self.attrs.values_cols = self.values_cols()
    self.attrs.non_index_axes = self.non_index_axes
    self.attrs.data_columns = self.data_columns
    self.attrs.nan_rep = self.nan_rep
    self.attrs.encoding = self.encoding
    self.attrs.errors = self.errors
    self.attrs.levels = self.levels
    self.attrs.info = self.info
def get_attrs(self):
    """ retrieve our attributes """
    # mirror of set_attrs(); missing attrs default to empty/none values
    self.non_index_axes = getattr(self.attrs, "non_index_axes", None) or []
    self.data_columns = getattr(self.attrs, "data_columns", None) or []
    self.info = getattr(self.attrs, "info", None) or {}
    self.nan_rep = getattr(self.attrs, "nan_rep", None)
    self.encoding = _ensure_encoding(getattr(self.attrs, "encoding", None))
    self.errors = _ensure_decoded(getattr(self.attrs, "errors", "strict"))
    self.levels: List[Label] = getattr(self.attrs, "levels", None) or []
    # split the cached indexables into index vs values axes
    self.index_axes = [a for a in self.indexables if a.is_an_indexable]
    self.values_axes = [a for a in self.indexables if not a.is_an_indexable]
def validate_version(self, where=None):
    """ are we trying to operate on an old version? """
    if where is not None:
        # NOTE(review): components are compared independently, not
        # lexicographically — this matches versions like 0.10.0 but e.g.
        # not 0.9.5; confirm intent before changing.
        if self.version[0] <= 0 and self.version[1] <= 10 and self.version[2] < 1:
            ws = incompatibility_doc % ".".join([str(x) for x in self.version])
            warnings.warn(ws, IncompatibilityWarning)
def validate_min_itemsize(self, min_itemsize):
    """
    Validate that a dict min_itemsize only names known axes/data_columns.
    Requires data_columns to be defined.
    """
    # None or a scalar applies globally; only dict keys need cross-checking
    if not isinstance(min_itemsize, dict):
        return

    queryable = self.queryables()
    for item_key in min_itemsize:
        # "values" is a blanket key, not a specific column
        if item_key == "values":
            continue
        if item_key not in queryable:
            raise ValueError(
                f"min_itemsize has the key [{item_key}] which is not an axis or "
                "data_column"
            )
@cache_readonly
def indexables(self):
    """ create/cache the indexables if they don't exist """
    _indexables = []

    desc = self.description
    table_attrs = self.table.attrs

    # Note: each of the `name` kwargs below are str, ensured
    #  by the definition in index_cols.
    # index columns
    for i, (axis, name) in enumerate(self.attrs.index_cols):
        atom = getattr(desc, name)
        md = self.read_metadata(name)
        # presence of stored metadata implies a categorical column
        meta = "category" if md is not None else None

        kind_attr = f"{name}_kind"
        kind = getattr(table_attrs, kind_attr, None)

        index_col = IndexCol(
            name=name,
            axis=axis,
            pos=i,
            kind=kind,
            typ=atom,
            table=self.table,
            meta=meta,
            metadata=md,
        )
        _indexables.append(index_col)

    # values columns
    dc = set(self.data_columns)
    base_pos = len(_indexables)

    def f(i, c):
        # build one DataCol (or DataIndexableCol for declared data_columns)
        assert isinstance(c, str)
        klass = DataCol
        if c in dc:
            klass = DataIndexableCol

        atom = getattr(desc, c)
        adj_name = _maybe_adjust_name(c, self.version)

        # TODO: why kind_attr here?
        values = getattr(table_attrs, f"{adj_name}_kind", None)
        dtype = getattr(table_attrs, f"{adj_name}_dtype", None)
        kind = _dtype_to_kind(dtype)

        md = self.read_metadata(c)
        # TODO: figure out why these two versions of `meta` dont always match.
        #  meta = "category" if md is not None else None
        meta = getattr(table_attrs, f"{adj_name}_meta", None)

        obj = klass(
            name=adj_name,
            cname=c,
            values=values,
            kind=kind,
            pos=base_pos + i,
            typ=atom,
            table=self.table,
            meta=meta,
            metadata=md,
            dtype=dtype,
        )
        return obj

    # Note: the definition of `values_cols` ensures that each
    #  `c` below is a str.
    _indexables.extend([f(i, c) for i, c in enumerate(self.attrs.values_cols)])

    return _indexables
def create_index(self, columns=None, optlevel=None, kind: Optional[str] = None):
    """
    Create a pytables index on the specified columns.

    Parameters
    ----------
    columns : None, bool, or listlike[str]
        Indicate which columns to create an index on.

        * False : Do not create any indexes.
        * True : Create indexes on all columns.
        * None : Create indexes on all columns.
        * listlike : Create indexes on the given columns.

    optlevel : int or None, default None
        Optimization level, if None, pytables defaults to 6.
    kind : str or None, default None
        Kind of index, if None, pytables defaults to "medium".

    Raises
    ------
    TypeError if trying to create an index on a complex-type column.

    Notes
    -----
    Cannot index Time64Col or ComplexCol.
    Pytables must be >= 3.0.
    """
    if not self.infer_axes():
        return
    if columns is False:
        return

    # index all indexables and data_columns
    if columns is None or columns is True:
        columns = [a.cname for a in self.axes if a.is_data_indexable]
    if not isinstance(columns, (tuple, list)):
        columns = [columns]

    kw = {}
    if optlevel is not None:
        kw["optlevel"] = optlevel
    if kind is not None:
        kw["kind"] = kind

    table = self.table
    for c in columns:
        v = getattr(table.cols, c, None)
        if v is not None:
            # remove the index if the kind/optlevel have changed
            if v.is_indexed:
                index = v.index
                cur_optlevel = index.optlevel
                cur_kind = index.kind

                if kind is not None and cur_kind != kind:
                    v.remove_index()
                else:
                    # keep the existing kind for the recreate below
                    kw["kind"] = cur_kind

                if optlevel is not None and cur_optlevel != optlevel:
                    v.remove_index()
                else:
                    kw["optlevel"] = cur_optlevel

            # create the index
            if not v.is_indexed:
                if v.type.startswith("complex"):
                    raise TypeError(
                        "Columns containing complex values can be stored but "
                        "cannot be indexed when using table format. Either use "
                        "fixed format, set index=False, or do not include "
                        "the columns containing complex values to "
                        "data_columns when initializing the table."
                    )
                v.create_index(**kw)
        elif c in self.non_index_axes[0][1]:
            # GH 28156
            raise AttributeError(
                f"column {c} is not a data_column.\n"
                f"In order to read column {c} you must reload the dataframe \n"
                f"into HDFStore and include {c} with the data_columns argument."
            )
def _read_axes(
    self, where, start: Optional[int] = None, stop: Optional[int] = None
) -> List[Tuple[ArrayLike, ArrayLike]]:
    """
    Create the axes sniffed from the table.

    Parameters
    ----------
    where : ???
    start : int or None, default None
    stop : int or None, default None

    Returns
    -------
    List[Tuple[index_values, column_values]]
    """
    # create the selection
    selection = Selection(self, where=where, start=start, stop=stop)
    values = selection.select()

    results = []
    # convert the data
    for a in self.axes:
        a.set_info(self.info)
        res = a.convert(
            values,
            nan_rep=self.nan_rep,
            encoding=self.encoding,
            errors=self.errors,
        )
        results.append(res)

    return results
@classmethod
def get_object(cls, obj, transposed: bool):
    """Return the object to be written; the base table stores it unchanged."""
    # subclasses may reorient `obj` based on `transposed`; not needed here
    return obj
def validate_data_columns(self, data_columns, min_itemsize, non_index_axes):
    """
    Normalize the requested data_columns against the non-index axis labels
    and any dict min_itemsize keys, returning them in axis order.
    """
    if not non_index_axes:
        return []

    axis, axis_labels = non_index_axes[0]
    info = self.info.get(axis, {})
    if info.get("type") == "MultiIndex" and data_columns:
        raise ValueError(
            f"cannot use a multi-index on axis [{axis}] with "
            f"data_columns {data_columns}"
        )

    # True means "index every column"; None means "index none"
    if data_columns is True:
        data_columns = list(axis_labels)
    elif data_columns is None:
        data_columns = []

    # a dict min_itemsize implicitly promotes its keys to data columns
    if isinstance(min_itemsize, dict):
        already_present = set(data_columns)
        data_columns = list(data_columns)  # copy; never mutate the caller's list
        for item_key in min_itemsize:
            if item_key != "values" and item_key not in already_present:
                data_columns.append(item_key)

    # keep only labels actually present on the axis, in axis order
    return [c for c in data_columns if c in axis_labels]
def _create_axes(
    self,
    axes,
    obj: DataFrame,
    validate: bool = True,
    nan_rep=None,
    data_columns=None,
    min_itemsize=None,
):
    """
    Create and return the axes.

    Parameters
    ----------
    axes: list or None
        The names or numbers of the axes to create.
    obj : DataFrame
        The object to create axes on.
    validate: bool, default True
        Whether to validate the obj against an existing object already written.
    nan_rep :
        A value to use for string column nan_rep.
    data_columns : List[str], True, or None, default None
        Specify the columns that we want to create to allow indexing on.

        * True : Use all available columns.
        * None : Use no columns.
        * List[str] : Use the specified columns.

    min_itemsize: Dict[str, int] or None, default None
        The min itemsize for a column in bytes.

    Returns
    -------
    Table
        A new (unwritten) table instance of this class, carrying the
        computed index/non-index/values axes.
    """
    if not isinstance(obj, DataFrame):
        group = self.group._v_name
        raise TypeError(
            f"cannot properly create the storer for: [group->{group},"
            f"value->{type(obj)}]"
        )

    # set the default axes if needed
    if axes is None:
        axes = [0]

    # map axes to numbers
    axes = [obj._get_axis_number(a) for a in axes]

    # do we have an existing table (if so, use its axes & data_columns)
    if self.infer_axes():
        table_exists = True
        axes = [a.axis for a in self.index_axes]
        data_columns = list(self.data_columns)
        nan_rep = self.nan_rep
        # TODO: do we always have validate=True here?
    else:
        table_exists = False

    new_info = self.info

    assert self.ndim == 2  # with next check, we must have len(axes) == 1
    # currently support on ndim-1 axes
    if len(axes) != self.ndim - 1:
        raise ValueError(
            "currently only support ndim-1 indexers in an AppendableTable"
        )

    # create according to the new data
    new_non_index_axes: List = []

    # nan_representation
    if nan_rep is None:
        nan_rep = "nan"

    # We construct the non-index-axis first, since that alters new_info
    idx = [x for x in [0, 1] if x not in axes][0]

    a = obj.axes[idx]
    # we might be able to change the axes on the appending data if necessary
    append_axis = list(a)
    if table_exists:
        indexer = len(new_non_index_axes)  # i.e. 0
        exist_axis = self.non_index_axes[indexer][1]
        if not array_equivalent(np.array(append_axis), np.array(exist_axis)):
            # ahah! -> reindex
            if array_equivalent(
                np.array(sorted(append_axis)), np.array(sorted(exist_axis))
            ):
                append_axis = exist_axis

    # the non_index_axes info
    info = new_info.setdefault(idx, {})
    info["names"] = list(a.names)
    info["type"] = type(a).__name__

    new_non_index_axes.append((idx, append_axis))

    # Now we can construct our new index axis
    idx = axes[0]
    a = obj.axes[idx]
    axis_name = obj._get_axis_name(idx)
    new_index = _convert_index(axis_name, a, self.encoding, self.errors)
    new_index.axis = idx

    # Because we are always 2D, there is only one new_index, so
    #  we know it will have pos=0
    new_index.set_pos(0)
    new_index.update_info(new_info)
    new_index.maybe_set_size(min_itemsize)  # check for column conflicts

    new_index_axes = [new_index]
    j = len(new_index_axes)  # i.e. 1
    assert j == 1

    # reindex by our non_index_axes & compute data_columns
    assert len(new_non_index_axes) == 1
    for a in new_non_index_axes:
        obj = _reindex_axis(obj, a[0], a[1])

    transposed = new_index.axis == 1

    # figure out data_columns and get out blocks
    data_columns = self.validate_data_columns(
        data_columns, min_itemsize, new_non_index_axes
    )

    block_obj = self.get_object(obj, transposed)._consolidate()

    blocks, blk_items = self._get_blocks_and_items(
        block_obj, table_exists, new_non_index_axes, self.values_axes, data_columns
    )

    # add my values
    vaxes = []
    for i, (b, b_items) in enumerate(zip(blocks, blk_items)):
        # shape of the data column are the indexable axes
        klass = DataCol
        name = None

        # we have a data_column
        if data_columns and len(b_items) == 1 and b_items[0] in data_columns:
            klass = DataIndexableCol
            name = b_items[0]

        if not (name is None or isinstance(name, str)):
            # TODO: should the message here be more specifically non-str?
            raise ValueError("cannot have non-object label DataIndexableCol")

        # make sure that we match up the existing columns
        # if we have an existing table
        existing_col: Optional[DataCol]
        if table_exists and validate:
            try:
                existing_col = self.values_axes[i]
            except (IndexError, KeyError) as err:
                raise ValueError(
                    f"Incompatible appended table [{blocks}]"
                    f"with existing table [{self.values_axes}]"
                ) from err
        else:
            existing_col = None

        new_name = name or f"values_block_{i}"
        data_converted = _maybe_convert_for_string_atom(
            new_name,
            b,
            existing_col=existing_col,
            min_itemsize=min_itemsize,
            nan_rep=nan_rep,
            encoding=self.encoding,
            errors=self.errors,
        )
        adj_name = _maybe_adjust_name(new_name, self.version)

        typ = klass._get_atom(data_converted)
        kind = _dtype_to_kind(data_converted.dtype.name)
        tz = _get_tz(data_converted.tz) if hasattr(data_converted, "tz") else None

        meta = metadata = ordered = None
        if is_categorical_dtype(data_converted.dtype):
            ordered = data_converted.ordered
            meta = "category"
            metadata = np.array(data_converted.categories, copy=False).ravel()

        data, dtype_name = _get_data_and_dtype_name(data_converted)

        col = klass(
            name=adj_name,
            cname=new_name,
            values=list(b_items),
            typ=typ,
            pos=j,
            kind=kind,
            tz=tz,
            ordered=ordered,
            meta=meta,
            metadata=metadata,
            dtype=dtype_name,
            data=data,
        )
        col.update_info(new_info)

        vaxes.append(col)

        j += 1

    dcs = [col.name for col in vaxes if col.is_data_indexable]

    new_table = type(self)(
        parent=self.parent,
        group=self.group,
        encoding=self.encoding,
        errors=self.errors,
        index_axes=new_index_axes,
        non_index_axes=new_non_index_axes,
        values_axes=vaxes,
        data_columns=dcs,
        info=new_info,
        nan_rep=nan_rep,
    )
    if hasattr(self, "levels"):
        # TODO: get this into constructor, only for appropriate subclass
        new_table.levels = self.levels

    new_table.validate_min_itemsize(min_itemsize)

    if validate and table_exists:
        new_table.validate(self)

    return new_table
@staticmethod
def _get_blocks_and_items(
    block_obj, table_exists, new_non_index_axes, values_axes, data_columns
):
    # Helper to clarify non-state-altering parts of _create_axes
    def get_blk_items(mgr, blocks):
        # column Index for each block, in block order
        return [mgr.items.take(blk.mgr_locs) for blk in blocks]

    blocks = block_obj._mgr.blocks
    blk_items = get_blk_items(block_obj._mgr, blocks)

    if len(data_columns):
        # each data_column becomes its own single-column block, appended
        # after the block of remaining (non-data) columns
        axis, axis_labels = new_non_index_axes[0]
        new_labels = Index(axis_labels).difference(Index(data_columns))
        mgr = block_obj.reindex(new_labels, axis=axis)._mgr

        blocks = list(mgr.blocks)
        blk_items = get_blk_items(mgr, blocks)
        for c in data_columns:
            mgr = block_obj.reindex([c], axis=axis)._mgr
            blocks.extend(mgr.blocks)
            blk_items.extend(get_blk_items(mgr, mgr.blocks))

    # reorder the blocks in the same order as the existing table if we can
    if table_exists:
        by_items = {
            tuple(b_items.tolist()): (b, b_items)
            for b, b_items in zip(blocks, blk_items)
        }
        new_blocks = []
        new_blk_items = []
        for ea in values_axes:
            items = tuple(ea.values)
            try:
                b, b_items = by_items.pop(items)
                new_blocks.append(b)
                new_blk_items.append(b_items)
            except (IndexError, KeyError) as err:
                jitems = ",".join(pprint_thing(item) for item in items)
                raise ValueError(
                    f"cannot match existing table structure for [{jitems}] "
                    "on appending data"
                ) from err
        blocks = new_blocks
        blk_items = new_blk_items

    return blocks, blk_items
def process_axes(self, obj, selection: "Selection", columns=None):
    """ process axes filters """
    # make a copy to avoid side effects
    if columns is not None:
        columns = list(columns)

    # make sure to include levels if we have them
    if columns is not None and self.is_multi_index:
        assert isinstance(self.levels, list)  # assured by is_multi_index
        for n in self.levels:
            if n not in columns:
                columns.insert(0, n)

    # reorder by any non_index_axes & limit to the select columns
    for axis, labels in self.non_index_axes:
        obj = _reindex_axis(obj, axis, labels, columns)

    # apply the selection filters (but keep in the same order)
    if selection.filter is not None:
        for field, op, filt in selection.filter.format():
            # NOTE: the closure is redefined every loop iteration but is
            # invoked immediately below, so late binding is not an issue.
            def process_filter(field, filt):

                for axis_name in obj._AXIS_ORDERS:
                    axis_number = obj._get_axis_number(axis_name)
                    axis_values = obj._get_axis(axis_name)
                    assert axis_number is not None

                    # see if the field is the name of an axis
                    if field == axis_name:

                        # if we have a multi-index, then need to include
                        # the levels
                        if self.is_multi_index:
                            filt = filt.union(Index(self.levels))

                        takers = op(axis_values, filt)
                        return obj.loc(axis=axis_number)[takers]

                    # this might be the name of a file IN an axis
                    elif field in axis_values:

                        # we need to filter on this dimension
                        values = ensure_index(getattr(obj, field).values)
                        filt = ensure_index(filt)

                        # hack until we support reversed dim flags
                        if isinstance(obj, DataFrame):
                            axis_number = 1 - axis_number
                        takers = op(values, filt)
                        return obj.loc(axis=axis_number)[takers]

                raise ValueError(f"cannot find the field [{field}] for filtering!")

            obj = process_filter(field, filt)

    return obj
def create_description(
    self,
    complib,
    complevel: Optional[int],
    fletcher32: bool,
    expectedrows: Optional[int],
) -> Dict[str, Any]:
    """ create the description of the table from the axes & values """
    # provided expected rows if its passed
    if expectedrows is None:
        expectedrows = max(self.nrows_expected, 10000)

    d = {"name": "table", "expectedrows": expectedrows}

    # description from the axes & values
    d["description"] = {a.cname: a.typ for a in self.axes}

    if complib:
        # explicit compression settings take precedence over store defaults
        if complevel is None:
            complevel = self._complevel or 9
        filters = _tables().Filters(
            complevel=complevel,
            complib=complib,
            fletcher32=fletcher32 or self._fletcher32,
        )
        d["filters"] = filters
    elif self._filters is not None:
        d["filters"] = self._filters

    return d
def read_coordinates(
    self, where=None, start: Optional[int] = None, stop: Optional[int] = None
):
    """
    select coordinates (row numbers) from a table; return the
    coordinates object
    """
    # validate the version
    self.validate_version(where)

    # infer the data kind
    if not self.infer_axes():
        return False

    # create the selection
    selection = Selection(self, where=where, start=start, stop=stop)
    coords = selection.select_coords()
    if selection.filter is not None:
        # narrow coords by each remaining filter; read only the needed span
        for field, op, filt in selection.filter.format():
            data = self.read_column(
                field, start=coords.min(), stop=coords.max() + 1
            )
            coords = coords[op(data.iloc[coords - coords.min()], filt).values]

    return Index(coords)
def read_column(
    self,
    column: str,
    where=None,
    start: Optional[int] = None,
    stop: Optional[int] = None,
):
    """
    return a single column from the table, generally only indexables
    are interesting
    """
    # validate the version
    self.validate_version()

    # infer the data kind
    if not self.infer_axes():
        return False

    if where is not None:
        raise TypeError("read_column does not currently accept a where clause")

    # find the axes
    for a in self.axes:
        if column == a.name:
            if not a.is_data_indexable:
                raise ValueError(
                    f"column [{column}] can not be extracted individually; "
                    "it is not data indexable"
                )

            # column must be an indexable or a data column
            c = getattr(self.table.cols, column)
            a.set_info(self.info)
            col_values = a.convert(
                c[start:stop],
                nan_rep=self.nan_rep,
                encoding=self.encoding,
                errors=self.errors,
            )
            # convert() returns (index_values, column_values); take the latter
            return Series(_set_tz(col_values[1], a.tz), name=column)

    raise KeyError(f"column [{column}] not found in the table")
class WORMTable(Table):
    """
    A write-once read-many table: appending is NOT allowed; data are written
    a single time in a layout that supports searching on disk.
    """

    table_type = "worm"

    def read(
        self,
        where=None,
        columns=None,
        start: Optional[int] = None,
        stop: Optional[int] = None,
    ):
        """Read the indices and indexing array, compute offset rows (abstract)."""
        raise NotImplementedError("WORMTable needs to implement read")

    def write(self, **kwargs):
        """
        Write in a searchable, non-appendable layout: indices and values via
        _write_array (e.g. a CArray) plus an indexing table (abstract).
        """
        raise NotImplementedError("WORMTable needs to implement write")
class AppendableTable(Table):
    """ support the new appendable table formats """

    table_type = "appendable"

    def write(
        self,
        obj,
        axes=None,
        append=False,
        complib=None,
        complevel=None,
        fletcher32=None,
        min_itemsize=None,
        chunksize=None,
        expectedrows=None,
        dropna=False,
        nan_rep=None,
        data_columns=None,
        track_times=True,
    ):
        """Create the on-disk table if needed and append obj's rows to it."""
        # not appending: start from a clean node
        if not append and self.is_exists:
            self._handle.remove_node(self.group, "table")

        # create the axes
        table = self._create_axes(
            axes=axes,
            obj=obj,
            validate=append,
            min_itemsize=min_itemsize,
            nan_rep=nan_rep,
            data_columns=data_columns,
        )

        for a in table.axes:
            a.validate_names()

        if not table.is_exists:

            # create the table
            options = table.create_description(
                complib=complib,
                complevel=complevel,
                fletcher32=fletcher32,
                expectedrows=expectedrows,
            )

            # set the table attributes
            table.set_attrs()

            options["track_times"] = track_times

            # create the table
            table._handle.create_table(table.group, **options)

        # update my info
        table.attrs.info = table.info

        # validate the axes and set the kinds
        for a in table.axes:
            a.validate_and_set(table, append)

        # add the rows
        table.write_data(chunksize, dropna=dropna)

    def write_data(self, chunksize: Optional[int], dropna: bool = False):
        """
        we form the data into a 2-d including indexes,values,mask write chunk-by-chunk
        """
        names = self.dtype.names
        nrows = self.nrows_expected

        # if dropna==True, then drop ALL nan rows
        masks = []
        if dropna:
            for a in self.values_axes:
                # figure the mask: only do if we can successfully process this
                # column, otherwise ignore the mask
                mask = isna(a.data).all(axis=0)
                if isinstance(mask, np.ndarray):
                    masks.append(mask.astype("u1", copy=False))

        # consolidate masks: a row is flagged only if all-NaN in every axis
        if len(masks):
            mask = masks[0]
            for m in masks[1:]:
                mask = mask & m
            mask = mask.ravel()
        else:
            mask = None

        # broadcast the indexes if needed
        indexes = [a.cvalues for a in self.index_axes]
        nindexes = len(indexes)
        assert nindexes == 1, nindexes  # ensures we dont need to broadcast

        # transpose the values so first dimension is last
        # reshape the values if needed
        values = [a.take_data() for a in self.values_axes]
        values = [v.transpose(np.roll(np.arange(v.ndim), v.ndim - 1)) for v in values]
        bvalues = []
        for i, v in enumerate(values):
            # reshape each values block to (nrows, <per-row field shape>)
            new_shape = (nrows,) + self.dtype[names[nindexes + i]].shape
            bvalues.append(values[i].reshape(new_shape))

        # write the chunks
        if chunksize is None:
            chunksize = 100000

        # reusable row buffer, re-allocated in write_data_chunk when the
        # final (shorter) chunk comes through
        rows = np.empty(min(chunksize, nrows), dtype=self.dtype)
        chunks = nrows // chunksize + 1
        for i in range(chunks):
            start_i = i * chunksize
            end_i = min((i + 1) * chunksize, nrows)
            if start_i >= end_i:
                break

            self.write_data_chunk(
                rows,
                indexes=[a[start_i:end_i] for a in indexes],
                mask=mask[start_i:end_i] if mask is not None else None,
                values=[v[start_i:end_i] for v in bvalues],
            )

    def write_data_chunk(
        self,
        rows: np.ndarray,
        indexes: List[np.ndarray],
        mask: Optional[np.ndarray],
        values: List[np.ndarray],
    ):
        """
        Fill the structured ``rows`` buffer from indexes/values, drop masked
        rows, and append the result to the pytables table.

        Parameters
        ----------
        rows : an empty memory space where we are putting the chunk
        indexes : an array of the indexes
        mask : an array of the masks
        values : an array of the values
        """
        # 0 len: nothing to write if any values block is empty
        for v in values:
            if not np.prod(v.shape):
                return

        nrows = indexes[0].shape[0]
        if nrows != len(rows):
            rows = np.empty(nrows, dtype=self.dtype)

        names = self.dtype.names
        nindexes = len(indexes)

        # indexes occupy the first fields of the structured dtype
        for i, idx in enumerate(indexes):
            rows[names[i]] = idx

        # values
        for i, v in enumerate(values):
            rows[names[i + nindexes]] = v

        # mask: keep rows where the (all-NaN) flag is NOT set
        if mask is not None:
            m = ~mask.ravel().astype(bool, copy=False)
            if not m.all():
                rows = rows[m]

        if len(rows):
            self.table.append(rows)
            self.table.flush()

    def delete(
        self, where=None, start: Optional[int] = None, stop: Optional[int] = None
    ):
        """Delete rows matching ``where`` / the [start, stop) range and
        return the number of rows removed (None if axes cannot be inferred)."""
        # delete all rows (and return the nrows)
        if where is None or not len(where):
            if start is None and stop is None:
                # delete ALL rows (and the node itself)
                nrows = self.nrows
                self._handle.remove_node(self.group, recursive=True)
            else:
                # pytables<3.0 would remove a single row with stop=None
                if stop is None:
                    stop = self.nrows
                nrows = self.table.remove_rows(start=start, stop=stop)
                self.table.flush()
            return nrows

        # infer the data kind
        if not self.infer_axes():
            return None

        # create the selection
        table = self.table
        selection = Selection(self, where, start=start, stop=stop)
        values = selection.select_coords()

        # delete the rows in reverse order
        sorted_series = Series(values).sort_values()
        ln = len(sorted_series)

        if ln:

            # construct groups of consecutive rows
            diff = sorted_series.diff()
            groups = list(diff[diff > 1].index)

            # 1 group
            if not len(groups):
                groups = [0]

            # final element
            if groups[-1] != ln:
                groups.append(ln)

            # initial element
            if groups[0] != 0:
                groups.insert(0, 0)

            # we must remove in reverse order!
            # (removing earlier rows first would shift the later row numbers)
            pg = groups.pop()
            for g in reversed(groups):
                rows = sorted_series.take(range(g, pg))
                table.remove_rows(
                    start=rows[rows.index[0]], stop=rows[rows.index[-1]] + 1
                )
                pg = g

            self.table.flush()

        # return the number of rows removed
        return ln
class AppendableFrameTable(AppendableTable):
    """ support the new appendable table formats """

    pandas_kind = "frame_table"
    table_type = "appendable_frame"
    ndim = 2
    obj_type: Type[FrameOrSeriesUnion] = DataFrame

    @property
    def is_transposed(self) -> bool:
        # frames are stored transposed: the index lives on axis 1
        return self.index_axes[0].axis == 1

    @classmethod
    def get_object(cls, obj, transposed: bool):
        """ these are written transposed """
        if transposed:
            obj = obj.T
        return obj

    def read(
        self,
        where=None,
        columns=None,
        start: Optional[int] = None,
        stop: Optional[int] = None,
    ):
        """Reconstruct the stored DataFrame, honoring where/columns/start/stop.

        Returns None if the axes cannot be inferred (empty node).
        """
        # validate the version
        self.validate_version(where)

        # infer the data kind
        if not self.infer_axes():
            return None

        result = self._read_axes(where=where, start=start, stop=stop)

        info = (
            self.info.get(self.non_index_axes[0][0], {})
            if len(self.non_index_axes)
            else {}
        )

        # locate the (single) index axis among all axes
        inds = [i for i, ax in enumerate(self.axes) if ax is self.index_axes[0]]
        assert len(inds) == 1
        ind = inds[0]
        index = result[ind][0]

        frames = []
        for i, a in enumerate(self.axes):
            if a not in self.values_axes:
                continue
            index_vals, cvalues = result[i]

            # we could have a multi-index constructor here
            # ensure_index doesn't recognize our list-of-tuples here
            if info.get("type") == "MultiIndex":
                cols = MultiIndex.from_tuples(index_vals)
            else:
                cols = Index(index_vals)

            names = info.get("names")
            if names is not None:
                cols.set_names(names, inplace=True)

            if self.is_transposed:
                values = cvalues
                index_ = cols
                cols_ = Index(index, name=getattr(index, "name", None))
            else:
                values = cvalues.T
                index_ = Index(index, name=getattr(index, "name", None))
                cols_ = cols

            # if we have a DataIndexableCol, its shape will only be 1 dim
            if values.ndim == 1 and isinstance(values, np.ndarray):
                values = values.reshape((1, values.shape[0]))

            if isinstance(values, np.ndarray):
                df = DataFrame(values.T, columns=cols_, index=index_)
            elif isinstance(values, Index):
                df = DataFrame(values, columns=cols_, index=index_)
            else:
                # Categorical
                df = DataFrame([values], columns=cols_, index=index_)
            assert (df.dtypes == values.dtype).all(), (df.dtypes, values.dtype)
            frames.append(df)

        if len(frames) == 1:
            df = frames[0]
        else:
            df = concat(frames, axis=1)

        selection = Selection(self, where=where, start=start, stop=stop)
        # apply the selection filters & axis orderings
        df = self.process_axes(df, selection=selection, columns=columns)

        return df
class AppendableSeriesTable(AppendableFrameTable):
    """ support the new appendable table formats """

    pandas_kind = "series_table"
    table_type = "appendable_series"
    ndim = 2
    obj_type = Series

    @property
    def is_transposed(self) -> bool:
        return False

    @classmethod
    def get_object(cls, obj, transposed: bool):
        return obj

    def write(self, obj, data_columns=None, **kwargs):
        """ we are going to write this as a frame table """
        if not isinstance(obj, DataFrame):
            # wrap the Series in a one-column frame (default column "values")
            name = obj.name or "values"
            obj = obj.to_frame(name)
        return super().write(obj=obj, data_columns=obj.columns.tolist(), **kwargs)

    def read(
        self,
        where=None,
        columns=None,
        start: Optional[int] = None,
        stop: Optional[int] = None,
    ) -> Series:
        """Read back as a Series (the single value column of the frame)."""
        is_multi_index = self.is_multi_index
        if columns is not None and is_multi_index:
            assert isinstance(self.levels, list)  # needed for mypy
            # index level columns must always be read along with the values
            for n in self.levels:
                if n not in columns:
                    columns.insert(0, n)
        s = super().read(where=where, columns=columns, start=start, stop=stop)
        if is_multi_index:
            s.set_index(self.levels, inplace=True)

        s = s.iloc[:, 0]

        # remove the default name
        if s.name == "values":
            s.name = None
        return s
class AppendableMultiSeriesTable(AppendableSeriesTable):
    """ support the new appendable table formats """

    pandas_kind = "series_table"
    table_type = "appendable_multiseries"

    def write(self, obj, **kwargs):
        """ we are going to write this as a frame table """
        name = obj.name or "values"
        # flatten the MultiIndex into ordinary level columns + the values
        newobj, self.levels = self.validate_multiindex(obj)
        assert isinstance(self.levels, list)  # for mypy
        cols = list(self.levels)
        cols.append(name)
        newobj.columns = Index(cols)
        return super().write(obj=newobj, **kwargs)
class GenericTable(AppendableFrameTable):
    """ a table that read/writes the generic pytables table format """

    pandas_kind = "frame_table"
    table_type = "generic_table"
    ndim = 2
    obj_type = DataFrame
    levels: List[Label]

    @property
    def pandas_type(self) -> str:
        return self.pandas_kind

    @property
    def storable(self):
        # generic tables may live directly on the group (no "table" child)
        return getattr(self.group, "table", None) or self.group

    def get_attrs(self):
        """ retrieve our attributes """
        self.non_index_axes = []
        self.nan_rep = None
        self.levels = []

        # split the indexables into index vs values axes
        self.index_axes = [a for a in self.indexables if a.is_an_indexable]
        self.values_axes = [a for a in self.indexables if not a.is_an_indexable]
        self.data_columns = [a.name for a in self.values_axes]

    @cache_readonly
    def indexables(self):
        """ create the indexables from the table description """
        d = self.description

        # TODO: can we get a typ for this? AFAICT it is the only place
        # where we aren't passing one
        # the index columns is just a simple index
        md = self.read_metadata("index")
        meta = "category" if md is not None else None
        index_col = GenericIndexCol(
            name="index", axis=0, table=self.table, meta=meta, metadata=md
        )

        _indexables: List[Union[GenericIndexCol, GenericDataIndexableCol]] = [index_col]

        # every described column becomes a data-indexable column
        for i, n in enumerate(d._v_names):
            assert isinstance(n, str)

            atom = getattr(d, n)
            md = self.read_metadata(n)
            meta = "category" if md is not None else None
            dc = GenericDataIndexableCol(
                name=n,
                pos=i,
                values=[n],
                typ=atom,
                table=self.table,
                meta=meta,
                metadata=md,
            )
            _indexables.append(dc)

        return _indexables

    def write(self, **kwargs):
        raise NotImplementedError("cannot write on an generic table")
class AppendableMultiFrameTable(AppendableFrameTable):
    """ a frame with a multi-index """

    table_type = "appendable_multiframe"
    obj_type = DataFrame
    ndim = 2
    # auto-generated level names look like "level_0", "level_1", ...
    _re_levels = re.compile(r"^level_\d+$")

    @property
    def table_type_short(self) -> str:
        return "appendable_multi"

    def write(self, obj, data_columns=None, **kwargs):
        """Write the frame, forcing the index levels to be data columns."""
        if data_columns is None:
            data_columns = []
        elif data_columns is True:
            data_columns = obj.columns.tolist()
        obj, self.levels = self.validate_multiindex(obj)
        assert isinstance(self.levels, list)  # for mypy
        for n in self.levels:
            if n not in data_columns:
                data_columns.insert(0, n)
        return super().write(obj=obj, data_columns=data_columns, **kwargs)

    def read(
        self,
        where=None,
        columns=None,
        start: Optional[int] = None,
        stop: Optional[int] = None,
    ):
        """Read back and restore the MultiIndex from the level columns."""
        df = super().read(where=where, columns=columns, start=start, stop=stop)
        df = df.set_index(self.levels)

        # remove names for 'level_%d'
        df.index = df.index.set_names(
            [None if self._re_levels.search(name) else name for name in df.index.names]
        )

        return df
def _reindex_axis(obj: DataFrame, axis: int, labels: Index, other=None) -> DataFrame:
    """Reindex ``obj`` along ``axis`` to ``labels`` (optionally intersected
    with ``other``), skipping the reindex when the axis already matches."""
    ax = obj._get_axis(axis)
    labels = ensure_index(labels)

    # try not to reindex even if other is provided
    # if it equals our current index
    if other is not None:
        other = ensure_index(other)
    if (other is None or labels.equals(other)) and labels.equals(ax):
        return obj

    labels = ensure_index(labels.unique())
    if other is not None:
        labels = ensure_index(other.unique()).intersection(labels, sort=False)
    if not labels.equals(ax):
        # positional slice: full slice everywhere except the target axis
        slicer: List[Union[slice, Index]] = [slice(None, None)] * obj.ndim
        slicer[axis] = labels
        obj = obj.loc[tuple(slicer)]
    return obj
# tz to/from coercion
def _get_tz(tz: tzinfo) -> Union[str, tzinfo]:
""" for a tz-aware type, return an encoded zone """
zone = timezones.get_timezone(tz)
return zone
def _set_tz(
    values: Union[np.ndarray, Index],
    tz: Optional[Union[str, tzinfo]],
    coerce: bool = False,
) -> Union[np.ndarray, DatetimeIndex]:
    """
    coerce the values to a DatetimeIndex if tz is set
    preserve the input shape if possible

    Parameters
    ----------
    values : ndarray or Index
    tz : str or tzinfo
    coerce : if we do not have a passed timezone, coerce to M8[ns] ndarray
    """
    if isinstance(values, DatetimeIndex):
        # If values is tzaware, the tz gets dropped in the values.ravel()
        # call below (which returns an ndarray).  So we are only non-lossy
        # if `tz` matches `values.tz`.
        assert values.tz is None or values.tz == tz

    if tz is not None:
        if isinstance(values, DatetimeIndex):
            name = values.name
            # work on the int64 (nanosecond) representation
            values = values.asi8
        else:
            name = None
            values = values.ravel()

        tz = _ensure_decoded(tz)
        values = DatetimeIndex(values, name=name)
        # stored values are treated as UTC, then converted to the target zone
        values = values.tz_localize("UTC").tz_convert(tz)
    elif coerce:
        values = np.asarray(values, dtype="M8[ns]")

    return values
def _convert_index(name: str, index: Index, encoding: str, errors: str) -> IndexCol:
    """Build an IndexCol (storable values + pytables atom) from an Index.

    Raises
    ------
    TypeError : if ``index`` is a MultiIndex (not supported here).
    """
    assert isinstance(name, str)

    index_name = index.name
    converted, dtype_name = _get_data_and_dtype_name(index)
    kind = _dtype_to_kind(dtype_name)
    atom = DataIndexableCol._get_atom(converted)

    if isinstance(index, Int64Index) or needs_i8_conversion(index.dtype):
        # Includes Int64Index, RangeIndex, DatetimeIndex, TimedeltaIndex, PeriodIndex,
        # in which case "kind" is "integer", "integer", "datetime64",
        # "timedelta64", and "integer", respectively.
        return IndexCol(
            name,
            values=converted,
            kind=kind,
            typ=atom,
            freq=getattr(index, "freq", None),
            tz=getattr(index, "tz", None),
            index_name=index_name,
        )

    if isinstance(index, MultiIndex):
        raise TypeError("MultiIndex not supported here!")

    inferred_type = lib.infer_dtype(index, skipna=False)
    # we won't get inferred_type of "datetime64" or "timedelta64" as these
    # would go through the DatetimeIndex/TimedeltaIndex paths above

    values = np.asarray(index)

    if inferred_type == "date":
        # dates are stored as int32 ordinals
        converted = np.asarray([v.toordinal() for v in values], dtype=np.int32)
        return IndexCol(
            name, converted, "date", _tables().Time32Col(), index_name=index_name
        )
    elif inferred_type == "string":
        # fixed-width bytes; itemsize derived from the longest entry
        converted = _convert_string_array(values, encoding, errors)
        itemsize = converted.dtype.itemsize
        return IndexCol(
            name,
            converted,
            "string",
            _tables().StringCol(itemsize),
            index_name=index_name,
        )
    elif inferred_type in ["integer", "floating"]:
        return IndexCol(
            name, values=converted, kind=kind, typ=atom, index_name=index_name
        )
    else:
        # fall back to a generic object atom
        assert isinstance(converted, np.ndarray) and converted.dtype == object
        assert kind == "object", kind
        atom = _tables().ObjectAtom()
        return IndexCol(name, converted, kind, atom, index_name=index_name)
def _unconvert_index(
data, kind: str, encoding: str, errors: str
) -> Union[np.ndarray, Index]:
index: Union[Index, np.ndarray]
if kind == "datetime64":
index = DatetimeIndex(data)
elif kind == "timedelta64":
index = TimedeltaIndex(data)
elif kind == "date":
try:
index = np.asarray([date.fromordinal(v) for v in data], dtype=object)
except (ValueError):
index = np.asarray([date.fromtimestamp(v) for v in data], dtype=object)
elif kind in ("integer", "float"):
index = np.asarray(data)
elif kind in ("string"):
index = _unconvert_string_array(
data, nan_rep=None, encoding=encoding, errors=errors
)
elif kind == "object":
index = np.asarray(data[0])
else: # pragma: no cover
raise ValueError(f"unrecognized index type {kind}")
return index
def _maybe_convert_for_string_atom(
    name: str, block, existing_col, min_itemsize, nan_rep, encoding, errors
):
    """
    Coerce an object-dtype block to a fixed-width bytes array suitable for
    a PyTables StringCol, honoring ``min_itemsize`` and any pre-existing
    column width.  Non-object blocks are returned unchanged.

    Raises
    ------
    TypeError : for date/datetime object blocks, or when a column's
        contents are not all strings.
    """
    if not block.is_object:
        return block.values

    dtype_name = block.dtype.name
    inferred_type = lib.infer_dtype(block.values, skipna=False)

    if inferred_type == "date":
        raise TypeError("[date] is not implemented as a table column")
    elif inferred_type == "datetime":
        # after GH#8260
        # this only would be hit for a multi-timezone dtype which is an error
        raise TypeError(
            "too many timezones in this block, create separate data columns"
        )
    elif not (inferred_type == "string" or dtype_name == "object"):
        return block.values

    block = block.fillna(nan_rep, downcast=False)
    if isinstance(block, list):
        # Note: because block is always object dtype, fillna goes
        # through a path such that the result is always a 1-element list
        block = block[0]
    data = block.values

    # see if we have a valid string type
    inferred_type = lib.infer_dtype(data, skipna=False)
    if inferred_type != "string":

        # we cannot serialize this data, so report an exception on a column
        # by column basis

        # BUGFIX: block.shape[0] is already an int (the number of columns);
        # wrapping it in len() raised TypeError unconditionally instead of
        # reporting the offending column
        for i in range(block.shape[0]):
            col = block.iget(i)
            inferred_type = lib.infer_dtype(col, skipna=False)
            if inferred_type != "string":
                iloc = block.mgr_locs.indexer[i]
                raise TypeError(
                    f"Cannot serialize the column [{iloc}] because\n"
                    f"its data contents are [{inferred_type}] object dtype"
                )

    # itemsize is the maximum length of a string (along any dimension)
    data_converted = _convert_string_array(data, encoding, errors).reshape(data.shape)
    assert data_converted.shape == block.shape, (data_converted.shape, block.shape)
    itemsize = data_converted.itemsize

    # specified min_itemsize?
    if isinstance(min_itemsize, dict):
        min_itemsize = int(min_itemsize.get(name) or min_itemsize.get("values") or 0)
    itemsize = max(min_itemsize or 0, itemsize)

    # check for column in the values conflicts
    if existing_col is not None:
        eci = existing_col.validate_col(itemsize)
        if eci > itemsize:
            itemsize = eci

    data_converted = data_converted.astype(f"|S{itemsize}", copy=False)
    return data_converted
def _convert_string_array(data: np.ndarray, encoding: str, errors: str) -> np.ndarray:
"""
Take a string-like that is object dtype and coerce to a fixed size string type.
Parameters
----------
data : np.ndarray[object]
encoding : str
errors : str
Handler for encoding errors.
Returns
-------
np.ndarray[fixed-length-string]
"""
# encode if needed
if len(data):
data = (
Series(data.ravel())
.str.encode(encoding, errors)
._values.reshape(data.shape)
)
# create the sized dtype
ensured = ensure_object(data.ravel())
itemsize = max(1, libwriters.max_len_string_array(ensured))
data = np.asarray(data, dtype=f"S{itemsize}")
return data
def _unconvert_string_array(
    data: np.ndarray, nan_rep, encoding: str, errors: str
) -> np.ndarray:
    """
    Inverse of _convert_string_array.

    Parameters
    ----------
    data : np.ndarray[fixed-length-string]
    nan_rep : the storage repr of NaN (defaults to "nan" when None)
    encoding : str
    errors : str
        Handler for encoding errors.

    Returns
    -------
    np.ndarray[object]
        Decoded data, with ``nan_rep`` entries replaced (via libwriters)
        and the original shape restored.
    """
    shape = data.shape
    data = np.asarray(data.ravel(), dtype=object)

    if len(data):
        itemsize = libwriters.max_len_string_array(ensure_object(data))
        dtype = f"U{itemsize}"

        if isinstance(data[0], bytes):
            # stored as fixed-width bytes: decode back to str
            data = Series(data).str.decode(encoding, errors=errors)._values
        else:
            data = data.astype(dtype, copy=False).astype(object, copy=False)

    if nan_rep is None:
        nan_rep = "nan"

    data = libwriters.string_array_replace_from_nan_rep(data, nan_rep)
    return data.reshape(shape)
def _maybe_convert(values: np.ndarray, val_kind: str, encoding: str, errors: str):
    """Convert stored values to their in-memory form when the kind needs it."""
    assert isinstance(val_kind, str), type(val_kind)
    if not _need_convert(val_kind):
        return values
    converter = _get_converter(val_kind, encoding, errors)
    return converter(values)
def _get_converter(kind: str, encoding: str, errors: str):
if kind == "datetime64":
return lambda x: np.asarray(x, dtype="M8[ns]")
elif kind == "string":
return lambda x: _unconvert_string_array(
x, nan_rep=None, encoding=encoding, errors=errors
)
else: # pragma: no cover
raise ValueError(f"invalid kind {kind}")
def _need_convert(kind: str) -> bool:
if kind in ("datetime64", "string"):
return True
return False
def _maybe_adjust_name(name: str, version: Sequence[int]) -> str:
"""
Prior to 0.10.1, we named values blocks like: values_block_0 an the
name values_0, adjust the given name if necessary.
Parameters
----------
name : str
version : Tuple[int, int, int]
Returns
-------
str
"""
if isinstance(version, str) or len(version) < 3:
raise ValueError("Version is incorrect, expected sequence of 3 integers.")
if version[0] == 0 and version[1] <= 10 and version[2] == 0:
m = re.search(r"values_block_(\d+)", name)
if m:
grp = m.groups()[0]
name = f"values_{grp}"
return name
def _dtype_to_kind(dtype_str: str) -> str:
    """
    Find the "kind" string describing the given dtype name.

    Raises ValueError if the dtype name cannot be interpreted.
    """
    dtype_str = _ensure_decoded(dtype_str)

    # checked in order; e.g. "int"/"uint" must not shadow "datetime64"
    prefix_to_kind = [
        (("string", "bytes"), "string"),
        (("float",), "float"),
        (("complex",), "complex"),
        (("int", "uint"), "integer"),
        (("datetime64",), "datetime64"),
        (("timedelta",), "timedelta64"),
        (("bool",), "bool"),
        (("category",), "category"),
        # periods are stored as integers; the `freq` attr allows restoring
        (("period",), "integer"),
    ]
    for prefixes, kind in prefix_to_kind:
        if dtype_str.startswith(prefixes):
            return kind

    if dtype_str == "object":
        return "object"

    raise ValueError(f"cannot interpret dtype of [{dtype_str}]")
def _get_data_and_dtype_name(data: ArrayLike):
"""
Convert the passed data into a storable form and a dtype string.
"""
if isinstance(data, Categorical):
data = data.codes
# For datetime64tz we need to drop the TZ in tests TODO: why?
dtype_name = data.dtype.name.split("[")[0]
if data.dtype.kind in ["m", "M"]:
data = np.asarray(data.view("i8"))
# TODO: we used to reshape for the dt64tz case, but no longer
# doing that doesn't seem to break anything. why?
elif isinstance(data, PeriodIndex):
data = data.asi8
data = np.asarray(data)
return data, dtype_name
class Selection:
    """
    Carries out a selection operation on a tables.Table object.

    Parameters
    ----------
    table : a Table object
    where : list of Terms (or convertible to)
    start, stop: indices to start and/or stop selection

    Notes
    -----
    ``where`` may alternatively be a list/array of integer row locations
    or a boolean mask, in which case it is used directly as coordinates
    instead of being compiled into a PyTables query expression.
    """

    def __init__(
        self,
        table: Table,
        where=None,
        start: Optional[int] = None,
        stop: Optional[int] = None,
    ):
        self.table = table
        self.where = where
        self.start = start
        self.stop = stop
        self.condition = None
        self.filter = None
        self.terms = None
        self.coordinates = None

        if is_list_like(where):

            # see if we have a passed coordinate like
            # (infer_dtype may raise ValueError on exotic inputs -> fall
            #  through to expression parsing below)
            with suppress(ValueError):
                inferred = lib.infer_dtype(where, skipna=False)
                if inferred == "integer" or inferred == "boolean":
                    where = np.asarray(where)
                    if where.dtype == np.bool_:
                        # boolean mask applied over the [start, stop) window
                        start, stop = self.start, self.stop
                        if start is None:
                            start = 0
                        if stop is None:
                            stop = self.table.nrows
                        self.coordinates = np.arange(start, stop)[where]
                    elif issubclass(where.dtype.type, np.integer):
                        # explicit row numbers; must lie within the window
                        if (self.start is not None and (where < self.start).any()) or (
                            self.stop is not None and (where >= self.stop).any()
                        ):
                            raise ValueError(
                                "where must have index locations >= start and < stop"
                            )
                        self.coordinates = where

        if self.coordinates is None:

            self.terms = self.generate(where)

            # create the numexpr & the filter
            if self.terms is not None:
                self.condition, self.filter = self.terms.evaluate()

    def generate(self, where):
        """ where can be a : dict,list,tuple,string """
        if where is None:
            return None

        q = self.table.queryables()
        try:
            return PyTablesExpr(where, queryables=q, encoding=self.table.encoding)
        except NameError as err:
            # raise a nice message, suggesting that the user should use
            # data_columns
            qkeys = ",".join(q.keys())
            msg = dedent(
                f"""\
                The passed where expression: {where}
                            contains an invalid variable reference
                            all of the variable references must be a reference to
                            an axis (e.g. 'index' or 'columns'), or a data_column
                            The currently defined references are: {qkeys}
                """
            )
            raise ValueError(msg) from err

    def select(self):
        """
        generate the selection
        """
        if self.condition is not None:
            # numexpr-backed query evaluated by pytables
            return self.table.table.read_where(
                self.condition.format(), start=self.start, stop=self.stop
            )
        elif self.coordinates is not None:
            return self.table.table.read_coordinates(self.coordinates)
        return self.table.table.read(start=self.start, stop=self.stop)

    def select_coords(self):
        """
        generate the selection (as integer row coordinates)
        """
        start, stop = self.start, self.stop
        nrows = self.table.nrows
        # normalize missing / negative bounds against the table length
        if start is None:
            start = 0
        elif start < 0:
            start += nrows
        if stop is None:
            stop = nrows
        elif stop < 0:
            stop += nrows

        if self.condition is not None:
            return self.table.table.get_where_list(
                self.condition.format(), start=start, stop=stop, sort=True
            )
        elif self.coordinates is not None:
            return self.coordinates

        return np.arange(start, stop)
| bsd-3-clause |
ToFuProject/tofu | tofu/data/_core.py | 1 | 174350 | # -*- coding: utf-8 -*-
# Built-in
import sys
import os
import itertools as itt
import copy
import warnings
from abc import ABCMeta, abstractmethod
import inspect
# Common
import numpy as np
import scipy.interpolate as scpinterp
import matplotlib.pyplot as plt
from matplotlib.tri import Triangulation as mplTri
# tofu
from tofu import __version__ as __version__
import tofu.pathfile as tfpf
import tofu.utils as utils
try:
import tofu.data._comp as _comp
import tofu.data._plot as _plot
import tofu.data._def as _def
import tofu._physics as _physics
import tofu.data._spectrafit2d as _spectrafit2d
except Exception:
from . import _comp as _comp
from . import _plot as _plot
from . import _def as _def
from .. import _physics as _physics
from . import _spectrafit2d as _spectrafit2d
# Public objects exported by this module
__all__ = ['DataCam1D','DataCam2D',
           'DataCam1DSpectral','DataCam2DSpectral',
           'Plasma2D']

# Default interpolation kind for time interpolation
# (presumably passed as scipy.interpolate's ``kind``; 'zero' = piecewise
# constant — TODO confirm at call sites)
_INTERPT = 'zero'
#############################################
# utils
#############################################
def _format_ind(ind=None, n=None):
"""Helper routine to convert selected channels (as numbers) in `ind` to
a boolean array format.
Parameters
----------
ind : integer, or list of integers
A channel or a list of channels that the user wants to select.
n : integer, or None
The total number of available channels.
Returns
-------
ind : ndarray of booleans, size (n,)
The array with the selected channels set to True, remaining ones set
to False
Examples
--------
>>> _format_ind(ind=[0, 3], n=4)
[True, False, False, True]
"""
if ind is None:
ind = np.ones((n,),dtype=bool)
else:
# list of accepted integer types
lInt = [int, np.int64, np.int32, np.int_, np.longlong]
if type(ind) in lInt:
ii = np.zeros((n,),dtype=bool)
ii[int(ii)] = True
ind = ii
else:
assert hasattr(ind,'__iter__')
if type(ind[0]) in [bool,np.bool_]:
ind = np.asarray(ind).astype(bool)
assert ind.size==n
elif type(ind[0]) in lInt:
ind = np.asarray(ind).astype(int)
ii = np.zeros((n,),dtype=bool)
ii[ind] = True
ind = ii
else:
msg = ("Index must be int, or an iterable of bool or int "
"(first element of index has"
" type: {})!".format(type(ind[0]))
)
raise Exception(msg)
return ind
def _select_ind(v, ref, nRef):
    """Return a boolean mask selecting entries of ``ref`` matching ``v``.

    ``v`` may be:
      - None: select everything
      - a scalar or ndarray of values: select the nearest ``ref`` entries
        (via np.digitize against the bin midpoints)
      - a list of (lower, upper) pairs: select entries INSIDE the intervals
      - a tuple of (lower, upper) pairs: select entries OUTSIDE them

    The mask has shape (ref.shape[0], nRef) for 2d ``ref``; for 1d ``ref``
    it is squeezed to 1d before being returned.
    """
    ltypes = [int,float,np.int64,np.float64]
    C0 = type(v) in ltypes          # scalar value
    C1 = type(v) is np.ndarray      # array of values
    C2 = type(v) is list            # intervals, keep inside
    C3 = type(v) is tuple           # intervals, keep outside (complement)
    assert v is None or np.sum([C0,C1,C2,C3])==1
    nnRef = 1 if ref.ndim==1 else ref.shape[0]
    ind = np.zeros((nnRef,nRef),dtype=bool)
    if v is None:
        ind = ~ind
    elif C0 or C1:
        if C0:
            v = np.r_[v]
        # Traditional :
        #for vv in v:
        #    ind[np.nanargmin(np.abs(ref-vv))] = True
        # Faster with digitize :
        if ref.ndim==1:
            ind[0,np.digitize(v, (ref[1:]+ref[:-1])/2.)] = True
        elif ref.ndim==2:
            for ii in range(0,ref.shape[0]):
                ind[ii,np.digitize(v, (ref[ii,1:]+ref[ii,:-1])/2.)] = True
    elif C2 or C3:
        # either a single (lower, upper) pair, or a sequence of such pairs
        c0 = len(v)==2 and all([type(vv) in ltypes for vv in v])
        c1 = all([(type(vv) is type(v) and len(vv)==2
                   and all([type(vvv) in ltypes for vvv in vv]))
                  for vv in v])
        assert c0!=c1
        if c0:
            v = [v]
        for vv in v:
            ind = ind | ((ref>=vv[0]) & (ref<=vv[1]))
        if C3:
            ind = ~ind
    if ref.ndim == 1:
        ind = np.atleast_1d(ind.squeeze())
    return ind
#############################################
# class
#############################################
class DataAbstract(utils.ToFuObject):
    """Abstract base for tofu data objects (presumably the DataCam* /
    Plasma2D classes listed in ``__all__`` — confirm in subclass defs).

    State is held in a set of dictionaries: reference data (_ddataRef),
    treatment parameters (_dtreat), treated data (_ddata), plus labels,
    geometry, channels and extra-info dicts (see _reset below).
    """

    __metaclass__ = ABCMeta  # Python 2 style; harmless/redundant on Python 3
    # Fixed (class-wise) dictionary of default properties
    # 'Id' : fields included when building the object's unique Id
    # 'dtreat' : canonical order in which data treatments are applied
    _ddef = {'Id':{'include':['Mod','Cls','Exp','Diag',
                              'Name','shot','version']},
             'dtreat':{'order':['mask','interp-indt','interp-indch','data0','dfit',
                                'indt', 'indch', 'indlamb', 'interp-t']}}
    # Does not exist before Python 3.6 !!!
    def __init_subclass__(cls, **kwdargs):
        """Give every subclass its own deep copy of the class defaults,
        so subclass-level mutations do not leak into DataAbstract._ddef."""
        # Python 2
        super(DataAbstract,cls).__init_subclass__(**kwdargs)
        # Python 3
        #super().__init_subclass__(**kwdargs)
        cls._ddef = copy.deepcopy(DataAbstract._ddef)
        #cls._dplot = copy.deepcopy(Struct._dplot)
        #cls._set_color_ddef(cls._color)
    def __init__(self, data=None, t=None, X=None, lamb=None,
                 dchans=None, dlabels=None, dX12='geom',
                 Id=None, Name=None, Exp=None, shot=None, Diag=None,
                 dextra=None, lCam=None, config=None,
                 fromdict=None, sep=None, SavePath=os.path.abspath('./'),
                 SavePath_Include=tfpf.defInclude):
        """Instantiate from data arrays + identity info.

        All actual initialization is delegated to the utils.ToFuObject
        base class, which receives the full keyword set captured below.
        """
        # Create a dplot at instance level
        #self._dplot = copy.deepcopy(self.__class__._dplot)

        # capture all constructor arguments and forward them to the base
        kwdargs = locals()
        del kwdargs['self']
        # super()
        super(DataAbstract,self).__init__(**kwdargs)
    def _reset(self):
        """Reset all data dictionaries (keys present, all values None)."""
        # super()
        super(DataAbstract,self)._reset()
        self._ddataRef = dict.fromkeys(self._get_keys_ddataRef())
        self._dtreat = dict.fromkeys(self._get_keys_dtreat())
        self._ddata = dict.fromkeys(self._get_keys_ddata())
        self._dlabels = dict.fromkeys(self._get_keys_dlabels())
        self._dgeom = dict.fromkeys(self._get_keys_dgeom())
        self._dchans = dict.fromkeys(self._get_keys_dchans())
        self._dextra = dict.fromkeys(self._get_keys_dextra())
        # only 2D cameras carry a pixel-coordinates (X12) dict
        if self._is2D():
            self._dX12 = dict.fromkeys(self._get_keys_dX12())
    @classmethod
    def _checkformat_inputs_Id(cls, Id=None, Name=None,
                               Exp=None, shot=None,
                               Diag=None, include=None,
                               **kwdargs):
        """Validate / normalize identity-related keyword arguments.

        If ``Id`` is provided it must be a utils.ID instance, and its
        Name/Exp/shot/Diag override the individual arguments.  ``shot``
        toggles its own presence in the ``include`` field list.  Returns
        the updated kwdargs dict.
        """
        if Id is not None:
            if not isinstance(Id, utils.ID):
                msg = ("Arg Id must be a utils.ID instance!\n"
                       + "\t- provided: {}".format(Id))
                raise Exception(msg)
            Name, Exp, shot, Diag = Id.Name, Id.Exp, Id.shot, Id.Diag
        assert type(Name) is str, Name
        assert type(Diag) is str, Diag
        assert type(Exp) is str, Exp
        if include is None:
            include = cls._ddef['Id']['include']
        assert shot is None or type(shot) in [int,np.int64]
        if shot is None:
            # no shot: drop it from the Id fields
            if 'shot' in include:
                include.remove('shot')
        else:
            shot = int(shot)
            if 'shot' not in include:
                include.append('shot')
        kwdargs.update({'Name':Name, 'Exp':Exp, 'shot':shot,
                        'Diag':Diag, 'include':include})
        return kwdargs
###########
# Get largs
###########
@staticmethod
def _get_largs_ddataRef():
largs = ['data','t',
'X', 'indtX',
'lamb', 'indtlamb', 'indXlamb', 'indtXlamb']
return largs
@staticmethod
def _get_largs_ddata():
largs = []
return largs
@staticmethod
def _get_largs_dtreat():
largs = ['dtreat']
return largs
@staticmethod
def _get_largs_dlabels():
largs = ['dlabels']
return largs
@staticmethod
def _get_largs_dgeom():
largs = ['lCam','config']
return largs
@staticmethod
def _get_largs_dX12():
largs = ['dX12']
return largs
@staticmethod
def _get_largs_dchans():
largs = ['dchans']
return largs
@staticmethod
def _get_largs_dextra():
largs = ['dextra']
return largs
###########
# Get check and format inputs
###########
def _checkformat_inputs_ddataRef(self, data=None, t=None,
X=None, indtX=None,
lamb=None, indtlamb=None,
indXlamb=None, indtXlamb=None):
if data is None:
msg = "data can not be None!"
raise Exception(msg)
data = np.atleast_1d(np.asarray(data).squeeze())
if data.ndim == 1:
data = data.reshape((1, data.size))
if t is not None:
t = np.atleast_1d(np.asarray(t).squeeze())
if X is not None:
X = np.atleast_1d(np.asarray(X).squeeze())
if indtX is not None:
indtX = np.atleast_1d(np.asarray(indtX, dtype=int).squeeze())
if lamb is not None:
lamb = np.atleast_1d(np.asarray(lamb).squeeze())
if indtlamb is not None:
indtlamb = np.atleast_1d(np.asarray(indtlamb, dtype=int).squeeze())
if indXlamb is not None:
indXlamb = np.atleast_1d(np.asarray(indXlamb, dtype=int).squeeze())
if indtXlamb is not None:
indtXlamb = np.atleast_1d(np.asarray(indtXlamb,
dtype=int).squeeze())
ndim = data.ndim
assert ndim in [2,3]
if not self._isSpectral():
msg = ("self is not of spectral type\n"
+ " => the data cannot be 3D ! (ndim)")
assert ndim==2, msg
nt = data.shape[0]
if t is None:
t = np.arange(0,nt)
else:
if t.shape != (nt,):
msg = ("Wrong time dimension\n"
+ "\t- t.shape = {}\n".format(t.shape)
+ "\t- nt = {}".format(nt))
raise Exception(msg)
n1 = data.shape[1]
if ndim==2:
lC = [X is None, lamb is None]
if not any(lC):
msg = "Please provide at least X or lamb (both are None)!"
raise Exception(msg)
if all(lC):
if self._isSpectral():
X = np.array([0])
lamb = np.arange(0,n1)
data = data.reshape((nt,1,n1))
else:
X = np.arange(0,n1)
elif lC[0]:
if not self._isSpectral():
msg = "lamb provided => self._isSpectral() must be True!"
raise Exception(msg)
X = np.array([0])
data = data.reshape((nt, 1, n1))
if lamb.ndim not in [1, 2]:
msg = ("lamb.ndim must be in [1, 2]\n"
+ "\t- lamb.shape = {}".format(lamb.shape))
raise Exception(msg)
if lamb.ndim == 1:
if lamb.size != n1:
msg = ("lamb has wrong size!\n"
+ "\t- expected: {}".format(n1)
+ "\t- provided: {}".format(lamb.size))
raise Exception(msg)
elif lamb.ndim == 2:
if lamb.shape[1] != n1:
msg = ("lamb has wrong shape!\n"
+ "\t- expected: (.., {})".format(n1)
+ "\t- provided: {}".format(lamb.shape))
raise Exception(msg)
else:
if self._isSpectral():
msg = "object cannot be spectral!"
raise Exception(msg)
if X.ndim not in [1, 2] or X.shape[-1] != n1:
msg = ("X.ndim should be in [1, 2]\n"
+ "\t- expected: (..., {})\n".format(n1)
+ "\t- provided: {}".format(X.shape))
raise Exception(msg)
else:
assert self._isSpectral()
n2 = data.shape[2]
lC = [X is None, lamb is None]
if lC[0]:
X = np.arange(0,n1)
else:
assert X.ndim in [1,2]
assert X.shape[-1]==n1
if lC[1]:
lamb = np.arange(0,n2)
else:
assert lamb.ndim in [1,2]
if lamb.ndim==1:
assert lamb.size==n2
else:
assert lamb.shape[1]==n2
if X.ndim==1:
X = np.array([X])
if lamb is not None and lamb.ndim==1:
lamb = np.array([lamb])
# Get shapes
nt, nch = data.shape[:2]
nnch = X.shape[0]
if data.ndim==3:
nnlamb, nlamb = lamb.shape
else:
nnlamb, nlamb = 0, 0
# Check indices
if indtX is not None:
assert indtX.shape==(nt,)
assert np.min(indtX)>=0 and np.max(indtX)<=nnch
lC = [indtlamb is None, indXlamb is None, indtXlamb is None]
assert lC[2] or (~lC[2] and np.sum(lC[:2])==2)
if lC[2]:
if not lC[0]:
assert indtlamb.shape==(nt,)
assert np.min(indtlamb) >= 0 and np.max(indtlamb) <= nnlamb
if not lC[1]:
assert indXlamb.shape == (nch,)
assert np.min(indXlamb) >= 0 and np.max(indXlamb) <= nnlamb
else:
assert indtXlamb.shape == (nt, nch)
assert np.min(indtXlamb) >= 0 and np.max(indtXlamb) <= nnlamb
# Check consistency X/lamb shapes vs indices
if X is not None and indtX is None:
assert nnch in [1,nt]
if lamb is not None:
if all([ii is None for ii in [indtlamb,indXlamb,indtXlamb]]):
assert nnlamb in [1,nch]
l = [data, t, X, lamb, nt, nch, nlamb, nnch, nnlamb,
indtX, indtlamb, indXlamb, indtXlamb]
return l
def _checkformat_inputs_XRef(self, X=None, indtX=None, indXlamb=None):
if X is not None:
X = np.atleast_1d(np.asarray(X).squeeze())
if indtX is not None:
indtX = np.atleast_1d(np.asarray(indtX).squeeze())
if indXlamb is not None:
indXlamb = np.atleast_1d(np.asarray(indXlamb).squeeze())
ndim = self._ddataRef['data'].ndim
nt, n1 = self._ddataRef['data'].shape[:2]
if ndim==2:
if X is None:
if self._isSpectral():
X = np.array([0])
else:
X = np.arange(0,n1)
else:
assert not self._isSpectral()
assert X.ndim in [1,2]
assert X.shape[-1]==n1
else:
if X is None:
X = np.arange(0,n1)
else:
assert X.ndim in [1,2]
assert X.shape[-1]==n1
if X.ndim==1:
X = np.array([X])
# Get shapes
nnch, nch = X.shape
# Check indices
if indtX is None:
indtX = self._ddataRef['indtX']
if indtX is not None:
assert indtX.shape == (nt,)
assert np.argmin(indtX) >= 0 and np.argmax(indtX) <= nnch
if indXlamb is None:
indXlamb = self._ddataRef['indXlamb']
if indtXlamb is None:
indtXlamb = self._ddataRef['indtXlamb']
if indtXlamb is not None:
assert indXlamb is None
assert indXlamb.shape==(nch,)
assert (np.argmin(indXlamb)>=0
and np.argmax(indXlamb)<=self._ddataRef['nnlamb'])
else:
assert indXlamb is None
assert indtXlamb.shape==(nt,nch)
assert (np.argmin(indtXlamb)>=0
and np.argmax(indtXlamb)<=self._ddataRef['nnlamb'])
return X, nnch, indtX, indXlamb, indtXlamb
def _checkformat_inputs_dlabels(self, dlabels=None):
if dlabels is None:
dlabels = {}
assert type(dlabels) is dict
lk = ['data','t','X']
if self._isSpectral():
lk.append('lamb')
for k in lk:
if not k in dlabels.keys():
dlabels[k] = {'name': k, 'units':'a.u.'}
assert type(dlabels[k]) is dict
assert all([s in dlabels[k].keys() for s in ['name','units']])
assert type(dlabels[k]['name']) is str
assert type(dlabels[k]['units']) is str
return dlabels
def _checkformat_inputs_dtreat(self, dtreat=None):
if dtreat is None:
dtreat = {}
assert type(dtreat) is dict
lk0 = self._get_keys_dtreat()
lk = dtreat.keys()
for k in lk:
assert k in lk0
for k in lk0:
if k not in lk:
if k in self._ddef['dtreat'].keys():
dtreat[k] = self._ddef['dtreat'][k]
else:
dtreat[k] = None
if k == 'order':
if dtreat[k] is None:
dtreat[k] = self.__class__._ddef['dtreat']['order']
assert type(dtreat[k]) is list
assert dtreat[k][-1] == 'interp-t'
assert all([ss in dtreat[k][-4:-1]
for ss in ['indt','indch','indlamb']])
return dtreat
    def _checkformat_inputs_dgeom(self, lCam=None, config=None):
        """Check geometry inputs; returns (config, lCam, nC).

        Either a config alone, or a list of cameras (from which a common
        config is extracted), may be provided — not both.
        """
        if config is not None:
            # a bare config: no cameras
            assert lCam is None
            nC = 0
        elif lCam is None:
            nC = 0
        else:
            if type(lCam) is not list:
                lCam = [lCam]
            nC = len(lCam)
            # Check type consistency: all cameras must match self's 1D/2D type
            lc = [cc._is2D() == self._is2D() for cc in lCam]
            if not all(lc):
                ls = ['%s : %s'%(cc.Id.Name,cc.Id.Cls) for cc in lCam]
                msg = "%s (%s) fed wrong lCam:\n"%(self.Id.Name,self.Id.Cls)
                msg += " - " + "\n - ".join(ls)
                raise Exception(msg)
            # Check config consistency: every camera must carry one
            lconf = [cc.config for cc in lCam]
            if not all([cc is not None for cc in lconf]):
                msg = "The provided Cams should have a config !"
                raise Exception(msg)
            config = [cc for cc in lconf if cc is not None][0].copy()
            # To be finished after modifying __eq__ in tf.utils
            lexcept = ['dvisible','dcompute','color']
            msg = "The following Cam do not have a consistent config:"
            flag = False
            for cc in lCam:
                # compare configs ignoring display-only fields
                if not cc.config.__eq__(config, lexcept=lexcept):
                    msg += "\n    {0}".format(cc.Id.Name)
                    flag = True
            if flag:
                raise Exception(msg)
            # Check number of channels wrt data
            nR = np.sum([cc._dgeom['nRays'] for cc in lCam])
            if not nR == self._ddataRef['nch']:
                msg = "Total nb. of rays from lCam != data.shape[1] !"
                raise Exception(msg)
        return config, lCam, nC
    def _checkformat_inputs_dchans(self, dchans=None):
        """Check / build the per-channel info dict.

        When cameras are stored, their _dchans arrays are concatenated
        along the channel axis; otherwise user-provided arrays are checked
        against the number of channels.
        """
        assert dchans is None or isinstance(dchans,dict)
        if dchans is None:
            dchans = {}
        if self._dgeom['lCam'] is not None:
            # concatenate per-camera channel info (keys taken from 1st cam)
            ldchans = [cc._dchans for cc in self._dgeom['lCam']]
            for k in ldchans[0].keys():
                assert ldchans[0][k].ndim in [1,2]
                if ldchans[0][k].ndim==1:
                    dchans[k] = np.concatenate([dd[k] for dd in ldchans])
                else:
                    dchans[k] = np.concatenate([dd[k]
                                                for dd in ldchans], axis=1)
        else:
            for k in dchans.keys():
                arr = np.asarray(dchans[k]).ravel()
                # NOTE(review): checks against self._ddata['nch'] while the
                # sibling checkers use self._ddataRef — confirm intentional
                assert arr.size==self._ddata['nch']
                dchans[k] = arr
        return dchans
def _checkformat_inputs_dextra(self, dextra=None):
assert dextra is None or isinstance(dextra,dict)
if dextra is not None:
for k in dextra.keys():
if not (type(dextra[k]) is dict and 't' in dextra[k].keys()):
msg = "All dextra values should be dict with 't':\n"
msg += " - dextra[%s] = %s"%(k,str(dextra[k]))
raise Exception(msg)
return dextra
###########
# Get keys of dictionnaries
###########
@staticmethod
def _get_keys_ddataRef():
lk = ['data', 't', 'X', 'lamb', 'nt', 'nch', 'nlamb', 'nnch', 'nnlamb',
'indtX', 'indtlamb', 'indXlamb', 'indtXlamb']
return lk
@staticmethod
def _get_keys_ddata():
lk = ['data', 't', 'X', 'lamb', 'nt', 'nch', 'nlamb', 'nnch', 'nnlamb',
'indtX', 'indtlamb', 'indXlamb', 'indtXlamb', 'uptodate']
return lk
@staticmethod
def _get_keys_dtreat():
lk = ['order','mask-ind', 'mask-val', 'interp-indt', 'interp-indch',
'data0-indt', 'data0-Dt', 'data0-data',
'dfit', 'indt', 'indch', 'indlamb', 'interp-t']
return lk
@classmethod
def _get_keys_dlabels(cls):
lk = ['data','t','X']
if cls._isSpectral():
lk.append('lamb')
return lk
@staticmethod
def _get_keys_dgeom():
lk = ['config', 'lCam', 'nC']
return lk
@staticmethod
def _get_keys_dX12():
lk = ['from', 'x1','x2','n1', 'n2',
'ind1', 'ind2', 'indr']
return lk
@staticmethod
def _get_keys_dchans():
lk = []
return lk
@staticmethod
def _get_keys_dextra():
lk = []
return lk
###########
# _init
###########
    def _init(self, data=None, t=None, X=None, lamb=None, dtreat=None, dchans=None,
              dlabels=None, dextra=None, lCam=None, config=None, **kwargs):
        """Route all inputs to the dedicated setters, in dependency order."""
        # capture all named arguments at once, then dispatch by keyword lists
        kwdargs = locals()
        kwdargs.update(**kwargs)
        largs = self._get_largs_ddataRef()
        kwddataRef = self._extract_kwdargs(kwdargs, largs)
        largs = self._get_largs_dtreat()
        kwdtreat = self._extract_kwdargs(kwdargs, largs)
        largs = self._get_largs_dlabels()
        kwdlabels = self._extract_kwdargs(kwdargs, largs)
        largs = self._get_largs_dgeom()
        kwdgeom = self._extract_kwdargs(kwdargs, largs)
        largs = self._get_largs_dchans()
        kwdchans = self._extract_kwdargs(kwdargs, largs)
        largs = self._get_largs_dextra()
        kwdextra = self._extract_kwdargs(kwdargs, largs)
        # reference data first, then treatment, then derived working copy
        self._set_ddataRef(**kwddataRef)
        self.set_dtreat(**kwdtreat)
        self._set_ddata()
        self._set_dlabels(**kwdlabels)
        self._set_dgeom(**kwdgeom)
        if self._is2D():
            kwdX12 = self._extract_kwdargs(kwdargs, self._get_largs_dX12())
            self.set_dX12(**kwdX12)
        self.set_dchans(**kwdchans)
        self.set_dextra(**kwdextra)
        self._dstrip['strip'] = 0
###########
# set dictionaries
###########
    def _set_ddataRef(self, data=None, t=None,
                      X=None, indtX=None,
                      lamb=None, indtlamb=None, indXlamb=None, indtXlamb=None):
        """Check inputs and store the reference data dict (_ddataRef)."""
        # locals() captures exactly this method's keyword arguments
        kwdargs = locals()
        del kwdargs['self']
        lout = self._checkformat_inputs_ddataRef(**kwdargs)
        data, t, X, lamb, nt, nch, nlamb, nnch, nnlamb = lout[:9]
        indtX, indtlamb, indXlamb, indtXlamb = lout[9:]
        self._ddataRef = {'data':data, 't':t, 'X':X, 'lamb':lamb,
                          'nt':nt, 'nch':nch, 'nlamb':nlamb,
                          'nnch':nnch, 'nnlamb':nnlamb,
                          'indtX':indtX, 'indtlamb':indtlamb,
                          'indXlamb':indXlamb, 'indtXlamb':indtXlamb}
def set_dtreat(self, dtreat=None):
dtreat = self._checkformat_inputs_dtreat(dtreat=dtreat)
self._dtreat = dtreat
def _set_dlabels(self, dlabels=None):
dlabels = self._checkformat_inputs_dlabels(dlabels=dlabels)
self._dlabels.update(dlabels)
def _set_dgeom(self, lCam=None, config=None):
config, lCam, nC = self._checkformat_inputs_dgeom(lCam=lCam,
config=config)
self._dgeom = {'lCam':lCam, 'nC':nC, 'config':config}
def set_dchans(self, dchans=None, method='set'):
""" Set (or update) the dchans dict
dchans is a dict of np.ndarrays of len() = self.nch containing
channel-specific information
Use the kwarg 'method' to set / update the dict
"""
assert method in ['set','update']
dchans = self._checkformat_inputs_dchans(dchans=dchans)
if method == 'set':
self._dchans = dchans
else:
self._dchans.update(dchans)
def set_dextra(self, dextra=None, method='set'):
""" Set (or update) the dextra dict
dextra is a dict of nested dict
It contains all extra signal that can help interpret the data
e.g.: heating power time traces, plasma current...
Each nested dict should have the following fields:
't' : 1D np.ndarray (time vector)
'data' : 1D np.ndarray (data time trace)
'name' : str (used as label in legend)
'units': str (used n parenthesis in legend after name)
Use the kwarg 'method' to set / update the dict
"""
assert method in ['set','update']
dextra = self._checkformat_inputs_dextra(dextra=dextra)
if method == 'set':
self._dextra = dextra
else:
self._dextra.update(dextra)
###########
# strip dictionaries
###########
    def _strip_ddata(self, strip=0):
        """Recompute or clear the working data copy depending on strip level."""
        if self._dstrip['strip']==strip:
            return
        if strip in [0,1] and self._dstrip['strip'] in [2]:
            # restoring from a stripped state: rebuild the working copy
            self._set_ddata()
        elif strip in [2] and self._dstrip['strip'] in [0,1]:
            # stripping: free the working copy (reference data is kept)
            self.clear_ddata()
    def _strip_dgeom(self, strip=0, force=False, verb=True):
        """Strip / restore geometry objects (lCam, config).

        Restoring (strip 0) loads the objects back from their saved files;
        stripping (strip 1/2) replaces them by their save path, refusing
        unless the saved .npz files exist (or force=True, which only warns).
        """
        if self._dstrip['strip']==strip:
            return
        if strip in [0] and self._dstrip['strip'] in [1,2]:
            # restore: stored values are path strings -> load the objects
            lC, config = None, None
            if self._dgeom['lCam'] is not None:
                assert type(self._dgeom['lCam']) is list
                assert all([type(ss) is str for ss in self._dgeom['lCam']])
                lC = []
                for ii in range(0,len(self._dgeom['lCam'])):
                    lC.append(utils.load(self._dgeom['lCam'][ii], verb=verb))
            elif self._dgeom['config'] is not None:
                assert type(self._dgeom['config']) is str
                config = utils.load(self._dgeom['config'], verb=verb)
            self._set_dgeom(lCam=lC, config=config)
        elif strip in [1,2] and self._dstrip['strip'] in [0]:
            # strip: replace objects by their save path, checking that the
            # corresponding saved files actually exist on disk
            if self._dgeom['lCam'] is not None:
                lpfe = []
                for cc in self._dgeom['lCam']:
                    path, name = cc.Id.SavePath, cc.Id.SaveName
                    pfe = os.path.join(path, name+'.npz')
                    lf = os.listdir(path)
                    lf = [ff for ff in lf if name+'.npz' in ff]
                    exist = len(lf)==1
                    if not exist:
                        msg = """BEWARE:
                            You are about to delete the lCam objects
                            Only the path/name to saved a object will be kept
                            But it appears that the following object has no
                            saved file where specified (obj.Id.SavePath)
                            Thus it won't be possible to retrieve it
                            (unless available in the current console:"""
                        msg += "\n    - {0}".format(pfe)
                        if force:
                            warnings.warn(msg)
                        else:
                            raise Exception(msg)
                    lpfe.append(pfe)
                self._dgeom['lCam'] = lpfe
                self._dgeom['config'] = None
            elif self._dgeom['config'] is not None:
                path = self._dgeom['config'].Id.SavePath
                name = self._dgeom['config'].Id.SaveName
                pfe = os.path.join(path, name+'.npz')
                lf = os.listdir(path)
                lf = [ff for ff in lf if name+'.npz' in ff]
                exist = len(lf)==1
                if not exist:
                    msg = """BEWARE:
                        You are about to delete the config object
                        Only the path/name to saved a object will be kept
                        But it appears that the following object has no
                        saved file where specified (obj.Id.SavePath)
                        Thus it won't be possible to retrieve it
                        (unless available in the current console:"""
                    msg += "\n    - {0}".format(pfe)
                    if force:
                        warnings.warn(msg)
                    else:
                        raise Exception(msg)
                self._dgeom['config'] = pfe
###########
# _strip and get/from dict
###########
    @classmethod
    def _strip_init(cls):
        """Declare the allowed strip levels and build strip()'s docstring."""
        cls._dstrip['allowed'] = [0,1,2,3]
        nMax = max(cls._dstrip['allowed'])
        # level-specific doc, inserted into the generic strip docstring
        doc = """
                 1: dgeom pathfiles
                 2: dgeom pathfiles + clear data
                 """
        doc = utils.ToFuObjectBase.strip.__doc__.format(doc,nMax)
        cls.strip.__doc__ = doc
    def strip(self, strip=0, verb=True):
        # docstring is set dynamically by _strip_init()
        # super()
        super(DataAbstract,self).strip(strip=strip, verb=verb)
    def _strip(self, strip=0, verb=True):
        """Apply the per-dict strip operations (data, then geometry)."""
        self._strip_ddata(strip=strip)
        self._strip_dgeom(strip=strip, verb=verb)
def _to_dict(self):
dout = {'ddataRef':{'dict':self._ddataRef, 'lexcept':None},
'ddata':{'dict':self._ddata, 'lexcept':None},
'dtreat':{'dict':self._dtreat, 'lexcept':None},
'dlabels':{'dict':self._dlabels, 'lexcept':None},
'dgeom':{'dict':self._dgeom, 'lexcept':None},
'dchans':{'dict':self._dchans, 'lexcept':None},
'dextra':{'dict':self._dextra, 'lexcept':None}}
if self._is2D():
dout['dX12'] = {'dict':self._dX12, 'lexcept':None}
return dout
def _from_dict(self, fd):
self._ddataRef.update(**fd['ddataRef'])
self._ddata.update(**fd['ddata'])
self._dtreat.update(**fd['dtreat'])
self._dlabels.update(**fd['dlabels'])
self._dgeom.update(**fd['dgeom'])
self._dextra.update(**fd['dextra'])
if 'dchans' not in fd.keys():
fd['dchans'] = {}
self._dchans.update(**fd['dchans'])
if self._is2D():
self._dX12.update(**fd['dX12'])
###########
# properties
###########
    @property
    def ddataRef(self):
        """dict: the reference (untreated) data dict."""
        return self._ddataRef
    @property
    def ddata(self):
        """dict: the working (treated) data dict, recomputed lazily if stale."""
        if not self._ddata['uptodate']:
            self._set_ddata()
        return self._ddata
    @property
    def dtreat(self):
        """dict: the data-treatment parameters."""
        return self._dtreat
    @property
    def dlabels(self):
        """dict: the plotting labels ({key: {'name', 'units'}})."""
        return self._dlabels
    @property
    def dgeom(self):
        """dict: the geometry dict (config, lCam, nC)."""
        return self._dgeom
    @property
    def dextra(self):
        """dict: the extra-signals dict."""
        return self._dextra
    def get_ddata(self, key):
        """Return self.ddata[key], recomputing the treated data if stale."""
        if not self._ddata['uptodate']:
            self._set_ddata()
        return self._ddata[key]
    @property
    def data(self):
        """np.ndarray: the treated data array."""
        return self.get_ddata('data')
    @property
    def t(self):
        """np.ndarray: the treated time vector."""
        return self.get_ddata('t')
    @property
    def X(self):
        """np.ndarray: the treated channel coordinate array."""
        return self.get_ddata('X')
    @property
    def nt(self):
        """int: number of time points in the treated data."""
        return self.get_ddata('nt')
    @property
    def nch(self):
        """int: number of channels in the treated data."""
        return self.get_ddata('nch')
    @property
    def config(self):
        """The geometry configuration object (or its save path when stripped)."""
        return self._dgeom['config']
    @property
    def lCam(self):
        """list: the camera objects (or their save paths when stripped)."""
        return self._dgeom['lCam']
@property
def _isLOS(self):
c0 = self._dgeom['lCam'] is not None
if c0:
c0 = all([cc._isLOS() for cc in self.dgeom['lCam']])
return c0
@abstractmethod
def _isSpectral(self):
return 'spectral' in self.__class__.name.lower()
@abstractmethod
def _is2D(self):
return '2d' in self.__class__.__name__.lower()
###########
# Hidden and public methods for ddata
###########
    def set_XRef(self, X=None, indtX=None, indtXlamb=None):
        """ Reset the reference X
        Useful if to replace channel indices by a time-varying quantity
        e.g.: distance to the magnetic axis
        """
        # NOTE(review): indtXlamb is passed through the indXlamb keyword —
        # looks like a misnamed kwarg; confirm against _checkformat_inputs_XRef
        out = self._checkformat_inputs_XRef(X=X, indtX=indtX,
                                            indXlamb=indtXlamb)
        X, nnch, indtX, indXlamb, indtXlamb = out
        self._ddataRef['X'] = X
        self._ddataRef['nnch'] = nnch
        self._ddataRef['indtX'] = indtX
        self._ddataRef['indtXlamb'] = indtXlamb
        # working copy must be rebuilt on next access
        self._ddata['uptodate'] = False
def set_dtreat_indt(self, t=None, indt=None):
""" Store the desired index array for the time vector
If an array of indices (refering to self.ddataRef['t'] is not provided,
uses self.select_t(t=t) to produce it
"""
lC = [indt is not None, t is not None]
if all(lC):
msg = "Please provide either t or indt (or none)!"
raise Exception(msg)
if lC[1]:
ind = self.select_t(t=t, out=bool)
else:
ind = _format_ind(indt, n=self._ddataRef['nt'])
self._dtreat['indt'] = ind
self._ddata['uptodate'] = False
def set_dtreat_indch(self, indch=None):
""" Store the desired index array for the channels
If None => all channels
Must be a 1d array
"""
if indch is not None:
indch = np.asarray(indch)
assert indch.ndim==1
indch = _format_ind(indch, n=self._ddataRef['nch'])
self._dtreat['indch'] = indch
self._ddata['uptodate'] = False
def set_dtreat_indlamb(self, indlamb=None):
""" Store the desired index array for the wavelength
If None => all wavelengths
Must be a 1d array
"""
if not self._isSpectral():
msg = "The wavelength can only be set with DataSpectral object !"
raise Exception(msg)
if indlamb is not None:
indlamb = np.asarray(indlamb)
assert indlamb.ndim==1
indlamb = _format_ind(indlamb, n=self._ddataRef['nlamb'])
self._dtreat['indlamb'] = indlamb
self._ddata['uptodate'] = False
def set_dtreat_mask(self, ind=None, val=np.nan):
assert ind is None or hasattr(ind,'__iter__')
assert type(val) in [int,float,np.int64,np.float64]
if ind is not None:
ind = _format_ind(ind, n=self._ddataRef['nch'])
self._dtreat['mask-ind'] = ind
self._dtreat['mask-val'] = val
self._ddata['uptodate'] = False
    def set_dtreat_data0(self, data0=None, Dt=None, indt=None):
        """Store a background to subtract: explicit data0, or a time window
        Dt / time indices indt over which the reference data is averaged.
        Only one of the three may be provided."""
        lC = [data0 is not None, Dt is not None, indt is not None]
        assert np.sum(lC) <= 1
        if any(lC):
            if lC[0]:
                data0 = np.asarray(data0)
                if self._isSpectral():
                    shape = (self._ddataRef['nch'],self._ddataRef['nlamb'])
                else:
                    shape = (self._ddataRef['nch'],)
                # NOTE(review): ravel() is applied unconditionally, so a 2D
                # spectral data0 is flattened before the 2D shape check —
                # that check can then never pass; confirm intended placement
                data0 = data0.ravel()
                if not data0.shape == shape:
                    msg = "Provided data0 has wrong shape !\n"
                    msg += "    - Expected: %s\n"%str(shape)
                    msg += "    - Provided: %s"%data0.shape
                    raise Exception(msg)
                Dt, indt = None, None
            else:
                if lC[2]:
                    indt = _format_ind(indt, n=self._ddataRef['nt'])
                else:
                    indt = self.select_t(t=Dt, out=bool)
                if np.any(indt):
                    if self._isSpectral():
                        data0 = self._ddataRef['data'][indt,:,:]
                    else:
                        data0 = self._ddataRef['data'][indt,:]
                    # average over the selected times when more than one
                    if np.sum(indt)>1:
                        data0 = np.nanmean(data0,axis=0)
        self._dtreat['data0-indt'] = indt
        self._dtreat['data0-Dt'] = Dt
        self._dtreat['data0-data'] = data0
        self._ddata['uptodate'] = False
def set_dtreat_interp_indt(self, indt=None):
""" Set the indices of the times for which to interpolate data
The index can be provided as:
- A 1d np.ndarray of boolean or int indices
=> interpolate data at these times for all channels
- A dict with:
* keys = int indices of channels
* values = array of int indices of times at which to interpolate
Time indices refer to self.ddataRef['t']
Channel indices refer to self.ddataRef['X']
"""
lC = [indt is None, type(indt) in [np.ndarray,list], type(indt) is dict]
assert any(lC)
if lC[2]:
lc = [type(k) is int and k<self._ddataRef['nch'] for k in indt.keys()]
assert all(lc)
for k in indt.keys():
assert hasattr(indt[k],'__iter__')
indt[k] = _format_ind(indt[k], n=self._ddataRef['nt'])
elif lC[1]:
indt = np.asarray(indt)
assert indt.ndim==1
indt = _format_ind(indt, n=self._ddataRef['nt'])
self._dtreat['interp-indt'] = indt
self._ddata['uptodate'] = False
def set_dtreat_interp_indch(self, indch=None):
""" Set the indices of the channels for which to interpolate data
The index can be provided as:
- A 1d np.ndarray of boolean or int indices of channels
=> interpolate data at these channels for all times
- A dict with:
* keys = int indices of times
* values = array of int indices of chan. for which to interpolate
Time indices refer to self.ddataRef['t']
Channel indices refer to self.ddataRef['X']
"""
lC = [indch is None, type(indch) in [np.ndarray,list], type(indch) is dict]
assert any(lC)
if lC[2]:
lc = [type(k) is int and k<self._ddataRef['nt'] for k in indch.keys()]
assert all(lc)
for k in indch.keys():
assert hasattr(indch[k],'__iter__')
indch[k] = _format_ind(indch[k], n=self._ddataRef['nch'])
elif lC[1]:
indch = np.asarray(indch)
assert indch.ndim==1
indch = _format_ind(indch, n=self._ddataRef['nch'])
self._dtreat['interp-indch'] = indch
self._ddata['uptodate'] = False
def set_dtreat_dfit(self, dfit=None):
""" Set the fitting dictionnary
A dict contaning all parameters for fitting the data
Valid dict content includes:
- 'type': str
'fft': A fourier filtering
'svd': A svd filtering
"""
warnings.warn("Not implemented yet !, dfit forced to None")
dfit = None
assert dfit is None or isinstance(dfit,dict)
if isinstance(dfit,dict):
assert 'type' in dfit.keys()
assert dfit['type'] in ['svd','fft']
self._dtreat['dfit'] = dfit
self._ddata['uptodate'] = False
def set_dtreat_interpt(self, t=None):
""" Set the time vector on which to interpolate the data """
if t is not None:
t = np.unique(np.asarray(t, dtype=float).ravel())
self._dtreat['interp-t'] = t
@staticmethod
def _mask(data, mask_ind, mask_val):
if mask_ind is not None:
if data.ndim==2:
data[:,mask_ind] = mask_val
elif data.ndim==3:
data[:,mask_ind,:] = mask_val
return data
@staticmethod
def _interp_indt(data, ind, t):
msg = "interp not coded yet for 3d data !"
assert data.ndim==2, msg
if type(ind) is dict:
for kk in ind.keys():
data[ind[kk],kk] = np.interp(t[ind[kk]],
t[~ind[kk]], data[~ind[kk],kk],
right=np.nan, left=np.nan)
elif isinstance(ind,np.ndarray):
for ii in range(0,data.shape[1]):
data[ind,ii] = np.interp(t[ind], t[~ind], data[~ind,ii])
return data
@staticmethod
def _interp_indch(data, ind, X):
msg = "interp not coded yet for 3d data !"
assert data.ndim==2, msg
if type(ind) is dict:
for kk in ind.keys():
data[kk,ind[kk]] = np.interp(X[ind[kk]],
X[~ind[kk]], data[kk,~ind[kk]],
right=np.nan, left=np.nan)
elif isinstance(ind,np.ndarray):
for ii in range(0,data.shape[0]):
data[ii,ind] = np.interp(X[ind], X[~ind], data[ii,~ind])
return data
@staticmethod
def _data0(data, data0):
if data0 is not None:
if data.shape == data0.shape:
data = data - data0
elif data.ndim == 2:
data = data - data0[np.newaxis,:]
if data.ndim == 3:
data = data - data0[np.newaxis,:,:]
return data
@staticmethod
def _dfit(data, dfit):
if dfit is not None:
if dfit['type']=='svd':
#data = _comp.()
pass
elif dfit['type']=='svd':
#data = _comp.()
pass
return data
@staticmethod
def _indt(data, t=None, X=None, nnch=None,
indtX=None, indtlamb=None, indtXlamb=None, indt=None):
nt0 = t.size
if data.ndim==2:
data = data[indt,:]
elif data.ndim==3:
data = data[indt,:,:]
if t is not None:
t = t[indt]
if X is not None and X.ndim == 2 and X.shape[0] == nt0:
X = X[indt,:]
nnch = indt.sum()
if indtX is not None:
indtX = indtX[indt]
if indtlamb is not None:
indtlamb = indtlamb[indt]
elif indtXlamb is not None:
indtXlamb = indtXlamb[indt,:]
return data, t, X, indtX, indtlamb, indtXlamb, nnch
@staticmethod
def _indch(data, X=None,
indXlamb=None, indtXlamb=None, indch=None):
if data.ndim==2:
data = data[:,indch]
elif data.ndim==3:
data = data[:,indch,:]
if X is not None:
X = X[indch] if X.ndim==1 else X[:,indch]
if indXlamb is not None:
indXlamb = indXlamb[indch]
elif indtXlamb is not None:
indtXlamb = indtXlamb[:,indch]
return data, X, indXlamb, indtXlamb
@staticmethod
def _indlamb(data, lamb=None,
indlamb=None):
data = data[:,:,indlamb]
if lamb is not None:
lamb = lamb[indlamb] if lamb.ndim==1 else lamb[:,indlamb]
return data, lamb
@staticmethod
def _interp_t(data, t, indtX=None,
indtlamb=None, indtXlamb=None, interpt=None, kind='linear'):
f = scpinterp.interp1d(t, data, kind=kind, axis=0, copy=True,
bounds_error=True, fill_value=np.nan,
assume_sorted=False)
d = f(data)
lC = [indtX is not None, indtlamb is not None, indtXlamb is not None]
if any(lC):
indt = np.digitize(t, (interpt[1:]+interpt[:-1])/2.)
if lC[0]:
indtX = indtX[indt]
if lC[1]:
indtlamb = indtlamb[indt]
elif lC[2]:
indtXlamb = indtXlamb[indt,:]
return d, interpt, indtX, indtlamb, indtXlamb
    def _get_ddata(self, key):
        """Return self._ddata[key], recomputing the treated data if stale."""
        if not self._ddata['uptodate']:
            self._set_ddata()
        return self._ddata[key]
    def set_dtreat_order(self, order=None):
        """ Set the order in which the data treatment should be performed
        Provide an ordered list of keywords indicating the order in which
        you wish the data treatment steps to be performed.
        Each keyword corresponds to a step.
        Available steps are (in default order):
            - 'mask' :
            - 'interp_indt' :
            - 'interp_indch' :
            - 'data0' :
            - 'dfit' :
            - 'indt' :
            - 'indch' :
            - 'interp_t':
        All steps are performed on the stored reference self.dataRef['data']
        Thus, the time and channels restriction must be the last 2 steps before
        interpolating on an external time vector
        """
        if order is None:
            order = list(self._ddef['dtreat']['order'])
        assert type(order) is list and all([type(ss) is str for ss in order])
        # NOTE(review): the slice checks positions -4..-2 while the error
        # message mentions only -2 and -3 — confirm the intended contract
        if not all([ss in ['indt','indch','indlamb'] for ss in order][-4:-1]):
            msg = "indt and indch must be the treatment steps -2 and -3 !"
            raise Exception(msg)
        # external-time interpolation must always come last
        if not order[-1]=='interp-t':
            msg = "interp-t must be the last treatment step !"
            raise Exception(msg)
        self._dtreat['order'] = order
        self._ddata['uptodate'] = False
def _get_treated_data(self):
""" Produce a working copy of the data based on the treated reference
The reference data is always stored and untouched in self.ddataRef
You always interact with self.data, which returns a working copy.
That working copy is the reference data, eventually treated along the
lines defined (by the user) in self.dtreat
By reseting the treatment (self.reset()) all data treatment is
cancelled and the working copy returns the reference data.
"""
# --------------------
# Copy reference data
d = self._ddataRef['data'].copy()
t, X = self._ddataRef['t'].copy(), self._ddataRef['X'].copy()
lamb = self._ddataRef['lamb']
if lamb is not None:
lamb = lamb.copy()
indtX = self._ddataRef['indtX']
if indtX is not None:
indtX = indtX.copy()
indtlamb = self._ddataRef['indtlamb']
if indtlamb is not None:
indtlamb = indtlamb.copy()
indXlamb = self._ddataRef['indXlamb']
if indXlamb is not None:
indXlamb = indXlamb.copy()
indtXlamb = self._ddataRef['indtXlamb']
if indtXlamb is not None:
indtXlamb = indtXlamb.copy()
nnch = self._ddataRef['nnch']
# --------------------
# Apply data treatment
for kk in self._dtreat['order']:
# data only
if kk=='mask' and self._dtreat['mask-ind'] is not None:
d = self._mask(d, self._dtreat['mask-ind'],
self._dtreat['mask-val'])
if kk=='interp_indt':
d = self._interp_indt(d, self._dtreat['interp-indt'],
self._ddataRef['t'])
if kk=='interp_indch':
d = self._interp_indch(d, self._dtreat['interp-indch'],
self._ddataRef['X'])
if kk=='data0':
d = self._data0(d, self._dtreat['data0-data'])
if kk=='dfit' and self._dtreat['dfit'] is not None:
d = self._dfit(d, **self._dtreat['dfit'])
# data + others
if kk=='indt' and self._dtreat['indt'] is not None:
d,t,X, indtX,indtlamb,indtXlamb, nnch = self._indt(d, t, X,
nnch, indtX,
indtlamb, indtXlamb,
self._dtreat['indt'])
if kk=='indch' and self._dtreat['indch'] is not None:
d,X, indXlamb,indtXlamb = self._indch(d, X, indXlamb, indtXlamb,
self._dtreat['indch'])
if kk=='indlamb' and self._dtreat['indlamb'] is not None:
d, lamb = self._indch(d, lamb, self._dtreat['indlamb'])
if kk=='interp_t' and self._dtreat['interp-t'] is not None:
d,t, indtX,indtlamb,indtXlamb\
= self._interp_t(d, t, indtX, indtlamb, indtXlamb,
self._dtreat['interp-t'], kind='linear')
# --------------------
# Safety check
if d.ndim==2:
(nt, nch), nlamb = d.shape, 0
else:
nt, nch, nlamb = d.shape
lc = [d.ndim in [2,3], t.shape == (nt,), X.shape == (nnch, nch)]
if not all(lc):
msg = "Data, X, t shape unconsistency:\n"
msg += " - data.shape: %s\n"%str(d.shape)
msg += " - X.shape: %s\n"%str(X.shape)
msg += " - (nnch, nch): %s\n"%str((nnch,nch))
msg += " - t.shape: %s\n"%str(t.shape)
msg += " - nt : %s"%str(nt)
raise Exception(msg)
if lamb is not None:
assert lamb.shape == (self._ddataRef['nnlamb'], nlamb)
lout = [d, t, X, lamb, nt, nch, nlamb,
indtX, indtlamb, indXlamb, indtXlamb, nnch]
return lout
def _set_ddata(self):
if not self._ddata['uptodate']:
data, t, X, lamb, nt, nch, nlamb,\
indtX, indtlamb, indXlamb, indtXlamb,\
nnch = self._get_treated_data()
self._ddata['data'] = data
self._ddata['t'] = t
self._ddata['X'] = X
self._ddata['lamb'] = lamb
self._ddata['nt'] = nt
self._ddata['nch'] = nch
self._ddata['nlamb'] = nlamb
self._ddata['nnch'] = nnch
self._ddata['nnlamb'] = self._ddataRef['nnlamb']
self._ddata['indtX'] = indtX
self._ddata['indtlamb'] = indtlamb
self._ddata['indXlamb'] = indXlamb
self._ddata['indtXlamb'] = indtXlamb
self._ddata['uptodate'] = True
def clear_ddata(self):
""" Clear the working copy of data
Harmless, as it preserves the reference copy and the treatment dict
Use only to free some memory
"""
self._ddata = dict.fromkeys(self._get_keys_ddata())
self._ddata['uptodate'] = False
def clear_dtreat(self, force=False):
""" Clear all treatment parameters in self.dtreat
Subsequently also clear the working copy of data
The working copy of data is thus reset to the reference data
"""
lC = [self._dtreat[k] is not None for k in self._dtreat.keys()
if k != 'order']
if any(lC) and not force:
msg = """BEWARE : You are about to delete the data treatment
i.e.: to clear self.dtreat (and also self.ddata)
Are you sure ?
If yes, use self.clear_dtreat(force=True)"""
raise Exception(msg)
dtreat = dict.fromkeys(self._get_keys_dtreat())
self._dtreat = self._checkformat_inputs_dtreat(dtreat)
self.clear_ddata()
def dchans(self, key=None):
""" Return the dchans updated with indch
Return a dict with all keys if key=None
"""
if self._dtreat['indch'] is None or np.all(self._dtreat['indch']):
dch = dict(self._dchans) if key is None else self._dchans[key]
else:
dch = {}
lk = self._dchans.keys() if key is None else [key]
for kk in lk:
if self._dchans[kk].ndim==1:
dch[kk] = self._dchans[kk][self._dtreat['indch']]
elif self._dchans[kk].ndim==2:
dch[kk] = self._dchans[kk][:,self._dtreat['indch']]
else:
msg = "Don't know how to treat self._dchans[%s]:"%kk
msg += "\n shape = %s"%(kk,str(self._dchans[kk].shape))
warnings.warn(msg)
if key is not None:
dch = dch[key]
return dch
###########
# Other public methods
###########
def select_t(self, t=None, out=bool):
""" Return a time index array
Return a boolean or integer index array, hereafter called 'ind'
The array refers to the reference time vector self.ddataRef['t']
Parameters
----------
t : None / float / np.ndarray / list / tuple
The time values to be selected:
- None : ind matches all time values
- float : ind is True only for the time closest to t
- np.ndarray : ind is True only for the times closest to t
- list (len()==2): ind is True for the times inside [t[0],t[1]]
- tuple (len()==2): ind is True for times outside ]t[0];t[1][
out : type
Specifies the type of the output index array:
- bool : return a boolean array of shape (self.ddataRef['nt'],)
- int : return the array as integers indices
Return
------
ind : np.ndarray
The array of indices, of dtype specified by keywordarg out
"""
assert out in [bool,int]
ind = _select_ind(t, self._ddataRef['t'], self._ddataRef['nt'])
if out is int:
ind = ind.nonzero()[0]
return ind
    def select_ch(self, val=None, key=None, log='any', touch=None, out=bool):
        """ Return a channels index array
        Return a boolean or integer index array, hereafter called 'ind'
        The array refers to the reference channel/'X' vector self.ddataRef['X']
        There are 3 different ways of selecting channels, by refering to:
            - The 'X' vector/array values in self.dataRef['X']
            - The dict of channels keys/values (if self.dchans != None)
            - which element each LOS touches (if self.lCam != None)
        Parameters
        ----------
        val :   None / str / float / np.array / list / tuple
            The value against which to dicriminate.
            Behaviour depends whether key is provided:
                - key is None => val compares vs self.ddataRef['X']
                - key provided => val compares vs self.dchans[key]
            If key is None, the behaviour is similar to self.select_indt():
                - None : ind matches all channels
                - float : ind is True only for X closest to val
                - np.ndarray : ind is True only for X closest to val
                - list (len()==2): ind is True for X inside [val[0],val[1]]
                - tuple (len()==2): ind is True for X outside ]val[0];val[1][
        key :   None / str
            If provided, dict key to indicate which self.dchans[key] to use
        log :   str
            If key provided, val can be a list of criteria
            Then, log indicates whether all / any / none should be matched
        touch : None
            If key and val are None, return the indices of the LOS touching the
            elements indicated in touch.
            Requires that self.dgeom['lCam'] is not None (tf.geom.Cam.select())
        out :   type
            Specifies the type of the output index array:
                - bool : return a boolean array of shape (self.ddataRef['nt'],)
                - int : return the array as integers indices
        Return
        ------
        ind :   np.ndarray
            The array of indices, of dtype specified by keywordarg out
        """
        assert out in [int,bool]
        assert log in ['any','all','not']
        # Exactly one of the 4 selection modes must apply:
        #   lC[0]: nothing given       => all channels
        #   lC[1]: only touch given    => select by touched elements (lCam)
        #   lC[2]: only val given      => select by 'X' values
        #   lC[3]: val + key given     => select by dchans[key] values
        lc = [val is None, key is None, touch is None]
        lC = [all(lc), all(lc[:2]) and not lc[2],
              not lc[0] and all(lc[1:]), not any(lc[:2]) and lc[2]]
        assert np.sum(lC)==1
        if lC[0]:
            # get all channels
            ind = np.ones((self._ddataRef['nch'],),dtype=bool)
        elif lC[1]:
            # get touch
            if self._dgeom['lCam'] is None:
                msg = "self.dgeom['lCam'] must be set to use touch !"
                raise Exception(msg)
            if any([type(cc) is str for cc in self._dgeom['lCam']]):
                msg = "self.dgeom['lCam'] contains pathfiles !"
                msg += "\n  => Run self.strip(0)"
                raise Exception(msg)
            # delegate to each camera, then concatenate per-camera masks
            ind = []
            for cc in self._dgeom['lCam']:
                ind.append(cc.select(touch=touch, log=log, out=bool))
            if len(ind)==1:
                ind = ind[0]
            else:
                ind = np.concatenate(tuple(ind))
        elif lC[2]:
            # get values on X
            if self._ddataRef['nnch']==1:
                # single X vector shared by all times
                ind = _select_ind(val, self._ddataRef['X'], self._ddataRef['nch'])
            else:
                # time-dependent X: build a (nt, nch) mask, one row per time
                ind = np.zeros((self._ddataRef['nt'],self._ddataRef['nch']),dtype=bool)
                for ii in range(0,self._ddataRef['nnch']):
                    iind = self._ddataRef['indtX']==ii
                    ind[iind,:] = _select_ind(val, self._ddataRef['X'],
                                              self._ddataRef['nch'])[np.newaxis,:]
        else:
            # select by dchans[key]: val may be a scalar or list of criteria,
            # combined according to log ('any' / 'all' / 'not')
            if not (type(key) is str and key in self._dchans.keys()):
                msg = "Provided key not valid!\n"
                msg += "    - key: %s\n"%str(key)
                msg += "Please provide a valid key of self.dchans():\n"
                msg += "    - " + "\n    - ".join(self._dchans.keys())
                raise Exception(msg)
            ltypes = [str,int,float,np.int64,np.float64]
            C0 = type(val) in ltypes
            C1 = type(val) in [list,tuple,np.ndarray]
            assert C0 or C1
            if C0:
                val = [val]
            else:
                assert all([type(vv) in ltypes for vv in val])
            # one boolean row per criterion, then fold with the log operator
            ind = np.vstack([self._dchans[key]==ii for ii in val])
            if log=='any':
                ind = np.any(ind,axis=0)
            elif log=='all':
                ind = np.all(ind,axis=0)
            else:
                ind = ~np.any(ind,axis=0)
        if out is int:
            ind = ind.nonzero()[0]
        return ind
def select_lamb(self, lamb=None, out=bool):
""" Return a wavelength index array
Return a boolean or integer index array, hereafter called 'ind'
The array refers to the reference time vector self.ddataRef['lamb']
Parameters
----------
lamb : None / float / np.ndarray / list / tuple
The time values to be selected:
- None : ind matches all wavelength values
- float : ind is True only for the wavelength closest to lamb
- np.ndarray : ind True only for the wavelength closest to lamb
- list (len()==2): ind True for wavelength in [lamb[0],lamb[1]]
- tuple (len()==2): ind True for wavelength outside ]t[0];t[1][
out : type
Specifies the type of the output index array:
- bool : return a boolean array of shape (self.ddataRef['nlamb'],)
- int : return the array as integers indices
Return
------
ind : np.ndarray
The array of indices, of dtype specified by keywordarg out
"""
if not self._isSpectral():
msg = ""
raise Exception(msg)
assert out in [bool,int]
ind = _select_ind(lamb, self._ddataRef['lamb'], self._ddataRef['nlamb'])
if out is int:
ind = ind.nonzero()[0]
return ind
def plot(self, key=None,
cmap=None, ms=4, vmin=None, vmax=None,
vmin_map=None, vmax_map=None, cmap_map=None, normt_map=False,
ntMax=None, nchMax=None, nlbdMax=3,
lls=None, lct=None, lcch=None, lclbd=None, cbck=None,
inct=[1,10], incX=[1,5], inclbd=[1,10],
fmt_t='06.3f', fmt_X='01.0f',
invert=True, Lplot='In', dmarker=None,
bck=True, fs=None, dmargin=None, wintit=None, tit=None,
fontsize=None, labelpad=None, draw=True, connect=True):
""" Plot the data content in a generic interactive figure """
kh = _plot.Data_plot(self, key=key, indref=0,
cmap=cmap, ms=ms, vmin=vmin, vmax=vmax,
vmin_map=vmin_map, vmax_map=vmax_map,
cmap_map=cmap_map, normt_map=normt_map,
ntMax=ntMax, nchMax=nchMax, nlbdMax=nlbdMax,
lls=lls, lct=lct, lcch=lcch, lclbd=lclbd, cbck=cbck,
inct=inct, incX=incX, inclbd=inclbd,
fmt_t=fmt_t, fmt_X=fmt_X, Lplot=Lplot,
invert=invert, dmarker=dmarker, bck=bck,
fs=fs, dmargin=dmargin, wintit=wintit, tit=tit,
fontsize=fontsize, labelpad=labelpad,
draw=draw, connect=connect)
return kh
def plot_compare(self, lD, key=None,
cmap=None, ms=4, vmin=None, vmax=None,
vmin_map=None, vmax_map=None, cmap_map=None, normt_map=False,
ntMax=None, nchMax=None, nlbdMax=3,
lls=None, lct=None, lcch=None, lclbd=None, cbck=None,
inct=[1,10], incX=[1,5], inclbd=[1,10],
fmt_t='06.3f', fmt_X='01.0f', fmt_l='07.3f',
invert=True, Lplot='In', dmarker=None,
sharey=True, sharelamb=True,
bck=True, fs=None, dmargin=None, wintit=None, tit=None,
fontsize=None, labelpad=None, draw=True, connect=True):
""" Plot several Data instances of the same diag
Useful to compare :
- the diag data for 2 different shots
- experimental vs synthetic data for the same shot
"""
C0 = isinstance(lD,list)
C0 = C0 and all([issubclass(dd.__class__,DataAbstract) for dd in lD])
C1 = issubclass(lD.__class__,DataAbstract)
assert C0 or C1, 'Provided first arg. must be a tf.data.DataAbstract or list !'
lD = [lD] if C1 else lD
kh = _plot.Data_plot([self]+lD, key=key, indref=0,
cmap=cmap, ms=ms, vmin=vmin, vmax=vmax,
vmin_map=vmin_map, vmax_map=vmax_map,
cmap_map=cmap_map, normt_map=normt_map,
ntMax=ntMax, nchMax=nchMax, nlbdMax=nlbdMax,
lls=lls, lct=lct, lcch=lcch, lclbd=lclbd, cbck=cbck,
inct=inct, incX=incX, inclbd=inclbd,
fmt_t=fmt_t, fmt_X=fmt_X, fmt_l=fmt_l, Lplot=Lplot,
invert=invert, dmarker=dmarker, bck=bck,
sharey=sharey, sharelamb=sharelamb,
fs=fs, dmargin=dmargin, wintit=wintit, tit=tit,
fontsize=fontsize, labelpad=labelpad,
draw=draw, connect=connect)
return kh
def plot_combine(self, lD, key=None, bck=True, indref=0,
cmap=None, ms=4, vmin=None, vmax=None,
vmin_map=None, vmax_map=None, cmap_map=None, normt_map=False,
ntMax=None, nchMax=None, nlbdMax=3,
inct=[1,10], incX=[1,5], inclbd=[1,10],
lls=None, lct=None, lcch=None, lclbd=None, cbck=None,
fmt_t='06.3f', fmt_X='01.0f',
invert=True, Lplot='In', dmarker=None,
fs=None, dmargin=None, wintit=None, tit=None, sharex=False,
fontsize=None, labelpad=None, draw=True, connect=True):
""" Plot several Data instances of different diags
Useful to visualize several diags for the same shot
"""
C0 = isinstance(lD,list)
C0 = C0 and all([issubclass(dd.__class__,DataAbstract) for dd in lD])
C1 = issubclass(lD.__class__,DataAbstract)
assert C0 or C1, 'Provided first arg. must be a tf.data.DataAbstract or list !'
lD = [lD] if C1 else lD
kh = _plot.Data_plot_combine([self]+lD, key=key, bck=bck,
indref=indref, cmap=cmap, ms=ms,
vmin=vmin, vmax=vmax,
vmin_map=vmin_map, vmax_map=vmax_map,
cmap_map=cmap_map, normt_map=normt_map,
ntMax=ntMax, nchMax=nchMax,
inct=inct, incX=incX,
lls=lls, lct=lct, lcch=lcch, cbck=cbck,
fmt_t=fmt_t, fmt_X=fmt_X,
invert=invert, Lplot=Lplot, sharex=sharex,
dmarker=dmarker, fs=fs, dmargin=dmargin,
wintit=wintit, tit=tit, fontsize=fontsize,
labelpad=labelpad, draw=draw,
connect=connect)
return kh
def calc_spectrogram(self, fmin=None,
method='scipy-fourier', deg=False,
window='hann', detrend='linear',
nperseg=None, noverlap=None,
boundary='constant', padded=True,
wave='morlet', warn=True):
""" Return the power spectrum density for each channel
The power spectrum density is computed with the chosen method
Parameters
----------
fmin : None / float
The minimum frequency of interest
If None, set to 5/T, where T is the whole time interval
Used to constrain the number of points per window
deg : bool
Flag indicating whether to return the phase in deg (vs rad)
method : str
Flag indicating which method to use for computation:
- 'scipy-fourier': uses scipy.signal.spectrogram()
(windowed fast fourier transform)
- 'scipy-stft': uses scipy.signal.stft()
(short time fourier transform)
- 'scipy-wavelet': uses scipy.signal.cwt()
(continuous wavelet transform)
The following keyword args are fed to one of these scipy functions
See the corresponding online scipy documentation for details on
each function and its arguments
window : None / str / tuple
If method='scipy-fourier'
Flag indicating which type of window to use
detrend : None / str
If method='scipy-fourier'
Flag indicating whether and how to remove the trend of the signal
nperseg : None / int
If method='scipy-fourier'
Number of points to the used for each window
If None, deduced from fmin
noverlap:
If method='scipy-fourier'
Number of points on which successive windows should overlap
If None, nperseg-1
boundary:
If method='scipy-stft'
padded :
If method='scipy-stft'
d
wave: None / str
If method='scipy-wavelet'
Return
------
tf : np.ndarray
Time vector of the spectrogram (1D)
f: np.ndarray
frequency vector of the spectrogram (1D)
lspect: list of np.ndarrays
list of () spectrograms
"""
if self._isSpectral():
msg = "spectrogram not implemented yet for spectral data class"
raise Exception(msg)
tf, f, lpsd, lang = _comp.spectrogram(self.data, self.t,
fmin=fmin, deg=deg,
method=method, window=window,
detrend=detrend, nperseg=nperseg,
noverlap=noverlap, boundary=boundary,
padded=padded, wave=wave,
warn=warn)
return tf, f, lpsd, lang
def plot_spectrogram(self, fmin=None, fmax=None,
method='scipy-fourier', deg=False,
window='hann', detrend='linear',
nperseg=None, noverlap=None,
boundary='constant', padded=True, wave='morlet',
invert=True, plotmethod='imshow',
cmap_f=None, cmap_img=None,
ms=4, ntMax=None, nfMax=None,
bck=True, fs=None, dmargin=None, wintit=None,
tit=None, vmin=None, vmax=None, normt=False,
draw=True, connect=True, returnspect=False, warn=True):
""" Plot the spectrogram of all channels with chosen method
All non-plotting arguments are fed to self.calc_spectrogram()
see self.calc_spectrogram? for details
Parameters
----------
Return
------
kh : tofu.utils.HeyHandler
The tofu KeyHandler object handling figure interactivity
"""
if self._isSpectral():
msg = "spectrogram not implemented yet for spectral data class"
raise Exception(msg)
tf, f, lpsd, lang = _comp.spectrogram(self.data, self.t,
fmin=fmin, deg=deg,
method=method, window=window,
detrend=detrend, nperseg=nperseg,
noverlap=noverlap, boundary=boundary,
padded=padded, wave=wave,
warn=warn)
kh = _plot.Data_plot_spectrogram(self, tf, f, lpsd, lang, fmax=fmax,
invert=invert, plotmethod=plotmethod,
cmap_f=cmap_f, cmap_img=cmap_img,
ms=ms, ntMax=ntMax,
nfMax=nfMax, bck=bck, fs=fs,
dmargin=dmargin, wintit=wintit,
tit=tit, vmin=vmin, vmax=vmax,
normt=normt, draw=draw,
connect=connect)
if returnspect:
return kh, tf, f, lpsd, lang
else:
return kh
def calc_svd(self, lapack_driver='gesdd'):
""" Return the SVD decomposition of data
The input data np.ndarray shall be of dimension 2,
with time as the first dimension, and the channels in the second
Hence data should be of shape (nt, nch)
Uses scipy.linalg.svd(), with:
full_matrices = True
compute_uv = True
overwrite_a = False
check_finite = True
See scipy online doc for details
Return
------
chronos: np.ndarray
First arg (u) returned by scipy.linalg.svd()
Contains the so-called 'chronos', of shape (nt, nt)
i.e.: the time-dependent part of the decoposition
s: np.ndarray
Second arg (s) returned by scipy.linalg.svd()
Contains the singular values, of shape (nch,)
i.e.: the channel-dependent part of the decoposition
topos: np.ndarray
Third arg (v) returned by scipy.linalg.svd()
Contains the so-called 'topos', of shape (nch, nch)
i.e.: the channel-dependent part of the decoposition
"""
if self._isSpectral():
msg = "svd not implemented yet for spectral data class"
raise Exception(msg)
chronos, s, topos = _comp.calc_svd(self.data, lapack_driver=lapack_driver)
return chronos, s, topos
def extract_svd(self, modes=None, lapack_driver='gesdd', out=object):
""" Extract, as Data object, the filtered signal using selected modes
The svd (chronos, s, topos) is computed,
The selected modes are used to re-construct a filtered signal, using:
data = chronos[:,modes] @ (s[None,modes] @ topos[modes,:]
The result is exported a an array or a Data object on the same class
"""
if self._isSpectral():
msg = "svd not implemented yet for spectral data class"
raise Exception(msg)
msg = None
if modes is not None:
try:
modes = np.r_[modes].astype(int)
except Exception as err:
msg = str(err)
else:
msg = "Arg mode cannot be None !"
if msg is not None:
msg += "\n\nArg modes must a positive int or a list of such!\n"
msg += " - Provided: %s"%str(modes)
raise Exception(msg)
chronos, s, topos = _comp.calc_svd(self.data, lapack_driver=lapack_driver)
data = np.matmult(chronos[:,modes], (s[modes,None] * topos[modes,:]))
if out is object:
data = self.__class__(data=data, t=self.t, X=self.X,
lCam=self.lCam, config=self.config,
Exp=self.Id.Exp, Diag=self.Id.Diag,
shot=self.Id.shot,
Name=self.Id.Name + '-svd%s'%str(modes))
return data
def plot_svd(self, lapack_driver='gesdd', modes=None, key=None, bck=True,
Lplot='In', cmap=None, vmin=None, vmax=None,
cmap_topos=None, vmin_topos=None, vmax_topos=None,
ntMax=None, nchMax=None, ms=4,
inct=[1,10], incX=[1,5], incm=[1,5],
lls=None, lct=None, lcch=None, lcm=None, cbck=None,
invert=False, fmt_t='06.3f', fmt_X='01.0f', fmt_m='03.0f',
fs=None, dmargin=None, labelpad=None, wintit=None, tit=None,
fontsize=None, draw=True, connect=True):
""" Plot the chosen modes of the svd decomposition
All modes will be plotted, the keyword 'modes' is only used to
determine the reference modes for computing a common scale for
vizualisation
Runs self.calc_svd() and then plots the result in an interactive figure
"""
if self._isSpectral():
msg = "svd not implemented yet for spectral data class"
raise Exception(msg)
# Computing (~0.2 s for 50 channels 1D and 1000 times)
chronos, s, topos = _comp.calc_svd(self.data, lapack_driver=lapack_driver)
# Plotting (~11 s for 50 channels 1D and 1000 times)
kh = _plot.Data_plot_svd(self, chronos, s, topos, modes=modes,
key=key, bck=bck, Lplot=Lplot,
cmap=cmap, vmin=vmin, vmax=vmax,
cmap_topos=cmap_topos, vmin_topos=vmin_topos,
vmax_topos=vmax_topos,
ntMax=ntMax, nchMax=nchMax, ms=ms,
inct=inct, incX=incX, incm=incm,
lls=lls, lct=lct, lcch=lcch, lcm=lcm, cbck=cbck,
invert=invert, fmt_t=fmt_t, fmt_X=fmt_X, fmt_m=fmt_m,
fs=fs, dmargin=dmargin, labelpad=labelpad, wintit=wintit,
tit=tit, fontsize=fontsize, draw=draw,
connect=connect)
return kh
def save(self, path=None, name=None,
strip=None, deep=False, mode='npz',
compressed=False, verb=True, return_pfe=False):
if deep is False:
self.strip(1)
out = super(DataAbstract, self).save(path=path, name=name,
deep=deep, mode=mode,
strip=strip, compressed=compressed,
return_pfe=return_pfe, verb=verb)
return out
def save_to_imas(self, ids=None, shot=None, run=None, refshot=None, refrun=None,
user=None, database=None, version=None, occ=None,
dryrun=False, deep=True, verb=True,
restore_size=True, forceupdate=False,
path_data=None, path_X=None,
config_description_2d=None, config_occ=None):
import tofu.imas2tofu as _tfimas
_tfimas._save_to_imas(self, tfversion=__version__,
shot=shot, run=run, refshot=refshot,
refrun=refrun, user=user, database=database,
version=version, occ=occ, dryrun=dryrun, verb=verb,
ids=ids, deep=deep,
restore_size=restore_size,
forceupdate=forceupdate,
path_data=path_data, path_X=path_X,
config_description_2d=config_description_2d,
config_occ=config_occ)
#----------------------------
# Operator overloading section
@staticmethod
def _extract_common_params(obj0, obj1=None):
if obj1 is None:
Id = obj0.Id.copy()
Id._dall['Name'] += 'modified'
dcom = {'Id':Id,
'dchans':obj0._dchans, 'dlabels':obj0.dlabels,
't':obj0.t, 'X':obj0.X,
'lCam':obj0.lCam, 'config':obj0.config,
'dextra':obj0.dextra}
if dcom['lCam'] is not None:
dcom['config'] = None
else:
ls = ['SavePath', 'Diag', 'Exp', 'shot']
dcom = {ss:getattr(obj0.Id,ss) for ss in ls
if getattr(obj0.Id,ss) == getattr(obj1.Id,ss)}
if obj0._dchans == obj1._dchans:
dcom['dchans'] = obj0._dchans
if obj0.dlabels == obj1.dlabels:
dcom['dlabels'] = obj0.dlabels
if obj0.dextra == obj1.dextra:
dcom['dextra'] = obj0.dextra
if np.allclose(obj0.t, obj1.t):
dcom['t'] = obj0.t
if np.allclose(obj0.X, obj1.X):
dcom['X'] = obj0.X
if obj0.lCam is not None and obj1.lCam is not None:
if all([c0 == c1 for c0, c1 in zip(obj0.lCam, obj1.lCam)]):
dcom['lCam'] = obj0.lCam
if obj0.config == obj1.config:
dcom['config'] = obj0.config
return dcom
    @staticmethod
    def _recreatefromoperator(d0, other, opfunc):
        # Build a new instance of d0's class holding opfunc applied to the
        # data.  `other` may be None (unary operator), a scalar, a
        # np.ndarray, or another instance of the same class; the metadata
        # common to both operands is propagated to the result.
        if other is None:
            # unary operator, e.g. abs()
            data = opfunc(d0.data)
            dcom = d0._extract_common_params(d0)
        elif type(other) in [int, float, np.int64, np.float64]:
            # scalar operand
            data = opfunc(d0.data, other)
            dcom = d0._extract_common_params(d0)
        elif isinstance(other, np.ndarray):
            # raw array operand (numpy broadcasting rules apply)
            data = opfunc(d0.data, other)
            dcom = d0._extract_common_params(d0)
        elif issubclass(other.__class__, DataAbstract):
            # Data operand: only same-class combinations are supported
            if other.__class__.__name__ != d0.__class__.__name__:
                msg = 'Operator overloaded only for same-class instances:\n'
                msg += "    - provided: %s and %s"%(d0.__class__.__name__,
                                                    other.__class__.__name__)
                raise Exception(msg)
            try:
                data = opfunc(d0.data, other.data)
            except Exception as err:
                msg = str(err)
                msg += "\n\ndata shapes not matching !"
                raise Exception(msg)
            # keep only the metadata on which both operands agree
            dcom = d0._extract_common_params(d0, other)
        else:
            msg = "Behaviour not implemented !"
            raise NotImplementedError(msg)
        return d0.__class__(data=data, Name='New', **dcom)
def __abs__(self):
opfunc = lambda x: np.abs(x)
data = self._recreatefromoperator(self, None, opfunc)
return data
def __sub__(self, other):
opfunc = lambda x, y: x-y
data = self._recreatefromoperator(self, other, opfunc)
return data
def __rsub__(self, other):
opfunc = lambda x, y: x-y
data = self._recreatefromoperator(self, other, opfunc)
return data
def __add__(self, other):
opfunc = lambda x, y: x+y
data = self._recreatefromoperator(self, other, opfunc)
return data
def __radd__(self, other):
opfunc = lambda x, y: x+y
data = self._recreatefromoperator(self, other, opfunc)
return data
def __mul__(self, other):
opfunc = lambda x, y: x*y
data = self._recreatefromoperator(self, other, opfunc)
return data
def __rmul__(self, other):
opfunc = lambda x, y: x*y
data = self._recreatefromoperator(self, other, opfunc)
return data
def __truediv__(self, other):
opfunc = lambda x, y: x/y
data = self._recreatefromoperator(self, other, opfunc)
return data
def __pow__(self, other):
opfunc = lambda x, y: x**y
data = self._recreatefromoperator(self, other, opfunc)
return data
#####################################################################
#               Data1D and Data2D
#####################################################################

# Grab DataAbstract's full constructor signature once; the subclasses
# below expose filtered versions of it (e.g. without 'lamb' or 'dX12')
# through their __signature__ attribute.
sig = inspect.signature(DataAbstract)
params = sig.parameters
class DataCam1D(DataAbstract):
    """ Data object used for 1D cameras or list of 1D cameras """

    @classmethod
    def _is2D(cls):
        return False

    @classmethod
    def _isSpectral(cls):
        return False
# Hide the spectral- and 2D-only constructor args from DataCam1D's signature
lp = [p for p in params.values() if p.name not in ['lamb','dX12']]
DataCam1D.__signature__ = sig.replace(parameters=lp)
class DataCam1DSpectral(DataCam1D):
    """ Data object used for 1D spectral cameras (adds a wavelength axis) """

    @classmethod
    def _isSpectral(cls):
        return True

    @property
    def lamb(self):
        # wavelength vector of the (treated) data
        return self.get_ddata('lamb')

    @property
    def nlamb(self):
        # number of wavelength points
        return self.get_ddata('nlamb')
# Fixed: the 'lamb'-including signature must be attached to
# DataCam1DSpectral; it was mistakenly re-assigned to DataCam1D,
# overwriting the correct (lamb-free) signature set just above.
lp = [p for p in params.values() if p.name not in ['dX12']]
DataCam1DSpectral.__signature__ = sig.replace(parameters=lp)
class DataCam2D(DataAbstract):
    """ Data object used for 2D cameras or list of 2D cameras """

    @classmethod
    def _isSpectral(cls):
        return False

    @classmethod
    def _is2D(cls):
        return True

    def _checkformat_dX12(self, dX12=None):
        """ Check / format the pixel-coordinates input dX12

        Accepts None, 'geom' (or {'from':'geom'}, coordinates derived
        from the camera geometry), or an explicit dict with keys
        'x1', 'x2', 'ind1', 'ind2'.
        """
        # Exactly one of the three accepted forms must match
        lc = [dX12 is None, dX12 == 'geom' or dX12 == {'from':'geom'},
              isinstance(dX12, dict) and dX12 != {'from':'geom'}]
        if not np.sum(lc) == 1:
            msg = ("dX12 must be either:\n"
                   + "\t- None\n"
                   + "\t- 'geom' : will be derived from the cam geometry\n"
                   + "\t- dict : containing {'x1' : array of coords.,\n"
                   + "\t                     'x2' : array of coords.,\n"
                   + "\t                     'ind1': array of int indices,\n"
                   + "\t                     'ind2': array of int indices}")
            raise Exception(msg)
        if lc[1]:
            # Derive from geometry: requires exactly one camera with dX12
            # (removed a dead 'ls = self._get_keys_dX12()' assignment here)
            c0 = self._dgeom['lCam'] is not None
            c1 = c0 and len(self._dgeom['lCam']) == 1
            c2 = c1 and self._dgeom['lCam'][0].dX12 is not None
            if not c2:
                msg = "dX12 cannot be derived from dgeom['lCam'][0].dX12 !"
                raise Exception(msg)
            dX12 = {'from':'geom'}
        elif lc[2]:
            # Explicit dict: normalize coordinate arrays and index arrays
            ls = ['x1','x2','ind1','ind2']
            assert all([ss in dX12.keys() for ss in ls])
            x1 = np.asarray(dX12['x1']).ravel()
            x2 = np.asarray(dX12['x2']).ravel()
            n1, n2 = x1.size, x2.size
            ind1, ind2, indr = self._get_ind12r_n12(ind1=dX12['ind1'],
                                                    ind2=dX12['ind2'],
                                                    n1=n1, n2=n2)
            dX12 = {'x1':x1, 'x2':x2, 'n1':n1, 'n2':n2,
                    'ind1':ind1, 'ind2':ind2, 'indr':indr, 'from':'self'}
        return dX12

    def set_dX12(self, dX12=None):
        """ Validate and store the pixel-coordinates dict """
        dX12 = self._checkformat_dX12(dX12)
        self._dX12.update(dX12)

    @property
    def dX12(self):
        # When flagged 'geom', the coordinates live on the camera object
        if self._dX12 is not None and self._dX12['from'] == 'geom':
            dX12 = self._dgeom['lCam'][0].dX12
        else:
            dX12 = self._dX12
        return dX12

    def get_X12plot(self, plot='imshow'):
        """ Return (x1, x2, indr, extent) suitable for imshow-style plots """
        assert self.dX12 is not None
        if plot == 'imshow':
            x1, x2 = self.dX12['x1'], self.dX12['x2']
            # half-pixel margins so extent covers whole pixels
            x1min, Dx1min = x1[0], 0.5*(x1[1]-x1[0])
            x1max, Dx1max = x1[-1], 0.5*(x1[-1]-x1[-2])
            x2min, Dx2min = x2[0], 0.5*(x2[1]-x2[0])
            x2max, Dx2max = x2[-1], 0.5*(x2[-1]-x2[-2])
            extent = (x1min - Dx1min, x1max + Dx1max,
                      x2min - Dx2min, x2max + Dx2max)
            indr = self.dX12['indr']
            return x1, x2, indr, extent
# Hide the spectral-only 'lamb' constructor arg from DataCam2D's signature
lp = [p for p in params.values() if p.name not in ['lamb']]
DataCam2D.__signature__ = sig.replace(parameters=lp)
class DataCam2DSpectral(DataCam2D):
    """ Data object used for 2D spectral cameras (adds a wavelength axis) """

    @classmethod
    def _isSpectral(cls):
        return True

    @property
    def lamb(self):
        # wavelength vector of the (treated) data
        return self.get_ddata('lamb')

    @property
    def nlamb(self):
        # number of wavelength points
        return self.get_ddata('nlamb')
# Fixed: the full signature must be attached to DataCam2DSpectral; it was
# mistakenly re-assigned to DataCam2D, overwriting the correct
# (lamb-free) signature set just above.
lp = [p for p in params.values()]
DataCam2DSpectral.__signature__ = sig.replace(parameters=lp)
# ####################################################################
# ####################################################################
# Plasma2D
# ####################################################################
# ####################################################################
class Plasma2D(utils.ToFuObject):
    """ A generic class for handling 2D (and 1D) plasma profiles

    Provides:
        - equilibrium-related quantities
        - any 1d profile (can be remapped on 2D equilibrium)
        - spatial interpolation methods
    """
    # Fixed (class-wise) dictionary of default properties:
    #   'Id'     : fields included when building the object's unique Id
    #   'dtreat' : order in which data-treatment steps are applied
    _ddef = {'Id': {'include': ['Mod', 'Cls', 'Exp', 'Diag',
                                'Name', 'shot', 'version']},
             'dtreat': {'order': ['mask', 'interp-indt', 'interp-indch',
                                  'data0', 'dfit',
                                  'indt', 'indch', 'indlamb', 'interp-t']}}
    def __init_subclass__(cls, **kwdargs):
        # Give every subclass its own deep copy of the class defaults so
        # subclass-level mutations never leak back into Plasma2D._ddef.
        # Does not exist before Python 3.6 !!!
        # Python 2
        super(Plasma2D, cls).__init_subclass__(**kwdargs)
        # Python 3
        # super().__init_subclass__(**kwdargs)
        cls._ddef = copy.deepcopy(Plasma2D._ddef)
        # cls._dplot = copy.deepcopy(Struct._dplot)
        # cls._set_color_ddef(cls._color)
    def __init__(self, dtime=None, dradius=None, d0d=None, d1d=None,
                 d2d=None, dmesh=None, config=None,
                 Id=None, Name=None, Exp=None, shot=None,
                 fromdict=None, sep=None, SavePath=os.path.abspath('./'),
                 SavePath_Include=tfpf.defInclude):
        # Create a dplot at instance level
        #self._dplot = copy.deepcopy(self.__class__._dplot)

        # NOTE: locals() captures all constructor args as a dict; it must be
        # called before any new local variable is created, and 'self' is
        # removed before forwarding everything to the parent constructor.
        kwdargs = locals()
        del kwdargs['self']
        # super()
        super(Plasma2D,self).__init__(**kwdargs)
def _reset(self):
# super()
super(Plasma2D,self)._reset()
self._dgroup = dict.fromkeys(self._get_keys_dgroup())
self._dindref = dict.fromkeys(self._get_keys_dindref())
self._ddata = dict.fromkeys(self._get_keys_ddata())
self._dgeom = dict.fromkeys(self._get_keys_dgeom())
@classmethod
def _checkformat_inputs_Id(cls, Id=None, Name=None,
Exp=None, shot=None,
include=None, **kwdargs):
if Id is not None:
assert isinstance(Id,utils.ID)
Name, Exp, shot = Id.Name, Id.Exp, Id.shot
assert type(Name) is str, Name
assert type(Exp) is str, Exp
if include is None:
include = cls._ddef['Id']['include']
assert shot is None or type(shot) in [int,np.int64]
if shot is None:
if 'shot' in include:
include.remove('shot')
else:
shot = int(shot)
if 'shot' not in include:
include.append('shot')
kwdargs.update({'Name':Name, 'Exp':Exp, 'shot':shot,
'include':include})
return kwdargs
###########
# Get largs
###########
@staticmethod
def _get_largs_dindrefdatagroup():
largs = ['dtime', 'dradius', 'dmesh', 'd0d', 'd1d', 'd2d']
return largs
@staticmethod
def _get_largs_dgeom():
largs = ['config']
return largs
###########
# Get check and format inputs
###########
#---------------------
# Methods for checking and formatting inputs
#---------------------
@staticmethod
def _extract_dnd(dnd, k0,
dim_=None, quant_=None, name_=None,
origin_=None, units_=None):
# Set defaults
dim_ = k0 if dim_ is None else dim_
quant_ = k0 if quant_ is None else quant_
name_ = k0 if name_ is None else name_
origin_ = 'unknown' if origin_ is None else origin_
units_ = 'a.u.' if units_ is None else units_
# Extrac
dim = dnd[k0].get('dim', None)
if dim is None:
dim = dim_
quant = dnd[k0].get('quant', None)
if quant is None:
quant = quant_
origin = dnd[k0].get('origin', None)
if origin is None:
origin = origin_
name = dnd[k0].get('name', None)
if name is None:
name = name_
units = dnd[k0].get('units', None)
if units is None:
units = units_
return dim, quant, origin, name, units
@staticmethod
def _checkformat_dtrm(dtime=None, dradius=None, dmesh=None,
d0d=None, d1d=None, d2d=None):
dd = {'dtime':dtime, 'dradius':dradius, 'dmesh':dmesh,
'd0d':d0d, 'd1d':d1d, 'd2d':d2d}
# Define allowed keys for each dict
lkok = ['data', 'dim', 'quant', 'name', 'origin', 'units',
'depend']
lkmeshmax = ['type', 'ftype', 'nodes', 'faces', 'R', 'Z', 'shapeRZ',
'nfaces', 'nnodes', 'mpltri', 'size', 'ntri']
lkmeshmin = ['type', 'ftype']
dkok = {'dtime': {'max':lkok, 'min':['data'], 'ndim':[1]},
'dradius':{'max':lkok, 'min':['data'], 'ndim':[1,2]},
'd0d':{'max':lkok, 'min':['data'], 'ndim':[1,2,3]},
'd1d':{'max':lkok, 'min':['data'], 'ndim':[1,2]},
'd2d':{'max':lkok, 'min':['data'], 'ndim':[1,2]}}
dkok['dmesh'] = {'max':lkok + lkmeshmax, 'min':lkmeshmin}
# Check each dict independently
for dk, dv in dd.items():
if dv is None or len(dv) == 0:
dd[dk] = {}
continue
c0 = type(dv) is not dict or any([type(k0) is not str
for k0 in dv.keys()])
c0 = any([type(k0) is not str or type(v0) is not dict
for k0, v0 in dv.items()])
if c0:
msg = "Arg %s must be a dict with:\n"
msg += " - (key, values) of type (str, dict)"
raise Exception(msg)
for k0, v0 in dv.items():
c0 = any([k1 not in dkok[dk]['max'] for k1 in v0.keys()])
c0 = c0 or any([v0.get(k1,None) is None
for k1 in dkok[dk]['min']])
if c0:
msg = "Arg %s[%s] must be a dict with keys in:\n"%(dk,k0)
msg += " - %s\n"%str(dkok[dk]['max'])
msg += "And with at least the following keys:\n"
msg += " - %s\n"%str(dkok[dk]['min'])
msg += "Provided:\n"
msg += " - %s\n"%str(v0.keys())
msg += "Missing:\n"
msg += " - %s\n"%str(set(dkok[dk]['min']).difference(v0.keys()))
msg += "Non-valid:\n"
msg += " - %s"%str(set(v0.keys()).difference(dkok[dk]['max']))
raise Exception(msg)
if 'data' in dkok[dk]['min']:
dd[dk][k0]['data'] = np.atleast_1d(np.squeeze(v0['data']))
if dd[dk][k0]['data'].ndim not in dkok[dk]['ndim']:
msg = "%s[%s]['data'] has wrong dimensions:\n"%(dk,k0)
msg += " - Expected: %s\n"%str(dkok[dk]['ndim'])
msg += " - Provided: %s"%str(dd[dk][k0]['data'].ndim)
raise Exception(msg)
# mesh
if dk == 'dmesh':
lmok = ['rect', 'tri', 'quadtri']
if v0['type'] not in lmok:
msg = ("Mesh['type'] should be in {}\n".format(lmok)
+ "\t- Provided: {}".format(v0['type']))
raise Exception(msg)
if v0['type'] == 'rect':
c0 = all([ss in v0.keys() and v0[ss].ndim in [1, 2]
for ss in ['R', 'Z']])
if not c0:
msg = ("A mesh of type 'rect' must have attr.:\n"
+ "\t- R of dim in [1, 2]\n"
+ "\t- Z of dim in [1, 2]")
raise Exception(msg)
shapeu = np.unique(np.r_[v0['R'].shape, v0['Z'].shape])
shapeRZ = v0['shapeRZ']
if shapeRZ is None:
shapeRZ = [None, None]
else:
shapeRZ = list(shapeRZ)
if v0['R'].ndim == 1:
if np.any(np.diff(v0['R']) <= 0.):
msg = "Non-increasing R"
raise Exception(msg)
R = v0['R']
else:
lc = [np.all(np.diff(v0['R'][0, :])) > 0.,
np.all(np.diff(v0['R'][:, 0])) > 0.]
if np.sum(lc) != 1:
msg = "Impossible to know R dimension!"
raise Exception(msg)
if lc[0]:
R = v0['R'][0, :]
if shapeRZ[1] is None:
shapeRZ[1] = 'R'
if shapeRZ[1] != 'R':
msg = "Inconsistent shapeRZ"
raise Exception(msg)
else:
R = v0['R'][:, 0]
if shapeRZ[0] is None:
shapeRZ[0] = 'R'
if shapeRZ[0] != 'R':
msg = "Inconsistent shapeRZ"
raise Exception(msg)
if v0['Z'].ndim == 1:
if np.any(np.diff(v0['Z']) <= 0.):
msg = "Non-increasing Z"
raise Exception(msg)
Z = v0['Z']
else:
lc = [np.all(np.diff(v0['Z'][0, :])) > 0.,
np.all(np.diff(v0['Z'][:, 0])) > 0.]
if np.sum(lc) != 1:
msg = "Impossible to know R dimension!"
raise Exception(msg)
if lc[0]:
Z = v0['Z'][0, :]
if shapeRZ[1] is None:
shapeRZ[1] = 'Z'
if shapeRZ[1] != 'Z':
msg = "Inconsistent shapeRZ"
raise Exception(msg)
else:
Z = v0['Z'][:, 0]
if shapeRZ[0] is None:
shapeRZ[0] = 'Z'
if shapeRZ[0] != 'Z':
msg = "Inconsistent shapeRZ"
raise Exception(msg)
shapeRZ = tuple(shapeRZ)
if shapeRZ not in [('R', 'Z'), ('Z', 'R')]:
msg = "Inconsistent shapeRZ"
raise Exception(msg)
if None in shapeRZ:
msg = ("Please provide shapeRZ "
+ " = ('R', 'Z') or ('Z', 'R')\n"
+ "Could not be inferred from data itself")
raise Exception(msg)
def trifind(r, z,
Rbin=0.5*(R[1:] + R[:-1]),
Zbin=0.5*(Z[1:] + Z[:-1]),
nR=R.size, nZ=Z.size,
shapeRZ=shapeRZ):
indR = np.searchsorted(Rbin, r)
indZ = np.searchsorted(Zbin, z)
if shapeRZ == ('R', 'Z'):
indpts = indR*nZ + indZ
else:
indpts = indZ*nR + indR
indout = ((r < R[0]) | (r > R[-1])
| (z < Z[0]) | (z > Z[-1]))
indpts[indout] = -1
return indpts
dd[dk][k0]['R'] = R
dd[dk][k0]['Z'] = Z
dd[dk][k0]['shapeRZ'] = shapeRZ
dd[dk][k0]['nR'] = R.size
dd[dk][k0]['nZ'] = Z.size
dd[dk][k0]['trifind'] = trifind
if dd[dk][k0]['ftype'] != 0:
msg = "Linear interpolation not handled yet !"
raise Exception(msg)
dd[dk][k0]['size'] = R.size*Z.size
else:
ls = ['nodes', 'faces']
if not all([s in v0.keys() for s in ls]):
msg = ("The following keys should be in dmesh:\n"
+ "\t- {}".format(ls))
raise Exception(msg)
func = np.atleast_2d
dd[dk][k0]['nodes'] = func(v0['nodes']).astype(float)
dd[dk][k0]['faces'] = func(v0['faces']).astype(int)
nnodes = dd[dk][k0]['nodes'].shape[0]
nfaces = dd[dk][k0]['faces'].shape[0]
# Test for duplicates
nodesu = np.unique(dd[dk][k0]['nodes'], axis=0)
facesu = np.unique(dd[dk][k0]['faces'], axis=0)
lc = [nodesu.shape[0] != nnodes,
facesu.shape[0] != nfaces]
if any(lc):
msg = "Non-valid mesh {0}[{1}]: \n".format(dk, k0)
if lc[0]:
ndup = nnodes - nodesu.shape[0]
ndsh = dd[dk][k0]['nodes'].shape
undsh = nodesu.shape
msg += (
" Duplicate nodes: {}\n".format(ndup)
+ "\t- nodes.shape: {}\n".format(ndsh)
+ "\t- unique shape: {}\n".format(undsh))
if lc[1]:
ndup = str(nfaces - facesu.shape[0])
facsh = str(dd[dk][k0]['faces'].shape)
ufacsh = str(facesu.shape)
msg += (
" Duplicate faces: {}\n".format(ndup)
+ "\t- faces.shape: {}\n".format(facsh)
+ "\t- unique shape: {}".format(ufacsh))
raise Exception(msg)
# Test for unused nodes
facesu = np.unique(facesu)
c0 = np.all(facesu >= 0) and facesu.size == nnodes
if not c0:
ino = str([ii for ii in range(0, nnodes)
if ii not in facesu])
msg = "Unused nodes in {0}[{1}]:\n".format(dk, k0)
msg += " - unused nodes indices: {}".format(ino)
warnings.warn(msg)
dd[dk][k0]['nnodes'] = dd[dk][k0].get('nnodes', nnodes)
dd[dk][k0]['nfaces'] = dd[dk][k0].get('nfaces', nfaces)
assert dd[dk][k0]['nodes'].shape == (v0['nnodes'], 2)
assert np.max(dd[dk][k0]['faces']) < v0['nnodes']
# Only triangular meshes so far
assert v0['type'] in ['tri', 'quadtri'], v0['type']
if 'tri' in v0['type']:
fshap = dd[dk][k0]['faces'].shape
fshap0 = (v0['nfaces'], 3)
if fshap != fshap0:
msg = ("Wrong shape of {}[{}]\n".format(dk, k0)
+ "\t- Expected: {}\n".format(fshap0)
+ "\t- Provided: {}".format(fshap))
raise Exception(msg)
if v0.get('mpltri', None) is None:
dd[dk][k0]['mpltri'] = mplTri(
dd[dk][k0]['nodes'][:, 0],
dd[dk][k0]['nodes'][:, 1],
dd[dk][k0]['faces'])
assert isinstance(dd[dk][k0]['mpltri'], mplTri)
assert dd[dk][k0]['ftype'] in [0, 1]
ntri = dd[dk][k0]['ntri']
if dd[dk][k0]['ftype'] == 1:
dd[dk][k0]['size'] = dd[dk][k0]['nnodes']
else:
dd[dk][k0]['size'] = int(
dd[dk][k0]['nfaces'] / ntri
)
# Check unicity of all keys
lk = [list(dv.keys()) for dv in dd.values()]
lk = list(itt.chain.from_iterable(lk))
lku = sorted(set(lk))
lk = ['{0} : {1} times'.format(kk, str(lk.count(kk)))
for kk in lku if lk.count(kk) > 1]
if len(lk) > 0:
msg = ("Each key of (dtime, dradius, dmesh, d0d, d1d, d2d)"
+ " must be unique !\n"
+ "The following keys are repeated :\n"
+ " - " + "\n - ".join(lk))
raise Exception(msg)
dtime, dradius, dmesh = dd['dtime'], dd['dradius'], dd['dmesh']
d0d, d1d, d2d = dd['d0d'], dd['d1d'], dd['d2d']
return dtime, dradius, dmesh, d0d, d1d, d2d
def _checkformat_inputs_dgeom(self, config=None):
if config is not None:
assert issubclass(config.__class__, utils.ToFuObject)
return config
###########
# Get keys of dictionnaries
###########
@staticmethod
def _get_keys_dgroup():
lk = ['time', 'radius', 'mesh']
return lk
@staticmethod
def _get_keys_dindref():
lk = []
return lk
@staticmethod
def _get_keys_ddata():
lk = []
return lk
@staticmethod
def _get_keys_dgeom():
lk = ['config']
return lk
###########
# _init
###########
    def _init(self, dtime=None, dradius=None, dmesh=None,
              d0d=None, d1d=None, d2d=None,
              config=None, **kwargs):
        """Initialize the object from the input dicts (called by __init__).

        Dispatches the keyword arguments to the data-setting and the
        geometry-setting routines, then marks the object as unstripped.
        """
        # locals() is captured FIRST, before any local variable is created,
        # so kwdargs holds exactly the call arguments (plus self / kwargs);
        # do not move this line or define variables above it
        kwdargs = locals()
        kwdargs.update(**kwargs)
        # Split kwdargs between the two setters according to their
        # respective accepted-argument lists
        largs = self._get_largs_dindrefdatagroup()
        kwdindrefdatagroup = self._extract_kwdargs(kwdargs, largs)
        largs = self._get_largs_dgeom()
        kwdgeom = self._extract_kwdargs(kwdargs, largs)
        self._set_dindrefdatagroup(**kwdindrefdatagroup)
        self.set_dgeom(**kwdgeom)
        # Freshly initialized => strip level 0 (nothing stripped)
        self._dstrip['strip'] = 0
###########
# set dictionaries
###########
@staticmethod
def _find_lref(shape=None, k0=None, dd=None, ddstr=None,
dindref=None, lrefname=['t','radius']):
if 'depend' in dd[k0].keys():
lref = dd[k0]['depend']
else:
lref = [[kk for kk, vv in dindref.items()
if vv['size'] == sh and vv['group'] in lrefname]
for sh in shape]
lref = list(itt.chain.from_iterable(lref))
if len(lref) < len(shape):
msg = "Maybe not enoough references for %s[%s]:\n"%(ddstr,k0)
msg += " - shape: %s\n"%str(shape)
msg += " - lref: %s"%str(lref)
warnings.warn(msg)
if len(lref) > len(shape):
msg = "Too many references for %s[%s]:\n"%(ddstr,k0)
msg += " - shape: %s\n"%str(shape)
msg += " - lref: %s"%str(lref)
raise Exception(msg)
return lref
    def _set_dindrefdatagroup(self, dtime=None, dradius=None, dmesh=None,
                              d0d=None, d1d=None, d2d=None):
        """Build and store the internal dgroup / dindref / ddata dicts.

        The input dicts are checked/normalized, then each entry is turned
        into a reference (dindref) and/or a data entry (ddata), and the
        group dict (dgroup) is derived.  Finally _complement() cross-checks
        global consistency.
        """
        # Check dtime is not None
        out = self._checkformat_dtrm(dtime=dtime, dradius=dradius, dmesh=dmesh,
                                     d0d=d0d, d1d=d1d, d2d=d2d)
        dtime, dradius, dmesh, d0d, d1d, d2d = out

        dgroup, dindref, ddata = {}, {}, {}
        empty = {}

        # Get indt
        # Each time vector is both a reference (dindref) and data (ddata)
        if dtime is not None:
            for k0 in dtime.keys():
                out = self._extract_dnd(dtime,k0,
                                        dim_='time', quant_='t',
                                        name_=k0, units_='s')
                dim, quant, origin, name, units = out
                assert k0 not in dindref.keys()
                # Force 1d time vectors
                dtime[k0]['data'] = np.atleast_1d(np.squeeze(dtime[k0]['data']))
                assert dtime[k0]['data'].ndim == 1
                dindref[k0] = {'size':dtime[k0]['data'].size,
                               'group':'time'}
                assert k0 not in ddata.keys()
                ddata[k0] = {'data':dtime[k0]['data'],
                             'dim':dim, 'quant':quant, 'name':name,
                             'origin':origin, 'units':units, 'depend':(k0,)}

        # d0d
        # 0d (time-only) signals: must depend on exactly one time reference
        if d0d is not None:
            for k0 in d0d.keys():
                out = self._extract_dnd(d0d,k0)
                dim, quant, origin, name, units = out
                # data
                d0d[k0]['data'] = np.atleast_1d(np.squeeze(d0d[k0]['data']))
                assert d0d[k0]['data'].ndim >= 1
                depend = self._find_lref(d0d[k0]['data'].shape, k0, dd=d0d,
                                         ddstr='d0d', dindref=dindref,
                                         lrefname=['t'])
                assert len(depend) == 1 and dindref[depend[0]]['group']=='time'
                assert k0 not in ddata.keys()
                ddata[k0] = {'data':d0d[k0]['data'],
                             'dim':dim, 'quant':quant, 'name':name,
                             'units':units, 'origin':origin, 'depend':depend}

        # get radius
        # Radius bases may be static (1d) or time-varying (2d, one time axis)
        if dradius is not None:
            for k0 in dradius.keys():
                out = self._extract_dnd(dradius, k0, name_=k0)
                dim, quant, origin, name, units = out
                assert k0 not in dindref.keys()
                data = np.atleast_1d(np.squeeze(dradius[k0]['data']))
                assert data.ndim in [1,2]
                if len(dradius[k0].get('depend',[1])) == 1:
                    # Static radius base
                    assert data.ndim == 1
                    size = data.size
                else:
                    # Time-dependent radius base: find which axis is time
                    lkt = [k for k in dtime.keys() if k in dradius[k0]['depend']]
                    assert len(lkt) == 1
                    axist = dradius[k0]['depend'].index(lkt[0])
                    # Handle cases with only 1 time step
                    if data.ndim == 1:
                        assert dindref[lkt[0]]['size'] == 1
                        data = data.reshape((1, data.size))
                    size = data.shape[1-axist]
                dindref[k0] = {'size':size,
                               'group':'radius'}
                assert k0 not in ddata.keys()
                depend = self._find_lref(data.shape, k0, dd=dradius,
                                         ddstr='dradius', dindref=dindref,
                                         lrefname=['t','radius'])
                ddata[k0] = {'data':data,
                             'dim':dim, 'quant':quant, 'name':name,
                             'origin':origin, 'units':units, 'depend':depend}

        # Get d1d
        # 1d profiles: forced to 2d (time, radius)
        if d1d is not None:
            for k0 in d1d.keys():
                out = self._extract_dnd(d1d,k0)
                dim, quant, origin, name, units = out
                d1d[k0]['data'] = np.atleast_2d(np.squeeze(d1d[k0]['data']))
                assert d1d[k0]['data'].ndim == 2
                # data
                depend = self._find_lref(d1d[k0]['data'].shape, k0, dd=d1d,
                                         ddstr='d1d', dindref=dindref,
                                         lrefname=['t','radius'])
                assert k0 not in ddata.keys()
                ddata[k0] = {'data':d1d[k0]['data'],
                             'dim':dim, 'quant':quant, 'name':name,
                             'units':units, 'origin':origin, 'depend':depend}

        # dmesh ref
        # A mesh is stored whole (dict) as the 'data' of its own reference
        if dmesh is not None:
            for k0 in dmesh.keys():
                out = self._extract_dnd(dmesh, k0, dim_='mesh')
                dim, quant, origin, name, units = out
                assert k0 not in dindref.keys()
                dindref[k0] = {'size':dmesh[k0]['size'],
                               'group':'mesh'}
                assert k0 not in ddata.keys()
                ddata[k0] = {'data':dmesh[k0],
                             'dim':dim, 'quant':quant, 'name':name,
                             'units':units, 'origin':origin, 'depend':(k0,)}

        # d2d
        # 2d (time, mesh) signals
        if d2d is not None:
            for k0 in d2d.keys():
                out = self._extract_dnd(d2d,k0)
                dim, quant, origin, name, units = out
                d2d[k0]['data'] = np.atleast_2d(np.squeeze(d2d[k0]['data']))
                assert d2d[k0]['data'].ndim == 2
                depend = self._find_lref(d2d[k0]['data'].shape, k0, dd=d2d,
                                         ddstr='d2d', dindref=dindref,
                                         lrefname=['t','mesh'])
                assert k0 not in ddata.keys()
                ddata[k0] = {'data':d2d[k0]['data'],
                             'dim':dim, 'quant':quant, 'name':name,
                             'units':units, 'origin':origin, 'depend':depend}

        # dgroup
        # The first key of each input dict becomes the group's default ref
        dgroup = {}
        if len(dtime) > 0:
            dgroup['time'] = {'dref': list(dtime.keys())[0]}
        if len(dradius) > 0:
            dgroup['radius'] = {'dref': list(dradius.keys())[0]}
        if len(dmesh) > 0:
            dgroup['mesh'] = {'dref': list(dmesh.keys())[0]}

        # Update dict
        self._dgroup = dgroup
        self._dindref = dindref
        self._ddata = ddata

        # Complement
        self._complement()
    def _complement(self):
        """Cross-check dgroup / dindref / ddata and fill derived fields.

        Adds 'lgroup' to each ddata entry, 'ldata' to each dindref entry,
        and 'lindref' / 'ldata' to each dgroup entry.  Raises on any
        inconsistency between declared dependencies and references.
        """
        # --------------
        # ddata
        for k0, v0 in self.ddata.items():
            # Every declared dependency must be a known reference
            lindout = [ii for ii in v0['depend'] if ii not in self.dindref.keys()]
            if not len(lindout) == 0:
                msg = ("ddata[{}]['depend'] keys not in dindref:\n".format(k0)
                       + "  - " + "\n  - ".join(lindout))
                raise Exception(msg)
            self.ddata[k0]['lgroup'] = [self.dindref[ii]['group']
                                        for ii in v0['depend']]
            type_ = type(v0['data'])
            # Expected shape implied by the references
            shape = tuple([self.dindref[ii]['size'] for ii in v0['depend']])
            # if only one dim => mesh or iterable or unspecified
            if len(shape) == 1 or type_ is dict:
                # c0: a mesh stored as a dict; c1: any sized iterable
                c0 = type_ is dict and 'mesh' in self.ddata[k0]['lgroup']
                c1 = not c0 and len(v0['data']) == shape[0]
                if not (c0 or c1):
                    msg = ("Signal {}['data'] should be either:\n".format(k0)
                           + "\t- dict: a mesh\n"
                           + "\t- iterable of len() = "
                           + "{} (shape[0] of ref)\n".format(shape[0])
                           + "  You provided:\n"
                           + "\t- type: {}\n".format(type_)
                           + "\t- len(): {}\n".format(len(v0['data']))
                           + "\t- {}['data']: {}".format(k0, v0['data']))
                    raise Exception(msg)
            else:
                # Multi-dim => must be an ndarray of exactly the implied shape
                assert type(v0['data']) is np.ndarray
                assert v0['data'].shape == shape

        # --------------
        # dindref
        for k0 in self.dindref.keys():
            # ldata: all data entries depending on this reference
            self.dindref[k0]['ldata'] = [kk for kk, vv in self.ddata.items()
                                         if k0 in vv['depend']]
            assert self.dindref[k0]['group'] in self.dgroup.keys()

        # --------------
        # dgroup
        for gg, vg in self.dgroup.items():
            # All references and data entries belonging to this group
            lindref = [id_ for id_,vv in self.dindref.items()
                       if vv['group'] == gg]
            ldata = [id_ for id_ in self.ddata.keys()
                     if any([id_ in self.dindref[vref]['ldata']
                             for vref in lindref])]
            #assert vg['depend'] in lidindref
            self.dgroup[gg]['lindref'] = lindref
            self.dgroup[gg]['ldata'] = ldata
def set_dgeom(self, config=None):
config = self._checkformat_inputs_dgeom(config=config)
self._dgeom = {'config':config}
###########
# strip dictionaries
###########
    def _strip_ddata(self, strip=0):
        # Placeholder: no ddata stripping implemented (kept for API symmetry
        # with _strip_dgeom)
        pass
    def _strip_dgeom(self, strip=0, force=False, verb=True):
        """Strip (1) or restore (0) the geometry config.

        Stripping replaces the config object by the path to its saved file;
        restoring reloads the object from that file.  If no saved file is
        found, raise unless force=True (then only warn).
        """
        if self._dstrip['strip']==strip:
            return

        if strip in [0] and self._dstrip['strip'] in [1]:
            # Restore: the stored value is a path/file string => reload
            config = None
            if self._dgeom['config'] is not None:
                assert type(self._dgeom['config']) is str
                config = utils.load(self._dgeom['config'], verb=verb)
            # NOTE(review): calls _set_dgeom — only set_dgeom is defined in
            # this section; confirm _set_dgeom exists elsewhere in the class
            self._set_dgeom(config=config)
        elif strip in [1] and self._dstrip['strip'] in [0]:
            # Strip: replace the config object by its save-file path
            if self._dgeom['config'] is not None:
                path = self._dgeom['config'].Id.SavePath
                name = self._dgeom['config'].Id.SaveName
                pfe = os.path.join(path, name+'.npz')
                # Check that the save file actually exists on disk
                lf = os.listdir(path)
                lf = [ff for ff in lf if name+'.npz' in ff]
                exist = len(lf)==1
                if not exist:
                    msg = """BEWARE:
                You are about to delete the config object
                Only the path/name to saved a object will be kept

                But it appears that the following object has no
                saved file where specified (obj.Id.SavePath)
                Thus it won't be possible to retrieve it
                (unless available in the current console:"""
                    msg += "\n - {0}".format(pfe)
                    if force:
                        warnings.warn(msg)
                    else:
                        raise Exception(msg)
                self._dgeom['config'] = pfe
###########
# _strip and get/from dict
###########
    @classmethod
    def _strip_init(cls):
        # Register the allowed strip levels and build the dynamic docstring
        # of strip() from the base-class template (runtime string, not a
        # comment: do not edit its content)
        cls._dstrip['allowed'] = [0,1]
        nMax = max(cls._dstrip['allowed'])
        doc = """
                 1: dgeom pathfiles
                 """
        doc = utils.ToFuObjectBase.strip.__doc__.format(doc,nMax)
        cls.strip.__doc__ = doc
    def strip(self, strip=0, verb=True):
        # Delegate to the base class; the docstring of this method is set
        # dynamically by _strip_init() (see above), hence none here
        # super()
        super(Plasma2D,self).strip(strip=strip, verb=verb)
    def _strip(self, strip=0, verb=True):
        # Only dgeom participates in stripping (ddata stripping is a no-op)
        self._strip_dgeom(strip=strip, verb=verb)
def _to_dict(self):
dout = {'dgroup':{'dict':self._dgroup, 'lexcept':None},
'dindref':{'dict':self._dindref, 'lexcept':None},
'ddata':{'dict':self._ddata, 'lexcept':None},
'dgeom':{'dict':self._dgeom, 'lexcept':None}}
return dout
def _from_dict(self, fd):
self._dgroup.update(**fd['dgroup'])
self._dindref.update(**fd['dindref'])
self._ddata.update(**fd['ddata'])
self._dgeom.update(**fd['dgeom'])
###########
# properties
###########
    @property
    def dgroup(self):
        """Dict of data groups ('time', 'radius', 'mesh') and their refs."""
        return self._dgroup
    @property
    def dindref(self):
        """Dict of references ({key: {'size', 'group', 'ldata'}})."""
        return self._dindref
    @property
    def ddata(self):
        """Dict of all data entries ({key: {'data', 'dim', 'quant', ...}})."""
        return self._ddata
@property
def dtime(self):
return dict([(kk, self._ddata[kk]) for kk,vv in self._dindref.items()
if vv['group'] == 'time'])
@property
def dradius(self):
return dict([(kk, self._ddata[kk]) for kk,vv in self._dindref.items()
if vv['group'] == 'radius'])
@property
def dmesh(self):
return dict([(kk, self._ddata[kk]) for kk,vv in self._dindref.items()
if vv['group'] == 'mesh'])
    @property
    def config(self):
        """Geometry configuration (or its file path when stripped)."""
        return self._dgeom['config']
#---------------------
# Read-only for internal use
#---------------------
@property
def _lquantboth(self):
""" Return list of quantities available both in 1d and 2d """
lq1 = [self._ddata[vd]['quant'] for vd in self._dgroup['radius']['ldata']]
lq2 = [self._ddata[vd]['quant'] for vd in self._dgroup['mesh']['ldata']]
lq = list(set(lq1).intersection(lq2))
return lq
def _get_ldata(self, dim=None, quant=None, name=None,
units=None, origin=None,
indref=None, group=None, log='all', return_key=True):
assert log in ['all','any','raw']
lid = np.array(list(self._ddata.keys()))
ind = np.ones((7,len(lid)),dtype=bool)
if dim is not None:
ind[0,:] = [self._ddata[id_]['dim'] == dim for id_ in lid]
if quant is not None:
ind[1,:] = [self._ddata[id_]['quant'] == quant for id_ in lid]
if name is not None:
ind[2,:] = [self._ddata[id_]['name'] == name for id_ in lid]
if units is not None:
ind[3,:] = [self._ddata[id_]['units'] == units for id_ in lid]
if origin is not None:
ind[4,:] = [self._ddata[id_]['origin'] == origin for id_ in lid]
if indref is not None:
ind[5,:] = [depend in self._ddata[id_]['depend'] for id_ in lid]
if group is not None:
ind[6,:] = [group in self._ddata[id_]['lgroup'] for id_ in lid]
if log == 'all':
ind = np.all(ind, axis=0)
elif log == 'any':
ind = np.any(ind, axis=0)
if return_key:
if np.any(ind):
out = lid[ind.nonzero()[0]]
else:
out = np.array([],dtype=int)
else:
out = ind, lid
return out
    def _get_keyingroup(self, key, group=None, msgstr=None, raise_=False):
        """Resolve a user-provided identifier to a unique ddata key.

        First tries key as a direct ddata key (optionally checking its
        group); otherwise searches for a unique entry whose dim, quant,
        name, units or origin equals key.

        Returns
        -------
        (key, msg) : (str or None, str or None)
            The resolved key and None, or None and an explanatory message
            (raised instead if raise_=True)
        """
        if key in self._ddata.keys():
            lg = self._ddata[key]['lgroup']
            if group is None or group in lg:
                return key, None
            else:
                # Direct key found but wrong group: fall through to the
                # attribute-based search below unless raise_ is set
                msg = ("Required data key does not have matching group:\n"
                       + "\t- ddata[{}]['lgroup'] = {}\n".format(key, lg)
                       + "\t- Expected group:  {}".format(group))
                if raise_:
                    raise Exception(msg)

        # Attribute-based search: key matched against dim/quant/name/units/origin
        ind, akeys = self._get_ldata(dim=key, quant=key, name=key, units=key,
                                     origin=key, group=group, log='raw',
                                     return_key=False)
        # Remove indref and group
        ind = ind[:5,:] & ind[-1,:]

        # Any perfect match ?
        nind = np.sum(ind, axis=1)
        sol = (nind == 1).nonzero()[0]
        key, msg = None, None
        if sol.size > 0:
            if np.unique(sol).size == 1:
                indkey = ind[sol[0],:].nonzero()[0]
                key = akeys[indkey][0]
            else:
                lstr = "[dim,quant,name,units,origin]"
                msg = "Several possible matches in {} for {}".format(lstr, key)
        else:
            lstr = "[dim,quant,name,units,origin]"
            msg = "No match in {} for {} in group {}".format(lstr, key, group)

        # Complement the message with a summary of available data
        if msg is not None:
            msg += "\n\nRequested {} could not be identified!\n".format(msgstr)
            msg += "Please provide a valid (unique) key/name/quant/dim:\n\n"
            msg += self.get_summary(verb=False, return_='msg')
            if raise_:
                raise Exception(msg)
        return key, msg
#---------------------
# Methods for showing data
#---------------------
    def get_summary(self, sep='  ', line='-', just='l',
                    table_sep=None, verb=True, return_=False):
        """ Summary description of the object content """
        # # Make sure the data is accessible
        # msg = "The data is not accessible because self.strip(2) was used !"
        # assert self._dstrip['strip']<2, msg

        # -----------------------
        # Build for dgroup
        col0 = ['group key', 'nb. indref']
        ar0 = [(k0, len(v0['lindref'])) for k0,v0 in self._dgroup.items()]

        # -----------------------
        # Build for dindref
        col1 = ['indref key', 'group', 'size']
        ar1 = [(k0, v0['group'], v0['size']) for k0,v0 in self._dindref.items()]

        # -----------------------
        # Build for ddata
        col2 = ['data key', 'origin', 'dim', 'quant',
                'name', 'units', 'shape', 'depend', 'lgroup']
        ar2 = []
        for k0,v0 in self._ddata.items():
            # Meshes are stored as dicts: show the type name instead of shape
            if type(v0['data']) is np.ndarray:
                shape = str(v0['data'].shape)
            else:
                shape = v0['data'].__class__.__name__
            lu = [k0, v0['origin'], v0['dim'], v0['quant'], v0['name'],
                  v0['units'], shape,
                  str(v0['depend']), str(v0['lgroup'])]
            ar2.append(lu)

        return self._get_summary([ar0,ar1,ar2], [col0, col1, col2],
                                 sep=sep, line=line, table_sep=table_sep,
                                 verb=verb, return_=return_)
#---------------------
# Methods for adding ref / quantities
#---------------------
def _checkformat_addref(self, key=None, data=None, group=None,
dim=None, quant=None, units=None,
origin=None, name=None,
comments=None, delimiter=None):
# Check data
lc = [isinstance(data, np.ndarray),
isinstance(data, dict),
isinstance(data, str) and os.path.isfile(data)]
if not any(lc):
msg = ("Arg data must be either:\n"
+ "\t- np.ndarray: a 1d array\n"
+ "\t- dict: a dict containing a 2d mesh\n"
+ "\t- str: an absolute path to an existing file\n"
+ "You provided:\n{}".format(data))
raise Exception(msg)
# If file: check content and extract data
if lc[2] is True:
data = os.path.abspath(data)
(data, key, group, units,
quant, dim, origin, name) = self._add_ref_from_file(
pfe=data,
key=key, group=group,
dim=dim, quant=quant, units=units, origin=origin, name=name,
comments=comments, delimiter=delimiter)
# Check key
c0 = type(key) is str and key not in self._ddata.keys()
if not c0:
msg = ("Arg key must be a str not already in self.ddata.keys()\n"
+ "\t- key: {}\n".format(key))
raise Exception(msg)
# Check group
c0 = group in self._dgroup.keys()
if not c0:
msg = ("Arg group must be str in self.dgroup.keys()\n"
+ "\t- group: {}".format(group)
+ "\t- available groups: {}".format(self.dgroups.keys()))
raise Exception(msg)
return data, key, group, units, dim, quant, origin, name
    @staticmethod
    def _add_ref_from_file(pfe=None, key=None, group=None,
                           dim=None, quant=None, units=None,
                           origin=None, name=None,
                           comments=None, delimiter=None):
        """Extract a 1d reference array (and its metadata) from a file.

        Supported formats: .mat (single 1x1 struct with one (1, N) matrix
        and optional char-string metadata fields) and .txt (1d array, with
        metadata in commented header lines).

        Explicit keyword values take precedence over file values; a
        mismatch only triggers a warning.  'origin' defaults to the file
        path itself.

        Returns
        -------
        (data, key, group, units, quant, dim, origin, name)
        """
        if comments is None:
            comments = '#'
        lf = ['.mat', '.txt']
        c0 = pfe[-4:] in lf
        if not c0:
            msg = ("Only the following file formats are supported:\n"
                   + "\n\t- " + "\n\t- ".join(lf) + "\n"
                   + "You provided: {}".format(pfe))
            raise Exception(msg)

        # Extract data
        if pfe[-4:] == '.mat':
            # load and check only one 1x1 struct
            import scipy.io as scpio
            out = scpio.loadmat(pfe)
            # Drop matlab bookkeeping entries ('__header__', ...)
            ls = [ss for ss in out.keys() if '__' not in ss]
            c0 = (len(ls) == 1
                  and isinstance(out[ls[0]], np.ndarray)
                  and len(out[ls[0]]) == 1)
            if not c0:
                msg = ("The file should contain a 1x1 matlab struct only!\n"
                       + "file contains: {}".format(ls))
                raise Exception(msg)

            # Get into unique struct and get key / value pairs
            out = out[ls[0]][0]
            nk = len(out.dtype)
            if nk != len(out[0]):
                msg = ("Non-conform file!\n"
                       + "\tlen(out.dtype) = {}\n".format(nk)
                       + "\tlen(out[0] = {}".format(len(out[0])))
                raise Exception(msg)
            # lvi: indices of char-string (metadata) fields;
            # limat: indices of numeric fields (exactly one expected)
            lvi = [ii for ii in range(nk)
                   if (out[0][ii].dtype.char == 'U'
                       and out[0][ii].shape == (1,))]
            limat = [ii for ii in range(nk) if ii not in lvi]
            c0 = ((len(limat) == 1 and nk >= 1)
                  and (out[0][limat[0]].ndim == 2
                       and 1 in out[0][limat[0]].shape))
            if not c0:
                msg = (
                    "The struct store in {} should contain:\n".format(pfe)
                    + "\t- at least a (1, N) matrice\n"
                    + "\t- optionally, the following char str:\n"
                    + "\t\t- key: unique identifier\n"
                    + "\t\t- group: 'time', 'radius', 'mesh', ...\n"
                    + "\t\t- dim: physical dimension (e.g.: 'B flux',)\n"
                    + "\t\t- quant: 'psi', 'phi, ...\n"
                    + "\t\t- units: 'Wb', ...\n"
                    + "\t\t- origin: 'NICE', 'CHEASE'..."
                    + "\t\t\tby the default the file name\n"
                    + "\t\t- name: short identifier (e.g.: 1dpsiNICE)\n\n"
                    + "You provided:\n{}".format(out))
                raise Exception(msg)
            dout = {out.dtype.names[ii]: out[0][ii][0] for ii in lvi}
            data = out[0][limat[0]].ravel()

        elif pfe[-4:] == '.txt':
            # data array
            data = np.loadtxt(pfe, comments=comments, delimiter=delimiter)
            if not data.ndim == 1:
                msg = ("data stored in {} is not a 1d array!\n".format(pfe)
                       + "\t- data.shape = {}".format(data.shape))
                raise Exception(msg)

            # params
            dout = utils.from_txt_extract_params(
                pfe=pfe,
                lparams=['key', 'group', 'units',
                         'dim', 'quant', 'origin', 'name'],
                comments=comments)
            # Drop a None origin so the path default below applies
            if 'origin' in dout.keys() and dout['origin'] is None:
                del dout['origin']

        # Get default values
        # kwdargs win over file values; a disagreement only warns
        din = {'key': key, 'group': group, 'dim': dim, 'quant': quant,
               'units': units, 'origin': origin, 'name': name}
        for k0, v0 in din.items():
            if v0 is None:
                # 'origin' falls back to the file path when absent
                din[k0] = dout.get(k0, pfe) if k0 == 'origin' else dout.get(k0)
            else:
                if dout.get(k0) is not None:
                    if din[k0] != dout[k0]:
                        msg = ("Non-matching values of {}:\n".format(k0)
                               + "{}\n".format(pfe)
                               + "\t- kwdarg: {}\n".format(din[k0])
                               + "\t- file:   {}".format(dout[k0]))
                        warnings.warn(msg)
        return (data, din['key'], din['group'], din['units'],
                din['quant'], din['dim'], din['origin'], din['name'])
    def add_ref(self, key=None, data=None, group=None,
                dim=None, quant=None, units=None, origin=None, name=None,
                comments=None, delimiter=None):
        """ Add a reference

        The reference data is contained in data, which can be:
            - np.array: a 1d profile
            - dict: for mesh
            - str: absolute path to a file, holding a 1d profile

        Please also provide (if not included in file if data is a str):
            - key: unique str identifying the data
            - group: str identifying the reference group (self.dgroup.keys())

        If data is a str to a file, key and group (and others) can be included
        in the file

        Parameters dim, quant, units, origin and name are optional
        Parameters comments and delimiter and only used if data is the path to
        a .txt file (fed to np.loadtxt)
        """
        # Check inputs
        (data, key, group, units,
         dim, quant, origin, name) = self._checkformat_addref(
             data=data, key=key, group=group, units=units,
             dim=dim, quant=quant, origin=origin, name=name,
             comments=comments, delimiter=delimiter)

        # Format inputs (fill in default dim/quant/... values)
        out = self._extract_dnd({key: {
            'dim': dim, 'quant': quant, 'name': name,
            'units': units, 'origin': origin
        }},
            key)
        dim, quant, origin, name, units = out

        # Reference size: array length, or mesh size depending on ftype
        if type(data) is np.ndarray:
            size = data.shape[0]
        else:
            assert data['ftype'] in [0, 1]
            # ftype 1 => values at nodes, ftype 0 => values at faces
            size = data['nnodes'] if data['ftype'] == 1 else data['nfaces']

        # Update attributes
        self._dindref[key] = {'group': group, 'size': size, 'ldata': [key]}
        self._ddata[key] = {'data': data,
                            'dim': dim, 'quant': quant, 'units': units,
                            'origin': origin, 'name': name,
                            'depend': (key,), 'lgroup': [group]}

        # Run global consistency check and complement if necessary
        self._complement()
    def add_quantity(self, key=None, data=None, depend=None,
                     dim=None, quant=None, units=None,
                     origin=None, name=None):
        """ Add a quantity

        Parameters
        ----------
        key : str
            Unique identifier, must not already exist in self.ddata
        data : np.ndarray or dict
            The data (dict only for a mesh)
        depend : str, list or tuple
            Reference key(s) in self.dindref the data depends on
        dim, quant, units, origin, name : str, optional
            Metadata, completed with defaults by _extract_dnd()
        """
        c0 = type(key) is str and key not in self._ddata.keys()
        if not c0:
            msg = "key must be a str not already in self.ddata.keys()!\n"
            msg += "    - Provided: %s"%str(key)
            raise Exception(msg)
        if type(data) not in [np.ndarray, dict]:
            msg = "data must be either:\n"
            msg += "    - np.ndarray\n"
            msg += "    - dict (mesh)\n"
            msg += "\n    Provided: %s"%str(type(data))
            raise Exception(msg)
        out = self._extract_dnd({key:{'dim':dim, 'quant':quant, 'name':name,
                                      'units':units, 'origin':origin}}, key)
        dim, quant, origin, name, units = out
        # Normalize depend to a tuple of valid reference keys
        assert type(depend) in [list,str,tuple]
        if type(depend) is str:
            depend = (depend,)
        for ii in range(0,len(depend)):
            assert depend[ii] in self._dindref.keys()
        lgroup = [self._dindref[dd]['group'] for dd in depend]
        self._ddata[key] = {'data':data,
                            'dim':dim, 'quant':quant, 'units':units,
                            'origin':origin, 'name':name,
                            'depend':tuple(depend), 'lgroup':lgroup}
        # Cross-check global consistency
        self._complement()
#---------------------
# Method for getting time of a quantity
#---------------------
    def get_time(self, key):
        """ Return the time vector associated to a chosen quantity (identified
        by its key)"""
        if key not in self._ddata.keys():
            msg = "Provided key not in self.ddata.keys() !\n"
            msg += "    - Provided: %s\n"%str(key)
            msg += "    - Available: %s\n"%str(self._ddata.keys())
            raise Exception(msg)

        # Convention: the time axis is the first dependency of the data
        indref = self._ddata[key]['depend'][0]
        # Find the unique data entry that IS that reference's time vector
        t = [kk for kk in self._dindref[indref]['ldata']
             if (self._ddata[kk]['depend'] == (indref,)
                 and self._ddata[kk]['quant'] == 't')]
        if len(t) != 1:
            msg = "No / several macthing time vectors were identified:\n"
            msg += "    - Provided: %s\n"%key
            msg += "    - Found: %s"%str(t)
            raise Exception(msg)
        return t[0]
    def get_time_common(self, lkeys, choose=None):
        """ Return the common time vector to several quantities

        If they do not have a common time vector, a reference one is choosen
        according to criterion choose
        """
        # Check all data have time-dependency
        dout = {kk: {'t':self.get_time(kk)} for kk in lkeys}
        # dtu: unique time-vector keys -> list of data keys using them
        dtu = dict.fromkeys(set([vv['t'] for vv in dout.values()]))
        for kt in dtu.keys():
            dtu[kt] = {'ldata':[kk for kk in lkeys if dout[kk]['t'] == kt]}
        if len(dtu) == 1:
            tref = list(dtu.keys())[0]
        else:
            # Mean time resolution of each candidate time vector
            lt, lres = zip(*[(kt,np.mean(np.diff(self._ddata[kt]['data'])))
                             for kt in dtu.keys()])
            if choose is None:
                choose = 'min'
            # NOTE(review): tref is only assigned when choose == 'min';
            # any other value of choose would leave tref unbound (NameError)
            # -- confirm 'min' is the only supported criterion
            if choose == 'min':
                tref = lt[np.argmin(lres)]
        return dout, dtu, tref
    @staticmethod
    def _get_time_common_arrays(dins, choose=None):
        """Group input (val, t) arrays by (approximately) identical time bases.

        dins must map keys to dicts with at least 'val' and 't' ndarrays,
        with t.size appearing in val.shape.  Time vectors that are equal
        (np.allclose) are merged under a single id-based key.

        Returns
        -------
        (dout, dtu, tref)
            dout: {key: {'val', 't': time-key}}, dtu: {time-key: {'val',
            'ldata'}}, tref: the chosen reference time-key
        """
        dout = dict.fromkeys(dins.keys())
        dtu = {}
        for k, v in dins.items():
            c0 = type(k) is str
            c0 = c0 and all([ss in v.keys() for ss in ['val','t']])
            c0 = c0 and all([type(v[ss]) is np.ndarray for ss in ['val','t']])
            c0 = c0 and v['t'].size in v['val'].shape
            if not c0:
                msg = "dins must be a dict of the form (at least):\n"
                msg += "    dins[%s] = {'val': np.ndarray,\n"%str(k)
                msg += "                't':   np.ndarray}\n"
                msg += "Provided: %s"%str(dins)
                raise Exception(msg)

            # Time keys are id() of the array; arrays close to an already
            # registered one are merged under the existing key
            kt, already = id(v['t']), True
            if kt not in dtu.keys():
                lisclose = [kk for kk, vv in dtu.items()
                            if (vv['val'].shape == v['t'].shape
                                and np.allclose(vv['val'],v['t']))]
                assert len(lisclose) <= 1
                if len(lisclose) == 1:
                    kt = lisclose[0]
                else:
                    already = False
                    dtu[kt] = {'val':np.atleast_1d(v['t']).ravel(),
                               'ldata':[k]}
            if already:
                dtu[kt]['ldata'].append(k)
            assert dtu[kt]['val'].size == v['val'].shape[0]
            dout[k] = {'val':v['val'], 't':kt}
        if len(dtu) == 1:
            tref = list(dtu.keys())[0]
        else:
            # Pick the time base of best (smallest) mean resolution
            lt, lres = zip(*[(kt,np.mean(np.diff(dtu[kt]['val'])))
                             for kt in dtu.keys()])
            if choose is None:
                choose = 'min'
            # NOTE(review): as in get_time_common, tref is unbound if
            # choose is neither None nor 'min' -- confirm intended
            if choose == 'min':
                tref = lt[np.argmin(lres)]
        return dout, dtu, tref
    def _interp_on_common_time(self, lkeys,
                               choose='min', interp_t=None, t=None,
                               fill_value=np.nan):
        """ Return a dict of time-interpolated data """
        dout, dtu, tref = self.get_time_common(lkeys)
        if type(t) is np.ndarray:
            # Explicit target time vector: interpolate everything onto it
            tref = np.atleast_1d(t).ravel()
            tr = tref
            ltu = dtu.keys()
        else:
            if type(t) is str:
                tref = t
            # Target is a stored time vector: skip data already on it
            tr = self._ddata[tref]['data']
            ltu = set(dtu.keys())
            if tref in dtu.keys():
                ltu = ltu.difference([tref])

        if interp_t is None:
            interp_t = _INTERPT

        # Interpolate
        for tt in ltu:
            for kk in dtu[tt]['ldata']:
                dout[kk]['val'] = scpinterp.interp1d(self._ddata[tt]['data'],
                                                     self._ddata[kk]['data'],
                                                     kind=interp_t, axis=0,
                                                     bounds_error=False,
                                                     fill_value=fill_value)(tr)

        # Data already on the reference time base is copied as-is
        if type(tref) is not np.ndarray and tref in dtu.keys():
            for kk in dtu[tref]['ldata']:
                dout[kk]['val'] = self._ddata[kk]['data']

        return dout, tref
    def _interp_on_common_time_arrays(self, dins,
                                      choose='min', interp_t=None, t=None,
                                      fill_value=np.nan):
        """ Return a dict of time-interpolated data """
        dout, dtu, tref = self._get_time_common_arrays(dins)
        if type(t) is np.ndarray:
            # Explicit target time vector: interpolate everything onto it
            tref = np.atleast_1d(t).ravel()
            tr = tref
            ltu = dtu.keys()
        else:
            if type(t) is str:
                # Target is the time base of one of the inputs
                assert t in dout.keys()
                tref = dout[t]['t']
            tr = dtu[tref]['val']
            # Data already on the reference base needs no interpolation
            ltu = set(dtu.keys()).difference([tref])

        if interp_t is None:
            interp_t = _INTERPT

        # Interpolate
        for tt in ltu:
            for kk in dtu[tt]['ldata']:
                dout[kk]['val'] = scpinterp.interp1d(dtu[tt]['val'],
                                                     dout[kk]['val'],
                                                     kind=interp_t, axis=0,
                                                     bounds_error=False,
                                                     fill_value=fill_value)(tr)
        return dout, tref
    def interp_t(self, dkeys,
                 choose='min', interp_t=None, t=None,
                 fill_value=np.nan):
        """Interpolate several quantities onto a common time base.

        dkeys is a list of ddata keys, or a dict {key: {'val': str or
        np.ndarray, 't': ...}} mixing stored quantities (str) and raw
        arrays.  The common time base is t if provided (str key or
        ndarray), else chosen by criterion choose.

        Returns
        -------
        (dout, tref) : dict of {'val', 't'} entries and the time reference
        """
        # Check inputs
        assert type(dkeys) in [list,dict]
        if type(dkeys) is list:
            dkeys = {kk:{'val':kk} for kk in dkeys}
        lc = [(type(kk) is str
               and type(vv) is dict
               and type(vv.get('val',None)) in [str,np.ndarray])
              for kk,vv in dkeys.items()]
        assert all(lc), str(dkeys)

        # Separate by type
        # dk0: stored quantities (referenced by key), dk1: raw arrays
        dk0 = dict([(kk,vv) for kk,vv in dkeys.items()
                    if type(vv['val']) is str])
        dk1 = dict([(kk,vv) for kk,vv in dkeys.items()
                    if type(vv['val']) is np.ndarray])
        assert len(dkeys) == len(dk0) + len(dk1), str(dk0) + '\n' + str(dk1)

        if len(dk0) == len(dkeys):
            # Only stored quantities
            lk = [v['val'] for v in dk0.values()]
            dout, tref = self._interp_on_common_time(lk, choose=choose,
                                                     t=t, interp_t=interp_t,
                                                     fill_value=fill_value)
            # Re-key the result on the caller's keys
            dout = {kk:{'val':dout[vv['val']]['val'], 't':dout[vv['val']]['t']}
                    for kk,vv in dk0.items()}

        elif len(dk1) == len(dkeys):
            # Only raw arrays
            dout, tref = self._interp_on_common_time_arrays(dk1, choose=choose,
                                                            t=t, interp_t=interp_t,
                                                            fill_value=fill_value)

        else:
            # Mixed case: both stored quantities and raw arrays
            lk = [v['val'] for v in dk0.values()]
            if type(t) is np.ndarray:
                # Explicit target: each half interpolated independently
                dout, tref = self._interp_on_common_time(lk, choose=choose,
                                                         t=t, interp_t=interp_t,
                                                         fill_value=fill_value)
                dout1, _ = self._interp_on_common_time_arrays(dk1, choose=choose,
                                                              t=t, interp_t=interp_t,
                                                              fill_value=fill_value)
            else:
                # Determine the reference time base from both halves
                dout0, dtu0, tref0 = self.get_time_common(lk,
                                                          choose=choose)
                dout1, dtu1, tref1 = self._get_time_common_arrays(dk1,
                                                                  choose=choose)
                if type(t) is str:
                    # t must refer to a time base known to either half
                    lc = [t in dtu0.keys(), t in dout1.keys()]
                    if not any(lc):
                        msg = "if t is str, it must refer to a valid key:\n"
                        msg += "    - %s\n"%str(dtu0.keys())
                        msg += "    - %s\n"%str(dout1.keys())
                        msg += "Provided: %s"%t
                        raise Exception(msg)
                    if lc[0]:
                        t0, t1 = t, self._ddata[t]['data']
                    else:
                        t0, t1 = dtu1[dout1[t]['t']]['val'], t
                    tref = t
                else:
                    if choose is None:
                        choose = 'min'
                    if choose == 'min':
                        # Pick the finer of the two candidate time bases
                        t0 = self._ddata[tref0]['data']
                        t1 = dtu1[tref1]['val']
                        dt0 = np.mean(np.diff(t0))
                        dt1 = np.mean(np.diff(t1))
                        if dt0 < dt1:
                            t0, t1, tref = tref0, t0, tref0
                        else:
                            t0, t1, tref = t1, tref1, tref1

                dout, tref = self._interp_on_common_time(lk, choose=choose,
                                                         t=t0, interp_t=interp_t,
                                                         fill_value=fill_value)
                dout = {kk:{'val':dout[vv['val']]['val'],
                            't':dout[vv['val']]['t']}
                        for kk,vv in dk0.items()}
                dout1, _ = self._interp_on_common_time_arrays(dk1, choose=choose,
                                                              t=t1, interp_t=interp_t,
                                                              fill_value=fill_value)

            dout.update(dout1)

        return dout, tref
#---------------------
# Methods for computing additional plasma quantities
#---------------------
def _fill_dins(self, dins):
for k in dins.keys():
if type(dins[k]['val']) is str:
assert dins[k]['val'] in self._ddata.keys()
else:
dins[k]['val'] = np.atleast_1d(dins[k]['val'])
assert dins[k]['t'] is not None
dins[k]['t'] = np.atleast_1d(dins[k]['t']).ravel()
assert dins[k]['t'].size == dins[k]['val'].shape[0]
return dins
    @staticmethod
    def _checkformat_shapes(dins):
        """Broadcast all dins[k]['val'] arrays to a common 1d or 2d shape.

        The target shape is the highest-dimensional shape found; 1d entries
        of size 1 are tiled, and in the 2d case 1d entries are expanded
        along the appropriate axis.  Mutates and returns dins.
        """
        # Determine the target (largest-ndim) shape
        shape = None
        for k in dins.keys():
            dins[k]['shape'] = dins[k]['val'].shape
            if shape is None:
                shape = dins[k]['shape']
            if dins[k]['shape'] != shape:
                if dins[k]['val'].ndim > len(shape):
                    shape = dins[k]['shape']

        # Check shape consistency for broadcasting
        assert len(shape) in [1,2]
        if len(shape) == 1:
            for k in dins.keys():
                assert dins[k]['shape'][0] in [1,shape[0]]
                if dins[k]['shape'][0] < shape[0]:
                    # Tile the scalar-like entry to the common length
                    dins[k]['val'] = np.full((shape[0],), dins[k]['val'][0])
                    dins[k]['shape'] = dins[k]['val'].shape
        elif len(shape) == 2:
            for k in dins.keys():
                if len(dins[k]['shape']) == 1:
                    if dins[k]['shape'][0] not in [1]+list(shape):
                        msg = "Non-conform shape for dins[%s]:\n"%k
                        msg += "    - Expected: (%s,...) or (1,)\n"%str(shape[0])
                        msg += "    - Provided: %s"%str(dins[k]['shape'])
                        raise Exception(msg)
                    # Insert the missing axis on the side that matches
                    if dins[k]['shape'][0] == 1:
                        dins[k]['val'] = dins[k]['val'][None,:]
                    elif dins[k]['shape'][0] == shape[0]:
                        dins[k]['val'] = dins[k]['val'][:,None]
                    else:
                        dins[k]['val'] = dins[k]['val'][None,:]
                else:
                    assert dins[k]['shape'] == shape
                dins[k]['shape'] = dins[k]['val'].shape
        return dins
    def compute_bremzeff(self, Te=None, ne=None, zeff=None, lamb=None,
                         tTe=None, tne=None, tzeff=None, t=None,
                         interp_t=None):
        """ Return the bremsstrahlung spectral radiance at lamb

        The plasma conditions are set by:
            - Te (eV)
            - ne (/m3)
            - zeff (adim.)

        The wavelength is set by the diagnostics
            - lamb (m)

        The vol. spectral emis. is returned in ph / (s.m3.sr.m)

        The computation requires an intermediate : gff(Te, zeff)
        """
        # All three plasma fields are mandatory
        dins = {'Te':{'val':Te, 't':tTe},
                'ne':{'val':ne, 't':tne},
                'zeff':{'val':zeff, 't':tzeff}}
        lc = [vv['val'] is None for vv in dins.values()]
        if any(lc):
            msg = "All fields should be provided:\n"
            msg += "    - %s"%str(dins.keys())
            raise Exception(msg)
        # Normalize, put on a common time base, broadcast shapes
        dins = self._fill_dins(dins)
        dins, t = self.interp_t(dins, t=t, interp_t=interp_t)
        lamb = np.atleast_1d(lamb)
        dins['lamb'] = {'val':lamb}
        dins = self._checkformat_shapes(dins)

        val, units = _physics.compute_bremzeff(dins['Te']['val'],
                                               dins['ne']['val'],
                                               dins['zeff']['val'],
                                               dins['lamb']['val'])
        return val, t, units
    def compute_fanglev(self, BR=None, BPhi=None, BZ=None,
                        ne=None, lamb=None, t=None, interp_t=None,
                        tBR=None, tBPhi=None, tBZ=None, tne=None):
        """ Return the vector faraday angle at lamb

        The plasma conditions are set by:
            - BR   (T), array of R component of B
            - BPhi (T), array of Phi component of B
            - BZ   (T), array of Z component of B
            - ne (/m3)
        The wavelength is set by the diagnostics
            - lamb (m)
        The vector faraday angle is returned in T / m

        tBR, tBPhi, tBZ, tne are the optional time vectors associated with
        each field; t is the time base onto which everything is interpolated.
        """
        # Bundle each field with its own time vector
        # NOTE(review): unlike compute_bremzeff, missing (None) inputs are
        # not checked here — presumably an oversight, TODO confirm
        dins = {'BR': {'val':BR, 't':tBR},
                'BPhi':{'val':BPhi, 't':tBPhi},
                'BZ': {'val':BZ, 't':tBZ},
                'ne': {'val':ne, 't':tne}}
        # Normalize the inputs, then broadcast onto a common time base
        dins = self._fill_dins(dins)
        dins, t = self.interp_t(dins, t=t, interp_t=interp_t)
        # Wavelength comes from the diagnostics side
        lamb = np.atleast_1d(lamb)
        dins['lamb'] = {'val':lamb}
        dins = self._checkformat_shapes(dins)
        # Delegate the actual formula to the physics module
        val, units = _physics.compute_fangle(BR=dins['BR']['val'],
                                             BPhi=dins['BPhi']['val'],
                                             BZ=dins['BZ']['val'],
                                             ne=dins['ne']['val'],
                                             lamb=dins['lamb']['val'])
        return val, t, units
#---------------------
# Methods for interpolation
#---------------------
def _get_quantrefkeys(self, qq, ref1d=None, ref2d=None):
# Get relevant lists
kq, msg = self._get_keyingroup(qq, 'mesh', msgstr='quant', raise_=False)
if kq is not None:
k1d, k2d = None, None
else:
kq, msg = self._get_keyingroup(qq, 'radius', msgstr='quant', raise_=True)
if ref1d is None and ref2d is None:
msg = "quant %s needs refs (1d and 2d) for interpolation\n"%qq
msg += " => ref1d and ref2d cannot be both None !"
raise Exception(msg)
if ref1d is None:
ref1d = ref2d
k1d, msg = self._get_keyingroup(ref1d, 'radius',
msgstr='ref1d', raise_=False)
if k1d is None:
msg += "\n\nInterpolation of %s:\n"%qq
msg += " ref could not be identified among 1d quantities\n"
msg += " - ref1d : %s"%ref1d
raise Exception(msg)
if ref2d is None:
ref2d = ref1d
k2d, msg = self._get_keyingroup(ref2d, 'mesh',
msgstr='ref2d', raise_=False)
if k2d is None:
msg += "\n\nInterpolation of %s:\n"
msg += " ref could not be identified among 2d quantities\n"
msg += " - ref2d: %s"%ref2d
raise Exception(msg)
q1d, q2d = self._ddata[k1d]['quant'], self._ddata[k2d]['quant']
if q1d != q2d:
msg = "ref1d and ref2d must be of the same quantity !\n"
msg += " - ref1d (%s): %s\n"%(ref1d, q1d)
msg += " - ref2d (%s): %s"%(ref2d, q2d)
raise Exception(msg)
return kq, k1d, k2d
    def _get_indtmult(self, idquant=None, idref1d=None, idref2d=None):
        """ Build a common time base for a quantity and optional 1d/2d refs

        Returns (tall, tbinall, ntall, indtq, indtr1, indtr2):
            - tall    : merged time vector
            - tbinall : bin edges (midpoints) used for nearest-sample lookup
            - ntall   : tall.size
            - indtq / indtr1 / indtr2 : for each tall point, the index of the
              nearest sample of the quantity / refs (None if ref absent)
        """
        # Get time vectors and bins (midpoints serve as digitize bin edges)
        idtq = self._ddata[idquant]['depend'][0]
        tq = self._ddata[idtq]['data']
        tbinq = 0.5*(tq[1:]+tq[:-1])
        if idref1d is not None:
            idtr1 = self._ddata[idref1d]['depend'][0]
            tr1 = self._ddata[idtr1]['data']
            tbinr1 = 0.5*(tr1[1:]+tr1[:-1])
            if idref2d is not None and idref2d != idref1d:
                idtr2 = self._ddata[idref2d]['depend'][0]
                tr2 = self._ddata[idtr2]['data']
                tbinr2 = 0.5*(tr2[1:]+tr2[:-1])
        # Get tbinall and tall
        if idref1d is None:
            tbinall = tbinq
            tall = tq
        else:
            if idref2d is None:
                tbinall = np.unique(np.r_[tbinq,tbinr1])
            else:
                # NOTE(review): assumes idref2d != idref1d, otherwise tbinr2
                # is undefined here — TODO confirm callers guarantee it
                tbinall = np.unique(np.r_[tbinq,tbinr1,tbinr2])
            # Reconstruct sample points centered between the merged bin edges
            tall = np.r_[tbinall[0] - 0.5*(tbinall[1]-tbinall[0]),
                         0.5*(tbinall[1:]+tbinall[:-1]),
                         tbinall[-1] + 0.5*(tbinall[-1]-tbinall[-2])]
        # Get indtqr1r2 (tall with respect to tq, tr1, tr2)
        indtq, indtr1, indtr2 = None, None, None
        if tbinq.size > 0:
            indtq = np.digitize(tall, tbinq)
        else:
            # Single-sample time vector => everything maps to index 0
            indtq = np.r_[0]
        if idref1d is None:
            # Sanity check: without refs, tall is tq itself (identity map)
            assert np.all(indtq == np.arange(0,tall.size))
        if idref1d is not None:
            if tbinr1.size > 0:
                indtr1 = np.digitize(tall, tbinr1)
            else:
                indtr1 = np.r_[0]
        if idref2d is not None:
            if tbinr2.size > 0:
                indtr2 = np.digitize(tall, tbinr2)
            else:
                indtr2 = np.r_[0]
        ntall = tall.size
        return tall, tbinall, ntall, indtq, indtr1, indtr2
@staticmethod
def _get_indtu(t=None, tall=None, tbinall=None,
idref1d=None, idref2d=None,
indtr1=None, indtr2=None):
# Get indt (t with respect to tbinall)
indt, indtu = None, None
if t is not None:
if len(t) == len(tall) and np.allclose(t, tall):
indt = np.arange(0, tall.size)
indtu = indt
else:
indt = np.digitize(t, tbinall)
indtu = np.unique(indt)
# Update
tall = tall[indtu]
if idref1d is not None:
assert indtr1 is not None
indtr1 = indtr1[indtu]
if idref2d is not None:
assert indtr2 is not None
indtr2 = indtr2[indtu]
ntall = tall.size
return tall, ntall, indt, indtu, indtr1, indtr2
def get_tcommon(self, lq, prefer='finer'):
""" Check if common t, else choose according to prefer
By default, prefer the finer time resolution
"""
if type(lq) is str:
lq = [lq]
t = []
for qq in lq:
ltr = [kk for kk in self._ddata[qq]['depend']
if self._dindref[kk]['group'] == 'time']
assert len(ltr) <= 1
if len(ltr) > 0 and ltr[0] not in t:
t.append(ltr[0])
assert len(t) >= 1
if len(t) > 1:
dt = [np.nanmean(np.diff(self._ddata[tt]['data'])) for tt in t]
if prefer == 'finer':
ind = np.argmin(dt)
else:
ind = np.argmax(dt)
else:
ind = 0
return t[ind], t
def _get_tcom(self, idquant=None, idref1d=None,
idref2d=None, idq2dR=None):
if idquant is not None:
out = self._get_indtmult(idquant=idquant,
idref1d=idref1d, idref2d=idref2d)
else:
out = self._get_indtmult(idquant=idq2dR)
return out
    def _get_finterp(
        self,
        idquant=None, idref1d=None, idref2d=None,
        idq2dR=None, idq2dPhi=None, idq2dZ=None,
        interp_t=None, interp_space=None,
        fill_value=None, ani=False, Type=None,
    ):
        """ Build and return the space/time interpolating function

        Works either from a scalar quantity (idquant, with optional
        idref1d / idref2d intermediate profiles) or, when ani=True, from a
        vector field (idq2dR, idq2dPhi, idq2dZ). The returned callable is
        produced by _comp.get_finterp_ani / get_finterp_isotropic.
        """
        if interp_t is None:
            interp_t = 'nearest'
        # Get idmesh
        if idquant is not None:
            if idref1d is None:
                # 2d quantity: mesh comes from the quantity itself
                lidmesh = [qq for qq in self._ddata[idquant]['depend']
                           if self._dindref[qq]['group'] == 'mesh']
            else:
                # 1d quantity: mesh comes from the 2d reference
                lidmesh = [qq for qq in self._ddata[idref2d]['depend']
                           if self._dindref[qq]['group'] == 'mesh']
        else:
            assert idq2dR is not None
            lidmesh = [qq for qq in self._ddata[idq2dR]['depend']
                       if self._dindref[qq]['group'] == 'mesh']
        assert len(lidmesh) == 1
        idmesh = lidmesh[0]
        # Get common time indices
        # NOTE(review): for any interp_t other than 'nearest' the variables
        # below are left undefined — only 'nearest' is actually supported
        if interp_t == 'nearest':
            out = self._get_tcom(idquant, idref1d, idref2d, idq2dR)
            tall, tbinall, ntall, indtq, indtr1, indtr2 = out
        # Get mesh
        if self._ddata[idmesh]['data']['type'] == 'rect':
            mpltri = None
            trifind = self._ddata[idmesh]['data']['trifind']
        else:
            # Triangular mesh: use matplotlib's trifinder
            mpltri = self._ddata[idmesh]['data']['mpltri']
            trifind = mpltri.get_trifinder()
        # # Prepare output
        # Interpolate
        # Note : Maybe consider using scipy.LinearNDInterpolator ?
        if idquant is not None:
            vquant = self._ddata[idquant]['data']
            c0 = (self._ddata[idmesh]['data']['type'] == 'quadtri'
                  and self._ddata[idmesh]['data']['ntri'] > 1)
            if c0:
                # Quadratic triangles split into sub-triangles: repeat data
                vquant = np.repeat(vquant,
                                   self._ddata[idmesh]['data']['ntri'], axis=0)
        else:
            vq2dR = self._ddata[idq2dR]['data']
            vq2dPhi = self._ddata[idq2dPhi]['data']
            vq2dZ = self._ddata[idq2dZ]['data']
        if interp_space is None:
            interp_space = self._ddata[idmesh]['data']['ftype']
        # get interpolation function
        if ani:
            # Assuming same mesh and time vector for all 3 components
            func = _comp.get_finterp_ani(
                idq2dR, idq2dPhi, idq2dZ,
                idmesh=idmesh, vq2dR=vq2dR,
                vq2dZ=vq2dZ, vq2dPhi=vq2dPhi,
                tall=tall, tbinall=tbinall,
                ntall=ntall,
                interp_t=interp_t,
                interp_space=interp_space,
                fill_value=fill_value,
                indtq=indtq, trifind=trifind,
                Type=Type, mpltri=mpltri,
            )
        else:
            func = _comp.get_finterp_isotropic(
                idquant, idref1d, idref2d,
                vquant=vquant,
                interp_t=interp_t,
                interp_space=interp_space,
                fill_value=fill_value,
                idmesh=idmesh,
                tall=tall, tbinall=tbinall,
                ntall=ntall, mpltri=mpltri,
                indtq=indtq, indtr1=indtr1,
                indtr2=indtr2, trifind=trifind,
            )
        return func
def _checkformat_qr12RPZ(self, quant=None, ref1d=None, ref2d=None,
q2dR=None, q2dPhi=None, q2dZ=None):
lc0 = [quant is None, ref1d is None, ref2d is None]
lc1 = [q2dR is None, q2dPhi is None, q2dZ is None]
if np.sum([all(lc0), all(lc1)]) != 1:
msg = "Please provide either (xor):\n"
msg += " - a scalar field (isotropic emissivity):\n"
msg += " quant : scalar quantity to interpolate\n"
msg += " if quant is 1d, intermediate reference\n"
msg += " fields are necessary for 2d interpolation\n"
msg += " ref1d : 1d reference field on which to interpolate\n"
msg += " ref2d : 2d reference field on which to interpolate\n"
msg += " - a vector (R,Phi,Z) field (anisotropic emissivity):\n"
msg += " q2dR : R component of the vector field\n"
msg += " q2dPhi: R component of the vector field\n"
msg += " q2dZ : Z component of the vector field\n"
msg += " => all components have teh same time and mesh !\n"
raise Exception(msg)
# Check requested quant is available in 2d or 1d
if all(lc1):
idquant, idref1d, idref2d = self._get_quantrefkeys(quant, ref1d, ref2d)
idq2dR, idq2dPhi, idq2dZ = None, None, None
ani = False
else:
idq2dR, msg = self._get_keyingroup(q2dR, 'mesh', msgstr='quant',
raise_=True)
idq2dPhi, msg = self._get_keyingroup(q2dPhi, 'mesh', msgstr='quant',
raise_=True)
idq2dZ, msg = self._get_keyingroup(q2dZ, 'mesh', msgstr='quant',
raise_=True)
idquant, idref1d, idref2d = None, None, None
ani = True
return idquant, idref1d, idref2d, idq2dR, idq2dPhi, idq2dZ, ani
def get_finterp2d(self, quant=None, ref1d=None, ref2d=None,
q2dR=None, q2dPhi=None, q2dZ=None,
interp_t=None, interp_space=None,
fill_value=None, Type=None):
""" Return the function interpolating (X,Y,Z) pts on a 1d/2d profile
Can be used as input for tf.geom.CamLOS1D/2D.calc_signal()
"""
# Check inputs
msg = "Only 'nearest' available so far for interp_t!"
assert interp_t == 'nearest', msg
out = self._checkformat_qr12RPZ(quant=quant, ref1d=ref1d, ref2d=ref2d,
q2dR=q2dR, q2dPhi=q2dPhi, q2dZ=q2dZ)
idquant, idref1d, idref2d, idq2dR, idq2dPhi, idq2dZ, ani = out
# Interpolation (including time broadcasting)
func = self._get_finterp(idquant=idquant, idref1d=idref1d,
idref2d=idref2d, idq2dR=idq2dR,
idq2dPhi=idq2dPhi, idq2dZ=idq2dZ,
interp_t=interp_t, interp_space=interp_space,
fill_value=fill_value, ani=ani, Type=Type)
return func
    def interp_pts2profile(self, pts=None, vect=None, t=None,
                           quant=None, ref1d=None, ref2d=None,
                           q2dR=None, q2dPhi=None, q2dZ=None,
                           interp_t=None, interp_space=None,
                           fill_value=None, Type=None):
        """ Return the value of the desired profiles_1d quantity

        For the desired inputs points (pts):
            - pts are in (X,Y,Z) coordinates
            - space interpolation is linear on the 1d profiles
        At the desired input times (t):
            - using a nearest-neighbourg approach for time

        If pts is None, the mesh nodes themselves (at Y=0) are used.
        Returns (val, t).
        """
        # Check inputs
        # msg = "Only 'nearest' available so far for interp_t!"
        # assert interp_t == 'nearest', msg
        # Check requested quant is available in 2d or 1d
        out = self._checkformat_qr12RPZ(quant=quant, ref1d=ref1d, ref2d=ref2d,
                                        q2dR=q2dR, q2dPhi=q2dPhi, q2dZ=q2dZ)
        idquant, idref1d, idref2d, idq2dR, idq2dPhi, idq2dZ, ani = out

        # Check the pts is (2,...) array of floats
        if pts is None:
            # Default: use the quantity's own mesh points (in the Y=0 plane)
            if ani:
                idmesh = [id_ for id_ in self._ddata[idq2dR]['depend']
                          if self._dindref[id_]['group'] == 'mesh'][0]
            else:
                if idref1d is None:
                    idmesh = [id_ for id_ in self._ddata[idquant]['depend']
                              if self._dindref[id_]['group'] == 'mesh'][0]
                else:
                    idmesh = [id_ for id_ in self._ddata[idref2d]['depend']
                              if self._dindref[id_]['group'] == 'mesh'][0]
            if self.dmesh[idmesh]['data']['type'] == 'rect':
                # Rectangular mesh: build the full (R,Z) grid, honoring the
                # storage order declared in shapeRZ
                if self.dmesh[idmesh]['data']['shapeRZ'] == ('R', 'Z'):
                    R = np.repeat(self.dmesh[idmesh]['data']['R'],
                                  self.dmesh[idmesh]['data']['nZ'])
                    Z = np.tile(self.dmesh[idmesh]['data']['Z'],
                                self.dmesh[idmesh]['data']['nR'])
                else:
                    R = np.tile(self.dmesh[idmesh]['data']['R'],
                                self.dmesh[idmesh]['data']['nZ'])
                    Z = np.repeat(self.dmesh[idmesh]['data']['Z'],
                                  self.dmesh[idmesh]['data']['nR'])
                pts = np.array(
                    [R, np.zeros((self.dmesh[idmesh]['data']['size'],)), Z])
            else:
                # Triangular mesh: nodes are (R,Z) pairs
                pts = self.dmesh[idmesh]['data']['nodes']
                pts = np.array(
                    [pts[:, 0], np.zeros((pts.shape[0],)), pts[:, 1]])

        pts = np.atleast_2d(pts)
        if pts.shape[0] != 3:
            msg = "pts must be np.ndarray of (X,Y,Z) points coordinates\n"
            msg += "Can be multi-dimensional, but the 1st dimension is (X,Y,Z)\n"
            msg += " - Expected shape : (3,...)\n"
            msg += " - Provided shape : %s"%str(pts.shape)
            raise Exception(msg)

        # Check t: None, a data key (str) resolved to its time vector,
        # or an explicit array of times
        lc = [t is None, type(t) is str, type(t) is np.ndarray]
        assert any(lc)
        if lc[1]:
            assert t in self._ddata.keys()
            t = self._ddata[t]['data']

        # Interpolation (including time broadcasting)
        # this is the second slowest step (~0.08 s)
        func = self._get_finterp(
            idquant=idquant, idref1d=idref1d, idref2d=idref2d,
            idq2dR=idq2dR, idq2dPhi=idq2dPhi, idq2dZ=idq2dZ,
            interp_t=interp_t, interp_space=interp_space,
            fill_value=fill_value, ani=ani, Type=Type,
        )
        # This is the slowest step (~1.8 s)
        val, t = func(pts, vect=vect, t=t)
        return val, t
def calc_signal_from_Cam(self, cam, t=None,
quant=None, ref1d=None, ref2d=None,
q2dR=None, q2dPhi=None, q2dZ=None,
Brightness=True, interp_t=None,
interp_space=None, fill_value=None,
res=0.005, DL=None, resMode='abs', method='sum',
ind=None, out=object, plot=True, dataname=None,
fs=None, dmargin=None, wintit=None, invert=True,
units=None, draw=True, connect=True):
if 'Cam' not in cam.__class__.__name__:
msg = "Arg cam must be tofu Camera instance (CamLOS1D, CamLOS2D...)"
raise Exception(msg)
return cam.calc_signal_from_Plasma2D(self, t=t,
quant=quant, ref1d=ref1d, ref2d=ref2d,
q2dR=q2dR, q2dPhi=q2dPhi,
q2dZ=q2dZ,
Brightness=Brightness,
interp_t=interp_t,
interp_space=interp_space,
fill_value=fill_value, res=res,
DL=DL, resMode=resMode,
method=method, ind=ind, out=out,
pot=plot, dataname=dataname,
fs=fs, dmargin=dmargin,
wintit=wintit, invert=invert,
units=units, draw=draw,
connect=connect)
#---------------------
# Methods for getting data
#---------------------
    def get_dextra(self, dextra=None):
        """ Normalize dextra into a dict of time-only dependent extra signals

        Accepts:
            - None            : returns {}
            - 'all'           : all time-only signals in the collection
            - a str / list of str (or (key, color) tuples)
            - a dict          : returned as-is

        Each resolved entry maps key -> {'data'/'data2D', 't', 'label',
        'units'} (+ optional 'c' color).
        """
        lc = [dextra is None, dextra == 'all', type(dextra) is dict,
              type(dextra) is str, type(dextra) is list]
        assert any(lc)
        if dextra is None:
            dextra = {}

        if dextra == 'all':
            # All signals depending on time only (excluding ref vectors)
            dextra = [k for k in self._dgroup['time']['ldata']
                      if (self._ddata[k]['lgroup'] == ['time']
                          and k not in self._dindref.keys())]

        if type(dextra) is str:
            dextra = [dextra]

        # get data
        if type(dextra) is list:
            for ii in range(0,len(dextra)):
                # Each item is either a key or a (key, color) tuple
                if type(dextra[ii]) is tuple:
                    ee, cc = dextra[ii]
                else:
                    ee, cc = dextra[ii], None
                ee, msg = self._get_keyingroup(ee, 'time', raise_=True)
                if self._ddata[ee]['lgroup'] != ['time']:
                    msg = "time-only dependent signals allowed in dextra!\n"
                    msg += " - %s : %s"%(ee,str(self._ddata[ee]['lgroup']))
                    raise Exception(msg)
                idt = self._ddata[ee]['depend'][0]
                # 1d signals go under 'data', 2d under 'data2D'
                key = 'data' if self._ddata[ee]['data'].ndim == 1 else 'data2D'
                dd = {key: self._ddata[ee]['data'],
                      't': self._ddata[idt]['data'],
                      'label': self._ddata[ee]['name'],
                      'units': self._ddata[ee]['units']}
                if cc is not None:
                    dd['c'] = cc
                dextra[ii] = (ee, dd)
            dextra = dict(dextra)
        return dextra
    def get_Data(self, lquant, X=None, ref1d=None, ref2d=None,
                 remap=False, res=0.01, interp_space=None, dextra=None):
        """ Turn 1d (radius-dependent) quantities into tf.data Data objects

        lquant : str or list of str, the quantities to extract
        X      : str or list of str, the abscissa quantity (one per lquant
                 or common to all)
        remap  : if True, also compute a 2d map of each quantity on a
                 cross-section sampling of the vessel

        Returns a single Data instance, or a list if several quantities.
        """
        try:
            import tofu.data as tfd
        except Exception:
            from .. import data as tfd

        # Check and format input
        assert type(lquant) in [str,list]
        if type(lquant) is str:
            lquant = [lquant]
        nquant = len(lquant)

        # Get X if common
        c0 = type(X) is str
        c1 = type(X) is list and (len(X) == 1 or len(X) == nquant)
        if not (c0 or c1):
            msg = ("X must be specified, either as :\n"
                   + " - a str (name or quant)\n"
                   + " - a list of str\n"
                   + " Provided: {}".format(X))
            raise Exception(msg)
        if c1 and len(X) == 1:
            X = X[0]
        if type(X) is str:
            idX, msg = self._get_keyingroup(X, 'radius', msgstr='X', raise_=True)

        # prepare remap pts
        if remap:
            assert self.config is not None
            # Sample the first vessel cross-section for the 2d map
            refS = list(self.config.dStruct['dObj']['Ves'].values())[0]
            ptsRZ, x1, x2, extent = refS.get_sampleCross(res, mode='imshow')
            dmap = {'t':None, 'data2D':None, 'extent':extent}
            # NOTE(review): 'ref' is never defined in this scope — this line
            # raises NameError when reached; presumably meant ref1d/ref2d
            if ref is None and X in self._lquantboth:
                ref = X

        # Define Data
        dcommon = dict(Exp=self.Id.Exp, shot=self.Id.shot,
                       Diag='profiles1d', config=self.config)

        # dextra
        dextra = self.get_dextra(dextra)

        # Get output
        lout = [None for qq in lquant]
        for ii in range(0,nquant):
            qq = lquant[ii]
            if remap:
                # Check requested quant is available in 2d or 1d
                # NOTE(review): 'idrefd1' looks like a typo of 'idref1d'
                # (both unused afterwards)
                idq, idrefd1, idref2d = self._get_quantrefkeys(qq, ref1d, ref2d)
            else:
                idq, msg = self._get_keyingroup(qq, 'radius',
                                                msgstr='quant', raise_=True)
            if idq not in self._dgroup['radius']['ldata']:
                msg = "Only 1d quantities can be turned into tf.data.Data !\n"
                msg += " - %s is not a radius-dependent quantity"%qq
                raise Exception(msg)
            idt = self._ddata[idq]['depend'][0]
            if type(X) is list:
                idX, msg = self._get_keyingroup(X[ii], 'radius',
                                                msgstr='X', raise_=True)
            dlabels = {'data':{'name': self._ddata[idq]['name'],
                               'units': self._ddata[idq]['units']},
                       'X':{'name': self._ddata[idX]['name'],
                            'units': self._ddata[idX]['units']},
                       't':{'name': self._ddata[idt]['name'],
                            'units': self._ddata[idt]['units']}}
            dextra_ = dict(dextra)
            if remap:
                dmapii = dict(dmap)
                # NOTE(review): interp_pts2profile has no 'ptsRZ'/'ref'
                # parameters — this call signature looks stale, TODO confirm
                val, tii = self.interp_pts2profile(qq, ptsRZ=ptsRZ, ref=ref,
                                                   interp_space=interp_space)
                dmapii['data2D'], dmapii['t'] = val, tii
                dextra_['map'] = dmapii
            # NOTE(review): DataCam1D is used unqualified although tofu.data
            # was imported as tfd — presumably tfd.DataCam1D, TODO confirm
            lout[ii] = DataCam1D(Name = qq,
                                 data = self._ddata[idq]['data'],
                                 t = self._ddata[idt]['data'],
                                 X = self._ddata[idX]['data'],
                                 dextra = dextra_, dlabels=dlabels, **dcommon)
        if nquant == 1:
            lout = lout[0]
        return lout
#---------------------
# Methods for plotting data
#---------------------
def plot(self, lquant, X=None,
ref1d=None, ref2d=None,
remap=False, res=0.01, interp_space=None,
sharex=False, bck=True):
lDat = self.get_Data(lquant, X=X, remap=remap,
ref1d=ref1d, ref2d=ref2d,
res=res, interp_space=interp_space)
if type(lDat) is list:
kh = lDat[0].plot_combine(lDat[1:], sharex=sharex, bck=bck)
else:
kh = lDat.plot(bck=bck)
return kh
    def plot_combine(self, lquant, lData=None, X=None,
                     ref1d=None, ref2d=None,
                     remap=False, res=0.01, interp_space=None,
                     sharex=False, bck=True):
        """ plot combining several quantities from the Plasma2D itself and
        optional extra list of Data instances """
        lDat = self.get_Data(lquant, X=X, remap=remap,
                             ref1d=ref1d, ref2d=ref2d,
                             res=res, interp_space=interp_space)
        if lData is not None:
            if type(lDat) is list:
                lData = lDat[1:] + lData
            else:
                # NOTE(review): lDat is a single Data instance here, so
                # lDat[1:] likely fails — presumably meant [] or [lDat]?
                # TODO confirm intended behavior
                lData = lDat[1:] + [lData]
        # NOTE(review): when lDat is not a list, lDat[0] below also looks
        # wrong; and lData may still be None — verify against callers
        kh = lDat[0].plot_combine(lData, sharex=sharex, bck=bck)
        return kh
| mit |
#!/usr/bin/env python
import re
import ast
from setuptools import find_packages, setup

# version parsing from __init__ pulled from Flask's setup.py
# https://github.com/mitsuhiko/flask/blob/master/setup.py
_version_re = re.compile(r'__version__\s+=\s+(.*)')

# Read the package version from q2d2/__init__.py without importing it
# (importing would require the package's dependencies to be installed)
with open('q2d2/__init__.py', 'rb') as f:
    hit = _version_re.search(f.read().decode('utf-8')).group(1)
    version = str(ast.literal_eval(hit))

# Trove classifiers for PyPI, one per non-empty line
classes = """
    Development Status :: 1 - Planning
    License :: OSI Approved :: BSD License
    Topic :: Scientific/Engineering
    Topic :: Scientific/Engineering :: Bio-Informatics
    Programming Language :: Python
    Programming Language :: Python :: 3
    Programming Language :: Python :: 3.3
    Programming Language :: Python :: 3.4
    Operating System :: Unix
    Operating System :: POSIX
    Operating System :: MacOS :: MacOS X
"""
classifiers = [s.strip() for s in classes.split('\n') if s]

description = 'Prototype/experiments for microbiome analyses.'

# Use the README as the long description shown on PyPI
with open('README.md') as f:
    long_description = f.read()

authors = 'https://github.com/gregcaporaso/q2d2/graphs/contributors'

setup(name='q2d2',
      version=version,
      license='BSD',
      description=description,
      long_description=long_description,
      author=authors,
      author_email="gregcaporaso@gmail.com",
      maintainer=authors,
      maintainer_email="gregcaporaso@gmail.com",
      url='https://github.com/gregcaporaso/q2d2',
      packages=find_packages(),
      scripts=['scripts/q2d2'],
      package_data={'q2d2': ['q2d2/markdown/*md']},
      install_requires=[
          'scikit-bio',
          'ipython',
          'ipymd',
          'click',
          'seaborn',
          'appdirs',
          'pyyaml',
          'python-frontmatter'
      ],
      classifiers=classifiers,
      )
| bsd-3-clause |
pythonvietnam/scikit-learn | examples/linear_model/plot_omp.py | 385 | 2263 | """
===========================
Orthogonal Matching Pursuit
===========================
Using orthogonal matching pursuit for recovering a sparse signal from a noisy
measurement encoded with a dictionary
"""
print(__doc__)
import matplotlib.pyplot as plt
import numpy as np
from sklearn.linear_model import OrthogonalMatchingPursuit
from sklearn.linear_model import OrthogonalMatchingPursuitCV
from sklearn.datasets import make_sparse_coded_signal
n_components, n_features = 512, 100
n_nonzero_coefs = 17
# generate the data
###################
# y = Xw
# |x|_0 = n_nonzero_coefs
y, X, w = make_sparse_coded_signal(n_samples=1,
n_components=n_components,
n_features=n_features,
n_nonzero_coefs=n_nonzero_coefs,
random_state=0)
idx, = w.nonzero()
# distort the clean signal
##########################
y_noisy = y + 0.05 * np.random.randn(len(y))
# plot the sparse signal
########################
plt.figure(figsize=(7, 7))
plt.subplot(4, 1, 1)
plt.xlim(0, 512)
plt.title("Sparse signal")
plt.stem(idx, w[idx])
# plot the noise-free reconstruction
####################################
omp = OrthogonalMatchingPursuit(n_nonzero_coefs=n_nonzero_coefs)
omp.fit(X, y)
coef = omp.coef_
idx_r, = coef.nonzero()
plt.subplot(4, 1, 2)
plt.xlim(0, 512)
plt.title("Recovered signal from noise-free measurements")
plt.stem(idx_r, coef[idx_r])
# plot the noisy reconstruction
###############################
omp.fit(X, y_noisy)
coef = omp.coef_
idx_r, = coef.nonzero()
plt.subplot(4, 1, 3)
plt.xlim(0, 512)
plt.title("Recovered signal from noisy measurements")
plt.stem(idx_r, coef[idx_r])
# plot the noisy reconstruction with number of non-zeros set by CV
##################################################################
omp_cv = OrthogonalMatchingPursuitCV()
omp_cv.fit(X, y_noisy)
coef = omp_cv.coef_
idx_r, = coef.nonzero()
plt.subplot(4, 1, 4)
plt.xlim(0, 512)
plt.title("Recovered signal from noisy measurements with CV")
plt.stem(idx_r, coef[idx_r])
plt.subplots_adjust(0.06, 0.04, 0.94, 0.90, 0.20, 0.38)
plt.suptitle('Sparse signal recovery with Orthogonal Matching Pursuit',
fontsize=16)
plt.show()
| bsd-3-clause |
import sys
import re

import numpy as np
from scipy.sparse import csc_matrix, csr_matrix, lil_matrix
from sklearn.utils.testing import (assert_almost_equal, assert_array_equal,
                                   assert_true)

from sklearn.datasets import load_digits
from sklearn.externals.six.moves import cStringIO as StringIO

from sklearn.neural_network import BernoulliRBM
from sklearn.utils.validation import assert_all_finite

# Surface numerical issues as warnings instead of being silent
np.seterr(all='warn')

# Shared fixture: digits dataset rescaled to [0, 1], as the Bernoulli RBM
# interprets inputs as probabilities.
Xdigits = load_digits().data
Xdigits -= Xdigits.min()
Xdigits /= Xdigits.max()
def test_fit():
    """Fitting on digits reaches the expected pseudo-likelihood and does
    not modify the input array in place."""
    X = Xdigits.copy()

    rbm = BernoulliRBM(n_components=64, learning_rate=0.1,
                       batch_size=10, n_iter=7, random_state=9)

    rbm.fit(X)

    assert_almost_equal(rbm.score_samples(X).mean(), -21., decimal=0)

    # in-place tricks shouldn't have modified X
    assert_array_equal(X, Xdigits)


def test_partial_fit():
    """Incremental fitting (partial_fit on manual batches) matches the
    quality of a full fit and leaves the input untouched."""
    X = Xdigits.copy()
    rbm = BernoulliRBM(n_components=64, learning_rate=0.1,
                       batch_size=20, random_state=9)
    n_samples = X.shape[0]
    n_batches = int(np.ceil(float(n_samples) / rbm.batch_size))
    batch_slices = np.array_split(X, n_batches)

    # 7 epochs over the manually split batches
    for i in range(7):
        for batch in batch_slices:
            rbm.partial_fit(batch)

    assert_almost_equal(rbm.score_samples(X).mean(), -21., decimal=0)
    assert_array_equal(X, Xdigits)
def test_transform():
    """transform() is equivalent to the hidden-unit mean activations."""
    X = Xdigits[:100]
    rbm1 = BernoulliRBM(n_components=16, batch_size=5,
                        n_iter=5, random_state=42)
    rbm1.fit(X)

    Xt1 = rbm1.transform(X)
    Xt2 = rbm1._mean_hiddens(X)

    assert_array_equal(Xt1, Xt2)


def test_small_sparse():
    # BernoulliRBM should work on small sparse matrices.
    X = csr_matrix(Xdigits[:4])
    BernoulliRBM().fit(X)  # no exception


def test_small_sparse_partial_fit():
    """partial_fit gives the same result for sparse (CSC/CSR) and dense
    input."""
    for sparse in [csc_matrix, csr_matrix]:
        X_sparse = sparse(Xdigits[:100])
        X = Xdigits[:100].copy()

        rbm1 = BernoulliRBM(n_components=64, learning_rate=0.1,
                            batch_size=10, random_state=9)
        rbm2 = BernoulliRBM(n_components=64, learning_rate=0.1,
                            batch_size=10, random_state=9)

        rbm1.partial_fit(X_sparse)
        rbm2.partial_fit(X)

        assert_almost_equal(rbm1.score_samples(X).mean(),
                            rbm2.score_samples(X).mean(),
                            decimal=0)
def test_sample_hiddens():
    """Averaging many hidden samples approximates the mean activations."""
    rng = np.random.RandomState(0)
    X = Xdigits[:100]
    rbm1 = BernoulliRBM(n_components=2, batch_size=5,
                        n_iter=5, random_state=42)
    rbm1.fit(X)

    h = rbm1._mean_hiddens(X[0])
    hs = np.mean([rbm1._sample_hiddens(X[0], rng) for i in range(100)], 0)

    assert_almost_equal(h, hs, decimal=1)


def test_fit_gibbs():
    # Gibbs on the RBM hidden layer should be able to recreate [[0], [1]]
    # from the same input
    rng = np.random.RandomState(42)
    X = np.array([[0.], [1.]])
    rbm1 = BernoulliRBM(n_components=2, batch_size=2,
                        n_iter=42, random_state=rng)
    # you need that much iters
    rbm1.fit(X)
    assert_almost_equal(rbm1.components_,
                        np.array([[0.02649814], [0.02009084]]), decimal=4)
    assert_almost_equal(rbm1.gibbs(X), X)
    # returned so that test_fit_gibbs_sparse can compare against it
    return rbm1


def test_fit_gibbs_sparse():
    # Gibbs on the RBM hidden layer should be able to recreate [[0], [1]] from
    # the same input even when the input is sparse, and test against non-sparse
    rbm1 = test_fit_gibbs()
    rng = np.random.RandomState(42)
    from scipy.sparse import csc_matrix
    X = csc_matrix([[0.], [1.]])
    rbm2 = BernoulliRBM(n_components=2, batch_size=2,
                        n_iter=42, random_state=rng)
    rbm2.fit(X)
    assert_almost_equal(rbm2.components_,
                        np.array([[0.02649814], [0.02009084]]), decimal=4)
    assert_almost_equal(rbm2.gibbs(X), X.toarray())
    # dense and sparse training must reach identical weights
    assert_almost_equal(rbm1.components_, rbm2.components_)
def test_gibbs_smoke():
    # Check if we don't get NaNs sampling the full digits dataset.
    # Also check that sampling again will yield different results.
    X = Xdigits
    rbm1 = BernoulliRBM(n_components=42, batch_size=40,
                        n_iter=20, random_state=42)
    rbm1.fit(X)
    X_sampled = rbm1.gibbs(X)
    assert_all_finite(X_sampled)
    X_sampled2 = rbm1.gibbs(X)
    # every row must differ in at least one position between the two draws
    assert_true(np.all((X_sampled != X_sampled2).max(axis=1)))


def test_score_samples():
    # Test score_samples (pseudo-likelihood) method.
    # Assert that pseudo-likelihood is computed without clipping.
    # See Fabian's blog, http://bit.ly/1iYefRk
    rng = np.random.RandomState(42)
    X = np.vstack([np.zeros(1000), np.ones(1000)])
    rbm1 = BernoulliRBM(n_components=10, batch_size=2,
                        n_iter=10, random_state=rng)
    rbm1.fit(X)
    # very unlikely samples => strongly negative, unclipped scores
    assert_true((rbm1.score_samples(X) < -300).all())

    # Sparse vs. dense should not affect the output. Also test sparse input
    # validation.
    rbm1.random_state = 42
    d_score = rbm1.score_samples(X)
    rbm1.random_state = 42
    s_score = rbm1.score_samples(lil_matrix(X))
    assert_almost_equal(d_score, s_score)

    # Test numerical stability (#2785): would previously generate infinities
    # and crash with an exception.
    with np.errstate(under='ignore'):
        rbm1.score_samples(np.arange(1000) * 100)
def test_rbm_verbose():
    """Smoke test: verbose fitting must not raise (stdout is swallowed)."""
    rbm = BernoulliRBM(n_iter=2, verbose=10)
    old_stdout = sys.stdout
    sys.stdout = StringIO()
    try:
        rbm.fit(Xdigits)
    finally:
        # always restore stdout, even if fit() raises
        sys.stdout = old_stdout


def test_sparse_and_verbose():
    # Make sure RBM works with sparse input when verbose=True
    old_stdout = sys.stdout
    sys.stdout = StringIO()
    from scipy.sparse import csc_matrix
    X = csc_matrix([[0.], [1.]])
    rbm = BernoulliRBM(n_components=2, batch_size=2, n_iter=1,
                       random_state=42, verbose=True)
    try:
        rbm.fit(X)
        s = sys.stdout.getvalue()
        # make sure output is sound
        assert_true(re.match(r"\[BernoulliRBM\] Iteration 1,"
                             r" pseudo-likelihood = -?(\d)+(\.\d+)?,"
                             r" time = (\d|\.)+s",
                             s))
    finally:
        sys.stdout = old_stdout
| bsd-3-clause |
spbguru/repo1 | examples/opf/clients/hotgym/anomaly/one_gym/run.py | 15 | 4940 | #!/usr/bin/env python
# ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
"""
Groups together code used for creating a NuPIC model and dealing with IO.
(This is a component of the One Hot Gym Anomaly Tutorial.)
"""
import importlib
import sys
import csv
import datetime
from nupic.data.inference_shifter import InferenceShifter
from nupic.frameworks.opf.modelfactory import ModelFactory
import nupic_anomaly_output
# Help text printed when the script is run directly
DESCRIPTION = (
  "Starts a NuPIC model from the model params returned by the swarm\n"
  "and pushes each line of input from the gym into the model. Results\n"
  "are written to an output file (default) or plotted dynamically if\n"
  "the --plot option is specified.\n"
)
# Base name shared by the input CSV and the model params module
GYM_NAME = "rec-center-hourly"
DATA_DIR = "."
MODEL_PARAMS_DIR = "./model_params"
# Timestamp format of the input CSV, e.g. '7/2/10 0:00'
DATE_FORMAT = "%m/%d/%y %H:%M"
def createModel(modelParams):
  """
  Given a model params dictionary, create a CLA Model. Automatically enables
  inference for kw_energy_consumption.
  :param modelParams: Model params dict
  :return: OPF Model object
  """
  model = ModelFactory.create(modelParams)
  # The field the model is asked to predict
  model.enableInference({"predictedField": "kw_energy_consumption"})
  return model
def getModelParamsFromName(gymName):
  """
  Given a gym name, assumes a matching model params python module exists within
  the model_params directory and attempts to import it.
  :param gymName: Gym name, used to guess the model params module name.
  :return: OPF Model params dictionary
  :raises Exception: if no matching model params module can be imported.
  """
  # e.g. "rec-center-hourly" -> "model_params.rec_center_hourly_model_params"
  importName = "model_params.%s_model_params" % (
    gymName.replace(" ", "_").replace("-", "_")
  )
  print "Importing model params from %s" % importName
  try:
    importedModelParams = importlib.import_module(importName).MODEL_PARAMS
  except ImportError:
    raise Exception("No model params exist for '%s'. Run swarm first!"
                    % gymName)
  return importedModelParams
def runIoThroughNupic(inputData, model, gymName, plot):
  """
  Handles looping over the input data and passing each row into the given model
  object, as well as extracting the result object and passing it into an output
  handler.
  :param inputData: file path to input data CSV
  :param model: OPF Model object
  :param gymName: Gym name, used for output handler naming
  :param plot: Whether to use matplotlib or not. If false, uses file output.
  """
  inputFile = open(inputData, "rb")
  csvReader = csv.reader(inputFile)
  # skip header rows (name, type, special-flag rows of the OPF CSV format)
  csvReader.next()
  csvReader.next()
  csvReader.next()

  # Aligns predictions with the rows they predict (plot mode only)
  shifter = InferenceShifter()
  if plot:
    output = nupic_anomaly_output.NuPICPlotOutput(gymName)
  else:
    output = nupic_anomaly_output.NuPICFileOutput(gymName)

  counter = 0
  for row in csvReader:
    counter += 1
    if (counter % 100 == 0):
      print "Read %i lines..." % counter
    timestamp = datetime.datetime.strptime(row[0], DATE_FORMAT)
    consumption = float(row[1])
    result = model.run({
      "timestamp": timestamp,
      "kw_energy_consumption": consumption
    })

    if plot:
      result = shifter.shift(result)

    # 1-step-ahead best prediction plus the anomaly score
    prediction = result.inferences["multiStepBestPredictions"][1]
    anomalyScore = result.inferences["anomalyScore"]
    output.write(timestamp, consumption, prediction, anomalyScore)

  inputFile.close()
  output.close()
def runModel(gymName, plot=False):
  """
  Assumes the gynName corresponds to both a like-named model_params file in the
  model_params directory, and that the data exists in a like-named CSV file in
  the current directory.
  :param gymName: Important for finding model params and input CSV file
  :param plot: Plot in matplotlib? Don't use this unless matplotlib is
  installed.
  """
  print "Creating model from %s..." % gymName
  model = createModel(getModelParamsFromName(gymName))
  # CSV lives next to this script, spaces in the name become underscores
  inputData = "%s/%s.csv" % (DATA_DIR, gymName.replace(" ", "_"))
  runIoThroughNupic(inputData, model, gymName, plot)
if __name__ == "__main__":
  print DESCRIPTION
  # Optional --plot flag switches from file output to live matplotlib
  plot = False
  args = sys.argv[1:]
  if "--plot" in args:
    plot = True
  runModel(GYM_NAME, plot=plot)
theDataGeek/pyhsmm | setup.py | 2 | 3380 | from distutils.core import setup, Extension
import numpy as np
import sys
import os
from glob import glob
PYHSMM_VERSION = "0.1.3"
###########################
#  compilation arguments  #
###########################
# Custom "--with-*" flags are consumed (removed) from sys.argv here so that
# distutils never sees them; each one adds compiler/linker switches.
extra_link_args = []
extra_compile_args = []
# Older clang toolchains need an explicit libc++ selection for C++11.
if '--with-old-clang' in sys.argv:
    sys.argv.remove('--with-old-clang')
    extra_compile_args.append('-stdlib=libc++')
    extra_link_args.append('-stdlib=libc++')
# Enable OpenMP parallelism in the compiled message-passing code.
if '--with-openmp' in sys.argv:
    sys.argv.remove('--with-openmp')
    extra_compile_args.append('-fopenmp')
    extra_link_args.append('-fopenmp')
# Tune generated code for the build machine's CPU.
if '--with-native' in sys.argv:
    sys.argv.remove('--with-native')
    extra_compile_args.append('-march=native')
# Link Eigen against Intel MKL; requires the MKLROOT environment variable.
if '--with-mkl' in sys.argv:
    sys.argv.remove('--with-mkl')
    # NOTE: there's no way this will work on Windows
    extra_compile_args.extend(['-m64','-I' + os.environ['MKLROOT'] + '/include','-DEIGEN_USE_MKL_ALL'])
    extra_link_args.extend(('-Wl,--start-group %(MKLROOT)s/lib/intel64/libmkl_intel_lp64.a %(MKLROOT)s/lib/intel64/libmkl_core.a %(MKLROOT)s/lib/intel64/libmkl_sequential.a -Wl,--end-group -lm' % {'MKLROOT':os.environ['MKLROOT']}).split(' '))
# Emit annotated assembly listings for inspection.
if '--with-assembly' in sys.argv:
    sys.argv.remove('--with-assembly')
    extra_compile_args.extend(['--save-temps','-masm=intel','-fverbose-asm'])
# Regenerate the .cpp sources from .pyx with Cython instead of using the
# pre-generated C++ files shipped with the source tree.
if '--with-cython' in sys.argv:
    sys.argv.remove('--with-cython')
    use_cython = True
else:
    use_cython = False
#######################
#  extension modules  #
#######################
cython_pathspec = os.path.join('pyhsmm','**','*.pyx')
if use_cython:
    from Cython.Build import cythonize
    ext_modules = cythonize(cython_pathspec)
else:
    # Build directly from the pre-cythonized .cpp next to each .pyx;
    # the extension's dotted name is derived from its path.
    paths = [os.path.splitext(fp)[0] for fp in glob(cython_pathspec)]
    names = ['.'.join(os.path.split(p)) for p in paths]
    ext_modules = [
        Extension(name,
            sources=[path + '.cpp'],
            include_dirs=[os.path.join('pyhsmm','deps','Eigen3')],
            extra_compile_args=['-O3','-std=c++11','-DNDEBUG','-w',
                '-DHMM_TEMPS_ON_HEAP'])
        for name, path in zip(names,paths)]
# Apply the flag-derived arguments to every extension (cythonized or not).
for e in ext_modules:
    e.extra_compile_args.extend(extra_compile_args)
    e.extra_link_args.extend(extra_link_args)
############
#  basics  #
############
setup(name='pyhsmm',
      version=PYHSMM_VERSION,
      description="Bayesian inference in HSMMs and HMMs",
      author='Matthew James Johnson',
      author_email='mattjj@csail.mit.edu',
      maintainer='Matthew James Johnson',
      maintainer_email='mattjj@csail.mit.edu',
      url="https://github.com/mattjj/pyhsmm",
      packages=['pyhsmm',
                'pyhsmm.basic',
                'pyhsmm.internals',
                'pyhsmm.plugins',
                'pyhsmm.util'],
      platforms='ALL',
      keywords=['bayesian', 'inference', 'mcmc', 'time-series',
                'monte-carlo'],
      install_requires=[
          "Cython >= 0.20.1",
          "numpy",
          "scipy",
          "matplotlib",
          "nose",
          "pybasicbayes",
      ],
      package_data={"pyhsmm": [os.path.join("examples", "*.txt")]},
      ext_modules=ext_modules,
      include_dirs=[np.get_include(),],
      classifiers=[
          'Intended Audience :: Science/Research',
          'Programming Language :: Python',
          'Programming Language :: C++',
      ])
| mit |
pepper-johnson/Erudition | Thesis/Mallet/notebooks/models/imports/features.py | 1 | 1856 | import numpy as np
import pandas as pd
# Columns retained from the raw CSV: the date index plus the model
# features and the target.
FEATURE_COLUMNS_WITH_DATE = [
    'date',
    'chibs',
    'hm',
    'is',
    'lr',
    'price'
]
# Predictor (input) columns fed to the model.
INPUT_COLUMNS = [
    'chibs',
    'hm',
    'is',
    'lr',
]
# Target (output) column the model predicts.
OUTPUT_COLUMNS = [
    'price'
]
def create_dataset(df):
    """Split *df* into an (inputs, outputs) pair of DataFrames.

    Inputs are the INPUT_COLUMNS, outputs the OUTPUT_COLUMNS.
    """
    ## input must be a pandas dataframe
    assert type(df) == pd.core.frame.DataFrame
    ## split into x, y
    x = df.loc[:, INPUT_COLUMNS]
    y = df.loc[:, OUTPUT_COLUMNS]
    return x, y
def import_file(file_path):
    """Load the feature CSV at *file_path*, keeping only the known columns.

    The CSV's first column is used as the index.
    """
    ## path must be a string
    assert type(file_path) == str
    ## load in features ...
    frame = pd.read_csv(file_path, index_col=0)
    ## keep only date + feature + target columns ...
    return frame.loc[:, FEATURE_COLUMNS_WITH_DATE]
def scale(features_df):
    """Return a copy of *features_df* with each input/output column scaled
    into [0, 1] by dividing by that column's maximum.

    :param features_df: pandas DataFrame containing INPUT_COLUMNS and
        OUTPUT_COLUMNS.
    :returns: a new DataFrame; *features_df* itself is not modified.
    """
    ## assert the input is a pandas dataframe
    assert type(features_df) == pd.core.frame.DataFrame
    df = features_df.copy()
    for col in np.union1d(INPUT_COLUMNS, OUTPUT_COLUMNS):
        # Vectorized division.  The previous row-wise
        # map(lambda x: x / df[col].max()) recomputed the column max once
        # per row, which was quadratic in the column length.
        df[col] = (df[col] / df[col].max()).astype(float)
    return df
def scale_and_transform(features_df):
    """Log-transform the target and inputs, then scale all columns to [0, 1].

    price -> log(price); each input x -> log(x ** 2); finally scale().
    """
    ## input must be a pandas dataframe
    assert type(features_df) == pd.core.frame.DataFrame
    transformed = features_df.copy()
    transformed.price = transformed.price.apply(np.log).astype(float)
    for column in INPUT_COLUMNS:
        transformed[column] = np.log(transformed[column] ** 2)
    return scale(transformed)
def scale_into_datasets(features_df):
    """Apply the log-square input transform, scale, and split into (x, y).

    Unlike scale_and_transform_into_datasets, the price column is NOT
    log-transformed here.
    """
    ## input must be a pandas dataframe
    assert type(features_df) == pd.core.frame.DataFrame
    transformed = features_df.copy()
    for column in INPUT_COLUMNS:
        transformed[column] = np.log(transformed[column] ** 2)
    scaled = scale(transformed)
    return create_dataset(scaled)
def scale_and_transform_into_datasets(features_df):
    """Fully transform and scale *features_df*, then split into (x, y)."""
    scaled = scale_and_transform(features_df)
    return create_dataset(scaled)
def mse(y, y_hat):
    """Mean squared error between targets *y* and predictions *y_hat*."""
    squared_errors = np.square(y - y_hat)
    return np.mean(squared_errors)
| apache-2.0 |
jonwright/ImageD11 | ImageD11/depreciated/rebin2d.py | 1 | 3961 | # ImageD11_v0.4 Software for beamline ID11
# Copyright (C) 2005 Jon Wright
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
"""
Function for rebinning in two dimensions
Expected to be very slow and then re-implemented in C once it works, correctly
"""
from math import ceil,floor,sqrt
class polygon:
    """
    Represents a 2D polygon
    """
    def __init__(self,xy):
        """
        xy are a list of pairs of xy points (directed)
        """
        self.xy=xy
    def area(self):
        """
        Signed area via the shoelace formula.
        http://mathworld.wolfram.com/PolygonArea.html
        # sign of area tells us if it is convex or concave
        """
        area=0.
        # Sum cross products of consecutive vertex pairs, wrapping at the end.
        for i in range(len(self.xy)):
            x1,y1 = self.xy[i%len(self.xy)]
            x2,y2 = self.xy[(i+1)%len(self.xy)]
            area+=x1*y2-x2*y1
        area=area/2.
        return area
    def walkaroundintegervertices(self):
        """
        Generate a list of points along the edges of integers.

        Walks each polygon edge in order, inserting every crossing of an
        integer grid line between consecutive vertices; the result is
        stored on the instance as self.path (not returned).
        """
        path=[]
        # Copy the vertex list and close the loop with the first vertex.
        l=[ item for item in self.xy ]
        l.append(self.xy[0])
        for j in range(len(l)-1):
            p1 = l[j]
            p2 = l[j+1]
            path.append(l[j])
            # Find points along edge
            intersects=[]
            # Crossings with vertical grid lines, edge running left-to-right.
            for i in range(ceil(p1[0]),ceil(p2[0])):
                # Intersections on zeroth axis
                g = 1.*(p2[1]-p1[1])/(p2[0]-p1[0])
                point=[i,p1[1]+g*(i-p1[0])]
                intersects.append( [distance(p1,point),point] )
            # Crossings with horizontal grid lines, edge running upward.
            for i in range(ceil(p1[1]),ceil(p2[1])):
                # Intersections on oneth axis
                g = 1.*(p2[0]-p1[0])/(p2[1]-p1[1])
                point=[p1[0]+g*(i-p1[1]),i]
                intersects.append( [distance(p1,point),point] )
            # Same two cases for edges running in the decreasing direction
            # (one of each pair of ranges is always empty).
            for i in range(ceil(p2[0]),ceil(p1[0])):
                # Intersections on zeroth axis
                g = 1.*(p2[1]-p1[1])/(p2[0]-p1[0])
                point=[i,p1[1]+g*(i-p1[0])]
                intersects.append( [distance(p1,point),point] )
            for i in range(ceil(p2[1]),ceil(p1[1])):
                # Intersections on oneth axis
                g = 1.*(p2[0]-p1[0])/(p2[1]-p1[1])
                point=[p1[0]+g*(i-p1[1]),i]
                intersects.append( [distance(p1,point),point] )
            if len(intersects)>0:
                # Sort crossings by distance from p1 so they are appended
                # in walking order along the edge.
                intersects.sort()
                # print "intersects",intersects
                for d,point in intersects:
                    path.append(point)
        # NOTE: result is stored on the instance, not returned.
        self.path=path
def testpolywalkandplot(vertices):
    """
    Build a polygon from *vertices*, walk its integer-grid crossings,
    plot vertices (circles) and the walked path (crosses), and print
    the signed area.  Python 2 print syntax.
    """
    from matplotlib.pylab import plot
    import numpy as np
    obj = polygon(vertices)
    obj.walkaroundintegervertices()
    plot(np.array(vertices)[:,0],np.array(vertices)[:,1],"o")
    plot(np.array(obj.path)[:,0],np.array(obj.path)[:,1],"+-")
    print obj.area()
def distance(p1,p2):
    """
    Euclidean distance between 2D points p1 and p2.

    The previous version computed sqrt(p1[0]**2 + p2[0]**2), which ignored
    the y components entirely and was not the separation between the two
    points; that broke the distance-from-p1 ordering of grid crossings in
    polygon.walkaroundintegervertices.
    """
    dx = p1[0]-p2[0]
    dy = p1[1]-p2[1]
    return sqrt(dx*dx+dy*dy)
def main():
    """
    Demo driver: walk and plot the integer-grid crossings of two test
    polygons, then show the figure.  Python 2 print syntax.
    """
    print "started"
    testpolywalkandplot([ [ 1.1, 0.9] ,
                          [ 3.2, 1.1] ,
                          [ 3.2, 4.2] ,
                          [ 1.2, 3.1] ])
    testpolywalkandplot([ [ 4.1, 0.9] ,
                          [ 7.2, 1.1] ,
                          [ 5.2, 4.3] ,
                          [ 6.2, 1.8] ])
    from matplotlib.pylab import show
    show()
if __name__=="__main__":
    main()
| gpl-2.0 |
tawsifkhan/scikit-learn | examples/plot_digits_pipe.py | 250 | 1809 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=========================================================
Pipelining: chaining a PCA and a logistic regression
=========================================================
The PCA does an unsupervised dimensionality reduction, while the logistic
regression does the prediction.
We use a GridSearchCV to set the dimensionality of the PCA
"""
print(__doc__)
# Code source: Gaël Varoquaux
# Modified for documentation by Jaques Grobler
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn import linear_model, decomposition, datasets
from sklearn.pipeline import Pipeline
# NOTE(review): sklearn.grid_search is the pre-0.18 module path; newer
# scikit-learn moved GridSearchCV to sklearn.model_selection.
from sklearn.grid_search import GridSearchCV
# Pipeline: PCA dimensionality reduction followed by logistic regression.
logistic = linear_model.LogisticRegression()
pca = decomposition.PCA()
pipe = Pipeline(steps=[('pca', pca), ('logistic', logistic)])
digits = datasets.load_digits()
X_digits = digits.data
y_digits = digits.target
###############################################################################
# Plot the PCA spectrum
pca.fit(X_digits)
plt.figure(1, figsize=(4, 3))
plt.clf()
plt.axes([.2, .2, .7, .7])
plt.plot(pca.explained_variance_, linewidth=2)
plt.axis('tight')
plt.xlabel('n_components')
plt.ylabel('explained_variance_')
###############################################################################
# Prediction
# Grid over PCA dimensionality and logistic-regression regularization C.
n_components = [20, 40, 64]
Cs = np.logspace(-4, 4, 3)
#Parameters of pipelines can be set using ‘__’ separated parameter names:
estimator = GridSearchCV(pipe,
                         dict(pca__n_components=n_components,
                              logistic__C=Cs))
estimator.fit(X_digits, y_digits)
# Mark the n_components value picked by the grid search on the spectrum plot.
plt.axvline(estimator.best_estimator_.named_steps['pca'].n_components,
            linestyle=':', label='n_components chosen')
plt.legend(prop=dict(size=12))
plt.show()
ABoothInTheWild/baseball-research | true2018PlayoffPreds.py | 1 | 18268 | # -*- coding: utf-8 -*-
"""
Created on Sun Aug 05 22:49:05 2018
@author: Alexander
"""
#2018 Preseason Playoff Odds
import heapq
from collections import Counter
import pandas as pd
import numpy as np
from scipy.stats import beta
import os
#read data
os.chdir('C:/Users/abooth/Documents/Python Scripts/PastPreds/mlbPlayoffOdds2018')
beta18Pre = pd.read_csv("mlb2018PreSeasonBetaEstimates.csv")
#Name Divisions
AL_East = ["BOS", "NYY", "TOR", "BAL", "TBR"]
AL_Central = ["CLE", "DET", "CHW", "MIN", "KCR"]
AL_West = ["TEX", "HOU", "SEA", "OAK", "LAA"]
NL_East = ["PHI", "WSN", "MIA", "NYM", "ATL"]
NL_Central = ["STL", "CHC", "MIL", "PIT", "CIN"]
NL_West = ["LAD", "SFG", "SDP", "ARI", "COL"]
Divisions = [AL_West, AL_Central, AL_East, NL_West, NL_Central, NL_East]
AL = [AL_West, AL_Central, AL_East]
NL = [NL_West, NL_Central, NL_East]
DivisionsLeague = [AL, NL]
AL_Teams = ["TEX", "HOU", "SEA", "OAK", "LAA", "CLE", "DET", "CHW", "MIN", "KCR",
"BOS", "NYY", "TOR", "BAL", "TBR"]
NL_Teams = ["LAD", "SFG", "SDP", "ARI", "COL", "STL", "CHC", "MIL", "PIT", "CIN",
"PHI", "WSN", "MIA", "NYM", "ATL"]
LeagueTeams = [AL_Teams, NL_Teams]
#init Odds arrays
resultsDF = pd.DataFrame()
teams = []
divOdds = []
wcOdds = []
expectedWins = []
ntrials=100000
np.random.seed(seed=54321)
for league in DivisionsLeague:
#Init wildcard counters per league
resultsWC = Counter({0: 0, 1: 0, 2: 0, 3: 0, 4: 0, 5: 0, 6: 0, 7: 0, 8: 0, 9: 0
, 10: 0, 11: 0, 12: 0, 13: 0, 14: 0})
wcTempResults = []
for div in league:
#init div winner counters
results = Counter({0: 0, 1: 0, 2: 0, 3: 0, 4: 0})
tempResults = []
for team in div:
alphaEst = beta18Pre[beta18Pre.Team_Abbr == team]["PriorAlpha"].values[0]
betaEst = beta18Pre[beta18Pre.Team_Abbr == team]["PriorBeta"].values[0]
sample = beta.rvs(alphaEst, betaEst, size=ntrials)
tempResults.append(np.round(sample*162,0))
expectedWins.append(np.round(np.mean(sample*162),0))
#Find division winners
divMaxesIndx = np.argmax(np.array(tempResults), axis=0)
results.update(divMaxesIndx)
teams.extend(div)
divOdds.extend(np.array(list(results.values()))/float(ntrials))
#remove division winners
tempResults = np.transpose(np.array(tempResults))
tempResults[np.arange(len(tempResults)), np.argmax(tempResults, axis=1)] = 0
wcTempResults.extend(np.transpose(tempResults))
#find league wildcards
wcTempResults = np.array(wcTempResults)
argMaxesIndx = [heapq.nlargest(2, range(len(wcTempResults[:,i])),
key=wcTempResults[:,i].__getitem__)
for i in range(np.size(wcTempResults,1))]
resultsWC.update(np.array(argMaxesIndx).flatten())
wcOdds.extend(np.array(list(resultsWC.values()))/float(ntrials))
resultsDF["Teams"] = teams
resultsDF["DivisionOdds20180328"] = divOdds
resultsDF["WildCardOdds20180328"] = wcOdds
resultsDF["PlayoffOdds20180328"] = resultsDF.DivisionOdds20180328 + resultsDF.WildCardOdds20180328
resultsDF["ExpectedWins20180328"] = expectedWins
#Attach point win estimates and confidence itervals
#resultsDF = resultsDF.sort_values(by=["Teams"]).reset_index(drop=True)
#beta18PreSorted = beta18Pre.sort_values(by=["Team_Abbr"]).reset_index(drop=True)
#resultsDF_Full = pd.concat([resultsDF, beta18PreSorted.iloc[:,19:37]], axis=1)
#resultsDF.to_csv("mlb2017PreseasonPlayoffPreds.csv", index=False)
###############################################################################
#Create playoff odds per day per prior per team
#read data downloaded from xmlStats API
mlb18Results = pd.read_csv("mlb2018SeasonResults.csv")
from datetime import timedelta, date
#https://stackoverflow.com/questions/1060279/iterating-through-a-range-of-dates-in-python
def daterange(start_date, end_date):
    """Yield each date from start_date (inclusive) to end_date (exclusive)."""
    total_days = int((end_date - start_date).days)
    for offset in range(total_days):
        yield start_date + timedelta(offset)
dates = []
start_date = date(2018, 3, 29)
end_date = date(2018, 9, 20)
for single_date in daterange(start_date, end_date):
dates.append(single_date.strftime("%Y%m%d"))
dateLen = len(dates)
#resultsDF = pd.DataFrame()
for i in range(len(dates)):
currDate = dates[i]
#init Odds arrays
teams = []
divOdds = []
wcOdds = []
expectedWins = []
ntrials=100000
np.random.seed(seed=54321)
for league in DivisionsLeague:
#Init wildcard counters per league
resultsWC = Counter({0: 0, 1: 0, 2: 0, 3: 0, 4: 0, 5: 0, 6: 0, 7: 0, 8: 0, 9: 0
, 10: 0, 11: 0, 12: 0, 13: 0, 14: 0})
wcTempResults = []
for div in league:
#init div winner counters
results = Counter({0: 0, 1: 0, 2: 0, 3: 0, 4: 0})
tempResults = []
for team in div:
#get priors
priorA = beta18Pre[beta18Pre.Team_Abbr == team]["PriorAlpha"].values[0]
priorB = beta18Pre[beta18Pre.Team_Abbr == team]["PriorBeta"].values[0]
#get posteriors
team18Res = mlb18Results[mlb18Results.Team_Abbr==team].iloc[:,2:((2*dateLen)+2)]
teamWins = team18Res.iloc[:,range(0,(2*dateLen),2)].values[0]
teamLosses = team18Res.iloc[:,range(1,(2*dateLen)+1,2)].values[0]
posteriorAlpha = priorA + teamWins[i]
posteriorBeta = priorB + teamLosses[i]
#where the magic happens
sample = beta.rvs(posteriorAlpha, posteriorBeta, size=ntrials)
gamesLeft = 162 - teamWins[i] - teamLosses[i]
winEstimate = np.round(teamWins[i] + sample*gamesLeft,0)
tempResults.append(winEstimate)
expectedWins.append(np.round(np.mean(teamWins[i] + sample*gamesLeft),0))
#Find division winners
divMaxesIndx = np.argmax(np.array(tempResults), axis=0)
results.update(divMaxesIndx)
teams.extend(div)
divOdds.extend(np.array(list(results.values()))/float(ntrials))
#remove division winners
tempResults = np.transpose(np.array(tempResults))
tempResults[np.arange(len(tempResults)), np.argmax(tempResults, axis=1)] = 0
wcTempResults.extend(np.transpose(tempResults))
#find league wildcards
wcTempResults = np.array(wcTempResults)
argMaxesIndx = [heapq.nlargest(2, range(len(wcTempResults[:,j])),
key=wcTempResults[:,j].__getitem__)
for j in range(np.size(wcTempResults,1))]
resultsWC.update(np.array(argMaxesIndx).flatten())
wcOdds.extend(np.array(list(resultsWC.values()))/float(ntrials))
resultsDF["Teams"] = teams
resultsDF["DivisionOdds" + currDate] = divOdds
resultsDF["WildCardOdds" + currDate] = wcOdds
resultsDF["PlayoffOdds" + currDate] = resultsDF["DivisionOdds" + currDate] + resultsDF["WildCardOdds" + currDate]
resultsDF["ExpectedWins" + currDate] = expectedWins
resultsDF.to_csv("mlb2018PlayoffPreds.csv", index=False)
#######################################################################
#Playoff Odds
resultsDF = pd.read_csv("mlb2018PlayoffPreds.csv")
#import plotly.plotly as py
import plotly.graph_objs as go
import plotly.offline as offline
import datetime
dates = []
start_date = date(2018, 3, 28)
end_date = date(2018, 9, 20)
for single_date in daterange(start_date, end_date):
dates.append(single_date)
def to_unix_time(dt):
    """Convert a naive UTC datetime to milliseconds since the Unix epoch."""
    unix_epoch = datetime.datetime.utcfromtimestamp(0)
    seconds_since_epoch = (dt - unix_epoch).total_seconds()
    return seconds_since_epoch * 1000
#https://teamcolorcodes.com/mlb-color-codes/
teamColors = dict([('LAD', 'rgb(0,90,156)'), ('ARI', 'rgb(167,25,48)'), ('COL', 'rgb(51,0,111)'),
('SDP', 'rgb(255,199,44)'), ('SFG', 'rgb(253,90,30)'), ('CHC', 'rgb(14,51,134)'),
('STL', 'rgb(196,30,58)'), ('MIL', 'rgb(19,41,75)'), ('PIT', 'rgb(253,184,39)'),
('CIN', 'rgb(198,1,31)'), ('WSN', 'rgb(171,0,3)'), ('PHI', 'rgb(232,24,40)'),
('ATL', 'rgb(19, 39, 79)'), ('MIA', 'rgb(255,102,0)'), ('NYM', 'rgb(0,45, 114)'),
('TEX', 'rgb(0,50,120)'), ('HOU', 'rgb(235,110,31)'), ('LAA', 'rgb(186,0,33)'),
('SEA', 'rgb(0,92,92)'), ('OAK', 'rgb(0,56,49)'), ('CLE', 'rgb(227,25,55)'),
('DET', 'rgb(250,70,22)'), ('KCR', 'rgb(0,70,135)'), ('MIN', 'rgb(0,43,92)'),
('CHW', 'rgb(39,37,31)'), ('NYY', 'rgb(12,35,64)'), ('BOS', 'rgb(189, 48, 57)'),
('BAL', 'rgb(223,70,1)'), ('TBR', 'rgb(143,188,230)'), ('TOR', 'rgb(19,74,142)')])
#Division Aggregate - 24 plots
divNames = ['AL_West', 'AL_Central', 'AL_East', 'NL_West', 'NL_Central', 'NL_East']
dataTypes = ['Playoff', 'Division', 'WildCard', 'ExpectedWins']
i=0
for league in DivisionsLeague:
for division in league:
for dataType in dataTypes:
dataToPlot = resultsDF[resultsDF.Teams.isin(division)]
teamHeaders = dataToPlot.Teams.values
cols = dataToPlot.columns[dataToPlot.columns.str.startswith(dataType)]
dataToPlot = dataToPlot[cols]
dataToPlot = pd.DataFrame(np.transpose(dataToPlot.values))
dataToPlot.columns = teamHeaders
divisionName = divNames[i]
if 'AL' in divisionName:
fileNamePrefix = '/HTML/Division/AL/'
else:
fileNamePrefix = '/HTML/Division/NL/'
if dataType != 'ExpectedWins':
plotTitle = divisionName + ' 2018 Bayesian ' + dataType + ' Probabilities'
yLabel = dataType + ' Probability'
fileName = os.getcwd() + fileNamePrefix + divisionName + '_2018_' + dataType + '_Probs'
yStart = 0
yEnd = 1.05
hoverFormat = '.2f'
else:
plotTitle = divisionName + ' 2018 Bayesian Expected Wins'
yLabel = "Expected Wins"
fileName = os.getcwd() + fileNamePrefix + divisionName + '_2018_' + dataType
yStart = 45
yEnd = 120
hoverFormat = '.0f'
x = dates
data = []
for teamAbbr in teamHeaders:
trace = go.Scatter(
x=x,
y=dataToPlot[teamAbbr],
mode='lines+markers',
name = teamAbbr,
line = dict(
color = teamColors[teamAbbr],
width = 4,
shape='linear'))
data.append(trace)
layout = go.Layout(
title = plotTitle,
yaxis = dict(title = yLabel,
range = [yStart, yEnd],
hoverformat = hoverFormat),
xaxis = dict(title = '',
range = [to_unix_time(datetime.datetime(2018, 3, 28)),
to_unix_time(datetime.datetime(2018, 9, 20))]))
fig = go.Figure(data = data, layout = layout)
offline.plot(fig, filename = fileName + '.html')
i += 1
#League Aggregate - 8 plots
leagueNames = ['American League', 'National League']
i=0
for league in LeagueTeams:
for dataType in dataTypes:
dataToPlot = resultsDF[resultsDF.Teams.isin(league)]
teamHeaders = dataToPlot.Teams.values
cols = dataToPlot.columns[dataToPlot.columns.str.startswith(dataType)]
dataToPlot = dataToPlot[cols]
dataToPlot = pd.DataFrame(np.transpose(dataToPlot.values))
dataToPlot.columns = teamHeaders
leagueName = leagueNames[i]
if 'American' in leagueName:
fileNamePrefix = '/HTML/League/AL/'
else:
fileNamePrefix = '/HTML/League/NL/'
if dataType != 'ExpectedWins':
plotTitle = leagueName + ' 2018 Bayesian ' + dataType + ' Probabilities'
yLabel = dataType + ' Probability'
fileName = os.getcwd() + fileNamePrefix + leagueName + '_2018_' + dataType + '_Probs'
yStart = 0
yEnd = 1.05
hoverFormat = '.2f'
else:
plotTitle = leagueName + ' 2018 Bayesian Expected Wins'
yLabel = "Expected Wins"
fileName = os.getcwd() + fileNamePrefix + leagueName + '_2018_' + dataType
yStart = 45
yEnd = 120
hoverFormat = '.0f'
x = dates
data = []
for teamAbbr in teamHeaders:
trace = go.Scatter(
x=x,
y=dataToPlot[teamAbbr],
mode='lines+markers',
name = teamAbbr,
line = dict(
color = teamColors[teamAbbr],
width = 4,
shape='linear'))
data.append(trace)
layout = go.Layout(
title = plotTitle,
yaxis = dict(title = yLabel,
range = [yStart, yEnd],
hoverformat = hoverFormat),
xaxis = dict(title = '',
range = [to_unix_time(datetime.datetime(2018, 3, 28)),
to_unix_time(datetime.datetime(2018, 9, 20))]))
fig = go.Figure(data = data, layout = layout)
offline.plot(fig, filename = fileName + '.html')
i += 1
#Level Aggregate - 4 plots
levNames = ['MLB']
i=0
for dataType in dataTypes:
dataToPlot = resultsDF
teamHeaders = dataToPlot.Teams.values
cols = dataToPlot.columns[dataToPlot.columns.str.startswith(dataType)]
dataToPlot = dataToPlot[cols]
dataToPlot = pd.DataFrame(np.transpose(dataToPlot.values))
dataToPlot.columns = teamHeaders
levName = levNames[i]
fileNamePrefix = '/HTML/Level/'
if dataType != 'ExpectedWins':
plotTitle = levName + ' 2018 Bayesian ' + dataType + ' Probabilities'
yLabel = dataType + ' Probability'
fileName = os.getcwd() + fileNamePrefix + levName + '_2018_' + dataType + '_Probs'
yStart = 0
yEnd = 1.05
hoverFormat = '.2f'
else:
plotTitle = levName + ' 2018 Bayesian Expected Wins'
yLabel = "Expected Wins"
fileName = os.getcwd() + fileNamePrefix + levName + '_2018_' + dataType
yStart = 45
yEnd = 120
hoverFormat = '.0f'
x = dates
data = []
for teamAbbr in teamHeaders:
trace = go.Scatter(
x=x,
y=dataToPlot[teamAbbr],
mode='lines+markers',
name = teamAbbr,
line = dict(
color = teamColors[teamAbbr],
width = 4,
shape='linear'))
data.append(trace)
layout = go.Layout(
title = plotTitle,
yaxis = dict(title = yLabel,
range = [yStart, yEnd],
hoverformat = hoverFormat),
xaxis = dict(title = '',
range = [to_unix_time(datetime.datetime(2018, 3, 28)),
to_unix_time(datetime.datetime(2018, 9, 20))]))
fig = go.Figure(data = data, layout = layout)
offline.plot(fig, filename = fileName + '.html')
########################################################
dates = []
start_date = date(2018, 3, 29)
end_date = date(2018, 9, 4)
for single_date in daterange(start_date, end_date):
dates.append(single_date.strftime("%Y%m%d"))
dateLen = len(dates)
team = "BOS"
ntrials=100000
np.random.seed(seed=54321)
i = dateLen - 1
priorA = beta18Pre[beta18Pre.Team_Abbr == team]["PriorAlpha"].values[0]
priorB = beta18Pre[beta18Pre.Team_Abbr == team]["PriorBeta"].values[0]
#get posteriors
team18Res = mlb18Results[mlb18Results.Team_Abbr==team].iloc[:,2:((2*dateLen)+2)]
teamWins = team18Res.iloc[:,range(0,(2*dateLen),2)].values[0]
teamLosses = team18Res.iloc[:,range(1,(2*dateLen)+1,2)].values[0]
posteriorAlpha = priorA + teamWins[i]
posteriorBeta = priorB + teamLosses[i]
#where the magic happens
sample = beta.rvs(posteriorAlpha, posteriorBeta, size=ntrials)
gamesLeft = 162 - teamWins[i] - teamLosses[i]
winEstimate = np.round(teamWins[i] + sample*gamesLeft,0)
print(np.mean(winEstimate))
print(np.percentile(winEstimate, 2.5))
print(np.percentile(winEstimate, 97.5))
print(np.mean(sample))
print(np.percentile(sample, 2.5))
print(np.percentile(sample, 97.5))
######################################################################
#init contstants
team = "BOS"
ntrials=100000
np.random.seed(seed=54321)
priorA = beta18Pre[beta18Pre.Team_Abbr == team]["PriorAlpha"].values[0]
priorB = beta18Pre[beta18Pre.Team_Abbr == team]["PriorBeta"].values[0]
#get posteriors
team18Res = mlb18Results[mlb18Results.Team_Abbr==team].iloc[:,2:((2*dateLen)+2)]
teamWins = team18Res.iloc[:,range(0,(2*dateLen),2)].values[0]
teamLosses = team18Res.iloc[:,range(1,(2*dateLen)+1,2)].values[0]
posteriorAlpha = priorA + teamWins[i]
posteriorBeta = priorB + teamLosses[i]
#where the magic happens
sample = beta.rvs(posteriorAlpha, posteriorBeta, size=ntrials)
gamesLeft = 162 - teamWins[i] - teamLosses[i]
sampleWins = np.round(teamWins[i] + sample*gamesLeft,0)
prob = len(sampleWins[sampleWins >= 106])/float(ntrials)
print(prob) | gpl-3.0 |
CG-F16-24-Rutgers/steersuite-rutgers | steerstats/tools/plotting/plotMultiObjectiveData3D.py | 8 | 2231 |
import csv
# Axes3D import is required for the side effect of registering the
# '3d' projection with matplotlib, even though it is never referenced.
from mpl_toolkits.mplot3d import Axes3D
import matplotlib.pyplot as plt
from matplotlib import cm
import sys
import scipy
from scipy.interpolate import bisplrep
from scipy.interpolate import bisplev
import numpy as np
sys.path.append('../../')
from util import readCSVDictToMutliObjData
from util import getParetoFront
# filename = '../../data/optimization/sf/multiObjective/SteerStatsOpt2.csv'
# Usage (Python 2 script):
#   plot.py <csvfile>            -> single multi-objective CSV with a
#                                   weights header; Pareto front printed
#   plot.py <prefix> <count>     -> concatenates <prefix>1.csv .. <prefix>(count-1).csv
filename = sys.argv[1]
xs = []
ys = []
zs = []
if len(sys.argv) == 2:
    # First line of the file holds the objective weights.
    dataFile = open(filename, "r")
    weighties = dataFile.readline()[:-1].split(',')
    dataFile.close()
    # Reopen so the reader sees the file from the start.
    dataFile = open(filename, "r")
    fitnesses, parameters = readCSVDictToMutliObjData(dataFile, 3, weighties)
    dataFile.close()
    front = getParetoFront(fitnesses, parameters)
    print "front " + str(len(front))
    print front
    # Three objective columns become the plot axes.
    xs = fitnesses[:,0]
    ys = fitnesses[:,1]
    zs = fitnesses[:,2]
elif len(sys.argv) == 3:
    # Accumulate raw x,y,z rows from the numbered CSV files.
    for i in range(1, int(sys.argv[2])):
        tmp_filename = filename + str(i) + ".csv"
        csvfile = open(tmp_filename, 'r')
        spamreader = csv.reader(csvfile, delimiter=',')
        for row in spamreader:
            xs.append(float(row[0]))
            ys.append(float(row[1]))
            zs.append(float(row[2]))
print "xs = " + str(xs)
print "ys = " + str(ys)
print "zs = " + str(zs)
fig = plt.figure()
# Min-max normalize every axis into [0, 1] before plotting.
x_min = np.amin(xs)
x_max = np.amax(xs)
y_min = np.amin(ys)
y_max = np.amax(ys)
z_min = np.amin(zs)
z_max = np.amax(zs)
new_xs = (xs - x_min) / (x_max - x_min)
new_ys = (ys - y_min) / (y_max - y_min)
new_zs = (zs - z_min) / (z_max - z_min)
tri = np.array([[1, 0, 0],
                [0, 1, 0],
                [0, 0, 1]])
ax = fig.add_subplot(111, projection='3d')
# ax = fig.gca(projection='3d')
# ax.plot_wireframe(xs, ys, zs, rstride=1, cstride=1)
ax.plot_trisurf(new_xs, new_ys, new_zs, cmap=cm.jet, linewidth=0.1)
# ax.plot_trisurf(tri[:,0], tri[:,1], tri[:,2], linewidth=0.2)
ax.set_xlim([0.0, 1.0])
ax.set_ylim([0.0, 1.0])
ax.set_zlim([0.0, 1.0])
ax.set_xlabel('Efficency Metric', fontsize=18)
ax.set_ylabel('PLE Metric', fontsize=18)
ax.set_zlabel('Entropy Metric', fontsize=18)
# ax.set_title("Multi-Objective Optimization")
plt.axis("tight")
plt.show()
| gpl-3.0 |
JKarathiya/Lean | Algorithm.Python/NLTKSentimentTradingAlgorithm.py | 1 | 3025 | # QUANTCONNECT.COM - Democratizing Finance, Empowering Individuals.
# Lean Algorithmic Trading Engine v2.0. Copyright 2014 QuantConnect Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import clr
clr.AddReference("System")
clr.AddReference("QuantConnect.Algorithm")
clr.AddReference("QuantConnect.Common")
from System import *
from QuantConnect import *
from QuantConnect.Algorithm import *
import pandas as pd
import nltk
# for details of NLTK, please visit https://www.nltk.org/index.html
class NLTKSentimentTradingAlgorithm(QCAlgorithm):
    """QuantConnect demo algorithm: trades SPY from keyword 'sentiment'
    found in a downloaded news-headline CSV, tokenized with NLTK."""
    def Initialize(self):
        self.SetStartDate(2018, 1, 1) # Set Start Date
        self.SetEndDate(2019, 1, 1) # Set End Date
        self.SetCash(100000) # Set Strategy Cash
        spy = self.AddEquity("SPY", Resolution.Minute)
        self.text = self.get_text() # Get custom text data for creating trading signals
        self.symbols = [spy.Symbol] # This can be extended to multiple symbols
        # for what extra models needed to download, please use code nltk.download()
        nltk.download('punkt')
        # Trade once a day, 30 minutes after SPY's market open.
        self.Schedule.On(self.DateRules.EveryDay("SPY"), self.TimeRules.AfterMarketOpen("SPY", 30), self.Trade)
    def Trade(self):
        # Look up today's headline by its "year-month-day" index key and
        # tokenize it into words.
        current_time = f'{self.Time.year}-{self.Time.month}-{self.Time.day}'
        current_text = self.text.loc[current_time][0]
        words = nltk.word_tokenize(current_text)
        # users should decide their own positive and negative words
        positive_word = 'Up'
        negative_word = 'Down'
        for holding in self.Portfolio.Values:
            # liquidate if it contains negative words
            if negative_word in words and holding.Invested:
                self.Liquidate(holding.Symbol)
            # buy if it contains positive words
            if positive_word in words and not holding.Invested:
                self.SetHoldings(holding.Symbol, 1 / len(self.symbols))
    def get_text(self):
        # import custom data
        # Note: dl must be 1, or it will not download automatically
        url = 'https://www.dropbox.com/s/7xgvkypg6uxp6xl/EconomicNews.csv?dl=1'
        data = self.Download(url).split('\n')
        # Skip the header row; column 0 is the date, column 1 the headline.
        headline = [x.split(',')[1] for x in data][1:]
        date = [x.split(',')[0] for x in data][1:]
        # create a pd dataframe with 1st col being date and 2nd col being headline (content of the text)
        df = pd.DataFrame(headline, index = date, columns = ['headline'])
        return df
MartinDelzant/scikit-learn | examples/applications/wikipedia_principal_eigenvector.py | 233 | 7819 | """
===============================
Wikipedia principal eigenvector
===============================
A classical way to assert the relative importance of vertices in a
graph is to compute the principal eigenvector of the adjacency matrix
so as to assign to each vertex the values of the components of the first
eigenvector as a centrality score:
http://en.wikipedia.org/wiki/Eigenvector_centrality
On the graph of webpages and links those values are called the PageRank
scores by Google.
The goal of this example is to analyze the graph of links inside
wikipedia articles to rank articles by relative importance according to
this eigenvector centrality.
The traditional way to compute the principal eigenvector is to use the
power iteration method:
http://en.wikipedia.org/wiki/Power_iteration
Here the computation is achieved thanks to Martinsson's Randomized SVD
algorithm implemented in the scikit.
The graph data is fetched from the DBpedia dumps. DBpedia is an extraction
of the latent structured data of the Wikipedia content.
"""
# Author: Olivier Grisel <olivier.grisel@ensta.org>
# License: BSD 3 clause
from __future__ import print_function
from bz2 import BZ2File
import os
from datetime import datetime
from pprint import pprint
from time import time
import numpy as np
from scipy import sparse
from sklearn.decomposition import randomized_svd
from sklearn.externals.joblib import Memory
from sklearn.externals.six.moves.urllib.request import urlopen
from sklearn.externals.six import iteritems
print(__doc__)
###############################################################################
# Where to download the data, if not already on disk
# DBpedia dump URLs; local filenames are the last path component.
redirects_url = "http://downloads.dbpedia.org/3.5.1/en/redirects_en.nt.bz2"
redirects_filename = redirects_url.rsplit("/", 1)[1]
page_links_url = "http://downloads.dbpedia.org/3.5.1/en/page_links_en.nt.bz2"
page_links_filename = page_links_url.rsplit("/", 1)[1]
resources = [
    (redirects_url, redirects_filename),
    (page_links_url, page_links_filename),
]
# Fetch each dump once; skip files already present on disk.
for url, filename in resources:
    if not os.path.exists(filename):
        print("Downloading data from '%s', please wait..." % url)
        opener = urlopen(url)
        open(filename, 'wb').write(opener.read())
        print()
###############################################################################
# Loading the redirect files
memory = Memory(cachedir=".")
def index(redirects, index_map, k):
    """Find the index of an article name after redirect resolution.

    Unknown names are assigned the next free integer index and recorded
    in *index_map* as a side effect.
    """
    resolved = redirects.get(k, k)
    if resolved not in index_map:
        index_map[resolved] = len(index_map)
    return index_map[resolved]
# Length of the shared URI prefix; the slice drops the leading "<" plus
# that prefix and the trailing ">" from an N-Triples URI token.
DBPEDIA_RESOURCE_PREFIX_LEN = len("http://dbpedia.org/resource/")
SHORTNAME_SLICE = slice(DBPEDIA_RESOURCE_PREFIX_LEN + 1, -1)
def short_name(nt_uri):
    """Remove the < and > URI markers and the common URI prefix"""
    return nt_uri[SHORTNAME_SLICE]
def get_redirects(redirects_filename):
    """Parse the redirections and build a transitively closed map out of it.

    Parameters
    ----------
    redirects_filename : str
        Path to the bz2-compressed NT dump of DBpedia redirects.

    Returns
    -------
    dict
        Maps every source article name to its *final* redirect target
        (chains are followed; cycles are broken at the first repeated name).
    """
    redirects = {}
    print("Parsing the NT redirect file")
    for l, line in enumerate(BZ2File(redirects_filename)):
        split = line.split()
        if len(split) != 4:
            # repr() keeps this working on Python 3, where BZ2File yields
            # bytes and "str" + bytes would raise a TypeError
            print("ignoring malformed line: " + repr(line))
            continue
        redirects[short_name(split[0])] = short_name(split[2])
        if l % 1000000 == 0:
            print("[%s] line: %08d" % (datetime.now().isoformat(), l))

    # compute the transitive closure: follow each redirect chain to its end
    print("Computing the transitive closure of the redirect relation")
    for l, source in enumerate(redirects.keys()):
        transitive_target = None
        target = redirects[source]
        seen = set([source])  # guards against redirect cycles
        while True:
            transitive_target = target
            target = redirects.get(target)
            if target is None or target in seen:
                break
            seen.add(target)
        redirects[source] = transitive_target
        if l % 1000000 == 0:
            print("[%s] line: %08d" % (datetime.now().isoformat(), l))
    return redirects
# disabling joblib as the pickling of large dicts seems much too slow
#@memory.cache
def get_adjacency_matrix(redirects_filename, page_links_filename, limit=None):
    """Extract the adjacency graph as a scipy sparse matrix.

    Redirects are resolved first.

    Parameters
    ----------
    redirects_filename : str
        Path to the bz2-compressed NT dump of redirects.
    page_links_filename : str
        Path to the bz2-compressed NT dump of page links.
    limit : int or None
        If given, stop after parsing this many link lines (bounds RAM use).

    Returns
    -------
    X : scipy.sparse.csr_matrix
        Adjacency matrix with X[i, j] == 1.0 when article i links to j.
    redirects : dict
        Article name -> final redirect target.
    index_map : dict
        Article name -> integer article index.
    """
    print("Computing the redirect map")
    redirects = get_redirects(redirects_filename)
    print("Computing the integer index map")
    index_map = dict()
    links = list()
    for l, line in enumerate(BZ2File(page_links_filename)):
        split = line.split()
        if len(split) != 4:
            # repr() keeps this working on Python 3, where BZ2File yields
            # bytes and "str" + bytes would raise a TypeError
            print("ignoring malformed line: " + repr(line))
            continue
        i = index(redirects, index_map, short_name(split[0]))
        j = index(redirects, index_map, short_name(split[2]))
        links.append((i, j))
        if l % 1000000 == 0:
            print("[%s] line: %08d" % (datetime.now().isoformat(), l))
        if limit is not None and l >= limit - 1:
            break

    print("Computing the adjacency matrix")
    # build in LIL format (cheap random writes), then convert to CSR
    X = sparse.lil_matrix((len(index_map), len(index_map)), dtype=np.float32)
    for i, j in links:
        X[i, j] = 1.0
    del links
    print("Converting to CSR representation")
    X = X.tocsr()
    print("CSR conversion done")
    return X, redirects, index_map
# stop after 5M links to make it possible to work in RAM
X, redirects, index_map = get_adjacency_matrix(
    redirects_filename, page_links_filename, limit=5000000)
# invert the mapping: article index -> article name, for reporting below
names = dict((i, name) for name, i in iteritems(index_map))
print("Computing the principal singular vectors using randomized_svd")
t0 = time()
U, s, V = randomized_svd(X, 5, n_iter=3)
print("done in %0.3fs" % (time() - t0))
# print the names of the strongest components of the principal singular
# vectors, which should be similar to the highest eigenvector (i.e. the
# most "central" pages of the link graph)
print("Top wikipedia pages according to principal singular vectors")
pprint([names[i] for i in np.abs(U.T[0]).argsort()[-10:]])
pprint([names[i] for i in np.abs(V[0]).argsort()[-10:]])
def centrality_scores(X, alpha=0.85, max_iter=100, tol=1e-10):
"""Power iteration computation of the principal eigenvector
This method is also known as Google PageRank and the implementation
is based on the one from the NetworkX project (BSD licensed too)
with copyrights by:
Aric Hagberg <hagberg@lanl.gov>
Dan Schult <dschult@colgate.edu>
Pieter Swart <swart@lanl.gov>
"""
n = X.shape[0]
X = X.copy()
incoming_counts = np.asarray(X.sum(axis=1)).ravel()
print("Normalizing the graph")
for i in incoming_counts.nonzero()[0]:
X.data[X.indptr[i]:X.indptr[i + 1]] *= 1.0 / incoming_counts[i]
dangle = np.asarray(np.where(X.sum(axis=1) == 0, 1.0 / n, 0)).ravel()
scores = np.ones(n, dtype=np.float32) / n # initial guess
for i in range(max_iter):
print("power iteration #%d" % i)
prev_scores = scores
scores = (alpha * (scores * X + np.dot(dangle, prev_scores))
+ (1 - alpha) * prev_scores.sum() / n)
# check convergence: normalized l_inf norm
scores_max = np.abs(scores).max()
if scores_max == 0.0:
scores_max = 1.0
err = np.abs(scores - prev_scores).max() / scores_max
print("error: %0.6f" % err)
if err < n * tol:
return scores
return scores
print("Computing principal eigenvector score using a power iteration method")
t0 = time()
scores = centrality_scores(X, max_iter=100, tol=1e-10)
print("done in %0.3fs" % (time() - t0))
# show the 10 pages with the highest PageRank-style centrality score
pprint([names[i] for i in np.abs(scores).argsort()[-10:]])
| bsd-3-clause |
jreback/pandas | pandas/tests/frame/test_query_eval.py | 2 | 47508 | from io import StringIO
import operator
import numpy as np
import pytest
import pandas.util._test_decorators as td
import pandas as pd
from pandas import DataFrame, Index, MultiIndex, Series, date_range
import pandas._testing as tm
from pandas.core.computation.check import NUMEXPR_INSTALLED
PARSERS = "python", "pandas"
ENGINES = "python", pytest.param("numexpr", marks=td.skip_if_no_ne)
@pytest.fixture(params=PARSERS, ids=lambda x: x)
def parser(request):
    """Fixture parametrized over the supported expression parsers."""
    return request.param
@pytest.fixture(params=ENGINES, ids=lambda x: x)
def engine(request):
    """Fixture parametrized over the supported evaluation engines."""
    return request.param
def skip_if_no_pandas_parser(parser):
    """Skip the calling test unless the "pandas" parser is selected."""
    if parser == "pandas":
        return
    pytest.skip(f"cannot evaluate with parser {repr(parser)}")
class TestCompat:
    """query/eval engine-selection compatibility (default, None, explicit)."""

    def setup_method(self, method):
        # shared fixture: a tiny frame plus the expected results of the
        # two expressions exercised by every test below
        self.df = DataFrame({"A": [1, 2, 3]})
        self.expected1 = self.df[self.df.A > 0]
        self.expected2 = self.df.A + 1

    def test_query_default(self):
        # GH 12749
        # this should always work, whether NUMEXPR_INSTALLED or not
        df = self.df
        result = df.query("A>0")
        tm.assert_frame_equal(result, self.expected1)
        result = df.eval("A+1")
        tm.assert_series_equal(result, self.expected2, check_names=False)

    def test_query_None(self):
        # engine=None must fall back to the default engine selection
        df = self.df
        result = df.query("A>0", engine=None)
        tm.assert_frame_equal(result, self.expected1)
        result = df.eval("A+1", engine=None)
        tm.assert_series_equal(result, self.expected2, check_names=False)

    def test_query_python(self):
        # the pure-python engine is always available
        df = self.df
        result = df.query("A>0", engine="python")
        tm.assert_frame_equal(result, self.expected1)
        result = df.eval("A+1", engine="python")
        tm.assert_series_equal(result, self.expected2, check_names=False)

    def test_query_numexpr(self):
        # works when numexpr is installed; otherwise must raise ImportError
        df = self.df
        if NUMEXPR_INSTALLED:
            result = df.query("A>0", engine="numexpr")
            tm.assert_frame_equal(result, self.expected1)
            result = df.eval("A+1", engine="numexpr")
            tm.assert_series_equal(result, self.expected2, check_names=False)
        else:
            msg = (
                r"'numexpr' is not installed or an unsupported version. "
                r"Cannot use engine='numexpr' for query/eval if 'numexpr' is "
                r"not installed"
            )
            with pytest.raises(ImportError, match=msg):
                df.query("A>0", engine="numexpr")
            with pytest.raises(ImportError, match=msg):
                df.eval("A+1", engine="numexpr")
class TestDataFrameEval:
    """DataFrame.eval / pd.eval arithmetic paths (python vs numexpr)."""

    # smaller hits python, larger hits numexpr
    @pytest.mark.parametrize("n", [4, 4000])
    @pytest.mark.parametrize(
        "op_str,op,rop",
        [
            ("+", "__add__", "__radd__"),
            ("-", "__sub__", "__rsub__"),
            ("*", "__mul__", "__rmul__"),
            ("/", "__truediv__", "__rtruediv__"),
        ],
    )
    def test_ops(self, op_str, op, rop, n):
        # tst ops and reversed ops in evaluation
        # GH7198
        df = DataFrame(1, index=range(n), columns=list("abcd"))
        df.iloc[0] = 2
        m = df.mean()
        base = DataFrame(  # noqa  (used via eval below)
            np.tile(m.values, n).reshape(n, -1), columns=list("abcd")
        )
        expected = eval(f"base {op_str} df")
        # ops as strings
        result = eval(f"m {op_str} df")
        tm.assert_frame_equal(result, expected)
        # BUG FIX: these membership tests previously checked ``op`` (the
        # dunder name, e.g. "__add__") against the symbol list, so neither
        # branch ever executed; they must check the symbol ``op_str``.
        # these are commutative
        if op_str in ["+", "*"]:
            result = getattr(df, op)(m)
            tm.assert_frame_equal(result, expected)
        # these are not
        elif op_str in ["-", "/"]:
            result = getattr(df, rop)(m)
            tm.assert_frame_equal(result, expected)

    def test_dataframe_sub_numexpr_path(self):
        # GH7192: Note we need a large number of rows to ensure this
        # goes through the numexpr path
        df = DataFrame({"A": np.random.randn(25000)})
        df.iloc[0:5] = np.nan
        expected = 1 - np.isnan(df.iloc[0:25])
        result = (1 - np.isnan(df)).iloc[0:25]
        tm.assert_frame_equal(result, expected)

    def test_query_non_str(self):
        # GH 11485: only string expressions are accepted
        df = DataFrame({"A": [1, 2, 3], "B": ["a", "b", "b"]})
        msg = "expr must be a string to be evaluated"
        with pytest.raises(ValueError, match=msg):
            df.query(lambda x: x.B == "b")
        with pytest.raises(ValueError, match=msg):
            df.query(111)

    def test_query_empty_string(self):
        # GH 13139: empty expression is rejected explicitly
        df = DataFrame({"A": [1, 2, 3]})
        msg = "expr cannot be an empty string"
        with pytest.raises(ValueError, match=msg):
            df.query("")

    def test_eval_resolvers_as_list(self):
        # GH 14095: resolvers may be a list of mappings
        df = DataFrame(np.random.randn(10, 2), columns=list("ab"))
        dict1 = {"a": 1}
        dict2 = {"b": 2}
        assert df.eval("a + b", resolvers=[dict1, dict2]) == dict1["a"] + dict2["b"]
        assert pd.eval("a + b", resolvers=[dict1, dict2]) == dict1["a"] + dict2["b"]

    def test_eval_object_dtype_binop(self):
        # GH#24883: boolean binop on object-dtype column via eval assignment
        df = DataFrame({"a1": ["Y", "N"]})
        res = df.eval("c = ((a1 == 'Y') & True)")
        expected = DataFrame({"a1": ["Y", "N"], "c": [True, False]})
        tm.assert_frame_equal(res, expected)
class TestDataFrameQueryWithMultiIndex:
    """DataFrame.query against MultiIndex levels: named, unnamed, and mixed."""

    def test_query_with_named_multiindex(self, parser, engine):
        skip_if_no_pandas_parser(parser)
        a = np.random.choice(["red", "green"], size=10)
        b = np.random.choice(["eggs", "ham"], size=10)
        index = MultiIndex.from_arrays([a, b], names=["color", "food"])
        df = DataFrame(np.random.randn(10, 2), index=index)
        # reference Series of the "color" level values, used to build the
        # expected frames below
        ind = Series(
            df.index.get_level_values("color").values, index=index, name="color"
        )
        # equality
        res1 = df.query('color == "red"', parser=parser, engine=engine)
        res2 = df.query('"red" == color', parser=parser, engine=engine)
        exp = df[ind == "red"]
        tm.assert_frame_equal(res1, exp)
        tm.assert_frame_equal(res2, exp)
        # inequality
        res1 = df.query('color != "red"', parser=parser, engine=engine)
        res2 = df.query('"red" != color', parser=parser, engine=engine)
        exp = df[ind != "red"]
        tm.assert_frame_equal(res1, exp)
        tm.assert_frame_equal(res2, exp)
        # list equality (really just set membership)
        res1 = df.query('color == ["red"]', parser=parser, engine=engine)
        res2 = df.query('["red"] == color', parser=parser, engine=engine)
        exp = df[ind.isin(["red"])]
        tm.assert_frame_equal(res1, exp)
        tm.assert_frame_equal(res2, exp)
        res1 = df.query('color != ["red"]', parser=parser, engine=engine)
        res2 = df.query('["red"] != color', parser=parser, engine=engine)
        exp = df[~ind.isin(["red"])]
        tm.assert_frame_equal(res1, exp)
        tm.assert_frame_equal(res2, exp)
        # in/not in ops
        res1 = df.query('["red"] in color', parser=parser, engine=engine)
        res2 = df.query('"red" in color', parser=parser, engine=engine)
        exp = df[ind.isin(["red"])]
        tm.assert_frame_equal(res1, exp)
        tm.assert_frame_equal(res2, exp)
        res1 = df.query('["red"] not in color', parser=parser, engine=engine)
        res2 = df.query('"red" not in color', parser=parser, engine=engine)
        exp = df[~ind.isin(["red"])]
        tm.assert_frame_equal(res1, exp)
        tm.assert_frame_equal(res2, exp)

    def test_query_with_unnamed_multiindex(self, parser, engine):
        # unnamed levels are addressable as ilevel_0, ilevel_1, ...
        skip_if_no_pandas_parser(parser)
        a = np.random.choice(["red", "green"], size=10)
        b = np.random.choice(["eggs", "ham"], size=10)
        index = MultiIndex.from_arrays([a, b])
        df = DataFrame(np.random.randn(10, 2), index=index)
        ind = Series(df.index.get_level_values(0).values, index=index)
        res1 = df.query('ilevel_0 == "red"', parser=parser, engine=engine)
        res2 = df.query('"red" == ilevel_0', parser=parser, engine=engine)
        exp = df[ind == "red"]
        tm.assert_frame_equal(res1, exp)
        tm.assert_frame_equal(res2, exp)
        # inequality
        res1 = df.query('ilevel_0 != "red"', parser=parser, engine=engine)
        res2 = df.query('"red" != ilevel_0', parser=parser, engine=engine)
        exp = df[ind != "red"]
        tm.assert_frame_equal(res1, exp)
        tm.assert_frame_equal(res2, exp)
        # list equality (really just set membership)
        res1 = df.query('ilevel_0 == ["red"]', parser=parser, engine=engine)
        res2 = df.query('["red"] == ilevel_0', parser=parser, engine=engine)
        exp = df[ind.isin(["red"])]
        tm.assert_frame_equal(res1, exp)
        tm.assert_frame_equal(res2, exp)
        res1 = df.query('ilevel_0 != ["red"]', parser=parser, engine=engine)
        res2 = df.query('["red"] != ilevel_0', parser=parser, engine=engine)
        exp = df[~ind.isin(["red"])]
        tm.assert_frame_equal(res1, exp)
        tm.assert_frame_equal(res2, exp)
        # in/not in ops
        res1 = df.query('["red"] in ilevel_0', parser=parser, engine=engine)
        res2 = df.query('"red" in ilevel_0', parser=parser, engine=engine)
        exp = df[ind.isin(["red"])]
        tm.assert_frame_equal(res1, exp)
        tm.assert_frame_equal(res2, exp)
        res1 = df.query('["red"] not in ilevel_0', parser=parser, engine=engine)
        res2 = df.query('"red" not in ilevel_0', parser=parser, engine=engine)
        exp = df[~ind.isin(["red"])]
        tm.assert_frame_equal(res1, exp)
        tm.assert_frame_equal(res2, exp)
        # ## LEVEL 1
        ind = Series(df.index.get_level_values(1).values, index=index)
        res1 = df.query('ilevel_1 == "eggs"', parser=parser, engine=engine)
        res2 = df.query('"eggs" == ilevel_1', parser=parser, engine=engine)
        exp = df[ind == "eggs"]
        tm.assert_frame_equal(res1, exp)
        tm.assert_frame_equal(res2, exp)
        # inequality
        res1 = df.query('ilevel_1 != "eggs"', parser=parser, engine=engine)
        res2 = df.query('"eggs" != ilevel_1', parser=parser, engine=engine)
        exp = df[ind != "eggs"]
        tm.assert_frame_equal(res1, exp)
        tm.assert_frame_equal(res2, exp)
        # list equality (really just set membership)
        res1 = df.query('ilevel_1 == ["eggs"]', parser=parser, engine=engine)
        res2 = df.query('["eggs"] == ilevel_1', parser=parser, engine=engine)
        exp = df[ind.isin(["eggs"])]
        tm.assert_frame_equal(res1, exp)
        tm.assert_frame_equal(res2, exp)
        res1 = df.query('ilevel_1 != ["eggs"]', parser=parser, engine=engine)
        res2 = df.query('["eggs"] != ilevel_1', parser=parser, engine=engine)
        exp = df[~ind.isin(["eggs"])]
        tm.assert_frame_equal(res1, exp)
        tm.assert_frame_equal(res2, exp)
        # in/not in ops
        res1 = df.query('["eggs"] in ilevel_1', parser=parser, engine=engine)
        res2 = df.query('"eggs" in ilevel_1', parser=parser, engine=engine)
        exp = df[ind.isin(["eggs"])]
        tm.assert_frame_equal(res1, exp)
        tm.assert_frame_equal(res2, exp)
        res1 = df.query('["eggs"] not in ilevel_1', parser=parser, engine=engine)
        res2 = df.query('"eggs" not in ilevel_1', parser=parser, engine=engine)
        exp = df[~ind.isin(["eggs"])]
        tm.assert_frame_equal(res1, exp)
        tm.assert_frame_equal(res2, exp)

    def test_query_with_partially_named_multiindex(self, parser, engine):
        # mix of a named level ("rating") and an unnamed one (ilevel_0)
        skip_if_no_pandas_parser(parser)
        a = np.random.choice(["red", "green"], size=10)
        b = np.arange(10)
        index = MultiIndex.from_arrays([a, b])
        index.names = [None, "rating"]
        df = DataFrame(np.random.randn(10, 2), index=index)
        res = df.query("rating == 1", parser=parser, engine=engine)
        ind = Series(
            df.index.get_level_values("rating").values, index=index, name="rating"
        )
        exp = df[ind == 1]
        tm.assert_frame_equal(res, exp)
        res = df.query("rating != 1", parser=parser, engine=engine)
        ind = Series(
            df.index.get_level_values("rating").values, index=index, name="rating"
        )
        exp = df[ind != 1]
        tm.assert_frame_equal(res, exp)
        res = df.query('ilevel_0 == "red"', parser=parser, engine=engine)
        ind = Series(df.index.get_level_values(0).values, index=index)
        exp = df[ind == "red"]
        tm.assert_frame_equal(res, exp)
        res = df.query('ilevel_0 != "red"', parser=parser, engine=engine)
        ind = Series(df.index.get_level_values(0).values, index=index)
        exp = df[ind != "red"]
        tm.assert_frame_equal(res, exp)

    def test_query_multiindex_get_index_resolvers(self):
        # the resolvers must expose index, columns, and each named level
        df = tm.makeCustomDataframe(
            10, 3, r_idx_nlevels=2, r_idx_names=["spam", "eggs"]
        )
        resolvers = df._get_index_resolvers()

        def to_series(mi, level):
            # Series of a MultiIndex level's values, indexed by the full MI
            level_values = mi.get_level_values(level)
            s = level_values.to_series()
            s.index = mi
            return s

        col_series = df.columns.to_series()
        expected = {
            "index": df.index,
            "columns": col_series,
            "spam": to_series(df.index, "spam"),
            "eggs": to_series(df.index, "eggs"),
            "C0": col_series,
        }
        for k, v in resolvers.items():
            if isinstance(v, Index):
                assert v.is_(expected[k])
            elif isinstance(v, Series):
                tm.assert_series_equal(v, expected[k])
            else:
                raise AssertionError("object must be a Series or Index")
@td.skip_if_no_ne
class TestDataFrameQueryNumExprPandas:
    """query/eval behavior with engine="numexpr" and the pandas parser.

    Subclasses override setup_class to re-run the same tests with other
    engine/parser combinations, overriding only the tests whose syntax
    differs between parsers.
    """

    @classmethod
    def setup_class(cls):
        cls.engine = "numexpr"
        cls.parser = "pandas"

    @classmethod
    def teardown_class(cls):
        del cls.engine, cls.parser

    def test_date_query_with_attribute_access(self):
        # @-prefixed attribute access on a local frame inside the expression
        engine, parser = self.engine, self.parser
        skip_if_no_pandas_parser(parser)
        df = DataFrame(np.random.randn(5, 3))
        df["dates1"] = date_range("1/1/2012", periods=5)
        df["dates2"] = date_range("1/1/2013", periods=5)
        df["dates3"] = date_range("1/1/2014", periods=5)
        res = df.query(
            "@df.dates1 < 20130101 < @df.dates3", engine=engine, parser=parser
        )
        expec = df[(df.dates1 < "20130101") & ("20130101" < df.dates3)]
        tm.assert_frame_equal(res, expec)

    def test_date_query_no_attribute_access(self):
        # chained datetime comparison with plain column names
        engine, parser = self.engine, self.parser
        df = DataFrame(np.random.randn(5, 3))
        df["dates1"] = date_range("1/1/2012", periods=5)
        df["dates2"] = date_range("1/1/2013", periods=5)
        df["dates3"] = date_range("1/1/2014", periods=5)
        res = df.query("dates1 < 20130101 < dates3", engine=engine, parser=parser)
        expec = df[(df.dates1 < "20130101") & ("20130101" < df.dates3)]
        tm.assert_frame_equal(res, expec)

    def test_date_query_with_NaT(self):
        # NaT entries must compare as False, matching the boolean mask
        engine, parser = self.engine, self.parser
        n = 10
        df = DataFrame(np.random.randn(n, 3))
        df["dates1"] = date_range("1/1/2012", periods=n)
        df["dates2"] = date_range("1/1/2013", periods=n)
        df["dates3"] = date_range("1/1/2014", periods=n)
        df.loc[np.random.rand(n) > 0.5, "dates1"] = pd.NaT
        df.loc[np.random.rand(n) > 0.5, "dates3"] = pd.NaT
        res = df.query("dates1 < 20130101 < dates3", engine=engine, parser=parser)
        expec = df[(df.dates1 < "20130101") & ("20130101" < df.dates3)]
        tm.assert_frame_equal(res, expec)

    def test_date_index_query(self):
        # "index" in the expression refers to the (datetime) index
        engine, parser = self.engine, self.parser
        n = 10
        df = DataFrame(np.random.randn(n, 3))
        df["dates1"] = date_range("1/1/2012", periods=n)
        df["dates3"] = date_range("1/1/2014", periods=n)
        return_value = df.set_index("dates1", inplace=True, drop=True)
        assert return_value is None
        res = df.query("index < 20130101 < dates3", engine=engine, parser=parser)
        expec = df[(df.index < "20130101") & ("20130101" < df.dates3)]
        tm.assert_frame_equal(res, expec)

    def test_date_index_query_with_NaT(self):
        engine, parser = self.engine, self.parser
        n = 10
        df = DataFrame(np.random.randn(n, 3))
        df["dates1"] = date_range("1/1/2012", periods=n)
        df["dates3"] = date_range("1/1/2014", periods=n)
        df.iloc[0, 0] = pd.NaT
        return_value = df.set_index("dates1", inplace=True, drop=True)
        assert return_value is None
        res = df.query("index < 20130101 < dates3", engine=engine, parser=parser)
        expec = df[(df.index < "20130101") & ("20130101" < df.dates3)]
        tm.assert_frame_equal(res, expec)

    def test_date_index_query_with_NaT_duplicates(self):
        engine, parser = self.engine, self.parser
        n = 10
        d = {}
        d["dates1"] = date_range("1/1/2012", periods=n)
        d["dates3"] = date_range("1/1/2014", periods=n)
        df = DataFrame(d)
        df.loc[np.random.rand(n) > 0.5, "dates1"] = pd.NaT
        return_value = df.set_index("dates1", inplace=True, drop=True)
        assert return_value is None
        res = df.query("dates1 < 20130101 < dates3", engine=engine, parser=parser)
        expec = df[(df.index.to_series() < "20130101") & ("20130101" < df.dates3)]
        tm.assert_frame_equal(res, expec)

    def test_date_query_with_non_date(self):
        # datetime vs non-datetime: ==/!= are allowed, ordering ops raise
        engine, parser = self.engine, self.parser
        n = 10
        df = DataFrame(
            {"dates": date_range("1/1/2012", periods=n), "nondate": np.arange(n)}
        )
        result = df.query("dates == nondate", parser=parser, engine=engine)
        assert len(result) == 0
        result = df.query("dates != nondate", parser=parser, engine=engine)
        tm.assert_frame_equal(result, df)
        msg = r"Invalid comparison between dtype=datetime64\[ns\] and ndarray"
        for op in ["<", ">", "<=", ">="]:
            with pytest.raises(TypeError, match=msg):
                df.query(f"dates {op} nondate", parser=parser, engine=engine)

    def test_query_syntax_error(self):
        engine, parser = self.engine, self.parser
        df = DataFrame({"i": range(10), "+": range(3, 13), "r": range(4, 14)})
        msg = "invalid syntax"
        with pytest.raises(SyntaxError, match=msg):
            df.query("i - +", engine=engine, parser=parser)

    def test_query_scope(self):
        # columns win by default; @name forces local-variable lookup
        from pandas.core.computation.ops import UndefinedVariableError

        engine, parser = self.engine, self.parser
        skip_if_no_pandas_parser(parser)
        df = DataFrame(np.random.randn(20, 2), columns=list("ab"))
        a, b = 1, 2  # noqa  (referenced via @a in the query strings)
        res = df.query("a > b", engine=engine, parser=parser)
        expected = df[df.a > df.b]
        tm.assert_frame_equal(res, expected)
        res = df.query("@a > b", engine=engine, parser=parser)
        expected = df[a > df.b]
        tm.assert_frame_equal(res, expected)
        # no local variable c
        with pytest.raises(
            UndefinedVariableError, match="local variable 'c' is not defined"
        ):
            df.query("@a > b > @c", engine=engine, parser=parser)
        # no column named 'c'
        with pytest.raises(UndefinedVariableError, match="name 'c' is not defined"):
            df.query("@a > b > c", engine=engine, parser=parser)

    def test_query_doesnt_pickup_local(self):
        from pandas.core.computation.ops import UndefinedVariableError

        engine, parser = self.engine, self.parser
        n = m = 10
        df = DataFrame(np.random.randint(m, size=(n, 3)), columns=list("abc"))
        # we don't pick up the local 'sin'
        with pytest.raises(UndefinedVariableError, match="name 'sin' is not defined"):
            df.query("sin > 5", engine=engine, parser=parser)

    def test_query_builtin(self):
        # numexpr refuses names that clash with its builtins (here "sin")
        from pandas.core.computation.engines import NumExprClobberingError

        engine, parser = self.engine, self.parser
        n = m = 10
        df = DataFrame(np.random.randint(m, size=(n, 3)), columns=list("abc"))
        df.index.name = "sin"
        msg = "Variables in expression.+"
        with pytest.raises(NumExprClobberingError, match=msg):
            df.query("sin > 5", engine=engine, parser=parser)

    def test_query(self):
        engine, parser = self.engine, self.parser
        df = DataFrame(np.random.randn(10, 3), columns=["a", "b", "c"])
        tm.assert_frame_equal(
            df.query("a < b", engine=engine, parser=parser), df[df.a < df.b]
        )
        tm.assert_frame_equal(
            df.query("a + b > b * c", engine=engine, parser=parser),
            df[df.a + df.b > df.b * df.c],
        )

    def test_query_index_with_name(self):
        # a named index is addressable by its name in the expression
        engine, parser = self.engine, self.parser
        df = DataFrame(
            np.random.randint(10, size=(10, 3)),
            index=Index(range(10), name="blob"),
            columns=["a", "b", "c"],
        )
        res = df.query("(blob < 5) & (a < b)", engine=engine, parser=parser)
        expec = df[(df.index < 5) & (df.a < df.b)]
        tm.assert_frame_equal(res, expec)
        res = df.query("blob < b", engine=engine, parser=parser)
        expec = df[df.index < df.b]
        tm.assert_frame_equal(res, expec)

    def test_query_index_without_name(self):
        engine, parser = self.engine, self.parser
        df = DataFrame(
            np.random.randint(10, size=(10, 3)),
            index=range(10),
            columns=["a", "b", "c"],
        )
        # "index" should refer to the index
        res = df.query("index < b", engine=engine, parser=parser)
        expec = df[df.index < df.b]
        tm.assert_frame_equal(res, expec)
        # test against a scalar
        res = df.query("index < 5", engine=engine, parser=parser)
        expec = df[df.index < 5]
        tm.assert_frame_equal(res, expec)

    def test_nested_scope(self):
        # @-references to local frames inside query/eval expressions
        engine = self.engine
        parser = self.parser
        skip_if_no_pandas_parser(parser)
        df = DataFrame(np.random.randn(5, 3))
        df2 = DataFrame(np.random.randn(5, 3))
        expected = df[(df > 0) & (df2 > 0)]
        result = df.query("(@df > 0) & (@df2 > 0)", engine=engine, parser=parser)
        tm.assert_frame_equal(result, expected)
        result = pd.eval("df[df > 0 and df2 > 0]", engine=engine, parser=parser)
        tm.assert_frame_equal(result, expected)
        result = pd.eval(
            "df[df > 0 and df2 > 0 and df[df > 0] > 0]", engine=engine, parser=parser
        )
        expected = df[(df > 0) & (df2 > 0) & (df[df > 0] > 0)]
        tm.assert_frame_equal(result, expected)
        result = pd.eval("df[(df>0) & (df2>0)]", engine=engine, parser=parser)
        expected = df.query("(@df>0) & (@df2>0)", engine=engine, parser=parser)
        tm.assert_frame_equal(result, expected)

    def test_nested_raises_on_local_self_reference(self):
        from pandas.core.computation.ops import UndefinedVariableError

        df = DataFrame(np.random.randn(5, 3))
        # can't reference ourself b/c we're a local so @ is necessary
        with pytest.raises(UndefinedVariableError, match="name 'df' is not defined"):
            df.query("df > 0", engine=self.engine, parser=self.parser)

    def test_local_syntax(self):
        skip_if_no_pandas_parser(self.parser)
        engine, parser = self.engine, self.parser
        df = DataFrame(np.random.randn(100, 10), columns=list("abcdefghij"))
        b = 1
        expect = df[df.a < b]
        result = df.query("a < @b", engine=engine, parser=parser)
        tm.assert_frame_equal(result, expect)
        expect = df[df.a < df.b]
        result = df.query("a < b", engine=engine, parser=parser)
        tm.assert_frame_equal(result, expect)

    def test_chained_cmp_and_in(self):
        skip_if_no_pandas_parser(self.parser)
        engine, parser = self.engine, self.parser
        cols = list("abc")
        df = DataFrame(np.random.randn(100, len(cols)), columns=cols)
        res = df.query(
            "a < b < c and a not in b not in c", engine=engine, parser=parser
        )
        ind = (df.a < df.b) & (df.b < df.c) & ~df.b.isin(df.a) & ~df.c.isin(df.b)
        expec = df[ind]
        tm.assert_frame_equal(res, expec)

    def test_local_variable_with_in(self):
        engine, parser = self.engine, self.parser
        skip_if_no_pandas_parser(parser)
        a = Series(np.random.randint(3, size=15), name="a")
        b = Series(np.random.randint(10, size=15), name="b")
        df = DataFrame({"a": a, "b": b})
        expected = df.loc[(df.b - 1).isin(a)]
        result = df.query("b - 1 in a", engine=engine, parser=parser)
        tm.assert_frame_equal(expected, result)
        b = Series(np.random.randint(10, size=15), name="b")
        expected = df.loc[(b - 1).isin(a)]
        result = df.query("@b - 1 in a", engine=engine, parser=parser)
        tm.assert_frame_equal(expected, result)

    def test_at_inside_string(self):
        # a literal "@" inside a quoted string must not trigger local lookup
        engine, parser = self.engine, self.parser
        skip_if_no_pandas_parser(parser)
        c = 1  # noqa  (deliberately shadows the "@c" inside the string)
        df = DataFrame({"a": ["a", "a", "b", "b", "@c", "@c"]})
        result = df.query('a == "@c"', engine=engine, parser=parser)
        expected = df[df.a == "@c"]
        tm.assert_frame_equal(result, expected)

    def test_query_undefined_local(self):
        from pandas.core.computation.ops import UndefinedVariableError

        engine, parser = self.engine, self.parser
        skip_if_no_pandas_parser(parser)
        df = DataFrame(np.random.rand(10, 2), columns=list("ab"))
        with pytest.raises(
            UndefinedVariableError, match="local variable 'c' is not defined"
        ):
            df.query("a == @c", engine=engine, parser=parser)

    def test_index_resolvers_come_after_columns_with_the_same_name(self):
        n = 1  # noqa
        a = np.r_[20:101:20]
        df = DataFrame({"index": a, "b": np.random.randn(a.size)})
        df.index.name = "index"
        result = df.query("index > 5", engine=self.engine, parser=self.parser)
        expected = df[df["index"] > 5]
        tm.assert_frame_equal(result, expected)
        df = DataFrame({"index": a, "b": np.random.randn(a.size)})
        result = df.query("ilevel_0 > 5", engine=self.engine, parser=self.parser)
        expected = df.loc[df.index[df.index > 5]]
        tm.assert_frame_equal(result, expected)
        df = DataFrame({"a": a, "b": np.random.randn(a.size)})
        df.index.name = "a"
        result = df.query("a > 5", engine=self.engine, parser=self.parser)
        expected = df[df.a > 5]
        tm.assert_frame_equal(result, expected)
        result = df.query("index > 5", engine=self.engine, parser=self.parser)
        expected = df.loc[df.index[df.index > 5]]
        tm.assert_frame_equal(result, expected)

    def test_inf(self):
        n = 10
        df = DataFrame({"a": np.random.rand(n), "b": np.random.rand(n)})
        # NOTE(review): columns are "a"/"b", so label 0 does not exist and
        # this enlarges the frame with a new column named 0 instead of
        # injecting infs into "a" — likely meant df.loc[::2, "a"].
        # TODO confirm; as written the ==/!= checks below pass vacuously.
        df.loc[::2, 0] = np.inf
        d = {"==": operator.eq, "!=": operator.ne}
        for op, f in d.items():
            q = f"a {op} inf"
            expected = df[f(df.a, np.inf)]
            result = df.query(q, engine=self.engine, parser=self.parser)
            tm.assert_frame_equal(result, expected)

    def test_check_tz_aware_index_query(self, tz_aware_fixture):
        # https://github.com/pandas-dev/pandas/issues/29463
        tz = tz_aware_fixture
        df_index = pd.date_range(
            start="2019-01-01", freq="1d", periods=10, tz=tz, name="time"
        )
        expected = DataFrame(index=df_index)
        df = DataFrame(index=df_index)
        result = df.query('"2018-01-03 00:00:00+00" < time')
        tm.assert_frame_equal(result, expected)
        expected = DataFrame(df_index)
        result = df.reset_index().query('"2018-01-03 00:00:00+00" < time')
        tm.assert_frame_equal(result, expected)
@td.skip_if_no_ne
class TestDataFrameQueryNumExprPython(TestDataFrameQueryNumExprPandas):
    """Re-run the numexpr tests with parser="python".

    Only the tests whose expressions rely on pandas-parser-specific syntax
    (chained comparisons, the @ prefix) are overridden here.
    """

    @classmethod
    def setup_class(cls):
        super().setup_class()
        cls.engine = "numexpr"
        cls.parser = "python"

    def test_date_query_no_attribute_access(self):
        # python parser: no chained comparisons, use explicit & instead
        engine, parser = self.engine, self.parser
        df = DataFrame(np.random.randn(5, 3))
        df["dates1"] = date_range("1/1/2012", periods=5)
        df["dates2"] = date_range("1/1/2013", periods=5)
        df["dates3"] = date_range("1/1/2014", periods=5)
        res = df.query(
            "(dates1 < 20130101) & (20130101 < dates3)", engine=engine, parser=parser
        )
        expec = df[(df.dates1 < "20130101") & ("20130101" < df.dates3)]
        tm.assert_frame_equal(res, expec)

    def test_date_query_with_NaT(self):
        engine, parser = self.engine, self.parser
        n = 10
        df = DataFrame(np.random.randn(n, 3))
        df["dates1"] = date_range("1/1/2012", periods=n)
        df["dates2"] = date_range("1/1/2013", periods=n)
        df["dates3"] = date_range("1/1/2014", periods=n)
        df.loc[np.random.rand(n) > 0.5, "dates1"] = pd.NaT
        df.loc[np.random.rand(n) > 0.5, "dates3"] = pd.NaT
        res = df.query(
            "(dates1 < 20130101) & (20130101 < dates3)", engine=engine, parser=parser
        )
        expec = df[(df.dates1 < "20130101") & ("20130101" < df.dates3)]
        tm.assert_frame_equal(res, expec)

    def test_date_index_query(self):
        engine, parser = self.engine, self.parser
        n = 10
        df = DataFrame(np.random.randn(n, 3))
        df["dates1"] = date_range("1/1/2012", periods=n)
        df["dates3"] = date_range("1/1/2014", periods=n)
        return_value = df.set_index("dates1", inplace=True, drop=True)
        assert return_value is None
        res = df.query(
            "(index < 20130101) & (20130101 < dates3)", engine=engine, parser=parser
        )
        expec = df[(df.index < "20130101") & ("20130101" < df.dates3)]
        tm.assert_frame_equal(res, expec)

    def test_date_index_query_with_NaT(self):
        engine, parser = self.engine, self.parser
        n = 10
        df = DataFrame(np.random.randn(n, 3))
        df["dates1"] = date_range("1/1/2012", periods=n)
        df["dates3"] = date_range("1/1/2014", periods=n)
        df.iloc[0, 0] = pd.NaT
        return_value = df.set_index("dates1", inplace=True, drop=True)
        assert return_value is None
        res = df.query(
            "(index < 20130101) & (20130101 < dates3)", engine=engine, parser=parser
        )
        expec = df[(df.index < "20130101") & ("20130101" < df.dates3)]
        tm.assert_frame_equal(res, expec)

    def test_date_index_query_with_NaT_duplicates(self):
        # python parser rejects the chained comparison outright
        engine, parser = self.engine, self.parser
        n = 10
        df = DataFrame(np.random.randn(n, 3))
        df["dates1"] = date_range("1/1/2012", periods=n)
        df["dates3"] = date_range("1/1/2014", periods=n)
        df.loc[np.random.rand(n) > 0.5, "dates1"] = pd.NaT
        return_value = df.set_index("dates1", inplace=True, drop=True)
        assert return_value is None
        msg = r"'BoolOp' nodes are not implemented"
        with pytest.raises(NotImplementedError, match=msg):
            df.query("index < 20130101 < dates3", engine=engine, parser=parser)

    def test_nested_scope(self):
        # the python parser has no @ prefix; locals resolve via pd.eval only
        from pandas.core.computation.ops import UndefinedVariableError

        engine = self.engine
        parser = self.parser
        # smoke test
        x = 1  # noqa  (referenced by the eval expression below)
        result = pd.eval("x + 1", engine=engine, parser=parser)
        assert result == 2
        df = DataFrame(np.random.randn(5, 3))
        df2 = DataFrame(np.random.randn(5, 3))
        # don't have the pandas parser
        msg = r"The '@' prefix is only supported by the pandas parser"
        with pytest.raises(SyntaxError, match=msg):
            df.query("(@df>0) & (@df2>0)", engine=engine, parser=parser)
        with pytest.raises(UndefinedVariableError, match="name 'df' is not defined"):
            df.query("(df>0) & (df2>0)", engine=engine, parser=parser)
        expected = df[(df > 0) & (df2 > 0)]
        result = pd.eval("df[(df > 0) & (df2 > 0)]", engine=engine, parser=parser)
        tm.assert_frame_equal(expected, result)
        expected = df[(df > 0) & (df2 > 0) & (df[df > 0] > 0)]
        result = pd.eval(
            "df[(df > 0) & (df2 > 0) & (df[df > 0] > 0)]", engine=engine, parser=parser
        )
        tm.assert_frame_equal(expected, result)
class TestDataFrameQueryPythonPandas(TestDataFrameQueryNumExprPandas):
    """Re-run the pandas-parser suite with the pure-python engine."""

    @classmethod
    def setup_class(cls):
        super().setup_class()
        cls.engine = "python"
        cls.parser = "pandas"

    def test_query_builtin(self):
        # Unlike numexpr, the python engine happily accepts a name that
        # shadows a builtin (here the index is named "sin").
        engine, parser = self.engine, self.parser
        nrows = high = 10
        df = DataFrame(
            np.random.randint(high, size=(nrows, 3)), columns=list("abc")
        )
        df.index.name = "sin"
        result = df.query("sin > 5", engine=engine, parser=parser)
        expected = df[df.index > 5]
        tm.assert_frame_equal(expected, result)
class TestDataFrameQueryPythonPython(TestDataFrameQueryNumExprPython):
    """Re-run the python-parser suite with the pure-python engine."""

    @classmethod
    def setup_class(cls):
        super().setup_class()
        cls.engine = cls.parser = "python"

    def test_query_builtin(self):
        # The python engine allows column/index names that shadow builtins.
        engine, parser = self.engine, self.parser
        nrows = high = 10
        df = DataFrame(
            np.random.randint(high, size=(nrows, 3)), columns=list("abc")
        )
        df.index.name = "sin"
        result = df.query("sin > 5", engine=engine, parser=parser)
        expected = df[df.index > 5]
        tm.assert_frame_equal(expected, result)
class TestDataFrameQueryStrings:
    # Query/eval behavior for frames with string-valued columns:
    # (in)equality, ``in`` membership, and lexicographic comparison.

    def test_str_query_method(self, parser, engine):
        df = DataFrame(np.random.randn(10, 1), columns=["b"])
        df["strings"] = Series(list("aabbccddee"))
        expect = df[df.strings == "a"]
        if parser != "pandas":
            # Non-pandas parsers rewrite string ==/!= into (Not)In nodes,
            # which are unimplemented for the 'python' parser.
            col = "strings"
            lst = '"a"'
            lhs = [col] * 2 + [lst] * 2
            rhs = lhs[::-1]
            eq, ne = "==", "!="
            ops = 2 * ([eq] + [ne])
            msg = r"'(Not)?In' nodes are not implemented"
            for lhs, op, rhs in zip(lhs, ops, rhs):
                ex = f"{lhs} {op} {rhs}"
                with pytest.raises(NotImplementedError, match=msg):
                    df.query(
                        ex,
                        engine=engine,
                        parser=parser,
                        local_dict={"strings": df.strings},
                    )
        else:
            # == / != on strings behaves like isin / ~isin, symmetrically.
            res = df.query('"a" == strings', engine=engine, parser=parser)
            tm.assert_frame_equal(res, expect)
            res = df.query('strings == "a"', engine=engine, parser=parser)
            tm.assert_frame_equal(res, expect)
            tm.assert_frame_equal(res, df[df.strings.isin(["a"])])
            expect = df[df.strings != "a"]
            res = df.query('strings != "a"', engine=engine, parser=parser)
            tm.assert_frame_equal(res, expect)
            res = df.query('"a" != strings', engine=engine, parser=parser)
            tm.assert_frame_equal(res, expect)
            tm.assert_frame_equal(res, df[~df.strings.isin(["a"])])

    def test_str_list_query_method(self, parser, engine):
        # Same as above, but comparing a column against a list literal.
        df = DataFrame(np.random.randn(10, 1), columns=["b"])
        df["strings"] = Series(list("aabbccddee"))
        expect = df[df.strings.isin(["a", "b"])]
        if parser != "pandas":
            col = "strings"
            lst = '["a", "b"]'
            lhs = [col] * 2 + [lst] * 2
            rhs = lhs[::-1]
            eq, ne = "==", "!="
            ops = 2 * ([eq] + [ne])
            msg = r"'(Not)?In' nodes are not implemented"
            for lhs, op, rhs in zip(lhs, ops, rhs):
                ex = f"{lhs} {op} {rhs}"
                with pytest.raises(NotImplementedError, match=msg):
                    df.query(ex, engine=engine, parser=parser)
        else:
            res = df.query('strings == ["a", "b"]', engine=engine, parser=parser)
            tm.assert_frame_equal(res, expect)
            res = df.query('["a", "b"] == strings', engine=engine, parser=parser)
            tm.assert_frame_equal(res, expect)
            expect = df[~df.strings.isin(["a", "b"])]
            res = df.query('strings != ["a", "b"]', engine=engine, parser=parser)
            tm.assert_frame_equal(res, expect)
            res = df.query('["a", "b"] != strings', engine=engine, parser=parser)
            tm.assert_frame_equal(res, expect)

    def test_query_with_string_columns(self, parser, engine):
        # 'in' between two string columns is pandas-parser-only syntax.
        df = DataFrame(
            {
                "a": list("aaaabbbbcccc"),
                "b": list("aabbccddeeff"),
                "c": np.random.randint(5, size=12),
                "d": np.random.randint(9, size=12),
            }
        )
        if parser == "pandas":
            res = df.query("a in b", parser=parser, engine=engine)
            expec = df[df.a.isin(df.b)]
            tm.assert_frame_equal(res, expec)
            res = df.query("a in b and c < d", parser=parser, engine=engine)
            expec = df[df.a.isin(df.b) & (df.c < df.d)]
            tm.assert_frame_equal(res, expec)
        else:
            msg = r"'(Not)?In' nodes are not implemented"
            with pytest.raises(NotImplementedError, match=msg):
                df.query("a in b", parser=parser, engine=engine)
            msg = r"'BoolOp' nodes are not implemented"
            with pytest.raises(NotImplementedError, match=msg):
                df.query("a in b and c < d", parser=parser, engine=engine)

    def test_object_array_eq_ne(self, parser, engine):
        # ==/!= between two object-dtype columns works for every
        # engine/parser combination.
        df = DataFrame(
            {
                "a": list("aaaabbbbcccc"),
                "b": list("aabbccddeeff"),
                "c": np.random.randint(5, size=12),
                "d": np.random.randint(9, size=12),
            }
        )
        res = df.query("a == b", parser=parser, engine=engine)
        exp = df[df.a == df.b]
        tm.assert_frame_equal(res, exp)
        res = df.query("a != b", parser=parser, engine=engine)
        exp = df[df.a != df.b]
        tm.assert_frame_equal(res, exp)

    def test_query_with_nested_strings(self, parser, engine):
        skip_if_no_pandas_parser(parser)
        # NOTE(review): sep=r"\s{2,}" splits on runs of >= 2 spaces, but
        # the literal below shows only single spaces between fields --
        # this file appears whitespace-collapsed; verify against upstream.
        raw = """id event timestamp
1 "page 1 load" 1/1/2014 0:00:01
1 "page 1 exit" 1/1/2014 0:00:31
2 "page 2 load" 1/1/2014 0:01:01
2 "page 2 exit" 1/1/2014 0:01:31
3 "page 3 load" 1/1/2014 0:02:01
3 "page 3 exit" 1/1/2014 0:02:31
4 "page 1 load" 2/1/2014 1:00:01
4 "page 1 exit" 2/1/2014 1:00:31
5 "page 2 load" 2/1/2014 1:01:01
5 "page 2 exit" 2/1/2014 1:01:31
6 "page 3 load" 2/1/2014 1:02:01
6 "page 3 exit" 2/1/2014 1:02:31
"""
        df = pd.read_csv(
            StringIO(raw), sep=r"\s{2,}", engine="python", parse_dates=["timestamp"]
        )
        expected = df[df.event == '"page 1 load"']
        res = df.query("""'"page 1 load"' in event""", parser=parser, engine=engine)
        tm.assert_frame_equal(expected, res)

    def test_query_with_nested_special_character(self, parser, engine):
        skip_if_no_pandas_parser(parser)
        df = DataFrame({"a": ["a", "b", "test & test"], "b": [1, 2, 3]})
        res = df.query('a == "test & test"', parser=parser, engine=engine)
        expec = df[df.a == "test & test"]
        tm.assert_frame_equal(res, expec)

    def test_query_lex_compare_strings(self, parser, engine):
        # <, >, <=, >= on strings compare lexicographically.
        a = Series(np.random.choice(list("abcde"), 20))
        b = Series(np.arange(a.size))
        df = DataFrame({"X": a, "Y": b})
        ops = {"<": operator.lt, ">": operator.gt, "<=": operator.le, ">=": operator.ge}
        for op, func in ops.items():
            res = df.query(f'X {op} "d"', engine=engine, parser=parser)
            expected = df[func(df.X, "d")]
            tm.assert_frame_equal(res, expected)

    def test_query_single_element_booleans(self, parser, engine):
        columns = "bid", "bidsize", "ask", "asksize"
        data = np.random.randint(2, size=(1, len(columns))).astype(bool)
        df = DataFrame(data, columns=columns)
        res = df.query("bid & ask", engine=engine, parser=parser)
        expected = df[df.bid & df.ask]
        tm.assert_frame_equal(res, expected)

    def test_query_string_scalar_variable(self, parser, engine):
        # '@name' resolves a string scalar from the enclosing scope.
        skip_if_no_pandas_parser(parser)
        df = DataFrame(
            {
                "Symbol": ["BUD US", "BUD US", "IBM US", "IBM US"],
                "Price": [109.70, 109.72, 183.30, 183.35],
            }
        )
        e = df[df.Symbol == "BUD US"]
        symb = "BUD US"  # noqa
        r = df.query("Symbol == @symb", parser=parser, engine=engine)
        tm.assert_frame_equal(e, r)
class TestDataFrameEvalWithFrame:
    """DataFrame.eval on a small random numeric frame."""

    def setup_method(self, method):
        self.frame = DataFrame(np.random.randn(10, 3), columns=list("abc"))

    def teardown_method(self, method):
        del self.frame

    def test_simple_expr(self, parser, engine):
        # 'a + b' is the elementwise sum of the two columns.
        computed = self.frame.eval("a + b", engine=engine, parser=parser)
        tm.assert_series_equal(computed, self.frame.a + self.frame.b)

    def test_bool_arith_expr(self, parser, engine):
        # Boolean indexing inside the expression aligns before adding.
        computed = self.frame.eval("a[a < 1] + b", engine=engine, parser=parser)
        expected = self.frame.a[self.frame.a < 1] + self.frame.b
        tm.assert_series_equal(computed, expected)

    @pytest.mark.parametrize("op", ["+", "-", "*", "/"])
    def test_invalid_type_for_operator_raises(self, parser, engine, op):
        # Arithmetic between numeric and string columns must raise.
        frame = DataFrame({"a": [1, 2], "b": ["c", "d"]})
        msg = r"unsupported operand type\(s\) for .+: '.+' and '.+'"
        with pytest.raises(TypeError, match=msg):
            frame.eval(f"a {op} b", engine=engine, parser=parser)
class TestDataFrameQueryBacktickQuoting:
    """Query/eval with backtick-quoted (non-identifier) column names."""

    @pytest.fixture(scope="class")
    def df(self):
        """
        Yields a dataframe with strings that may or may not need escaping
        by backticks. The last two columns cannot be escaped by backticks
        and should raise a ValueError.
        """
        # BUG FIX: "C C" (single space) and "C  C" (double space) are
        # deliberately distinct columns; a whitespace-collapsed copy of
        # this file had turned them into a duplicate dict key, silently
        # dropping the first. Same for the " " / "  " space-only column.
        yield DataFrame(
            {
                "A": [1, 2, 3],
                "B B": [3, 2, 1],
                "C C": [4, 5, 6],
                "C  C": [7, 4, 3],
                "C_C": [8, 9, 10],
                "D_D D": [11, 1, 101],
                "E.E": [6, 3, 5],
                "F-F": [8, 1, 10],
                "1e1": [2, 4, 8],
                "def": [10, 11, 2],
                "A (x)": [4, 1, 3],
                "B(x)": [1, 1, 5],
                "B (x)": [2, 7, 4],
                " &^ :!€$?(} > <++*'' ": [2, 5, 6],
                "": [10, 11, 1],
                " A": [4, 7, 9],
                "  ": [1, 2, 1],
                "it's": [6, 3, 1],
                "that's": [9, 1, 8],
                "☺": [8, 7, 6],
                "foo#bar": [2, 4, 5],
                1: [5, 7, 9],
            }
        )

    def test_single_backtick_variable_query(self, df):
        res = df.query("1 < `B B`")
        expect = df[1 < df["B B"]]
        tm.assert_frame_equal(res, expect)

    def test_two_backtick_variables_query(self, df):
        res = df.query("1 < `B B` and 4 < `C C`")
        expect = df[(1 < df["B B"]) & (4 < df["C C"])]
        tm.assert_frame_equal(res, expect)

    def test_single_backtick_variable_expr(self, df):
        res = df.eval("A + `B B`")
        expect = df["A"] + df["B B"]
        tm.assert_series_equal(res, expect)

    def test_two_backtick_variables_expr(self, df):
        res = df.eval("`B B` + `C C`")
        expect = df["B B"] + df["C C"]
        tm.assert_series_equal(res, expect)

    def test_already_underscore_variable(self, df):
        res = df.eval("`C_C` + A")
        expect = df["C_C"] + df["A"]
        tm.assert_series_equal(res, expect)

    def test_same_name_but_underscores(self, df):
        # Backtick-cleaned names must not collide with an existing
        # underscore column.
        res = df.eval("C_C + `C C`")
        expect = df["C_C"] + df["C C"]
        tm.assert_series_equal(res, expect)

    def test_mixed_underscores_and_spaces(self, df):
        res = df.eval("A + `D_D D`")
        expect = df["A"] + df["D_D D"]
        tm.assert_series_equal(res, expect)

    def test_backtick_quote_name_with_no_spaces(self, df):
        # Quoting is harmless even when the name needs no escaping.
        res = df.eval("A + `C_C`")
        expect = df["A"] + df["C_C"]
        tm.assert_series_equal(res, expect)

    def test_special_characters(self, df):
        res = df.eval("`E.E` + `F-F` - A")
        expect = df["E.E"] + df["F-F"] - df["A"]
        tm.assert_series_equal(res, expect)

    def test_start_with_digit(self, df):
        res = df.eval("A + `1e1`")
        expect = df["A"] + df["1e1"]
        tm.assert_series_equal(res, expect)

    def test_keyword(self, df):
        res = df.eval("A + `def`")
        expect = df["A"] + df["def"]
        tm.assert_series_equal(res, expect)

    def test_unneeded_quoting(self, df):
        res = df.query("`A` > 2")
        expect = df[df["A"] > 2]
        tm.assert_frame_equal(res, expect)

    def test_parenthesis(self, df):
        res = df.query("`A (x)` > 2")
        expect = df[df["A (x)"] > 2]
        tm.assert_frame_equal(res, expect)

    def test_empty_string(self, df):
        res = df.query("`` > 5")
        expect = df[df[""] > 5]
        tm.assert_frame_equal(res, expect)

    def test_multiple_spaces(self, df):
        # BUG FIX: restored the double space -- this test exercises a
        # column name containing a run of spaces.
        res = df.query("`C  C` > 5")
        expect = df[df["C  C"] > 5]
        tm.assert_frame_equal(res, expect)

    def test_start_with_spaces(self, df):
        # BUG FIX: restored the two-space column name "  ".
        res = df.eval("` A` + `  `")
        expect = df[" A"] + df["  "]
        tm.assert_series_equal(res, expect)

    def test_lots_of_operators_string(self, df):
        res = df.query("` &^ :!€$?(} > <++*'' ` > 4")
        expect = df[df[" &^ :!€$?(} > <++*'' "] > 4]
        tm.assert_frame_equal(res, expect)

    def test_missing_attribute(self, df):
        message = "module 'pandas' has no attribute 'thing'"
        with pytest.raises(AttributeError, match=message):
            df.eval("@pd.thing")

    def test_failing_quote(self, df):
        # Embedded quotes cannot be converted to an identifier.
        msg = r"(Could not convert ).*( to a valid Python identifier.)"
        with pytest.raises(SyntaxError, match=msg):
            df.query("`it's` > `that's`")

    def test_failing_character_outside_range(self, df):
        msg = r"(Could not convert ).*( to a valid Python identifier.)"
        with pytest.raises(SyntaxError, match=msg):
            df.query("`☺` > 4")

    def test_failing_hashtag(self, df):
        # '#' starts a comment inside the expression, breaking the parse.
        msg = "Failed to parse backticks"
        with pytest.raises(SyntaxError, match=msg):
            df.query("`foo#bar` > 4")

    def test_call_non_named_expression(self, df):
        """
        Only attributes and variables ('named functions') can be called.
        .__call__() is not an allowed attribute because that would allow
        calling anything.
        https://github.com/pandas-dev/pandas/pull/32460
        """

        def func(*_):
            return 1

        funcs = [func]  # noqa

        df.eval("@func()")

        with pytest.raises(TypeError, match="Only named functions are supported"):
            df.eval("@funcs[0]()")

        with pytest.raises(TypeError, match="Only named functions are supported"):
            df.eval("@funcs[0].__call__()")
| bsd-3-clause |
Midafi/scikit-image | doc/examples/plot_multiblock_local_binary_pattern.py | 22 | 2498 | """
===========================================================
Multi-Block Local Binary Pattern for texture classification
===========================================================
This example shows how to compute multi-block local binary pattern (MB-LBP)
features as well as how to visualize them.
The features are calculated similarly to local binary patterns (LBPs), except
that summed blocks are used instead of individual pixel values.
MB-LBP is an extension of LBP that can be computed on multiple scales in
constant time using the integral image. 9 equally-sized rectangles are used to
compute a feature. For each rectangle, the sum of the pixel intensities is
computed. Comparisons of these sums to that of the central rectangle determine
the feature, similarly to LBP (See `LBP <plot_local_binary_pattern.html>`_).
First, we generate an image to illustrate the functioning of MB-LBP: consider
a (9, 9) rectangle and divide it into (3, 3) blocks, upon which we then apply
MB-LBP.
"""
from __future__ import print_function
from skimage.feature import multiblock_lbp
import numpy as np
from numpy.testing import assert_equal
from skimage.transform import integral_image

# Create test matrix where first and fifth rectangles starting
# from top left clockwise have greater value than the central one.
test_img = np.zeros((9, 9), dtype='uint8')
test_img[3:6, 3:6] = 1
test_img[:3, :3] = 50
test_img[6:, 6:] = 50

# First and fifth bits should be filled. This correct value will
# be compared to the computed one.
correct_answer = 0b10001000

# MB-LBP is computed on the integral image, not the raw pixels.
int_img = integral_image(test_img)

# One feature for the (9, 9) region split into 3x3 blocks of size 3.
lbp_code = multiblock_lbp(int_img, 0, 0, 3, 3)
assert_equal(correct_answer, lbp_code)

"""
Now let's apply the operator to a real image and see how the
visualization works.
"""
from skimage import data
from matplotlib import pyplot as plt
from skimage.feature import draw_multiblock_lbp

test_img = data.coins()

int_img = integral_image(test_img)

# Compute a single MB-LBP feature over a 270x270 region (3x3 blocks of
# 90x90 pixels) anchored at the top-left corner.
lbp_code = multiblock_lbp(int_img, 0, 0, 90, 90)

# Overlay the feature rectangles on the original image: cyan marks
# blocks with a smaller sum than the center, white marks larger ones.
img = draw_multiblock_lbp(test_img, 0, 0, 90, 90,
                          lbp_code=lbp_code, alpha=0.5)

plt.imshow(img, interpolation='nearest')
plt.show()
"""
.. image:: PLOT2RST.current_figure
On the above plot we see the result of computing a MB-LBP and visualization of
the computed feature. The rectangles that have less intensities' sum than the
central rectangle are marked in cyan. The ones that have higher intensity
values are marked in white. The central rectangle is left untouched.
"""
| bsd-3-clause |
dhomeier/astropy | astropy/nddata/utils.py | 3 | 32016 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
This module includes helper functions for array operations.
"""
from copy import deepcopy
import sys
import types
import warnings
import numpy as np
from astropy import units as u
from astropy.coordinates import SkyCoord
from astropy.utils import lazyproperty
from astropy.utils.decorators import AstropyDeprecationWarning
from astropy.wcs.utils import skycoord_to_pixel, proj_plane_pixel_scales
from astropy.wcs import Sip
from .blocks import block_reduce as _block_reduce
from .blocks import block_replicate as _block_replicate
__all__ = ['extract_array', 'add_array', 'subpixel_indices',
'overlap_slices', 'NoOverlapError', 'PartialOverlapError',
'Cutout2D']
# this can be replaced with PEP562 when the minimum required Python
# version is 3.7
class _ModuleWithDeprecation(types.ModuleType):
    """Module subclass that intercepts access to names moved to
    ``astropy.nddata.blocks`` and emits a deprecation warning,
    delegating to the underscored aliases imported at the top of
    this module."""

    def __getattribute__(self, name):
        # Names that now live in astropy.nddata.blocks.
        deprecated = ('block_reduce', 'block_replicate')
        if name in deprecated:
            warnings.warn(f'{name} was moved to the astropy.nddata.blocks '
                          'module. Please update your import statement.',
                          AstropyDeprecationWarning)
            # Forward to the private alias (e.g. ``_block_reduce``).
            return object.__getattribute__(self, f'_{name}')
        return object.__getattribute__(self, name)


# Swap this module's class so the deprecation shim above takes effect.
sys.modules[__name__].__class__ = _ModuleWithDeprecation
class NoOverlapError(ValueError):
    """Raised when the small and large arrays share no overlapping region."""
class PartialOverlapError(ValueError):
    """Raised when arrays overlap, but only partially."""
def overlap_slices(large_array_shape, small_array_shape, position,
                   mode='partial'):
    """
    Get slices for the overlapping part of a small and a large array.

    Given the position of the small array's center relative to the
    large array, return one tuple of slices per array such that
    ``large_array[slices_large]`` and ``small_array[slices_small]``
    address the overlapping region.  Integer positions are at pixel
    centers; for axes of even length the position is rounded up, so
    extracting two elements centered at ``1`` yields ``[0, 1]``.

    Parameters
    ----------
    large_array_shape : tuple of int or int
        Shape of the large array (an `int` for 1D arrays).
    small_array_shape : tuple of int or int
        Shape of the small array (an `int` for 1D arrays).
    position : tuple of numbers or number
        Position of the small array's center with respect to the large
        array, in the same axis order as the shapes.
    mode : {'partial', 'trim', 'strict'}, optional
        ``'partial'`` accepts any overlap; ``'trim'`` additionally
        clips ``slices_small`` to the overlapping elements;
        ``'strict'`` raises `~astropy.nddata.utils.PartialOverlapError`
        unless the small array is fully contained.  All modes raise
        `~astropy.nddata.utils.NoOverlapError` for disjoint arrays.

    Returns
    -------
    slices_large, slices_small : tuple of slice
        Per-axis slices into the large and small arrays, respectively.
    """
    if mode not in ('partial', 'trim', 'strict'):
        raise ValueError('Mode can be only "partial", "trim", or "strict".')

    # Accept bare scalars for the 1D case.
    if np.isscalar(small_array_shape):
        small_array_shape = (small_array_shape,)
    if np.isscalar(large_array_shape):
        large_array_shape = (large_array_shape,)
    if np.isscalar(position):
        position = (position,)

    if any(~np.isfinite(position)):
        raise ValueError('Input position contains invalid values (NaNs or '
                         'infs).')

    if len(small_array_shape) != len(large_array_shape):
        raise ValueError('"large_array_shape" and "small_array_shape" must '
                         'have the same number of dimensions.')

    if len(small_array_shape) != len(position):
        raise ValueError('"position" must have the same number of dimensions '
                         'as "small_array_shape".')

    # Per-axis extent [lo, hi) of the small array placed at ``position``
    # (ceil rounds even sizes up, matching the documented convention).
    edges_min = [int(np.ceil(pos - extent / 2.))
                 for pos, extent in zip(position, small_array_shape)]
    edges_max = [int(np.ceil(pos + extent / 2.))
                 for pos, extent in zip(position, small_array_shape)]

    # Disjoint on any axis means no overlap at all.
    if any(hi < 0 for hi in edges_max):
        raise NoOverlapError('Arrays do not overlap.')
    if any(lo >= large for lo, large in zip(edges_min, large_array_shape)):
        raise NoOverlapError('Arrays do not overlap.')

    if mode == 'strict':
        if any(lo < 0 for lo in edges_min):
            raise PartialOverlapError('Arrays overlap only partially.')
        if any(hi > large
               for hi, large in zip(edges_max, large_array_shape)):
            raise PartialOverlapError('Arrays overlap only partially.')

    # Clip the small array's extent to the bounds of the large array.
    slices_large = tuple(slice(max(0, lo), min(large, hi))
                         for lo, hi, large in
                         zip(edges_min, edges_max, large_array_shape))

    if mode == 'trim':
        # Only the overlapping elements of the small array.
        slices_small = tuple(slice(0, slc.stop - slc.start)
                             for slc in slices_large)
    else:
        slices_small = tuple(slice(max(0, -lo),
                                   min(large - lo, hi - lo))
                             for lo, hi, large in
                             zip(edges_min, edges_max, large_array_shape))

    return slices_large, slices_small
def extract_array(array_large, shape, position, mode='partial',
                  fill_value=np.nan, return_position=False):
    """
    Extract a smaller array of the given shape and position from a
    larger array.

    Parameters
    ----------
    array_large : `~numpy.ndarray`
        Array to extract from.
    shape : tuple or int
        Shape of the extracted array (an `int` for 1D arrays).
    position : tuple of numbers or number
        Position of the small array's center with respect to
        ``array_large``, in the same axis order as the shape.
    mode : {'partial', 'trim', 'strict'}, optional
        Overlap handling, as in `overlap_slices`.  In ``'partial'``
        mode, non-overlapping elements are set to ``fill_value``; in
        ``'trim'`` mode the result may be smaller than ``shape``.
    fill_value : number, optional
        Fill for non-overlapping pixels in ``'partial'`` mode.  It is
        cast to ``array_large.dtype``; e.g. ``np.nan`` combined with an
        integer array raises `ValueError`.
    return_position : bool, optional
        If `True`, also return ``position`` expressed in the coordinate
        system of the returned array.

    Returns
    -------
    array_small : `~numpy.ndarray`
        The extracted array.
    new_position : tuple
        Only if ``return_position`` is true: ``position`` in the frame
        of ``array_small``.  For partial overlaps it may lie outside
        ``array_small``, in which case indexing with it is invalid.

    Examples
    --------
    We consider a large array with the shape 11x10, from which we extract
    a small array of shape 3x5:

    >>> import numpy as np
    >>> from astropy.nddata.utils import extract_array
    >>> large_array = np.arange(110).reshape((11, 10))
    >>> extract_array(large_array, (3, 5), (7, 7))
    array([[65, 66, 67, 68, 69],
           [75, 76, 77, 78, 79],
           [85, 86, 87, 88, 89]])
    """
    if np.isscalar(shape):
        shape = (shape,)
    if np.isscalar(position):
        position = (position,)

    if mode not in ('partial', 'trim', 'strict'):
        raise ValueError("Valid modes are 'partial', 'trim', and 'strict'.")

    large_slices, small_slices = overlap_slices(array_large.shape, shape,
                                                position, mode=mode)
    extracted_array = array_large[large_slices]
    if return_position:
        new_position = [pos - slc.start
                        for pos, slc in zip(position, large_slices)]

    # Padding is only needed when the requested region extends past the
    # edge of the input array, which can only happen in 'partial' mode.
    if mode == 'partial' and extracted_array.shape != shape:
        padded = np.zeros(shape, dtype=array_large.dtype)
        try:
            padded[:] = fill_value
        except ValueError as exc:
            exc.args += ('fill_value is inconsistent with the data type of '
                         'the input array (e.g., fill_value cannot be set to '
                         'np.nan if the input array has integer type). Please '
                         'change either the input array dtype or the '
                         'fill_value.',)
            raise exc
        padded[small_slices] = array_large[large_slices]
        extracted_array = padded
        if return_position:
            new_position = [pos + slc.start
                            for pos, slc in zip(new_position, small_slices)]

    if return_position:
        return extracted_array, tuple(new_position)
    return extracted_array
def add_array(array_large, array_small, position):
    """
    Add a smaller array at a given position in a larger array.

    Parameters
    ----------
    array_large : `~numpy.ndarray`
        Large array; the addition is performed in place on this array.
    array_small : `~numpy.ndarray`
        Small array to add.  Can be equal to ``array_large`` in size in
        a given dimension, but not larger.
    position : tuple
        Position of the small array's center with respect to the large
        array, in the same axis order as the shapes.

    Returns
    -------
    new_array : `~numpy.ndarray`
        ``array_large`` with ``array_small`` added at ``position``.

    Notes
    -----
    The addition is done in-place.

    Examples
    --------
    We consider a large array of zeros with the shape 5x5 and a small
    array of ones with a shape of 3x3:

    >>> import numpy as np
    >>> from astropy.nddata.utils import add_array
    >>> large_array = np.zeros((5, 5))
    >>> small_array = np.ones((3, 3))
    >>> add_array(large_array, small_array, (1, 2))  # doctest: +FLOAT_CMP
    array([[0., 1., 1., 1., 0.],
           [0., 1., 1., 1., 0.],
           [0., 1., 1., 1., 0.],
           [0., 0., 0., 0., 0.],
           [0., 0., 0., 0., 0.]])
    """
    # Guard clause: the small array must fit along every axis.
    too_large = any(small > large for large, small
                    in zip(array_large.shape, array_small.shape))
    if too_large:
        raise ValueError("Can't add array. Small array too large.")

    large_slices, small_slices = overlap_slices(array_large.shape,
                                                array_small.shape, position)
    array_large[large_slices] += array_small[small_slices]
    return array_large
def subpixel_indices(position, subsampling):
    """
    Convert decimal points to indices, given a subsampling factor.

    The integer part of each position is discarded; the fractional part
    (shifted so that integer positions map to pixel centers) is scaled
    by ``subsampling`` and floored to give the subpixel index.

    Parameters
    ----------
    position : `~numpy.ndarray` or array_like
        Positions in pixels.
    subsampling : int
        Subsampling factor per pixel.

    Returns
    -------
    indices : `~numpy.ndarray`
        The integer subpixel indices corresponding to the input
        positions (returned as a float array).

    Examples
    --------
    If no subsampling is used, then the subpixel indices returned are
    always 0:

    >>> from astropy.nddata.utils import subpixel_indices
    >>> subpixel_indices([1.2, 3.4, 5.6], 1)  # doctest: +FLOAT_CMP
    array([0., 0., 0.])

    With a subsampling of 2, values in the left half of a pixel map to
    subpixel 1 and values in the right half map to subpixel 0:

    >>> subpixel_indices([1.2, 3.4, 5.5], 2)  # doctest: +FLOAT_CMP
    array([1., 1., 0.])
    """
    # Shift by half a pixel so integer positions land on pixel centers,
    # then keep only the fractional part.
    fractional = np.modf(np.asanyarray(position) + 0.5)[0]
    return np.floor(fractional * subsampling)
class Cutout2D:
"""
Create a cutout object from a 2D array.
The returned object will contain a 2D cutout array. If
``copy=False`` (default), the cutout array is a view into the
original ``data`` array, otherwise the cutout array will contain a
copy of the original data.
If a `~astropy.wcs.WCS` object is input, then the returned object
will also contain a copy of the original WCS, but updated for the
cutout array.
For example usage, see :ref:`cutout_images`.
.. warning::
The cutout WCS object does not currently handle cases where the
input WCS object contains distortion lookup tables described in
the `FITS WCS distortion paper
<https://www.atnf.csiro.au/people/mcalabre/WCS/dcs_20040422.pdf>`__.
Parameters
----------
data : `~numpy.ndarray`
The 2D data array from which to extract the cutout array.
position : tuple or `~astropy.coordinates.SkyCoord`
The position of the cutout array's center with respect to
the ``data`` array. The position can be specified either as
a ``(x, y)`` tuple of pixel coordinates or a
`~astropy.coordinates.SkyCoord`, in which case ``wcs`` is a
required input.
size : int, array_like, or `~astropy.units.Quantity`
The size of the cutout array along each axis. If ``size``
is a scalar number or a scalar `~astropy.units.Quantity`,
then a square cutout of ``size`` will be created. If
``size`` has two elements, they should be in ``(ny, nx)``
order. Scalar numbers in ``size`` are assumed to be in
units of pixels. ``size`` can also be a
`~astropy.units.Quantity` object or contain
`~astropy.units.Quantity` objects. Such
`~astropy.units.Quantity` objects must be in pixel or
angular units. For all cases, ``size`` will be converted to
an integer number of pixels, rounding the the nearest
integer. See the ``mode`` keyword for additional details on
the final cutout size.
.. note::
If ``size`` is in angular units, the cutout size is
converted to pixels using the pixel scales along each
axis of the image at the ``CRPIX`` location. Projection
and other non-linear distortions are not taken into
account.
wcs : `~astropy.wcs.WCS`, optional
A WCS object associated with the input ``data`` array. If
``wcs`` is not `None`, then the returned cutout object will
contain a copy of the updated WCS for the cutout data array.
mode : {'trim', 'partial', 'strict'}, optional
The mode used for creating the cutout data array. For the
``'partial'`` and ``'trim'`` modes, a partial overlap of the
cutout array and the input ``data`` array is sufficient.
For the ``'strict'`` mode, the cutout array has to be fully
contained within the ``data`` array, otherwise an
`~astropy.nddata.utils.PartialOverlapError` is raised. In
all modes, non-overlapping arrays will raise a
`~astropy.nddata.utils.NoOverlapError`. In ``'partial'``
mode, positions in the cutout array that do not overlap with
the ``data`` array will be filled with ``fill_value``. In
``'trim'`` mode only the overlapping elements are returned,
thus the resulting cutout array may be smaller than the
requested ``shape``.
fill_value : number, optional
If ``mode='partial'``, the value to fill pixels in the
cutout array that do not overlap with the input ``data``.
``fill_value`` must have the same ``dtype`` as the input
``data`` array.
copy : bool, optional
If `False` (default), then the cutout data will be a view
into the original ``data`` array. If `True`, then the
cutout data will hold a copy of the original ``data`` array.
Attributes
----------
data : 2D `~numpy.ndarray`
The 2D cutout array.
shape : 2 tuple
The ``(ny, nx)`` shape of the cutout array.
shape_input : 2 tuple
The ``(ny, nx)`` shape of the input (original) array.
input_position_cutout : 2 tuple
The (unrounded) ``(x, y)`` position with respect to the cutout
array.
input_position_original : 2 tuple
The original (unrounded) ``(x, y)`` input position (with respect
to the original array).
slices_original : 2 tuple of slice objects
A tuple of slice objects for the minimal bounding box of the
cutout with respect to the original array. For
``mode='partial'``, the slices are for the valid (non-filled)
cutout values.
slices_cutout : 2 tuple of slice objects
A tuple of slice objects for the minimal bounding box of the
cutout with respect to the cutout array. For
``mode='partial'``, the slices are for the valid (non-filled)
cutout values.
xmin_original, ymin_original, xmax_original, ymax_original : float
The minimum and maximum ``x`` and ``y`` indices of the minimal
rectangular region of the cutout array with respect to the
original array. For ``mode='partial'``, the bounding box
indices are for the valid (non-filled) cutout values. These
values are the same as those in `bbox_original`.
xmin_cutout, ymin_cutout, xmax_cutout, ymax_cutout : float
The minimum and maximum ``x`` and ``y`` indices of the minimal
rectangular region of the cutout array with respect to the
cutout array. For ``mode='partial'``, the bounding box indices
are for the valid (non-filled) cutout values. These values are
the same as those in `bbox_cutout`.
wcs : `~astropy.wcs.WCS` or `None`
A WCS object associated with the cutout array if a ``wcs``
was input.
Examples
--------
>>> import numpy as np
>>> from astropy.nddata.utils import Cutout2D
>>> from astropy import units as u
>>> data = np.arange(20.).reshape(5, 4)
>>> cutout1 = Cutout2D(data, (2, 2), (3, 3))
>>> print(cutout1.data) # doctest: +FLOAT_CMP
[[ 5. 6. 7.]
[ 9. 10. 11.]
[13. 14. 15.]]
>>> print(cutout1.center_original)
(2.0, 2.0)
>>> print(cutout1.center_cutout)
(1.0, 1.0)
>>> print(cutout1.origin_original)
(1, 1)
>>> cutout2 = Cutout2D(data, (2, 2), 3)
>>> print(cutout2.data) # doctest: +FLOAT_CMP
[[ 5. 6. 7.]
[ 9. 10. 11.]
[13. 14. 15.]]
>>> size = u.Quantity([3, 3], u.pixel)
>>> cutout3 = Cutout2D(data, (0, 0), size)
>>> print(cutout3.data) # doctest: +FLOAT_CMP
[[0. 1.]
[4. 5.]]
>>> cutout4 = Cutout2D(data, (0, 0), (3 * u.pixel, 3))
>>> print(cutout4.data) # doctest: +FLOAT_CMP
[[0. 1.]
[4. 5.]]
>>> cutout5 = Cutout2D(data, (0, 0), (3, 3), mode='partial')
>>> print(cutout5.data) # doctest: +FLOAT_CMP
[[nan nan nan]
[nan 0. 1.]
[nan 4. 5.]]
"""
    def __init__(self, data, position, size, wcs=None, mode='trim',
                 fill_value=np.nan, copy=False):
        """
        Create a 2D cutout of ``data`` centered at ``position``.

        Parameters
        ----------
        data : array-like
            The 2D input array; converted with `np.asanyarray`.  If it
            has a ``wcs`` attribute and ``wcs`` is not given, that
            attribute is used.
        position : tuple or `~astropy.coordinates.SkyCoord`
            The ``(x, y)`` pixel position of the cutout center, or a
            `SkyCoord` (which requires ``wcs`` and is converted to
            pixels via `skycoord_to_pixel`).
        size : int, array-like, or `~astropy.units.Quantity`
            The cutout size along each axis.  A scalar is used for both
            axes.  Each element may be a plain number (pixels), a
            pixel `~astropy.units.Quantity`, or an angular
            `~astropy.units.Quantity` (requires ``wcs``).
        wcs : `~astropy.wcs.WCS` or `None`, optional
            WCS of ``data``; a deep copy adjusted to the cutout is
            stored as ``self.wcs``.
        mode, fill_value :
            Passed through to `extract_array` / `overlap_slices`.
        copy : bool, optional
            If `True`, copy the cutout instead of keeping a view.
        """
        if wcs is None:
            wcs = getattr(data, 'wcs', None)
        if isinstance(position, SkyCoord):
            if wcs is None:
                raise ValueError('wcs must be input if position is a '
                                 'SkyCoord')
            position = skycoord_to_pixel(position, wcs, mode='all')  # (x, y)
        if np.isscalar(size):
            size = np.repeat(size, 2)
        # special handling for a scalar Quantity
        if isinstance(size, u.Quantity):
            size = np.atleast_1d(size)
            if len(size) == 1:
                size = np.repeat(size, 2)
        if len(size) > 2:
            raise ValueError('size must have at most two elements')
        # target cutout shape in (ny, nx) pixel units
        shape = np.zeros(2).astype(int)
        pixel_scales = None
        # ``size`` can have a mixture of int and Quantity (and even units),
        # so evaluate each axis separately
        for axis, side in enumerate(size):
            if not isinstance(side, u.Quantity):
                shape[axis] = int(np.round(size[axis]))  # pixels
            else:
                if side.unit == u.pixel:
                    shape[axis] = int(np.round(side.value))
                elif side.unit.physical_type == 'angle':
                    if wcs is None:
                        raise ValueError('wcs must be input if any element '
                                         'of size has angular units')
                    # convert the angular size to pixels using the WCS
                    # plate scale; computed lazily only when needed
                    if pixel_scales is None:
                        pixel_scales = u.Quantity(
                            proj_plane_pixel_scales(wcs), wcs.wcs.cunit[axis])
                    shape[axis] = int(np.round(
                        (side / pixel_scales[axis]).decompose()))
                else:
                    raise ValueError('shape can contain Quantities with only '
                                     'pixel or angular units')
        data = np.asanyarray(data)
        # reverse position because extract_array and overlap_slices
        # use (y, x), but keep the input position
        pos_yx = position[::-1]
        cutout_data, input_position_cutout = extract_array(
            data, tuple(shape), pos_yx, mode=mode, fill_value=fill_value,
            return_position=True)
        if copy:
            cutout_data = np.copy(cutout_data)
        self.data = cutout_data
        self.input_position_cutout = input_position_cutout[::-1]  # (x, y)
        slices_original, slices_cutout = overlap_slices(
            data.shape, shape, pos_yx, mode=mode)
        self.slices_original = slices_original
        self.slices_cutout = slices_cutout
        self.shape = self.data.shape
        self.input_position_original = position
        self.shape_input = shape
        # unpack the bounding boxes into scalar convenience attributes
        ((self.ymin_original, self.ymax_original),
         (self.xmin_original, self.xmax_original)) = self.bbox_original
        ((self.ymin_cutout, self.ymax_cutout),
         (self.xmin_cutout, self.xmax_cutout)) = self.bbox_cutout
        # the true origin pixel of the cutout array, including any
        # filled cutout values
        self._origin_original_true = (
            self.origin_original[0] - self.slices_cutout[1].start,
            self.origin_original[1] - self.slices_cutout[0].start)
        if wcs is not None:
            # shift the reference pixel so the copied WCS describes the
            # cutout's pixel grid instead of the original array's
            self.wcs = deepcopy(wcs)
            self.wcs.wcs.crpix -= self._origin_original_true
            self.wcs.array_shape = self.data.shape
            if wcs.sip is not None:
                self.wcs.sip = Sip(wcs.sip.a, wcs.sip.b,
                                   wcs.sip.ap, wcs.sip.bp,
                                   wcs.sip.crpix - self._origin_original_true)
        else:
            self.wcs = None
def to_original_position(self, cutout_position):
"""
Convert an ``(x, y)`` position in the cutout array to the original
``(x, y)`` position in the original large array.
Parameters
----------
cutout_position : tuple
The ``(x, y)`` pixel position in the cutout array.
Returns
-------
original_position : tuple
The corresponding ``(x, y)`` pixel position in the original
large array.
"""
return tuple(cutout_position[i] + self.origin_original[i]
for i in [0, 1])
def to_cutout_position(self, original_position):
"""
Convert an ``(x, y)`` position in the original large array to
the ``(x, y)`` position in the cutout array.
Parameters
----------
original_position : tuple
The ``(x, y)`` pixel position in the original large array.
Returns
-------
cutout_position : tuple
The corresponding ``(x, y)`` pixel position in the cutout
array.
"""
return tuple(original_position[i] - self.origin_original[i]
for i in [0, 1])
def plot_on_original(self, ax=None, fill=False, **kwargs):
"""
Plot the cutout region on a matplotlib Axes instance.
Parameters
----------
ax : `matplotlib.axes.Axes` instance, optional
If `None`, then the current `matplotlib.axes.Axes` instance
is used.
fill : bool, optional
Set whether to fill the cutout patch. The default is
`False`.
kwargs : optional
Any keyword arguments accepted by `matplotlib.patches.Patch`.
Returns
-------
ax : `matplotlib.axes.Axes` instance
The matplotlib Axes instance constructed in the method if
``ax=None``. Otherwise the output ``ax`` is the same as the
input ``ax``.
"""
import matplotlib.pyplot as plt
import matplotlib.patches as mpatches
kwargs['fill'] = fill
if ax is None:
ax = plt.gca()
height, width = self.shape
hw, hh = width / 2., height / 2.
pos_xy = self.position_original - np.array([hw, hh])
patch = mpatches.Rectangle(pos_xy, width, height, 0., **kwargs)
ax.add_patch(patch)
return ax
@staticmethod
def _calc_center(slices):
"""
Calculate the center position. The center position will be
fractional for even-sized arrays. For ``mode='partial'``, the
central position is calculated for the valid (non-filled) cutout
values.
"""
return tuple(0.5 * (slices[i].start + slices[i].stop - 1)
for i in [1, 0])
@staticmethod
def _calc_bbox(slices):
"""
Calculate a minimal bounding box in the form ``((ymin, ymax),
(xmin, xmax))``. Note these are pixel locations, not slice
indices. For ``mode='partial'``, the bounding box indices are
for the valid (non-filled) cutout values.
"""
# (stop - 1) to return the max pixel location, not the slice index
return ((slices[0].start, slices[0].stop - 1),
(slices[1].start, slices[1].stop - 1))
@lazyproperty
def origin_original(self):
"""
The ``(x, y)`` index of the origin pixel of the cutout with
respect to the original array. For ``mode='partial'``, the
origin pixel is calculated for the valid (non-filled) cutout
values.
"""
return (self.slices_original[1].start, self.slices_original[0].start)
@lazyproperty
def origin_cutout(self):
"""
The ``(x, y)`` index of the origin pixel of the cutout with
respect to the cutout array. For ``mode='partial'``, the origin
pixel is calculated for the valid (non-filled) cutout values.
"""
return (self.slices_cutout[1].start, self.slices_cutout[0].start)
@staticmethod
def _round(a):
"""
Round the input to the nearest integer.
If two integers are equally close, the value is rounded up.
Note that this is different from `np.round`, which rounds to the
nearest even number.
"""
return int(np.floor(a + 0.5))
@lazyproperty
def position_original(self):
"""
The ``(x, y)`` position index (rounded to the nearest pixel) in
the original array.
"""
return (self._round(self.input_position_original[0]),
self._round(self.input_position_original[1]))
@lazyproperty
def position_cutout(self):
"""
The ``(x, y)`` position index (rounded to the nearest pixel) in
the cutout array.
"""
return (self._round(self.input_position_cutout[0]),
self._round(self.input_position_cutout[1]))
@lazyproperty
def center_original(self):
"""
The central ``(x, y)`` position of the cutout array with respect
to the original array. For ``mode='partial'``, the central
position is calculated for the valid (non-filled) cutout values.
"""
return self._calc_center(self.slices_original)
@lazyproperty
def center_cutout(self):
"""
The central ``(x, y)`` position of the cutout array with respect
to the cutout array. For ``mode='partial'``, the central
position is calculated for the valid (non-filled) cutout values.
"""
return self._calc_center(self.slices_cutout)
@lazyproperty
def bbox_original(self):
"""
The bounding box ``((ymin, ymax), (xmin, xmax))`` of the minimal
rectangular region of the cutout array with respect to the
original array. For ``mode='partial'``, the bounding box
indices are for the valid (non-filled) cutout values.
"""
return self._calc_bbox(self.slices_original)
@lazyproperty
def bbox_cutout(self):
"""
The bounding box ``((ymin, ymax), (xmin, xmax))`` of the minimal
rectangular region of the cutout array with respect to the
cutout array. For ``mode='partial'``, the bounding box indices
are for the valid (non-filled) cutout values.
"""
return self._calc_bbox(self.slices_cutout)
| bsd-3-clause |
andyjost/slang | scripts/neural_nets.py | 1 | 1536 | #!/usr/bin/env python
from itertools import *
from sklearn import datasets
from sklearn import preprocessing
from sklearn import svm
from sklearn.metrics import classification_report
from sklearn.model_selection import GridSearchCV
from sklearn.naive_bayes import *
from sklearn.neural_network import MLPClassifier
import numpy as np
import warnings
warnings.simplefilter('ignore')
def report(classifier, data, target):
    """Fit ``classifier`` on ``data``/``target``, predict on the same
    data, and print how many points were mislabeled.

    Parameters
    ----------
    classifier : estimator
        Any scikit-learn-style object with ``fit`` and ``predict``.
    data : ndarray of shape (n_samples, n_features)
        Feature matrix.
    target : ndarray of shape (n_samples,)
        True labels.
    """
    y_pred = classifier.fit(data, target).predict(data)
    n_missed = (target != y_pred).sum()
    # Parenthesized single-argument print is valid under both Python 2
    # and Python 3; the original bare ``print`` statement is Py2-only.
    print("%s: mislabeled %d/%d points." % (
        type(classifier).__name__, n_missed, data.shape[0]
    ))
# Load the (raw-dumped) feature matrix and label vector from disk.
data = np.load('data.small.np').astype(dtype=float)
target = np.load('target.small.np').astype(dtype=float)

# All prints below use the parenthesized single-argument form, which is
# valid in both Python 2 and Python 3 (the original bare ``print``
# statements were Python 2 only); ``print('')`` emits the same blank
# line as a bare Py2 ``print``.
print('')
print('=' * 80)
print('Naive Bayes'.center(80))
print('=' * 80)
nb = MultinomialNB()
report(nb, data, target)

print('')
print('=' * 80)
print('SVM'.center(80))
print('=' * 80)
classifier = svm.SVC()
report(classifier, data, target)

print('')
print('=' * 80)
print('Neural Net (scaled)'.center(80))
print('=' * 80)
# Standardize features first; MLPs are sensitive to feature scale.
scaled_data = preprocessing.scale(data)
lpc = MLPClassifier(solver='lbfgs', alpha=1e-5, hidden_layer_sizes=(4, 6),
                    random_state=1)
report(lpc, scaled_data, target)

print('')
print('=' * 80)
print('Neural Net w/ Hyperparameter Tuning'.center(80))
print('=' * 80)
# Grid-search over regularization strength and hidden-layer geometry.
tuned_parameters = [{
    'alpha': 10 ** -np.arange(1., 7.),
    'hidden_layer_sizes': list(product(range(1, 10), np.arange(1, 10))),
}]
clf = GridSearchCV(lpc, tuned_parameters, cv=2, n_jobs=4,
                   scoring='precision_macro')
report(clf, data, target)
| gpl-3.0 |
amolkahat/pandas | pandas/tests/arithmetic/test_numeric.py | 1 | 32501 | # -*- coding: utf-8 -*-
# Arithmetc tests for DataFrame/Series/Index/Array classes that should
# behave identically.
# Specifically for numeric dtypes
from decimal import Decimal
import operator
import pytest
import numpy as np
import pandas as pd
import pandas.util.testing as tm
from pandas.compat import PY3, Iterable
from pandas.core import ops
from pandas import Timedelta, Series, Index, TimedeltaIndex
# ------------------------------------------------------------------
# Comparisons
class TestNumericComparisons(object):
    """Comparison ops between numeric pandas objects and scalars/arrays."""
    def test_operator_series_comparison_zerorank(self):
        # GH#13006: comparing against a zero-rank numpy scalar should
        # behave exactly like comparing against a Python scalar.
        result = np.float64(0) > pd.Series([1, 2, 3])
        expected = 0.0 > pd.Series([1, 2, 3])
        tm.assert_series_equal(result, expected)
        result = pd.Series([1, 2, 3]) < np.float64(0)
        expected = pd.Series([1, 2, 3]) < 0.0
        tm.assert_series_equal(result, expected)
        # NOTE(review): ``result`` compares against Series([0, 1, 2]) while
        # ``expected`` uses Series([1, 2, 3]); both are all-False so the
        # assertion passes, but this looks like a copy-paste slip -- confirm.
        result = np.array([0, 1, 2])[0] > pd.Series([0, 1, 2])
        expected = 0.0 > pd.Series([1, 2, 3])
        tm.assert_series_equal(result, expected)
    def test_df_numeric_cmp_dt64_raises(self):
        # GH#8932, GH#22163: ordered comparisons between a numeric frame
        # and a Timestamp raise TypeError; ==/!= evaluate elementwise.
        ts = pd.Timestamp.now()
        df = pd.DataFrame({'x': range(5)})
        with pytest.raises(TypeError):
            df > ts
        with pytest.raises(TypeError):
            df < ts
        with pytest.raises(TypeError):
            ts < df
        with pytest.raises(TypeError):
            ts > df
        assert not (df == ts).any().any()
        assert (df != ts).all().all()
    def test_compare_invalid(self):
        # GH#8058
        # ops testing
        a = pd.Series(np.random.randn(5), name=0)
        b = pd.Series(np.random.randn(5))
        b.name = pd.Timestamp('2000-01-01')
        tm.assert_series_equal(a / b, 1 / (b / a))
# ------------------------------------------------------------------
# Numeric dtypes Arithmetic with Timedelta Scalar
class TestNumericArraylikeArithmeticWithTimedeltaLike(object):
    """Arithmetic between numeric array-likes and timedelta64 scalars/arrays.

    ``numeric_idx``, ``box`` and ``three_days`` are pytest fixtures defined
    elsewhere in the test suite (conftest) -- not visible in this file.
    """
    # TODO: also check name retentention
    @pytest.mark.parametrize('box_cls', [np.array, pd.Index, pd.Series])
    @pytest.mark.parametrize('left', [
        pd.RangeIndex(10, 40, 10)] + [cls([10, 20, 30], dtype=dtype)
                                      for dtype in ['i1', 'i2', 'i4', 'i8',
                                                    'u1', 'u2', 'u4', 'u8',
                                                    'f2', 'f4', 'f8']
                                      for cls in [pd.Series, pd.Index]],
        ids=lambda x: type(x).__name__ + str(x.dtype))
    def test_mul_td64arr(self, left, box_cls):
        # GH#22390: numeric * timedelta64 array yields timedelta64
        right = np.array([1, 2, 3], dtype='m8[s]')
        right = box_cls(right)
        expected = pd.TimedeltaIndex(['10s', '40s', '90s'])
        if isinstance(left, pd.Series) or box_cls is pd.Series:
            expected = pd.Series(expected)
        result = left * right
        tm.assert_equal(result, expected)
        result = right * left
        tm.assert_equal(result, expected)
    # TODO: also check name retentention
    @pytest.mark.parametrize('box_cls', [np.array, pd.Index, pd.Series])
    @pytest.mark.parametrize('left', [
        pd.RangeIndex(10, 40, 10)] + [cls([10, 20, 30], dtype=dtype)
                                      for dtype in ['i1', 'i2', 'i4', 'i8',
                                                    'u1', 'u2', 'u4', 'u8',
                                                    'f2', 'f4', 'f8']
                                      for cls in [pd.Series, pd.Index]],
        ids=lambda x: type(x).__name__ + str(x.dtype))
    def test_div_td64arr(self, left, box_cls):
        # GH#22390: timedelta64 / numeric works; numeric / timedelta64 raises
        right = np.array([10, 40, 90], dtype='m8[s]')
        right = box_cls(right)
        expected = pd.TimedeltaIndex(['1s', '2s', '3s'])
        if isinstance(left, pd.Series) or box_cls is pd.Series:
            expected = pd.Series(expected)
        result = right / left
        tm.assert_equal(result, expected)
        result = right // left
        tm.assert_equal(result, expected)
        with pytest.raises(TypeError):
            left / right
        with pytest.raises(TypeError):
            left // right
    # TODO: de-duplicate with test_numeric_arr_mul_tdscalar
    def test_ops_series(self):
        # regression test for G#H8813
        td = Timedelta('1 day')
        other = pd.Series([1, 2])
        expected = pd.Series(pd.to_timedelta(['1 day', '2 days']))
        tm.assert_series_equal(expected, td * other)
        tm.assert_series_equal(expected, other * td)
    # TODO: also test non-nanosecond timedelta64 and Tick objects;
    # see test_numeric_arr_rdiv_tdscalar for note on these failing
    @pytest.mark.parametrize('scalar_td', [
        Timedelta(days=1),
        Timedelta(days=1).to_timedelta64(),
        Timedelta(days=1).to_pytimedelta()],
        ids=lambda x: type(x).__name__)
    def test_numeric_arr_mul_tdscalar(self, scalar_td, numeric_idx, box):
        # GH#19333: multiplication commutes with all Timedelta-like scalars
        index = numeric_idx
        expected = pd.timedelta_range('0 days', '4 days')
        index = tm.box_expected(index, box)
        expected = tm.box_expected(expected, box)
        result = index * scalar_td
        tm.assert_equal(result, expected)
        commute = scalar_td * index
        tm.assert_equal(commute, expected)
    def test_numeric_arr_rdiv_tdscalar(self, three_days, numeric_idx, box):
        index = numeric_idx[1:3]
        # known-broken combinations: non-nanosecond timedelta64 and
        # offsets.Tick division are not implemented outside Index
        broken = (isinstance(three_days, np.timedelta64) and
                  three_days.dtype != 'm8[ns]')
        broken = broken or isinstance(three_days, pd.offsets.Tick)
        if box is not pd.Index and broken:
            # np.timedelta64(3, 'D') / 2 == np.timedelta64(1, 'D')
            raise pytest.xfail("timedelta64 not converted to nanos; "
                               "Tick division not implemented")
        expected = TimedeltaIndex(['3 Days', '36 Hours'])
        index = tm.box_expected(index, box)
        expected = tm.box_expected(expected, box)
        result = three_days / index
        tm.assert_equal(result, expected)
        with pytest.raises(TypeError):
            index / three_days
# ------------------------------------------------------------------
# Arithmetic
class TestDivisionByZero(object):
    """Division, floor-division, modulo and divmod by zero.

    ``zero`` and ``numeric_idx`` are pytest fixtures supplied by the test
    suite's conftest (not visible in this file) -- presumably ``zero``
    covers scalar/array flavors of zero and ``numeric_idx`` the numeric
    Index types; confirm against the conftest.
    """
    def test_div_zero(self, zero, numeric_idx):
        idx = numeric_idx
        expected = pd.Index([np.nan, np.inf, np.inf, np.inf, np.inf],
                            dtype=np.float64)
        result = idx / zero
        tm.assert_index_equal(result, expected)
        ser_compat = Series(idx).astype('i8') / np.array(zero).astype('i8')
        tm.assert_series_equal(ser_compat, Series(result))
    def test_floordiv_zero(self, zero, numeric_idx):
        idx = numeric_idx
        expected = pd.Index([np.nan, np.inf, np.inf, np.inf, np.inf],
                            dtype=np.float64)
        result = idx // zero
        tm.assert_index_equal(result, expected)
        ser_compat = Series(idx).astype('i8') // np.array(zero).astype('i8')
        tm.assert_series_equal(ser_compat, Series(result))
    def test_mod_zero(self, zero, numeric_idx):
        idx = numeric_idx
        expected = pd.Index([np.nan, np.nan, np.nan, np.nan, np.nan],
                            dtype=np.float64)
        result = idx % zero
        tm.assert_index_equal(result, expected)
        ser_compat = Series(idx).astype('i8') % np.array(zero).astype('i8')
        tm.assert_series_equal(ser_compat, Series(result))
    def test_divmod_zero(self, zero, numeric_idx):
        idx = numeric_idx
        exleft = pd.Index([np.nan, np.inf, np.inf, np.inf, np.inf],
                          dtype=np.float64)
        exright = pd.Index([np.nan, np.nan, np.nan, np.nan, np.nan],
                           dtype=np.float64)
        result = divmod(idx, zero)
        tm.assert_index_equal(result[0], exleft)
        tm.assert_index_equal(result[1], exright)
    # ------------------------------------------------------------------
    @pytest.mark.parametrize('dtype2', [
        np.int64, np.int32, np.int16, np.int8,
        np.float64, np.float32, np.float16,
        np.uint64, np.uint32, np.uint16, np.uint8])
    @pytest.mark.parametrize('dtype1', [np.int64, np.float64, np.uint64])
    def test_ser_div_ser(self, dtype1, dtype2):
        # no longer do integer div for any ops, but deal with the 0's
        first = Series([3, 4, 5, 8], name='first').astype(dtype1)
        second = Series([0, 0, 0, 3], name='second').astype(dtype2)
        with np.errstate(all='ignore'):
            expected = Series(first.values.astype(np.float64) / second.values,
                              dtype='float64', name=None)
        expected.iloc[0:3] = np.inf
        result = first / second
        tm.assert_series_equal(result, expected)
        assert not result.equals(second / first)
    def test_rdiv_zero_compat(self):
        # GH#8674
        zero_array = np.array([0] * 5)
        data = np.random.randn(5)
        expected = Series([0.] * 5)
        result = zero_array / Series(data)
        tm.assert_series_equal(result, expected)
        result = Series(zero_array) / data
        tm.assert_series_equal(result, expected)
        result = Series(zero_array) / Series(data)
        tm.assert_series_equal(result, expected)
    def test_div_zero_inf_signs(self):
        # GH#9144, inf signing
        ser = Series([-1, 0, 1], name='first')
        expected = Series([-np.inf, np.nan, np.inf], name='first')
        result = ser / 0
        tm.assert_series_equal(result, expected)
    def test_rdiv_zero(self):
        # GH#9144
        ser = Series([-1, 0, 1], name='first')
        expected = Series([0.0, np.nan, 0.0], name='first')
        result = 0 / ser
        tm.assert_series_equal(result, expected)
    def test_floordiv_div(self):
        # GH#9144
        ser = Series([-1, 0, 1], name='first')
        result = ser // 0
        expected = Series([-np.inf, np.nan, np.inf], name='first')
        tm.assert_series_equal(result, expected)
    def test_df_div_zero_df(self):
        # integer div, but deal with the 0's (GH#9144)
        df = pd.DataFrame({'first': [3, 4, 5, 8], 'second': [0, 0, 0, 3]})
        result = df / df
        first = pd.Series([1.0, 1.0, 1.0, 1.0])
        second = pd.Series([np.nan, np.nan, np.nan, 1])
        expected = pd.DataFrame({'first': first, 'second': second})
        tm.assert_frame_equal(result, expected)
    def test_df_div_zero_array(self):
        # integer div, but deal with the 0's (GH#9144)
        df = pd.DataFrame({'first': [3, 4, 5, 8], 'second': [0, 0, 0, 3]})
        first = pd.Series([1.0, 1.0, 1.0, 1.0])
        second = pd.Series([np.nan, np.nan, np.nan, 1])
        expected = pd.DataFrame({'first': first, 'second': second})
        with np.errstate(all='ignore'):
            arr = df.values.astype('float') / df.values
        result = pd.DataFrame(arr, index=df.index,
                              columns=df.columns)
        tm.assert_frame_equal(result, expected)
    def test_df_div_zero_int(self):
        # integer div, but deal with the 0's (GH#9144)
        df = pd.DataFrame({'first': [3, 4, 5, 8], 'second': [0, 0, 0, 3]})
        result = df / 0
        expected = pd.DataFrame(np.inf, index=df.index, columns=df.columns)
        expected.iloc[0:3, 1] = np.nan
        tm.assert_frame_equal(result, expected)
        # numpy has a slightly different (wrong) treatment
        with np.errstate(all='ignore'):
            arr = df.values.astype('float64') / 0
        result2 = pd.DataFrame(arr, index=df.index,
                               columns=df.columns)
        tm.assert_frame_equal(result2, expected)
    def test_df_div_zero_series_does_not_commute(self):
        # integer div, but deal with the 0's (GH#9144)
        df = pd.DataFrame(np.random.randn(10, 5))
        ser = df[0]
        res = ser / df
        res2 = df / ser
        assert not res.fillna(0).equals(res2.fillna(0))
    # ------------------------------------------------------------------
    # Mod By Zero
    def test_df_mod_zero_df(self):
        # GH#3590, modulo as ints
        df = pd.DataFrame({'first': [3, 4, 5, 8], 'second': [0, 0, 0, 3]})
        # this is technically wrong, as the integer portion is coerced to float
        # ###
        first = pd.Series([0, 0, 0, 0], dtype='float64')
        second = pd.Series([np.nan, np.nan, np.nan, 0])
        expected = pd.DataFrame({'first': first, 'second': second})
        result = df % df
        tm.assert_frame_equal(result, expected)
    def test_df_mod_zero_array(self):
        # GH#3590, modulo as ints
        df = pd.DataFrame({'first': [3, 4, 5, 8], 'second': [0, 0, 0, 3]})
        # this is technically wrong, as the integer portion is coerced to float
        # ###
        first = pd.Series([0, 0, 0, 0], dtype='float64')
        second = pd.Series([np.nan, np.nan, np.nan, 0])
        expected = pd.DataFrame({'first': first, 'second': second})
        # numpy has a slightly different (wrong) treatment
        with np.errstate(all='ignore'):
            arr = df.values % df.values
        result2 = pd.DataFrame(arr, index=df.index,
                               columns=df.columns, dtype='float64')
        result2.iloc[0:3, 1] = np.nan
        tm.assert_frame_equal(result2, expected)
    def test_df_mod_zero_int(self):
        # GH#3590, modulo as ints
        df = pd.DataFrame({'first': [3, 4, 5, 8], 'second': [0, 0, 0, 3]})
        result = df % 0
        expected = pd.DataFrame(np.nan, index=df.index, columns=df.columns)
        tm.assert_frame_equal(result, expected)
        # numpy has a slightly different (wrong) treatment
        with np.errstate(all='ignore'):
            arr = df.values.astype('float64') % 0
        result2 = pd.DataFrame(arr, index=df.index, columns=df.columns)
        tm.assert_frame_equal(result2, expected)
    def test_df_mod_zero_series_does_not_commute(self):
        # GH#3590, modulo as ints
        # not commutative with series
        df = pd.DataFrame(np.random.randn(10, 5))
        ser = df[0]
        res = ser % df
        res2 = df % ser
        assert not res.fillna(0).equals(res2.fillna(0))
class TestMultiplicationDivision(object):
    """Multiplication/division ops for non-datetimelike dtypes.

    ``numeric_idx`` and ``box`` are pytest fixtures from the suite's
    conftest (not visible in this file).
    """
    # __mul__, __rmul__, __div__, __rdiv__, __floordiv__, __rfloordiv__
    # for non-timestamp/timedelta/period dtypes
    @pytest.mark.parametrize('box', [
        pytest.param(pd.Index,
                     marks=pytest.mark.xfail(reason="Index.__div__ always "
                                                    "raises",
                                             raises=TypeError, strict=True)),
        pd.Series,
        pd.DataFrame
    ], ids=lambda x: x.__name__)
    def test_divide_decimal(self, box):
        # resolves issue GH#9787
        ser = Series([Decimal(10)])
        expected = Series([Decimal(5)])
        ser = tm.box_expected(ser, box)
        expected = tm.box_expected(expected, box)
        result = ser / Decimal(2)
        tm.assert_equal(result, expected)
        result = ser // Decimal(2)
        tm.assert_equal(result, expected)
    def test_div_equiv_binop(self):
        # Test Series.div as well as Series.__div__
        # float/integer issue
        # GH#7785
        first = Series([1, 0], name='first')
        second = Series([-0.01, -0.02], name='second')
        expected = Series([-0.01, -np.inf])
        result = second.div(first)
        tm.assert_series_equal(result, expected, check_names=False)
        result = second / first
        tm.assert_series_equal(result, expected)
    def test_div_int(self, numeric_idx):
        # truediv under PY3
        idx = numeric_idx
        result = idx / 1
        expected = idx
        if PY3:
            expected = expected.astype('float64')
        tm.assert_index_equal(result, expected)
        result = idx / 2
        # NOTE(review): the ``if PY3`` astype below is immediately
        # overwritten by the unconditional reassignment -- dead code.
        if PY3:
            expected = expected.astype('float64')
        expected = Index(idx.values / 2)
        tm.assert_index_equal(result, expected)
    @pytest.mark.parametrize('op', [operator.mul, ops.rmul, operator.floordiv])
    def test_mul_int_identity(self, op, numeric_idx, box):
        idx = numeric_idx
        idx = tm.box_expected(idx, box)
        result = op(idx, 1)
        tm.assert_equal(result, idx)
    def test_mul_int_array(self, numeric_idx):
        idx = numeric_idx
        didx = idx * idx
        result = idx * np.array(5, dtype='int64')
        tm.assert_index_equal(result, idx * 5)
        arr_dtype = 'uint64' if isinstance(idx, pd.UInt64Index) else 'int64'
        result = idx * np.arange(5, dtype=arr_dtype)
        tm.assert_index_equal(result, didx)
    def test_mul_int_series(self, numeric_idx):
        idx = numeric_idx
        didx = idx * idx
        arr_dtype = 'uint64' if isinstance(idx, pd.UInt64Index) else 'int64'
        result = idx * Series(np.arange(5, dtype=arr_dtype))
        tm.assert_series_equal(result, Series(didx))
    def test_mul_float_series(self, numeric_idx):
        idx = numeric_idx
        rng5 = np.arange(5, dtype='float64')
        result = idx * Series(rng5 + 0.1)
        expected = Series(rng5 * (rng5 + 0.1))
        tm.assert_series_equal(result, expected)
    def test_mul_index(self, numeric_idx):
        # in general not true for RangeIndex
        idx = numeric_idx
        if not isinstance(idx, pd.RangeIndex):
            result = idx * idx
            tm.assert_index_equal(result, idx ** 2)
    def test_mul_datelike_raises(self, numeric_idx):
        idx = numeric_idx
        with pytest.raises(TypeError):
            idx * pd.date_range('20130101', periods=5)
    def test_mul_size_mismatch_raises(self, numeric_idx):
        idx = numeric_idx
        with pytest.raises(ValueError):
            idx * idx[0:3]
        with pytest.raises(ValueError):
            idx * np.array([1, 2])
    @pytest.mark.parametrize('op', [operator.pow, ops.rpow])
    def test_pow_float(self, op, numeric_idx, box):
        # test power calculations both ways, GH#14973
        idx = numeric_idx
        expected = pd.Float64Index(op(idx.values, 2.0))
        idx = tm.box_expected(idx, box)
        expected = tm.box_expected(expected, box)
        result = op(idx, 2.0)
        tm.assert_equal(result, expected)
    def test_modulo(self, numeric_idx, box):
        # GH#9244
        idx = numeric_idx
        expected = Index(idx.values % 2)
        idx = tm.box_expected(idx, box)
        expected = tm.box_expected(expected, box)
        result = idx % 2
        tm.assert_equal(result, expected)
    def test_divmod_scalar(self, numeric_idx):
        idx = numeric_idx
        result = divmod(idx, 2)
        with np.errstate(all='ignore'):
            div, mod = divmod(idx.values, 2)
        expected = Index(div), Index(mod)
        for r, e in zip(result, expected):
            tm.assert_index_equal(r, e)
    def test_divmod_ndarray(self, numeric_idx):
        idx = numeric_idx
        other = np.ones(idx.values.shape, dtype=idx.values.dtype) * 2
        result = divmod(idx, other)
        with np.errstate(all='ignore'):
            div, mod = divmod(idx.values, other)
        expected = Index(div), Index(mod)
        for r, e in zip(result, expected):
            tm.assert_index_equal(r, e)
    def test_divmod_series(self, numeric_idx):
        idx = numeric_idx
        other = np.ones(idx.values.shape, dtype=idx.values.dtype) * 2
        result = divmod(idx, Series(other))
        with np.errstate(all='ignore'):
            div, mod = divmod(idx.values, other)
        expected = Series(div), Series(mod)
        for r, e in zip(result, expected):
            tm.assert_series_equal(r, e)
    @pytest.mark.parametrize('other', [np.nan, 7, -23, 2.718, -3.14, np.inf])
    def test_ops_np_scalar(self, other):
        vals = np.random.randn(5, 3)
        f = lambda x: pd.DataFrame(x, index=list('ABCDE'),
                                   columns=['jim', 'joe', 'jolie'])
        df = f(vals)
        tm.assert_frame_equal(df / np.array(other), f(vals / other))
        tm.assert_frame_equal(np.array(other) * df, f(vals * other))
        tm.assert_frame_equal(df + np.array(other), f(vals + other))
        tm.assert_frame_equal(np.array(other) - df, f(other - vals))
    # TODO: This came from series.test.test_operators, needs cleanup
    def test_operators_frame(self):
        # rpow does not work with DataFrame
        ts = tm.makeTimeSeries()
        ts.name = 'ts'
        df = pd.DataFrame({'A': ts})
        tm.assert_series_equal(ts + ts, ts + df['A'],
                               check_names=False)
        tm.assert_series_equal(ts ** ts, ts ** df['A'],
                               check_names=False)
        tm.assert_series_equal(ts < ts, ts < df['A'],
                               check_names=False)
        tm.assert_series_equal(ts / ts, ts / df['A'],
                               check_names=False)
class TestAdditionSubtraction(object):
    """Addition/subtraction ops for non-datetimelike dtypes."""
    # __add__, __sub__, __radd__, __rsub__, __iadd__, __isub__
    # for non-timestamp/timedelta/period dtypes
    # TODO: This came from series.test.test_operators, needs cleanup
    def test_arith_ops_df_compat(self):
        # GH#1134: Series ops align on index, producing NaN for
        # non-overlapping labels, consistently with DataFrame ops.
        s1 = pd.Series([1, 2, 3], index=list('ABC'), name='x')
        s2 = pd.Series([2, 2, 2], index=list('ABD'), name='x')
        exp = pd.Series([3.0, 4.0, np.nan, np.nan],
                        index=list('ABCD'), name='x')
        tm.assert_series_equal(s1 + s2, exp)
        tm.assert_series_equal(s2 + s1, exp)
        exp = pd.DataFrame({'x': [3.0, 4.0, np.nan, np.nan]},
                           index=list('ABCD'))
        tm.assert_frame_equal(s1.to_frame() + s2.to_frame(), exp)
        tm.assert_frame_equal(s2.to_frame() + s1.to_frame(), exp)
        # different length
        s3 = pd.Series([1, 2, 3], index=list('ABC'), name='x')
        s4 = pd.Series([2, 2, 2, 2], index=list('ABCD'), name='x')
        exp = pd.Series([3, 4, 5, np.nan],
                        index=list('ABCD'), name='x')
        tm.assert_series_equal(s3 + s4, exp)
        tm.assert_series_equal(s4 + s3, exp)
        exp = pd.DataFrame({'x': [3, 4, 5, np.nan]},
                           index=list('ABCD'))
        tm.assert_frame_equal(s3.to_frame() + s4.to_frame(), exp)
        tm.assert_frame_equal(s4.to_frame() + s3.to_frame(), exp)
    # TODO: This came from series.test.test_operators, needs cleanup
    def test_series_frame_radd_bug(self):
        # GH#353: string radd broadcasts over Series/DataFrame
        vals = pd.Series(tm.rands_array(5, 10))
        result = 'foo_' + vals
        expected = vals.map(lambda x: 'foo_' + x)
        tm.assert_series_equal(result, expected)
        frame = pd.DataFrame({'vals': vals})
        result = 'foo_' + frame
        expected = pd.DataFrame({'vals': vals.map(lambda x: 'foo_' + x)})
        tm.assert_frame_equal(result, expected)
        ts = tm.makeTimeSeries()
        ts.name = 'ts'
        # really raise this time
        now = pd.Timestamp.now().to_pydatetime()
        with pytest.raises(TypeError):
            now + ts
        with pytest.raises(TypeError):
            ts + now
    # TODO: This came from series.test.test_operators, needs cleanup
    def test_datetime64_with_index(self):
        # arithmetic integer ops with an index
        ser = pd.Series(np.random.randn(5))
        expected = ser - ser.index.to_series()
        result = ser - ser.index
        tm.assert_series_equal(result, expected)
        # GH#4629
        # arithmetic datetime64 ops with an index
        ser = pd.Series(pd.date_range('20130101', periods=5),
                        index=pd.date_range('20130101', periods=5))
        expected = ser - ser.index.to_series()
        result = ser - ser.index
        tm.assert_series_equal(result, expected)
        with pytest.raises(TypeError):
            # GH#18850
            result = ser - ser.index.to_period()
        df = pd.DataFrame(np.random.randn(5, 2),
                          index=pd.date_range('20130101', periods=5))
        df['date'] = pd.Timestamp('20130102')
        df['expected'] = df['date'] - df.index.to_series()
        df['result'] = df['date'] - df.index
        tm.assert_series_equal(df['result'], df['expected'], check_names=False)
    # TODO: taken from tests.frame.test_operators, needs cleanup
    def test_frame_operators(self):
        seriesd = tm.getSeriesData()
        frame = pd.DataFrame(seriesd)
        frame2 = pd.DataFrame(seriesd, columns=['D', 'C', 'B', 'A'])
        garbage = np.random.random(4)
        colSeries = pd.Series(garbage, index=np.array(frame.columns))
        idSum = frame + frame
        seriesSum = frame + colSeries
        # frame + frame doubles every non-NaN value elementwise
        for col, series in idSum.items():
            for idx, val in series.items():
                origVal = frame[col][idx] * 2
                if not np.isnan(val):
                    assert val == origVal
                else:
                    assert np.isnan(origVal)
        # frame + Series broadcasts the Series across columns
        for col, series in seriesSum.items():
            for idx, val in series.items():
                origVal = frame[col][idx] + colSeries[col]
                if not np.isnan(val):
                    assert val == origVal
                else:
                    assert np.isnan(origVal)
        added = frame2 + frame2
        expected = frame2 * 2
        tm.assert_frame_equal(added, expected)
        df = pd.DataFrame({'a': ['a', None, 'b']})
        tm.assert_frame_equal(df + df,
                              pd.DataFrame({'a': ['aa', np.nan, 'bb']}))
        # Test for issue #10181
        for dtype in ('float', 'int64'):
            frames = [
                pd.DataFrame(dtype=dtype),
                pd.DataFrame(columns=['A'], dtype=dtype),
                pd.DataFrame(index=[0], dtype=dtype),
            ]
            for df in frames:
                assert (df + df).equals(df)
                tm.assert_frame_equal(df + df, df)
    # TODO: taken from tests.series.test_operators; needs cleanup
    def test_series_operators(self):
        # compare the cython/numpy op result against the pure-Python
        # Series.combine equivalent for all simple arithmetic ops
        def _check_op(series, other, op, pos_only=False, check_dtype=True):
            left = np.abs(series) if pos_only else series
            right = np.abs(other) if pos_only else other
            cython_or_numpy = op(left, right)
            python = left.combine(right, op)
            tm.assert_series_equal(cython_or_numpy, python,
                                   check_dtype=check_dtype)
        def check(series, other):
            simple_ops = ['add', 'sub', 'mul', 'truediv', 'floordiv', 'mod']
            for opname in simple_ops:
                _check_op(series, other, getattr(operator, opname))
            _check_op(series, other, operator.pow, pos_only=True)
            # reversed-operand variants
            _check_op(series, other, lambda x, y: operator.add(y, x))
            _check_op(series, other, lambda x, y: operator.sub(y, x))
            _check_op(series, other, lambda x, y: operator.truediv(y, x))
            _check_op(series, other, lambda x, y: operator.floordiv(y, x))
            _check_op(series, other, lambda x, y: operator.mul(y, x))
            _check_op(series, other, lambda x, y: operator.pow(y, x),
                      pos_only=True)
            _check_op(series, other, lambda x, y: operator.mod(y, x))
        tser = tm.makeTimeSeries().rename('ts')
        check(tser, tser * 2)
        check(tser, tser * 0)
        check(tser, tser[::2])
        check(tser, 5)
        def check_comparators(series, other, check_dtype=True):
            _check_op(series, other, operator.gt, check_dtype=check_dtype)
            _check_op(series, other, operator.ge, check_dtype=check_dtype)
            _check_op(series, other, operator.eq, check_dtype=check_dtype)
            _check_op(series, other, operator.lt, check_dtype=check_dtype)
            _check_op(series, other, operator.le, check_dtype=check_dtype)
        check_comparators(tser, 5)
        check_comparators(tser, tser + 1, check_dtype=False)
    # TODO: taken from tests.series.test_operators; needs cleanup
    def test_divmod(self):
        # compare Series divmod against raw numpy divmod on the values
        def check(series, other):
            results = divmod(series, other)
            if isinstance(other, Iterable) and len(series) != len(other):
                # if the lengths don't match, this is the test where we use
                # `tser[::2]`. Pad every other value in `other_np` with nan.
                other_np = []
                for n in other:
                    other_np.append(n)
                    other_np.append(np.nan)
            else:
                other_np = other
            other_np = np.asarray(other_np)
            with np.errstate(all='ignore'):
                expecteds = divmod(series.values, np.asarray(other_np))
            for result, expected in zip(results, expecteds):
                # check the values, name, and index separately
                tm.assert_almost_equal(np.asarray(result), expected)
                assert result.name == series.name
                tm.assert_index_equal(result.index, series.index)
        tser = tm.makeTimeSeries().rename('ts')
        check(tser, tser * 2)
        check(tser, tser * 0)
        check(tser, tser[::2])
        check(tser, 5)
class TestUFuncCompat(object):
@pytest.mark.parametrize('holder', [pd.Int64Index, pd.UInt64Index,
pd.Float64Index, pd.Series])
def test_ufunc_coercions(self, holder):
idx = holder([1, 2, 3, 4, 5], name='x')
box = pd.Series if holder is pd.Series else pd.Index
result = np.sqrt(idx)
assert result.dtype == 'f8' and isinstance(result, box)
exp = pd.Float64Index(np.sqrt(np.array([1, 2, 3, 4, 5])), name='x')
exp = tm.box_expected(exp, box)
tm.assert_equal(result, exp)
result = np.divide(idx, 2.)
assert result.dtype == 'f8' and isinstance(result, box)
exp = pd.Float64Index([0.5, 1., 1.5, 2., 2.5], name='x')
exp = tm.box_expected(exp, box)
tm.assert_equal(result, exp)
# _evaluate_numeric_binop
result = idx + 2.
assert result.dtype == 'f8' and isinstance(result, box)
exp = pd.Float64Index([3., 4., 5., 6., 7.], name='x')
exp = tm.box_expected(exp, box)
tm.assert_equal(result, exp)
result = idx - 2.
assert result.dtype == 'f8' and isinstance(result, box)
exp = pd.Float64Index([-1., 0., 1., 2., 3.], name='x')
exp = tm.box_expected(exp, box)
tm.assert_equal(result, exp)
result = idx * 1.
assert result.dtype == 'f8' and isinstance(result, box)
exp = pd.Float64Index([1., 2., 3., 4., 5.], name='x')
exp = tm.box_expected(exp, box)
tm.assert_equal(result, exp)
result = idx / 2.
assert result.dtype == 'f8' and isinstance(result, box)
exp = pd.Float64Index([0.5, 1., 1.5, 2., 2.5], name='x')
exp = tm.box_expected(exp, box)
tm.assert_equal(result, exp)
class TestObjectDtypeEquivalence(object):
# Tests that arithmetic operations match operations executed elementwise
@pytest.mark.parametrize('dtype', [None, object])
def test_numarr_with_dtype_add_nan(self, dtype, box):
ser = pd.Series([1, 2, 3], dtype=dtype)
expected = pd.Series([np.nan, np.nan, np.nan], dtype=dtype)
ser = tm.box_expected(ser, box)
expected = tm.box_expected(expected, box)
result = np.nan + ser
tm.assert_equal(result, expected)
result = ser + np.nan
tm.assert_equal(result, expected)
@pytest.mark.parametrize('dtype', [None, object])
def test_numarr_with_dtype_add_int(self, dtype, box):
ser = pd.Series([1, 2, 3], dtype=dtype)
expected = pd.Series([2, 3, 4], dtype=dtype)
ser = tm.box_expected(ser, box)
expected = tm.box_expected(expected, box)
result = 1 + ser
tm.assert_equal(result, expected)
result = ser + 1
tm.assert_equal(result, expected)
# TODO: moved from tests.series.test_operators; needs cleanup
@pytest.mark.parametrize('op', [operator.add, operator.sub, operator.mul,
operator.truediv, operator.floordiv])
def test_operators_reverse_object(self, op):
# GH#56
arr = pd.Series(np.random.randn(10), index=np.arange(10), dtype=object)
result = op(1., arr)
expected = op(1., arr.astype(float))
tm.assert_series_equal(result.astype(float), expected)
| bsd-3-clause |
Tong-Chen/scikit-learn | sklearn/metrics/tests/test_pairwise.py | 4 | 18398 | import numpy as np
from numpy import linalg
from scipy.sparse import dok_matrix, csr_matrix, issparse
from scipy.spatial.distance import cosine, cityblock, minkowski
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_true
from sklearn.metrics.pairwise import euclidean_distances
from sklearn.metrics.pairwise import manhattan_distances
from sklearn.metrics.pairwise import linear_kernel
from sklearn.metrics.pairwise import chi2_kernel, additive_chi2_kernel
from sklearn.metrics.pairwise import polynomial_kernel
from sklearn.metrics.pairwise import rbf_kernel
from sklearn.metrics.pairwise import sigmoid_kernel
from sklearn.metrics.pairwise import cosine_similarity
from sklearn.metrics.pairwise import cosine_distances
from sklearn.metrics.pairwise import pairwise_distances
from sklearn.metrics.pairwise import pairwise_distances_argmin_min
from sklearn.metrics.pairwise import pairwise_distances_argmin
from sklearn.metrics.pairwise import pairwise_kernels
from sklearn.metrics.pairwise import PAIRWISE_KERNEL_FUNCTIONS
from sklearn.metrics.pairwise import check_pairwise_arrays
from sklearn.metrics.pairwise import _parallel_pairwise
from sklearn.preprocessing import normalize
def test_pairwise_distances():
""" Test the pairwise_distance helper function. """
rng = np.random.RandomState(0)
# Euclidean distance should be equivalent to calling the function.
X = rng.random_sample((5, 4))
S = pairwise_distances(X, metric="euclidean")
S2 = euclidean_distances(X)
assert_array_almost_equal(S, S2)
# Euclidean distance, with Y != X.
Y = rng.random_sample((2, 4))
S = pairwise_distances(X, Y, metric="euclidean")
S2 = euclidean_distances(X, Y)
assert_array_almost_equal(S, S2)
# Test with tuples as X and Y
X_tuples = tuple([tuple([v for v in row]) for row in X])
Y_tuples = tuple([tuple([v for v in row]) for row in Y])
S2 = pairwise_distances(X_tuples, Y_tuples, metric="euclidean")
assert_array_almost_equal(S, S2)
# "cityblock" uses sklearn metric, cityblock (function) is scipy.spatial.
S = pairwise_distances(X, metric="cityblock")
S2 = pairwise_distances(X, metric=cityblock)
assert_equal(S.shape[0], S.shape[1])
assert_equal(S.shape[0], X.shape[0])
assert_array_almost_equal(S, S2)
# The manhattan metric should be equivalent to cityblock.
S = pairwise_distances(X, Y, metric="manhattan")
S2 = pairwise_distances(X, Y, metric=cityblock)
assert_equal(S.shape[0], X.shape[0])
assert_equal(S.shape[1], Y.shape[0])
assert_array_almost_equal(S, S2)
# manhattan does not support sparse matrices atm.
assert_raises(ValueError, pairwise_distances, csr_matrix(X),
metric="manhattan")
# Low-level function for manhattan can divide in blocks to avoid
# using too much memory during the broadcasting
S3 = manhattan_distances(X, Y, size_threshold=10)
assert_array_almost_equal(S, S3)
# Test cosine as a string metric versus cosine callable
# "cosine" uses sklearn metric, cosine (function) is scipy.spatial
S = pairwise_distances(X, Y, metric="cosine")
S2 = pairwise_distances(X, Y, metric=cosine)
assert_equal(S.shape[0], X.shape[0])
assert_equal(S.shape[1], Y.shape[0])
assert_array_almost_equal(S, S2)
# Tests that precomputed metric returns pointer to, and not copy of, X.
S = np.dot(X, X.T)
S2 = pairwise_distances(S, metric="precomputed")
assert_true(S is S2)
# Test with sparse X and Y,
# currently only supported for euclidean and cosine
X_sparse = csr_matrix(X)
Y_sparse = csr_matrix(Y)
S = pairwise_distances(X_sparse, Y_sparse, metric="euclidean")
S2 = euclidean_distances(X_sparse, Y_sparse)
assert_array_almost_equal(S, S2)
S = pairwise_distances(X_sparse, Y_sparse, metric="cosine")
S2 = cosine_distances(X_sparse, Y_sparse)
assert_array_almost_equal(S, S2)
# Test with scipy.spatial.distance metric, with a kwd
kwds = {"p": 2.0}
S = pairwise_distances(X, Y, metric="minkowski", **kwds)
S2 = pairwise_distances(X, Y, metric=minkowski, **kwds)
assert_array_almost_equal(S, S2)
# same with Y = None
kwds = {"p": 2.0}
S = pairwise_distances(X, metric="minkowski", **kwds)
S2 = pairwise_distances(X, metric=minkowski, **kwds)
assert_array_almost_equal(S, S2)
# Test that scipy distance metrics throw an error if sparse matrix given
assert_raises(TypeError, pairwise_distances, X_sparse, metric="minkowski")
assert_raises(TypeError, pairwise_distances, X, Y_sparse,
metric="minkowski")
def test_pairwise_parallel():
rng = np.random.RandomState(0)
for func in (np.array, csr_matrix):
X = func(rng.random_sample((5, 4)))
Y = func(rng.random_sample((3, 4)))
S = euclidean_distances(X)
S2 = _parallel_pairwise(X, None, euclidean_distances, n_jobs=-1)
assert_array_almost_equal(S, S2)
S = euclidean_distances(X, Y)
S2 = _parallel_pairwise(X, Y, euclidean_distances, n_jobs=-1)
assert_array_almost_equal(S, S2)
def test_pairwise_kernels():
""" Test the pairwise_kernels helper function. """
def callable_rbf_kernel(x, y, **kwds):
""" Callable version of pairwise.rbf_kernel. """
K = rbf_kernel(np.atleast_2d(x), np.atleast_2d(y), **kwds)
return K
rng = np.random.RandomState(0)
X = rng.random_sample((5, 4))
Y = rng.random_sample((2, 4))
# Test with all metrics that should be in PAIRWISE_KERNEL_FUNCTIONS.
test_metrics = ["rbf", "sigmoid", "polynomial", "linear", "chi2",
"additive_chi2"]
for metric in test_metrics:
function = PAIRWISE_KERNEL_FUNCTIONS[metric]
# Test with Y=None
K1 = pairwise_kernels(X, metric=metric)
K2 = function(X)
assert_array_almost_equal(K1, K2)
# Test with Y=Y
K1 = pairwise_kernels(X, Y=Y, metric=metric)
K2 = function(X, Y=Y)
assert_array_almost_equal(K1, K2)
# Test with tuples as X and Y
X_tuples = tuple([tuple([v for v in row]) for row in X])
Y_tuples = tuple([tuple([v for v in row]) for row in Y])
K2 = pairwise_kernels(X_tuples, Y_tuples, metric=metric)
assert_array_almost_equal(K1, K2)
# Test with sparse X and Y
X_sparse = csr_matrix(X)
Y_sparse = csr_matrix(Y)
if metric in ["chi2", "additive_chi2"]:
# these don't support sparse matrices yet
assert_raises(ValueError, pairwise_kernels,
X_sparse, Y=Y_sparse, metric=metric)
continue
K1 = pairwise_kernels(X_sparse, Y=Y_sparse, metric=metric)
assert_array_almost_equal(K1, K2)
# Test with a callable function, with given keywords.
metric = callable_rbf_kernel
kwds = {}
kwds['gamma'] = 0.1
K1 = pairwise_kernels(X, Y=Y, metric=metric, **kwds)
K2 = rbf_kernel(X, Y=Y, **kwds)
assert_array_almost_equal(K1, K2)
# callable function, X=Y
K1 = pairwise_kernels(X, Y=X, metric=metric, **kwds)
K2 = rbf_kernel(X, Y=X, **kwds)
assert_array_almost_equal(K1, K2)
def test_pairwise_kernels_filter_param():
rng = np.random.RandomState(0)
X = rng.random_sample((5, 4))
Y = rng.random_sample((2, 4))
K = rbf_kernel(X, Y, gamma=0.1)
params = {"gamma": 0.1, "blabla": ":)"}
K2 = pairwise_kernels(X, Y, metric="rbf", filter_params=True, **params)
assert_array_almost_equal(K, K2)
assert_raises(TypeError, pairwise_kernels, X, Y, "rbf", **params)
def test_pairwise_distances_argmin_min():
""" Check pairwise minimum distances computation for any metric"""
X = [[0], [1]]
Y = [[-1], [2]]
Xsp = dok_matrix(X)
Ysp = csr_matrix(Y)
# euclidean metric
D, E = pairwise_distances_argmin_min(X, Y, metric="euclidean")
D2 = pairwise_distances_argmin(X, Y, metric="euclidean")
assert_array_almost_equal(D, [0, 1])
assert_array_almost_equal(D2, [0, 1])
assert_array_almost_equal(D, [0, 1])
assert_array_almost_equal(E, [1., 1.])
# sparse matrix case
Dsp, Esp = pairwise_distances_argmin_min(Xsp, Ysp, metric="euclidean")
assert_array_equal(Dsp, D)
assert_array_equal(Esp, E)
# We don't want np.matrix here
assert_equal(type(Dsp), np.ndarray)
assert_equal(type(Esp), np.ndarray)
# Non-euclidean sklearn metric
D, E = pairwise_distances_argmin_min(X, Y, metric="manhattan")
D2 = pairwise_distances_argmin(X, Y, metric="manhattan")
assert_array_almost_equal(D, [0, 1])
assert_array_almost_equal(D2, [0, 1])
assert_array_almost_equal(E, [1., 1.])
# sparse matrix case
assert_raises(ValueError,
pairwise_distances_argmin_min, Xsp, Ysp, metric="manhattan")
# Non-euclidean Scipy distance (callable)
D, E = pairwise_distances_argmin_min(X, Y, metric=minkowski,
metric_kwargs={"p": 2})
assert_array_almost_equal(D, [0, 1])
assert_array_almost_equal(E, [1., 1.])
# Non-euclidean Scipy distance (string)
D, E = pairwise_distances_argmin_min(X, Y, metric="minkowski",
metric_kwargs={"p": 2})
assert_array_almost_equal(D, [0, 1])
assert_array_almost_equal(E, [1., 1.])
# Compare with naive implementation
rng = np.random.RandomState(0)
X = rng.randn(97, 149)
Y = rng.randn(111, 149)
dist = pairwise_distances(X, Y, metric="manhattan")
dist_orig_ind = dist.argmin(axis=0)
dist_orig_val = dist[dist_orig_ind, range(len(dist_orig_ind))]
dist_chunked_ind, dist_chunked_val = pairwise_distances_argmin_min(
X, Y, axis=0, metric="manhattan", batch_size=50)
np.testing.assert_almost_equal(dist_orig_ind, dist_chunked_ind, decimal=7)
np.testing.assert_almost_equal(dist_orig_val, dist_chunked_val, decimal=7)
def test_euclidean_distances():
""" Check the pairwise Euclidean distances computation"""
X = [[0]]
Y = [[1], [2]]
D = euclidean_distances(X, Y)
assert_array_almost_equal(D, [[1., 2.]])
X = csr_matrix(X)
Y = csr_matrix(Y)
D = euclidean_distances(X, Y)
assert_array_almost_equal(D, [[1., 2.]])
def test_chi_square_kernel():
rng = np.random.RandomState(0)
X = rng.random_sample((5, 4))
Y = rng.random_sample((10, 4))
K_add = additive_chi2_kernel(X, Y)
gamma = 0.1
K = chi2_kernel(X, Y, gamma=gamma)
assert_equal(K.dtype, np.float)
for i, x in enumerate(X):
for j, y in enumerate(Y):
chi2 = -np.sum((x - y) ** 2 / (x + y))
chi2_exp = np.exp(gamma * chi2)
assert_almost_equal(K_add[i, j], chi2)
assert_almost_equal(K[i, j], chi2_exp)
# check diagonal is ones for data with itself
K = chi2_kernel(Y)
assert_array_equal(np.diag(K), 1)
# check off-diagonal is < 1 but > 0:
assert_true(np.all(K > 0))
assert_true(np.all(K - np.diag(np.diag(K)) < 1))
# check that float32 is preserved
X = rng.random_sample((5, 4)).astype(np.float32)
Y = rng.random_sample((10, 4)).astype(np.float32)
K = chi2_kernel(X, Y)
assert_equal(K.dtype, np.float32)
# check integer type gets converted,
# check that zeros are handled
X = rng.random_sample((10, 4)).astype(np.int32)
K = chi2_kernel(X, X)
assert_true(np.isfinite(K).all())
assert_equal(K.dtype, np.float)
# check that kernel of similar things is greater than dissimilar ones
X = [[.3, .7], [1., 0]]
Y = [[0, 1], [.9, .1]]
K = chi2_kernel(X, Y)
assert_greater(K[0, 0], K[0, 1])
assert_greater(K[1, 1], K[1, 0])
# test negative input
assert_raises(ValueError, chi2_kernel, [[0, -1]])
assert_raises(ValueError, chi2_kernel, [[0, -1]], [[-1, -1]])
assert_raises(ValueError, chi2_kernel, [[0, 1]], [[-1, -1]])
# different n_features in X and Y
assert_raises(ValueError, chi2_kernel, [[0, 1]], [[.2, .2, .6]])
# sparse matrices
assert_raises(ValueError, chi2_kernel, csr_matrix(X), csr_matrix(Y))
assert_raises(ValueError, additive_chi2_kernel,
csr_matrix(X), csr_matrix(Y))
def test_kernel_symmetry():
""" Valid kernels should be symmetric"""
rng = np.random.RandomState(0)
X = rng.random_sample((5, 4))
for kernel in (linear_kernel, polynomial_kernel, rbf_kernel,
sigmoid_kernel, cosine_similarity):
K = kernel(X, X)
assert_array_almost_equal(K, K.T, 15)
def test_kernel_sparse():
rng = np.random.RandomState(0)
X = rng.random_sample((5, 4))
X_sparse = csr_matrix(X)
for kernel in (linear_kernel, polynomial_kernel, rbf_kernel,
sigmoid_kernel, cosine_similarity):
K = kernel(X, X)
K2 = kernel(X_sparse, X_sparse)
assert_array_almost_equal(K, K2)
def test_linear_kernel():
rng = np.random.RandomState(0)
X = rng.random_sample((5, 4))
K = linear_kernel(X, X)
# the diagonal elements of a linear kernel are their squared norm
assert_array_almost_equal(K.flat[::6], [linalg.norm(x) ** 2 for x in X])
def test_rbf_kernel():
rng = np.random.RandomState(0)
X = rng.random_sample((5, 4))
K = rbf_kernel(X, X)
# the diagonal elements of a rbf kernel are 1
assert_array_almost_equal(K.flat[::6], np.ones(5))
def test_cosine_similarity():
""" Test the cosine_similarity. """
rng = np.random.RandomState(0)
X = rng.random_sample((5, 4))
Y = rng.random_sample((3, 4))
Xcsr = csr_matrix(X)
Ycsr = csr_matrix(Y)
for X_, Y_ in ((X, None), (X, Y),
(Xcsr, None), (Xcsr, Ycsr)):
# Test that the cosine is kernel is equal to a linear kernel when data
# has been previously normalized by L2-norm.
K1 = pairwise_kernels(X_, Y=Y_, metric="cosine")
X_ = normalize(X_)
if Y_ is not None:
Y_ = normalize(Y_)
K2 = pairwise_kernels(X_, Y=Y_, metric="linear")
assert_array_almost_equal(K1, K2)
def test_check_dense_matrices():
""" Ensure that pairwise array check works for dense matrices."""
# Check that if XB is None, XB is returned as reference to XA
XA = np.resize(np.arange(40), (5, 8))
XA_checked, XB_checked = check_pairwise_arrays(XA, None)
assert_true(XA_checked is XB_checked)
assert_array_equal(XA, XA_checked)
def test_check_XB_returned():
""" Ensure that if XA and XB are given correctly, they return as equal."""
# Check that if XB is not None, it is returned equal.
# Note that the second dimension of XB is the same as XA.
XA = np.resize(np.arange(40), (5, 8))
XB = np.resize(np.arange(32), (4, 8))
XA_checked, XB_checked = check_pairwise_arrays(XA, XB)
assert_array_equal(XA, XA_checked)
assert_array_equal(XB, XB_checked)
def test_check_different_dimensions():
""" Ensure an error is raised if the dimensions are different. """
XA = np.resize(np.arange(45), (5, 9))
XB = np.resize(np.arange(32), (4, 8))
assert_raises(ValueError, check_pairwise_arrays, XA, XB)
def test_check_invalid_dimensions():
""" Ensure an error is raised on 1D input arrays. """
XA = np.arange(45)
XB = np.resize(np.arange(32), (4, 8))
assert_raises(ValueError, check_pairwise_arrays, XA, XB)
XA = np.resize(np.arange(45), (5, 9))
XB = np.arange(32)
assert_raises(ValueError, check_pairwise_arrays, XA, XB)
def test_check_sparse_arrays():
""" Ensures that checks return valid sparse matrices. """
rng = np.random.RandomState(0)
XA = rng.random_sample((5, 4))
XA_sparse = csr_matrix(XA)
XB = rng.random_sample((5, 4))
XB_sparse = csr_matrix(XB)
XA_checked, XB_checked = check_pairwise_arrays(XA_sparse, XB_sparse)
# compare their difference because testing csr matrices for
# equality with '==' does not work as expected.
assert_true(issparse(XA_checked))
assert_equal(abs(XA_sparse - XA_checked).sum(), 0)
assert_true(issparse(XB_checked))
assert_equal(abs(XB_sparse - XB_checked).sum(), 0)
XA_checked, XA_2_checked = check_pairwise_arrays(XA_sparse, XA_sparse)
assert_true(issparse(XA_checked))
assert_equal(abs(XA_sparse - XA_checked).sum(), 0)
assert_true(issparse(XA_2_checked))
assert_equal(abs(XA_2_checked - XA_checked).sum(), 0)
def tuplify(X):
""" Turns a numpy matrix (any n-dimensional array) into tuples."""
s = X.shape
if len(s) > 1:
# Tuplify each sub-array in the input.
return tuple(tuplify(row) for row in X)
else:
# Single dimension input, just return tuple of contents.
return tuple(r for r in X)
def test_check_tuple_input():
""" Ensures that checks return valid tuples. """
rng = np.random.RandomState(0)
XA = rng.random_sample((5, 4))
XA_tuples = tuplify(XA)
XB = rng.random_sample((5, 4))
XB_tuples = tuplify(XB)
XA_checked, XB_checked = check_pairwise_arrays(XA_tuples, XB_tuples)
assert_array_equal(XA_tuples, XA_checked)
assert_array_equal(XB_tuples, XB_checked)
def test_check_preserve_type():
""" Ensures that type float32 is preserved. """
XA = np.resize(np.arange(40), (5, 8)).astype(np.float32)
XB = np.resize(np.arange(40), (5, 8)).astype(np.float32)
XA_checked, XB_checked = check_pairwise_arrays(XA, None)
assert_equal(XA_checked.dtype, np.float32)
# both float32
XA_checked, XB_checked = check_pairwise_arrays(XA, XB)
assert_equal(XA_checked.dtype, np.float32)
assert_equal(XB_checked.dtype, np.float32)
# mismatched A
XA_checked, XB_checked = check_pairwise_arrays(XA.astype(np.float),
XB)
assert_equal(XA_checked.dtype, np.float)
assert_equal(XB_checked.dtype, np.float)
# mismatched B
XA_checked, XB_checked = check_pairwise_arrays(XA,
XB.astype(np.float))
assert_equal(XA_checked.dtype, np.float)
assert_equal(XB_checked.dtype, np.float)
| bsd-3-clause |
Hbl15/ThinkStats2 | code/populations.py | 68 | 2609 | """This file contains code used in "Think Stats",
by Allen B. Downey, available from greenteapress.com
Copyright 2010 Allen B. Downey
License: GNU GPLv3 http://www.gnu.org/licenses/gpl.html
"""
from __future__ import print_function
import csv
import logging
import sys
import numpy as np
import pandas
import thinkplot
import thinkstats2
def ReadData(filename='PEP_2012_PEPANNRES_with_ann.csv'):
"""Reads filename and returns populations in thousands
filename: string
returns: pandas Series of populations in thousands
"""
df = pandas.read_csv(filename, header=None, skiprows=2,
encoding='iso-8859-1')
populations = df[7]
populations.replace(0, np.nan, inplace=True)
return populations.dropna()
def MakeFigures():
"""Plots the CDF of populations in several forms.
On a log-log scale the tail of the CCDF looks like a straight line,
which suggests a Pareto distribution, but that turns out to be misleading.
On a log-x scale the distribution has the characteristic sigmoid of
a lognormal distribution.
The normal probability plot of log(sizes) confirms that the data fit the
lognormal model very well.
Many phenomena that have been described with Pareto models can be described
as well, or better, with lognormal models.
"""
pops = ReadData()
print('Number of cities/towns', len(pops))
log_pops = np.log10(pops)
cdf = thinkstats2.Cdf(pops, label='data')
cdf_log = thinkstats2.Cdf(log_pops, label='data')
# pareto plot
xs, ys = thinkstats2.RenderParetoCdf(xmin=5000, alpha=1.4, low=0, high=1e7)
thinkplot.Plot(np.log10(xs), 1-ys, label='model', color='0.8')
thinkplot.Cdf(cdf_log, complement=True)
thinkplot.Config(xlabel='log10 population',
ylabel='CCDF',
yscale='log')
thinkplot.Save(root='populations_pareto')
# lognormal plot
thinkplot.PrePlot(cols=2)
mu, sigma = log_pops.mean(), log_pops.std()
xs, ps = thinkstats2.RenderNormalCdf(mu, sigma, low=0, high=8)
thinkplot.Plot(xs, ps, label='model', color='0.8')
thinkplot.Cdf(cdf_log)
thinkplot.Config(xlabel='log10 population',
ylabel='CDF')
thinkplot.SubPlot(2)
thinkstats2.NormalProbabilityPlot(log_pops, label='data')
thinkplot.Config(xlabel='z',
ylabel='log10 population',
xlim=[-5, 5])
thinkplot.Save(root='populations_normal')
def main():
thinkstats2.RandomSeed(17)
MakeFigures()
if __name__ == "__main__":
main()
| gpl-3.0 |
scikit-learn-contrib/categorical-encoding | tests/test_count.py | 1 | 10805 | import pandas as pd
from unittest import TestCase # or `from unittest import ...` if on Python 3.4+
import numpy as np
import category_encoders as encoders
X = pd.DataFrame({
'none': [
'A', 'A', 'B', None, None, 'C', None, 'C', None, 'B',
'A', 'A', 'C', 'B', 'B', 'A', 'A', None, 'B', None
],
'na_categorical': [
'A', 'A', 'C', 'A', 'B', 'C', 'C', 'A', np.nan, 'B', 'A',
'C', 'C', 'A', 'B', 'C', np.nan, 'A', np.nan, np.nan
]
})
X_t = pd.DataFrame({
'none': [
'A', 'C', None, 'B', 'C', 'C', None, None, 'A',
'A', 'C', 'A', 'B', 'A', 'A'
],
'na_categorical': [
'C', 'C', 'A', 'B', 'C', 'A', np.nan, 'B', 'A', 'A',
'B', np.nan, 'A', np.nan, 'A'
]
})
class TestCountEncoder(TestCase):
def test_count_defaults(self):
"""Test the defaults are working as expected on 'none' and 'categorical'
which are the most extreme edge cases for the count encoder."""
enc = encoders.CountEncoder(verbose=1)
enc.fit(X)
out = enc.transform(X_t)
self.assertTrue(pd.Series([5, 3, 6]).isin(out['none'].unique()).all())
self.assertTrue(out['none'].unique().shape == (3,))
self.assertTrue(out['none'].isnull().sum() == 0)
self.assertTrue(pd.Series([6, 3]).isin(out['na_categorical']).all())
self.assertTrue(out['na_categorical'].unique().shape == (4,))
self.assertTrue(enc.mapping is not None)
def test_count_handle_missing_string(self):
"""Test the handle_missing string on 'none' and 'na_categorical'."""
enc = encoders.CountEncoder(
handle_missing='return_nan'
)
enc.fit(X)
out = enc.transform(X_t)
self.assertIn('none', enc._handle_missing)
self.assertTrue(pd.Series([6, 5, 3]).isin(out['none']).all())
self.assertTrue(out['none'].unique().shape == (4,))
self.assertTrue(out['none'].isnull().sum() == 3)
self.assertTrue(pd.Series([6, 7, 3]).isin(out['na_categorical']).all())
self.assertFalse(pd.Series([4]).isin(out['na_categorical']).all())
self.assertTrue(out['na_categorical'].unique().shape == (4,))
self.assertTrue(out['na_categorical'].isnull().sum() == 3)
def test_count_handle_missing_dict(self):
"""Test the handle_missing dict on 'none' and 'na_categorical'.
We want to see differing behavour between 'none' and 'na_cat' cols."""
enc = encoders.CountEncoder(
handle_missing={'na_categorical': 'return_nan'}
)
enc.fit(X)
out = enc.transform(X_t)
self.assertIn('none', enc._handle_missing)
self.assertTrue(pd.Series([5, 3, 6]).isin(out['none']).all())
self.assertTrue(out['none'].unique().shape == (3,))
self.assertTrue(out['none'].isnull().sum() == 0)
self.assertTrue(pd.Series([6, 7, 3]).isin(out['na_categorical']).all())
self.assertFalse(pd.Series([4]).isin(out['na_categorical']).all())
self.assertTrue(out['na_categorical'].unique().shape == (4,))
self.assertTrue(out['na_categorical'].isnull().sum() == 3)
def test_count_handle_unknown_string(self):
"""Test the handle_unknown string on 'none' and 'na_categorical'.
The 'handle_missing' must be set to 'return_nan' in order to test
'handle_unkown' correctly."""
enc = encoders.CountEncoder(
handle_missing='return_nan',
handle_unknown='return_nan',
)
enc.fit(X)
out = enc.transform(X_t)
self.assertIn('none', enc._handle_unknown)
self.assertTrue(pd.Series([6, 5, 3]).isin(out['none']).all())
self.assertTrue(out['none'].unique().shape == (4,))
self.assertTrue(out['none'].isnull().sum() == 3)
self.assertTrue(pd.Series([3, 6, 7]).isin(out['na_categorical']).all())
self.assertTrue(out['na_categorical'].unique().shape == (4,))
self.assertTrue(out['na_categorical'].isnull().sum() == 3)
def test_count_handle_unknown_dict(self):
"""Test the 'handle_unkown' dict with all non-default options."""
enc = encoders.CountEncoder(
handle_missing='return_nan',
handle_unknown={
'none': -1,
'na_categorical': 'return_nan'
},
)
enc.fit(X)
out = enc.transform(X_t)
self.assertIn('none', enc._handle_unknown)
self.assertTrue(pd.Series([6, 5, 3, -1]).isin(out['none']).all())
self.assertTrue(out['none'].unique().shape == (4,))
self.assertTrue(out['none'].isnull().sum() == 0)
self.assertTrue(pd.Series([3, 6, 7]).isin(out['na_categorical']).all())
self.assertTrue(out['na_categorical'].unique().shape == (4,))
self.assertTrue(out['na_categorical'].isnull().sum() == 3)
def test_count_min_group_size_int(self):
"""Test the min_group_size int on 'none' and 'na_categorical'."""
enc = encoders.CountEncoder(min_group_size=7)
enc.fit(X)
out = enc.transform(X_t)
self.assertTrue(pd.Series([6, 5, 3]).isin(out['none']).all())
self.assertTrue(out['none'].unique().shape == (3,))
self.assertTrue(out['none'].isnull().sum() == 0)
self.assertIn(np.nan, enc.mapping['none'])
self.assertTrue(pd.Series([13, 7]).isin(out['na_categorical']).all())
self.assertTrue(out['na_categorical'].unique().shape == (2,))
self.assertIn('B_C_nan', enc.mapping['na_categorical'])
self.assertFalse(np.nan in enc.mapping['na_categorical'])
def test_count_min_group_size_dict(self):
"""Test the min_group_size dict on 'none' and 'na_categorical'."""
enc = encoders.CountEncoder(
min_group_size={'none': 6, 'na_categorical': 7}
)
enc.fit(X)
out = enc.transform(X_t)
self.assertIn('none', enc._min_group_size)
self.assertTrue(pd.Series([6, 8]).isin(out['none']).all())
self.assertEqual(out['none'].unique().shape[0], 2)
self.assertTrue(out['none'].isnull().sum() == 0)
self.assertIn(np.nan, enc.mapping['none'])
self.assertTrue(pd.Series([13, 7]).isin(out['na_categorical']).all())
self.assertTrue(out['na_categorical'].unique().shape == (2,))
self.assertIn('B_C_nan', enc.mapping['na_categorical'])
self.assertFalse(np.nan in enc.mapping['na_categorical'])
def test_count_combine_min_nan_groups_bool(self):
"""Test the min_nan_groups_bool on 'none' and 'na_categorical'."""
enc = encoders.CountEncoder(
min_group_size=7,
combine_min_nan_groups=False
)
enc.fit(X)
out = enc.transform(X_t)
self.assertTrue(pd.Series([6, 5, 3]).isin(out['none']).all())
self.assertEqual(out['none'].unique().shape[0], 3)
self.assertEqual(out['none'].isnull().sum(), 0)
self.assertTrue(pd.Series([9, 7, 4]).isin(out['na_categorical']).all())
self.assertEqual(out['na_categorical'].unique().shape[0], 3)
self.assertTrue(enc.mapping is not None)
self.assertIn(np.nan, enc.mapping['na_categorical'])
def test_count_combine_min_nan_groups_dict(self):
"""Test the combine_min_nan_groups dict on 'none' and 'na_categorical'."""
enc = encoders.CountEncoder(
min_group_size={
'none': 6,
'na_categorical': 7
},
combine_min_nan_groups={
'none': 'force',
'na_categorical': False
}
)
enc.fit(X)
out = enc.transform(X_t)
self.assertIn('none', enc._combine_min_nan_groups)
self.assertTrue(pd.Series([14, 6]).isin(out['none']).all())
self.assertEqual(out['none'].unique().shape[0], 2)
self.assertEqual(out['none'].isnull().sum(), 0)
self.assertTrue(pd.Series([9, 7, 4]).isin(out['na_categorical']).all())
self.assertEqual(out['na_categorical'].unique().shape[0], 3)
self.assertTrue(enc.mapping is not None)
self.assertIn(np.nan, enc.mapping['na_categorical'])
def test_count_min_group_name_string(self):
"""Test the min_group_name string on 'none' and 'na_categorical'."""
enc = encoders.CountEncoder(
min_group_size=6,
min_group_name='dave'
)
enc.fit(X)
self.assertIn('dave', enc.mapping['none'])
self.assertEqual(enc.mapping['none']['dave'], 8)
self.assertIn('dave', enc.mapping['na_categorical'])
self.assertEqual(enc.mapping['na_categorical']['dave'], 7)
def test_count_min_group_name_dict(self):
"""Test the min_group_name dict on 'none' and 'na_categorical'."""
enc = encoders.CountEncoder(
min_group_size={
'none': 6, 'na_categorical': 6
},
min_group_name={
'none': 'dave', 'na_categorical': None
}
)
enc.fit(X)
self.assertIn('none', enc._min_group_name)
self.assertIn('dave', enc.mapping['none'])
self.assertEqual(enc.mapping['none']['dave'], 8)
self.assertIn('B_nan', enc.mapping['na_categorical'])
self.assertEqual(enc.mapping['na_categorical']['B_nan'], 7)
def test_count_normalize_bool(self):
"""Test the normalize bool on 'none' and 'na_categorical'."""
enc = encoders.CountEncoder(
min_group_size=6,
normalize=True
)
enc.fit(X)
out = enc.transform(X_t)
self.assertIn('none', enc._normalize)
self.assertTrue(out['none'].round(5).isin([0.3, 0.4]).all())
self.assertEqual(out['none'].unique().shape[0], 2)
self.assertEqual(out['none'].isnull().sum(), 0)
self.assertTrue(pd.Series([0.3, 0.35]).isin(out['na_categorical']).all())
self.assertEqual(out['na_categorical'].unique().shape[0], 2)
self.assertTrue(enc.mapping is not None)
def test_count_normalize_dict(self):
"""Test the normalize dict on 'none' and 'na_categorical'."""
enc = encoders.CountEncoder(
min_group_size=7,
normalize={
'none': True, 'na_categorical': False
}
)
enc.fit(X)
out = enc.transform(X_t)
self.assertIn('none', enc._normalize)
self.assertTrue(out['none'].round(5).isin([0.3 , 0.15, 0.25]).all())
self.assertEqual(out['none'].unique().shape[0], 3)
self.assertEqual(out['none'].isnull().sum(), 0)
self.assertTrue(pd.Series([13, 7]).isin(out['na_categorical']).all())
self.assertEqual(out['na_categorical'].unique().shape[0], 2)
self.assertTrue(enc.mapping is not None)
| bsd-3-clause |
zihua/scikit-learn | examples/cluster/plot_face_segmentation.py | 71 | 2839 | """
===================================================
Segmenting the picture of a raccoon face in regions
===================================================
This example uses :ref:`spectral_clustering` on a graph created from
voxel-to-voxel difference on an image to break this image into multiple
partly-homogeneous regions.
This procedure (spectral clustering on an image) is an efficient
approximate solution for finding normalized graph cuts.
There are two options to assign labels:
* with 'kmeans' spectral clustering will cluster samples in the embedding space
using a kmeans algorithm
* whereas 'discrete' will iteratively search for the closest partition
space to the embedding space.
"""
print(__doc__)

# Author: Gael Varoquaux <gael.varoquaux@normalesup.org>, Brian Cheung
# License: BSD 3 clause

import time

import numpy as np
import scipy as sp
import matplotlib.pyplot as plt

from sklearn.feature_extraction import image
from sklearn.cluster import spectral_clustering
from sklearn.utils.testing import SkipTest
from sklearn.utils.fixes import sp_version

# scipy.misc.face() only exists from SciPy 0.12 on
if sp_version < (0, 12):
    raise SkipTest("Skipping because SciPy version earlier than 0.12.0 and "
                   "thus does not include the scipy.misc.face() image.")


# load the raccoon face as a numpy array
try:
    face = sp.face(gray=True)
except AttributeError:
    # Newer versions of scipy have face in misc
    from scipy import misc
    face = misc.face(gray=True)

# Resize it to 10% of the original size to speed up the processing
# NOTE(review): sp.misc.imresize was deprecated/removed in newer SciPy;
# this example targets the old API — confirm the pinned SciPy version.
face = sp.misc.imresize(face, 0.10) / 255.

# Convert the image into a graph with the value of the gradient on the
# edges.
graph = image.img_to_graph(face)

# Take a decreasing function of the gradient: an exponential
# The smaller beta is, the more independent the segmentation is of the
# actual image. For beta=1, the segmentation is close to a voronoi
beta = 5
eps = 1e-6
graph.data = np.exp(-beta * graph.data / graph.data.std()) + eps

# Apply spectral clustering (this step goes much faster if you have pyamg
# installed)
N_REGIONS = 25
#############################################################################
# Visualize the resulting regions, comparing the two label-assignment
# strategies of spectral clustering ('kmeans' vs 'discretize').
for assign_labels in ('kmeans', 'discretize'):
    t0 = time.time()
    labels = spectral_clustering(graph, n_clusters=N_REGIONS,
                                 assign_labels=assign_labels, random_state=1)
    t1 = time.time()
    # labels come back flattened; restore the image shape for contouring
    labels = labels.reshape(face.shape)

    plt.figure(figsize=(5, 5))
    plt.imshow(face, cmap=plt.cm.gray)
    for l in range(N_REGIONS):
        # BUG FIX: `contours=1` is not a valid plt.contour keyword (modern
        # matplotlib raises TypeError on unknown kwargs); drawing the boolean
        # mask with an explicit color is sufficient to outline each region.
        plt.contour(labels == l,
                    colors=[plt.cm.spectral(l / float(N_REGIONS))])
    plt.xticks(())
    plt.yticks(())
    title = 'Spectral clustering: %s, %.2fs' % (assign_labels, (t1 - t0))
    print(title)
    plt.title(title)
plt.show()
| bsd-3-clause |
weinbe58/QuSpin | examples/scripts/example3.py | 3 | 5466 | from __future__ import print_function, division
import sys,os
# line 4 and line 5 below are for development purposes and can be removed
qspin_path = os.path.join(os.getcwd(),"../../")
sys.path.insert(0,qspin_path)
########################################################################################
#                                   example 3                                          #
#  In this example we show how to use the photon_basis class to study spin chains      #
#  coupled to a single photon mode. To demonstrate this we simulate a single spin      #
#  and show how the semi-classical limit emerges in the limit that the number of       #
#  photons goes to infinity.                                                           #
########################################################################################
from quspin.basis import spin_basis_1d,photon_basis # Hilbert space bases
from quspin.operators import hamiltonian # Hamiltonian and observables
from quspin.tools.measurements import obs_vs_time # t_dep measurements
from quspin.tools.Floquet import Floquet,Floquet_t_vec # Floquet Hamiltonian
from quspin.basis.photon import coherent_state # HO coherent state
import numpy as np # generic math functions
#
##### define model parameters #####
Nph_tot=60 # maximum photon occupation
Nph=Nph_tot/2 # mean number of photons in initial coherent state (true division -> 30.0)
Omega=3.5 # drive frequency
A=0.8 # spin-photon coupling strength (drive amplitude)
Delta=1.0 # difference between atom energy levels
#
##### set up photon-atom Hamiltonian #####
# define operator site-coupling lists
ph_energy=[[Omega]] # photon energy
at_energy=[[Delta,0]] # atom energy
# coupling is scaled by 1/sqrt(Nph) so the semi-classical limit has a finite amplitude A
absorb=[[A/(2.0*np.sqrt(Nph)),0]] # absorption term
emit=[[A/(2.0*np.sqrt(Nph)),0]] # emission term
# define static and dynamics lists
static=[["|n",ph_energy],["x|-",absorb],["x|+",emit],["z|",at_energy]]
dynamic=[]
# compute atom-photon basis (single spin, L=1, tensored with photon mode)
basis=photon_basis(spin_basis_1d,L=1,Nph=Nph_tot)
# compute atom-photon Hamiltonian H
H=hamiltonian(static,dynamic,dtype=np.float64,basis=basis)
#
##### set up semi-classical Hamiltonian #####
# define operators
dipole_op=[[A,0]]
# define periodic drive and its parameters
def drive(t, Omega):
    """Classical periodic drive protocol: cos(Omega * t)."""
    return np.cos(t * Omega)
drive_args=[Omega]
# define semi-classical static and dynamic lists
static_sc=[["z",at_energy]]
dynamic_sc=[["x",dipole_op,drive,drive_args]]
# compute semi-classical basis (single spin only, no photon mode)
basis_sc=spin_basis_1d(L=1)
# compute semi-classical Hamiltonian H_{sc}(t)
H_sc=hamiltonian(static_sc,dynamic_sc,dtype=np.float64,basis=basis_sc)
#
##### define initial state #####
# define atom ground state
#psi_at_i=np.array([1.0,0.0]) # spin-down eigenstate of \sigma^z in QuSpin 0.2.3 or older
psi_at_i=np.array([0.0,1.0]) # spin-down eigenstate of \sigma^z in QuSpin 0.2.6 or newer
# define photon coherent state with mean photon number Nph
psi_ph_i=coherent_state(np.sqrt(Nph),Nph_tot+1)
# compute atom-photon initial state as a tensor product
psi_i=np.kron(psi_at_i,psi_ph_i)
#
##### calculate time evolution #####
# define time vector over 30 driving cycles with 100 points per period
t=Floquet_t_vec(Omega,30) # t.i = initial time, t.T = driving period
# evolve atom-photon state with Hamiltonian H (iterate=True yields states lazily)
psi_t=H.evolve(psi_i,t.i,t.vals,iterate=True,rtol=1E-9,atol=1E-9)
# evolve atom GS with semi-classical Hamiltonian H_sc
psi_sc_t=H_sc.evolve(psi_at_i,t.i,t.vals,iterate=True,rtol=1E-9,atol=1E-9)
#
##### define observables #####
# define observables parameters (symmetry/hermiticity checks disabled for speed)
obs_args={"basis":basis,"check_herm":False,"check_symm":False}
obs_args_sc={"basis":basis_sc,"check_herm":False,"check_symm":False}
# in atom-photon Hilbert space
n=hamiltonian([["|n", [[1.0 ]] ]],[],dtype=np.float64,**obs_args)
sz=hamiltonian([["z|",[[1.0,0]] ]],[],dtype=np.float64,**obs_args)
# NOTE(review): the operator below is \sigma^y although the plot legend labels
# it \sigma^x — confirm which component is intended.
sy=hamiltonian([["y|", [[1.0,0]] ]],[],dtype=np.complex128,**obs_args)
# in the semi-classical Hilbert space
sz_sc=hamiltonian([["z",[[1.0,0]] ]],[],dtype=np.float64,**obs_args_sc)
sy_sc=hamiltonian([["y",[[1.0,0]] ]],[],dtype=np.complex128,**obs_args_sc)
#
##### calculate expectation values #####
# in atom-photon Hilbert space
Obs_t = obs_vs_time(psi_t,t.vals,{"n":n,"sz":sz,"sy":sy})
O_n, O_sz, O_sy = Obs_t["n"], Obs_t["sz"], Obs_t["sy"]
# in the semi-classical Hilbert space
Obs_sc_t = obs_vs_time(psi_sc_t,t.vals,{"sz_sc":sz_sc,"sy_sc":sy_sc})
O_sz_sc, O_sy_sc = Obs_sc_t["sz_sc"], Obs_sc_t["sy_sc"]
##### plot results #####
import matplotlib.pyplot as plt
import pylab
# define legend labels
str_n = "$\\langle n\\rangle,$"
str_z = "$\\langle\\sigma^z\\rangle,$"
str_x = "$\\langle\\sigma^x\\rangle,$"
str_z_sc = "$\\langle\\sigma^z\\rangle_\\mathrm{sc},$"
str_x_sc = "$\\langle\\sigma^x\\rangle_\\mathrm{sc}$"
# plot spin-photon data (photon number rescaled by the initial mean Nph)
fig = plt.figure()
plt.plot(t.vals/t.T,O_n/Nph,"k",linewidth=1,label=str_n)
plt.plot(t.vals/t.T,O_sz,"c",linewidth=1,label=str_z)
plt.plot(t.vals/t.T,O_sy,"tan",linewidth=1,label=str_x)
# plot semi-classical data
plt.plot(t.vals/t.T,O_sz_sc,"b.",marker=".",markersize=1.8,label=str_z_sc)
plt.plot(t.vals/t.T,O_sy_sc,"r.",marker=".",markersize=2.0,label=str_x_sc)
# label axes
plt.xlabel("$t/T$",fontsize=18)
# set y axis limits
plt.ylim([-1.1,1.4])
# display legend horizontally
plt.legend(loc="upper right",ncol=5,columnspacing=0.6,numpoints=4)
# update axis font size
plt.tick_params(labelsize=16)
# turn on grid
plt.grid(True)
# save figure
plt.tight_layout()
plt.savefig('example3.pdf', bbox_inches='tight')
# show plot
#plt.show()
plt.close() | bsd-3-clause |
dyoung418/tensorflow | tensorflow/python/estimator/inputs/pandas_io.py | 86 | 4503 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Methods to allow pandas.DataFrame."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.estimator.inputs.queues import feeding_functions
try:
# pylint: disable=g-import-not-at-top
# pylint: disable=unused-import
import pandas as pd
HAS_PANDAS = True
except IOError:
# Pandas writes a temporary file during import. If it fails, don't use pandas.
HAS_PANDAS = False
except ImportError:
HAS_PANDAS = False
def pandas_input_fn(x,
                    y=None,
                    batch_size=128,
                    num_epochs=1,
                    shuffle=None,
                    queue_capacity=1000,
                    num_threads=1,
                    target_column='target'):
  """Returns input function that would feed Pandas DataFrame into the model.

  Note: `y`'s index must match `x`'s index.

  Args:
    x: pandas `DataFrame` object.
    y: pandas `Series` object. `None` if absent.
    batch_size: int, size of batches to return.
    num_epochs: int, number of epochs to iterate over data. If not `None`,
      read attempts that would exceed this value will raise `OutOfRangeError`.
    shuffle: bool, whether to read the records in random order.
    queue_capacity: int, size of the read queue. If `None`, it will be set
      roughly to the size of `x`.
    num_threads: Integer, number of threads used for reading and enqueueing. In
      order to have predicted and repeatable order of reading and enqueueing,
      such as in prediction and evaluation mode, `num_threads` should be 1.
    target_column: str, name to give the target column `y`.

  Returns:
    Function, that has signature of ()->(dict of `features`, `target`)

  Raises:
    ValueError: if `x` already contains a column with the same name as `y`, or
      if the indexes of `x` and `y` don't match.
    TypeError: `shuffle` is not bool.
  """
  if not HAS_PANDAS:
    raise TypeError(
        'pandas_input_fn should not be called without pandas installed')

  # shuffle must be set explicitly so training vs. eval behavior is deliberate
  if not isinstance(shuffle, bool):
    raise TypeError('shuffle must be explicitly set as boolean; '
                    'got {}'.format(shuffle))

  # copy so the target column can be appended without mutating the caller's frame
  x = x.copy()
  if y is not None:
    if target_column in x:
      raise ValueError(
          'Cannot use name %s for target column: DataFrame already has a '
          'column with that name: %s' % (target_column, x.columns))
    if not np.array_equal(x.index, y.index):
      raise ValueError('Index for x and y are mismatched.\nIndex for x: %s\n'
                       'Index for y: %s\n' % (x.index, y.index))
    x[target_column] = y

  # TODO(mdan): These are memory copies. We probably don't need 4x slack space.
  # The sizes below are consistent with what I've seen elsewhere.
  if queue_capacity is None:
    if shuffle:
      queue_capacity = 4 * len(x)
    else:
      queue_capacity = len(x)
  # NOTE(review): true division yields a float here under Python 3 — confirm
  # downstream _enqueue_data accepts a non-integer min_after_dequeue.
  min_after_dequeue = max(queue_capacity / 4, 1)

  def input_fn():
    """Pandas input function."""
    queue = feeding_functions._enqueue_data(  # pylint: disable=protected-access
        x,
        queue_capacity,
        shuffle=shuffle,
        min_after_dequeue=min_after_dequeue,
        num_threads=num_threads,
        enqueue_size=batch_size,
        num_epochs=num_epochs)
    if num_epochs is None:
      features = queue.dequeue_many(batch_size)
    else:
      features = queue.dequeue_up_to(batch_size)
    assert len(features) == len(x.columns) + 1, ('Features should have one '
                                                 'extra element for the index.')
    # first dequeued element is the DataFrame index; drop it
    features = features[1:]
    features = dict(zip(list(x.columns), features))
    if y is not None:
      target = features.pop(target_column)
      return features, target
    return features

  return input_fn
| apache-2.0 |
giorgiop/scipy | doc/source/tutorial/examples/normdiscr_plot2.py | 84 | 1642 | import numpy as np
import matplotlib.pyplot as plt
from scipy import stats
npoints = 20  # number of integer support points of the distribution minus 1
npointsh = npoints / 2
npointsf = float(npoints)
nbound = 4  # bounds for the truncated normal
normbound = (1 + 1 / npointsf) * nbound  # actual bounds of truncated normal
grid = np.arange(-npointsh, npointsh+2, 1)  # integer grid
gridlimitsnorm = (grid - 0.5) / npointsh * nbound  # bin limits for the truncnorm
gridlimits = grid - 0.5
grid = grid[:-1]
# per-bin probabilities of the truncated normal, one per integer support point
probs = np.diff(stats.truncnorm.cdf(gridlimitsnorm, -normbound, normbound))
gridint = grid

normdiscrete = stats.rv_discrete(
    values=(gridint, np.round(probs, decimals=7)),
    name='normdiscrete')

n_sample = 500
np.random.seed(87655678)  # fix the seed for replicability
rvs = normdiscrete.rvs(size=n_sample)
rvsnd = rvs
f, l = np.histogram(rvs, bins=gridlimits)
sfreq = np.vstack([gridint, f, probs*n_sample]).T

# BUG FIX: the original first assigned the plain relative frequencies to
# fs/ft and immediately overwrote them with the cumulative sums below; those
# dead assignments have been removed.  Only the CDF values are plotted.
fs = sfreq[:, 1].cumsum() / float(n_sample)  # empirical CDF of the sample
ft = sfreq[:, 2].cumsum() / float(n_sample)  # theoretical CDF
nd_std = np.sqrt(normdiscrete.stats(moments='v'))

ind = gridint  # the x locations for the groups
width = 0.35  # the width of the bars

plt.figure()
plt.subplot(111)
rects1 = plt.bar(ind, ft, width, color='b')
rects2 = plt.bar(ind+width, fs, width, color='r')
normline = plt.plot(ind+width/2.0, stats.norm.cdf(ind+0.5, scale=nd_std),
                    color='b')

plt.ylabel('cdf')
plt.title('Cumulative Frequency and CDF of normdiscrete')
plt.xticks(ind+width, ind)
plt.legend((rects1[0], rects2[0]), ('true', 'sample'))
plt.show()
| bsd-3-clause |
aev3/trading-with-python | lib/yahooFinance.py | 76 | 8290 | # -*- coding: utf-8 -*-
# Author: Jev Kuznetsov <jev.kuznetsov@gmail.com>
# License: BSD
"""
Toolset working with yahoo finance data
This module includes functions for easy access to YahooFinance data
Functions
----------
- `getHistoricData` get historic data for a single symbol
- `getQuote` get current quote for a symbol
- `getScreenerSymbols` load symbols from a yahoo stock screener file
Classes
---------
- `HistData` a class for working with multiple symbols
"""
from datetime import datetime, date
import urllib2
from pandas import DataFrame, Index, HDFStore, WidePanel
import numpy as np
import os
from extra import ProgressBar
def parseStr(s):
    """Convert a yahoo-finance csv field to float, NaN or string.

    Quoted fields are returned unquoted, 'N/A' becomes NaN, numeric fields
    (optionally with an 'M'illion/'B'illion suffix) become floats, and
    anything else is returned unchanged.
    """
    stripped = s.strip()
    if stripped[0] == '"':
        # quoted text field -> drop the surrounding quotes
        return stripped.strip('"')
    if stripped == 'N/A':
        return np.nan
    multipliers = {'M': 1e6, 'B': 1e9}
    try:
        suffix = stripped[-1]
        if suffix in multipliers:
            # value with a Million/Billion magnitude suffix
            return float(stripped[:-1]) * multipliers[suffix]
        return float(stripped)
    except ValueError:
        # not numeric after all -> hand back the original, unstripped field
        return s
# NOTE(review): this module is Python 2 only (print statements,
# `except Exception, e` syntax, urllib2, pandas WidePanel).
class HistData(object):
    ''' a class for working with yahoo finance data '''

    def __init__(self, autoAdjust=True):
        # default start date for downloads; autoAdjust back-adjusts OHLC
        # columns using adj_close (see _adjust)
        self.startDate = (2008,1,1)
        self.autoAdjust=autoAdjust
        self.wp = WidePanel()

    def load(self,dataFile):
        """load data from HDF"""
        if os.path.exists(dataFile):
            store = HDFStore(dataFile)
            # HDFStore keys come back as '/SYM'; strip the leading slash
            symbols = [str(s).strip('/') for s in store.keys() ]
            data = dict(zip(symbols,[store[symbol] for symbol in symbols]))
            self.wp = WidePanel(data)
            store.close()
        else:
            raise IOError('Data file does not exist')

    def save(self,dataFile):
        """ save data to HDF"""
        print 'Saving data to', dataFile
        store = HDFStore(dataFile)
        for symbol in self.wp.items:
            store[symbol] = self.wp[symbol]
        store.close()

    def downloadData(self,symbols='all'):
        ''' get data from yahoo; failures for individual symbols are
        printed and skipped rather than aborting the whole download '''
        if symbols == 'all':
            symbols = self.symbols
        #store = HDFStore(self.dataFile)
        p = ProgressBar(len(symbols))
        for idx,symbol in enumerate(symbols):
            try:
                df = getSymbolData(symbol,sDate=self.startDate,verbose=False)
                if self.autoAdjust:
                    df = _adjust(df,removeOrig=True)
                if len(self.symbols)==0:
                    self.wp = WidePanel({symbol:df})
                else:
                    self.wp[symbol] = df
            except Exception,e:
                print e
            p.animate(idx+1)

    def getDataFrame(self,field='close'):
        ''' return a slice on wide panel for a given field '''
        return self.wp.minor_xs(field)

    @property
    def symbols(self):
        # list of symbols currently held in the panel
        return self.wp.items.tolist()

    def __repr__(self):
        return str(self.wp)
def getQuote(symbols):
    ''' get current yahoo quote for one symbol or a list of symbols,
    return a DataFrame indexed by symbol '''
    # for codes see: http://www.gummy-stuff.org/Yahoo-data.htm
    if not isinstance(symbols,list):
        symbols = [symbols]
    header = ['symbol','last','change_pct','PE','time','short_ratio','prev_close','eps','market_cap']
    # yahoo field codes corresponding 1:1 to the header columns above
    request = str.join('', ['s', 'l1', 'p2' , 'r', 't1', 's7', 'p', 'e' , 'j1'])

    data = dict(zip(header,[[] for i in range(len(header))]))

    urlStr = 'http://finance.yahoo.com/d/quotes.csv?s=%s&f=%s' % (str.join('+',symbols), request)

    try:
        lines = urllib2.urlopen(urlStr).readlines()
    except Exception, e:
        # NOTE(review): on download failure only a message is printed and
        # `lines` stays undefined, so the loop below raises NameError —
        # confirm whether this should re-raise instead.
        s = "Failed to download:\n{0}".format(e);
        print s

    for line in lines:
        fields = line.strip().split(',')
        #print fields, len(fields)
        for i,field in enumerate(fields):
            data[header[i]].append( parseStr(field))

    # use the symbol column as the index
    idx = data.pop('symbol')

    return DataFrame(data,index=idx)
def _historicDataUrll(symbol, sDate=(1990,1,1),eDate=date.today().timetuple()[0:3]):
"""
generate url
symbol: Yahoo finanance symbol
sDate: start date (y,m,d)
eDate: end date (y,m,d)
"""
urlStr = 'http://ichart.finance.yahoo.com/table.csv?s={0}&a={1}&b={2}&c={3}&d={4}&e={5}&f={6}'.\
format(symbol.upper(),sDate[1]-1,sDate[2],sDate[0],eDate[1]-1,eDate[2],eDate[0])
return urlStr
def getHistoricData(symbols, **options):
    '''
    get data from Yahoo finance and return pandas dataframe
    Will get OHLCV data frame if a single symbol is provided.
    If many symbols are provided, it will return a wide panel

    Parameters
    ------------
    symbols: Yahoo finance symbol or a list of symbols
    sDate: start date (y,m,d)
    eDate: end date (y,m,d)
    adjust : T/[F] adjust data based on adj_close
    '''
    assert isinstance(symbols,(list,str)), 'Input must be a string symbol or a list of symbols'

    if isinstance(symbols,str):
        # single symbol -> plain DataFrame
        return getSymbolData(symbols,**options)
    else:
        # list of symbols -> download each and stack into a WidePanel
        data = {}
        print 'Downloading data:'
        p = ProgressBar(len(symbols))
        for idx,symbol in enumerate(symbols):
            p.animate(idx+1)
            data[symbol] = getSymbolData(symbol,verbose=False,**options)

        return WidePanel(data)
def getSymbolData(symbol, sDate=(1990,1,1),eDate=date.today().timetuple()[0:3], adjust=False, verbose=True):
    """
    get data from Yahoo finance and return pandas dataframe
    (returns None when the download fails)

    symbol: Yahoo finance symbol
    sDate: start date (y,m,d)
    eDate: end date (y,m,d)
    adjust: back-adjust OHLC columns with adj_close when True
    verbose: print the number of downloaded rows
    """
    urlStr = 'http://ichart.finance.yahoo.com/table.csv?s={0}&a={1}&b={2}&c={3}&d={4}&e={5}&f={6}'.\
      format(symbol.upper(),sDate[1]-1,sDate[2],sDate[0],eDate[1]-1,eDate[2],eDate[0])

    try:
        lines = urllib2.urlopen(urlStr).readlines()
    except Exception, e:
        s = "Failed to download:\n{0}".format(e);
        print s
        return None

    dates = []
    data = [[] for i in range(6)]
    #high
    # header : Date,Open,High,Low,Close,Volume,Adj Close
    for line in lines[1:]:
        #print line
        fields = line.rstrip().split(',')
        dates.append(datetime.strptime( fields[0],'%Y-%m-%d'))
        for i,field in enumerate(fields[1:]):
            data[i].append(float(field))

    idx = Index(dates)
    data = dict(zip(['open','high','low','close','volume','adj_close'],data))

    # create a pandas dataframe structure, sorted by date
    df = DataFrame(data,index=idx).sort()

    if verbose:
        print 'Got %i days of data' % len(df)

    if adjust:
        return _adjust(df,removeOrig=True)
    else:
        return df
def _adjust(df, removeOrig=False):
'''
_adjustust hist data based on adj_close field
'''
c = df['close']/df['adj_close']
df['adj_open'] = df['open']/c
df['adj_high'] = df['high']/c
df['adj_low'] = df['low']/c
if removeOrig:
df=df.drop(['open','close','high','low'],axis=1)
renames = dict(zip(['adj_open','adj_close','adj_high','adj_low'],['open','close','high','low']))
df=df.rename(columns=renames)
return df
def getScreenerSymbols(fileName):
    ''' read ticker symbols from a .csv saved by yahoo stock screener

    The first three lines are screener header/junk and are skipped; the
    symbol is the first comma-separated field of each remaining line,
    empty fields are ignored.
    '''
    with open(fileName, 'r') as fid:
        lines = fid.readlines()

    candidates = [line.strip().split(',')[0].strip() for line in lines[3:]]
    return [sym for sym in candidates if sym]
| bsd-3-clause |
awacha/sastool | tests/centering/beamfinding_test_evaluate.py | 1 | 1581 | import os
import numpy as np
import matplotlib.pyplot as plt
import re
import sastool
import matplotlib
matplotlib.rcParams['font.size']=8

# detector-pixel region of interest displayed for each map
xmin=40
xmax=210
ymin=40
ymax=210
# beam-centering methods whose saved results are compared side by side
modes=['slice','azim','azimfold']

# collect result files written by the beam-finding test run ('bcx<mode>_<fsn>.npy')
bcxfiles=[f for f in os.listdir('.') if re.match('bcx[a-z]+_([0-9]+).npy',f)]
print(bcxfiles)
# unique file sequence numbers (fsn) found among the result files
fsns=set([re.match('bcx[a-z]+_([0-9]+).npy',f).group(1) for f in bcxfiles])
print(fsns)

plt.figure(figsize=(11,7),dpi=80)
for f in fsns:
    f=int(f)
    print(f)
    # load the raw 2D scattering pattern for this fsn
    data,header=sastool.io.b1.read2dB1data(f,'ORG%05d','.')
    plt.clf()
    plt.subplot(3,5,1)
    plt.imshow(data[0],interpolation='nearest')
    # NOTE(review): modeidx is never used below — presumably leftover.
    modeidx=0
    # one row of panels per centering mode: bcx, bcy, distance and runtime maps
    for m,i in zip(modes,list(range(len(modes)))):
        bcx=np.load('bcx%s_%d.npy'%(m,f))
        bcy=np.load('bcy%s_%d.npy'%(m,f))
        dist=np.load('dist%s_%d.npy'%(m,f))
        xtime=np.load('time%s_%d.npy'%(m,f))
        # beam-center x deviation from its finite-valued mean
        plt.subplot(3,5,i*5+2)
        plt.imshow(bcx-np.mean(bcx[np.isfinite(bcx)]),interpolation='nearest')
        plt.axis((xmin-2,xmax+2,ymin-2,ymax+2))
        plt.colorbar()
        # beam-center y deviation from its finite-valued mean
        plt.subplot(3,5,i*5+3)
        plt.imshow(bcy-np.mean(bcy[np.isfinite(bcy)]),interpolation='nearest')
        plt.axis((xmin-2,xmax+2,ymin-2,ymax+2))
        plt.colorbar()
        plt.subplot(3,5,i*5+4)
        plt.imshow(dist,interpolation='nearest')
        plt.axis((xmin-2,xmax+2,ymin-2,ymax+2))
        plt.colorbar()
        plt.subplot(3,5,i*5+5)
        plt.imshow(xtime,interpolation='nearest')
        plt.axis((xmin-2,xmax+2,ymin-2,ymax+2))
        plt.colorbar()
    print("Saving image")
    plt.savefig('bftest_%d.pdf'%f)
| bsd-3-clause |
bakfu/bakfu | bakfu/process/vectorize/vec_sklearn.py | 2 | 3972 | # -*- coding: utf-8 -*-
'''
This is an interface to sklearn vectorizer classes.
'''
import sklearn
from sklearn.feature_extraction.text import CountVectorizer as SKCountVectorizer
from sklearn.feature_extraction.text import TfidfVectorizer as SKTfidfVectorizer
from ...core.routes import register
from .base import BaseVectorizer
import nltk.corpus
nltk.corpus.stopwords.words("english")
@register('vectorize.sklearn')
class CountVectorizer(BaseVectorizer):
    '''
    Wrapper around sklearn's CountVectorizer, registered on the
    'vectorize.sklearn' route.

    action : fit, transform, or fit_transform
    if transform, the CountVectorizer will act as a proxy to the previous
    instance (it reuses the vectorizer stored earlier in the chain).
    '''

    # declarative arg lists consumed by the bakfu framework
    init_args = ()
    init_kwargs = ('ngram_range', 'min_df',
                   'max_features', 'stop_words',
                   'tokenizer',
                   'action')
    run_args = ()
    run_kwargs = ()

    def __init__(self, *args, **kwargs):
        super(CountVectorizer, self).__init__(*args, **kwargs)
        self.action = kwargs.get('action','fit_transform')
        if self.action == 'transform':
            # proxy mode: the actual vectorizer is fetched from the chain in run()
            self.vectorizer = None
        else:
            # NOTE(review): kwargs may still contain 'action', which
            # SKCountVectorizer does not accept — confirm callers never pass
            # it together with fit/fit_transform.
            self.vectorizer = SKCountVectorizer(*args, **kwargs)

    def vectorize(self, data_source, *args, **kwargs):
        '''
        Calls fit_transform or transform on the wrapped vectorizer and
        caches the resulting matrix in self.results.
        '''
        # If vectorizer has already been used, reuse it for the new data set
        if hasattr(self.vectorizer, 'vocabulary_'):
            result = self.vectorizer.transform(data_source.get_data(),
                                               *args, **kwargs)
        else:
            result = self.vectorizer.fit_transform(data_source.get_data(),
                                                   *args, **kwargs)
        self.results = {'vectorizer':result}
        return result

    def run(self, caller, *args, **kwargs):
        # dispatch on self.action and publish the results on the chain
        super(CountVectorizer, self).run(caller, *args, **kwargs)
        data_source = caller.get_chain('data_source')
        if self.action == 'transform':
            # proxy mode: reuse the vectorizer fitted earlier in the chain
            self.vectorizer = caller.get_chain('vectorizer')
            result = self.vectorizer.transform(data_source.get_data())
        elif self.action == 'fit':
            result = self.vectorizer.fit(data_source.get_data())
        elif self.action == 'fit_transform':
            result = self.vectorizer.fit_transform(data_source.get_data())
        # NOTE(review): an unrecognized action leaves `result` unbound and
        # raises NameError below — confirm action is validated upstream.
        self.results = {'vectorizer':self.vectorizer, 'data':result}
        caller.data['vectorizer'] = self.vectorizer
        caller.data['result'] = result
        caller.data['vectorizer_result'] = result
        self.update(
            result=result,
            vectorizer_result=result,
            vectorizer=self.vectorizer
            )
        return self

    @classmethod
    def init_run(cls, caller, *args, **kwargs):
        '''
        By default, args and kwargs are used when creating Vectorizer

        Chain().load('data.simple',data).
        process('vectorize.sklearn',_init={'ngram_range':(2,5)}).data

        Chain().load('data.simple',data) \
            .process('vectorize.sklearn', \
                _init=((),{'ngram_range':(2,5)}),\
                _run=((),{}))
        '''
        # _init/_run payloads are delegated to the framework's static helper
        # (presumably provided by BaseVectorizer — verify).
        if '_init' in kwargs or '_run' in kwargs:
            return CountVectorizer.init_run_static(CountVectorizer,
                                                   caller,
                                                   *args, **kwargs)
        obj = cls(*args, **kwargs)
        obj.run(caller)
        return obj
@register('vectorize.tfidf')
class TfIdfVectorizer(CountVectorizer):
'''
sklearn CountVectorizer
'''
init_args = ()
init_kwargs = ('ngram_range', 'min_df',
'max_features', 'stop_words',
'tokenizer')
run_args = ()
run_kwargs = ()
def __init__(self, *args, **kwargs):
super(TfIdfVectorizer, self).__init__(*args, **kwargs)
self.vectorizer = SKTfidfVectorizer(*args, **kwargs) | bsd-3-clause |
bhargav/scikit-learn | examples/linear_model/plot_theilsen.py | 100 | 3846 | """
====================
Theil-Sen Regression
====================
Computes a Theil-Sen Regression on a synthetic dataset.
See :ref:`theil_sen_regression` for more information on the regressor.
Compared to the OLS (ordinary least squares) estimator, the Theil-Sen
estimator is robust against outliers. It has a breakdown point of about 29.3%
in case of a simple linear regression which means that it can tolerate
arbitrary corrupted data (outliers) of up to 29.3% in the two-dimensional
case.
The estimation of the model is done by calculating the slopes and intercepts
of a subpopulation of all possible combinations of p subsample points. If an
intercept is fitted, p must be greater than or equal to n_features + 1. The
final slope and intercept is then defined as the spatial median of these
slopes and intercepts.
In certain cases Theil-Sen performs better than :ref:`RANSAC
<ransac_regression>` which is also a robust method. This is illustrated in the
second example below where outliers with respect to the x-axis perturb RANSAC.
Tuning the ``residual_threshold`` parameter of RANSAC remedies this but in
general a priori knowledge about the data and the nature of the outliers is
needed.
Due to the computational complexity of Theil-Sen it is recommended to use it
only for small problems in terms of number of samples and features. For larger
problems the ``max_subpopulation`` parameter restricts the magnitude of all
possible combinations of p subsample points to a randomly chosen subset and
therefore also limits the runtime. Therefore, Theil-Sen is applicable to larger
problems with the drawback of losing some of its mathematical properties since
it then works on a random subset.
"""
# Author: Florian Wilhelm -- <florian.wilhelm@gmail.com>
# License: BSD 3 clause
import time
import numpy as np
import matplotlib.pyplot as plt
from sklearn.linear_model import LinearRegression, TheilSenRegressor
from sklearn.linear_model import RANSACRegressor
print(__doc__)

# candidate estimators fitted on the same corrupted data sets
estimators = [('OLS', LinearRegression()),
              ('Theil-Sen', TheilSenRegressor(random_state=42)),
              ('RANSAC', RANSACRegressor(random_state=42)), ]
colors = {'OLS': 'turquoise', 'Theil-Sen': 'gold', 'RANSAC': 'lightgreen'}
lw = 2  # line width of the fitted regression lines

##############################################################################
# Outliers only in the y direction

np.random.seed(0)
n_samples = 200
# Linear model y = 3*x + N(2, 0.1**2)
x = np.random.randn(n_samples)
w = 3.
c = 2.
noise = 0.1 * np.random.randn(n_samples)
y = w * x + c + noise
# 10% outliers
y[-20:] += -20 * x[-20:]
X = x[:, np.newaxis]

plt.scatter(x, y, color='indigo', marker='x', s=40)
line_x = np.array([-3, 3])
for name, estimator in estimators:
    t0 = time.time()
    estimator.fit(X, y)
    elapsed_time = time.time() - t0
    # predict at the two endpoints; enough to draw the fitted line
    y_pred = estimator.predict(line_x.reshape(2, 1))
    plt.plot(line_x, y_pred, color=colors[name], linewidth=lw,
             label='%s (fit time: %.2fs)' % (name, elapsed_time))

plt.axis('tight')
plt.legend(loc='upper left')
plt.title("Corrupt y")

##############################################################################
# Outliers in the X direction (this is the case that perturbs RANSAC)

np.random.seed(0)
# Linear model y = 3*x + N(2, 0.1**2)
x = np.random.randn(n_samples)
noise = 0.1 * np.random.randn(n_samples)
y = 3 * x + 2 + noise
# 10% outliers
x[-20:] = 9.9
y[-20:] += 22
X = x[:, np.newaxis]

plt.figure()
plt.scatter(x, y, color='indigo', marker='x', s=40)

line_x = np.array([-3, 10])
for name, estimator in estimators:
    t0 = time.time()
    estimator.fit(X, y)
    elapsed_time = time.time() - t0
    y_pred = estimator.predict(line_x.reshape(2, 1))
    plt.plot(line_x, y_pred, color=colors[name], linewidth=lw,
             label='%s (fit time: %.2fs)' % (name, elapsed_time))

plt.axis('tight')
plt.legend(loc='upper left')
plt.title("Corrupt x")
plt.show()
| bsd-3-clause |
scikit-nano/scikit-nano | setup.py | 2 | 10936 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Python toolkit for generating and analyzing nanostructure data"""
from __future__ import absolute_import, division, print_function, \
unicode_literals
__docformat__ = 'restructuredtext en'
import os
import sys
import shutil
import subprocess
from distutils.command.clean import clean as Clean
if sys.version_info[0] < 3:
raise RuntimeError("Python version 3.4+ required.\n\n"
"Sorry, but there are features of Python 3\n"
"that I want to take advantage of and without\n"
"worrying about Python 2 compatibility.\n"
"Therefore, Python 2 support was removed starting\n"
"in v0.3.7. Once/if I learn how to automate the\n"
"backporting process from the setup script,\n"
"I will restore Python 2 support that way.\n"
"Until then, if you must install this for Python 2\n"
"you're on your own. It shouldn't be difficult\n"
"but you'll have to manually backport the package\n"
"source code using a Python 3 to Python 2\n"
"compatibility library such as the python `future`\n"
"module, which provides a python script called\n"
"`pasteurize` that can be run on the source\n"
"directory to automate the backporting process.\n"
"You'll also need to hack this setup script\n"
"to remove any exceptions that are raised when\n"
"executed under Python 2.")
#if sys.version_info[:2] < (2, 7) or (3, 0) <= sys.version_info[:2] < (3, 4):
if (3, 0) <= sys.version_info[:2] < (3, 4):
raise RuntimeError("Python 3.4+ required.")
if sys.version_info[0] >= 3:
import builtins
else:
import __builtin__ as builtins
try:
import setuptools
except ImportError:
sys.exit("setuptools required for Python3 install.\n"
"`pip install --upgrade setuptools`")
DISTNAME = 'scikit-nano'
DESCRIPTION = __doc__
LONG_DESCRIPTION = ''.join(open('README.rst').readlines()[6:])
AUTHOR = 'Andrew Merrill'
AUTHOR_EMAIL = 'androomerrill@gmail.com'
MAINTAINER = AUTHOR
MAINTAINER_EMAIL = AUTHOR_EMAIL
URL = 'http://scikit-nano.org/doc'
DOWNLOAD_URL = 'http://github.com/androomerrill/scikit-nano'
KEYWORDS = ['nano', 'nanoscience', 'nano-structure', 'nanostructure',
'nanotube', 'graphene', 'LAMMPS', 'XYZ', 'structure',
'analysis']
LICENSE = 'BSD 2-Clause'
CLASSIFIERS = """\
Development Status :: 4 - Beta
Intended Audience :: Science/Research
Intended Audience :: Developers
License :: OSI Approved :: BSD License
Operating System :: Microsoft :: Windows
Operating System :: POSIX
Operating System :: Unix
Operating System :: MacOS
Programming Language :: Python
Programming Language :: Python :: 3.4
Topic :: Scientific/Engineering
Topic :: Scientific/Engineering :: Chemistry
Topic :: Scientific/Engineering :: Physics
Topic :: Scientific/Engineering :: Visualization
Topic :: Software Development
Topic :: Software Development :: Libraries :: Python Modules
"""
# package version, assembled as MAJOR.MINOR.MICRO
MAJOR = 0
MINOR = 3
MICRO = 21
ISRELEASED = True
VERSION = '%d.%d.%d' % (MAJOR, MINOR, MICRO)

# Most recent stable release.  When left as None it is derived below:
# the current version for a release build, otherwise the previous micro
# release is assumed to be the latest stable one.
STABLEVERSION = None
if STABLEVERSION is None:
    if ISRELEASED:
        STABLEVERSION = VERSION
    else:
        STABLEVERSION = '%d.%d.%d' % (MAJOR, MINOR, MICRO - 1)
# Return the GIT version as a string
# Return the GIT version as a string
def git_version():
    """Return the current git HEAD revision, or 'Unknown' when git fails."""
    def _run_git(cmd):
        # minimal, locale-neutral environment so git's output is stable
        env = {k: os.environ[k] for k in ('SYSTEMROOT', 'PATH')
               if k in os.environ}
        # LANGUAGE is used on win32
        env['LANGUAGE'] = 'C'
        env['LANG'] = 'C'
        env['LC_ALL'] = 'C'
        proc = subprocess.Popen(cmd, stdout=subprocess.PIPE, env=env)
        return proc.communicate()[0]

    try:
        revision = _run_git(['git', 'rev-parse', 'HEAD']).strip().decode('ascii')
    except OSError:
        # git binary missing / not executable
        revision = "Unknown"

    return revision
# BEFORE importing distutils, remove MANIFEST. distutils doesn't properly
# update it when the contents of directories change.
if os.path.exists('MANIFEST'):
    os.remove('MANIFEST')

# This is a bit (!) hackish: we are setting a global variable so that the main
# sknano __init__ can detect if it is being loaded by the setup routine, to
# avoid attempting to load components that aren't built yet.
# (The flag lives on the builtins module so it is visible from any module.)
builtins.__SKNANO_SETUP__ = True
class CleanCommand(Clean):
    """``clean`` command that additionally removes build output, compiled
    extension files, ``__pycache__`` and ``.ropeproject`` directories from
    the source and doc trees."""

    description = (
        "Remove build directories, __pycache__ directories, "
        ".ropeproject directories, and compiled files in the source tree.")

    def run(self):
        Clean.run(self)
        if os.path.exists('build'):
            shutil.rmtree('build')
        compiled_exts = ('.so', '.pyd', '.pyc', '.dll')
        junk_dirs = ('__pycache__', '.ropeproject')
        # Scrub compiled artifacts and cache directories from the package.
        for root, subdirs, files in os.walk('sknano'):
            for fname in files:
                if fname.endswith(compiled_exts):
                    os.unlink(os.path.join(root, fname))
            for sub in subdirs:
                if sub in junk_dirs:
                    shutil.rmtree(os.path.join(root, sub))
        # The doc tree only accumulates cache directories.
        for root, subdirs, _files in os.walk('doc'):
            for sub in subdirs:
                if sub in junk_dirs:
                    shutil.rmtree(os.path.join(root, sub))
def get_version_info():
    """Return ``(FULLVERSION, GIT_REVISION)`` for the current checkout.

    # Adding the git rev number needs to be done inside write_version_py(),
    # otherwise the import of sknano.version messes up the build under
    # Python 3.
    """
    if os.path.exists('.git'):
        git_rev = git_version()
    elif os.path.exists('sknano/version.py'):
        # must be a source distribution, use existing version file;
        # load it as a separate module to not load sknano/__init__.py
        import imp
        version_mod = imp.load_source('sknano.version', 'sknano/version.py')
        git_rev = version_mod.git_revision
    else:
        git_rev = "Unknown"

    full_version = VERSION
    if not ISRELEASED:
        # Append a local dev tag with the abbreviated revision hash.
        full_version += '.dev0+' + git_rev[:7]
    return full_version, git_rev
def write_version_py(filename='sknano/version.py'):
    """Generate the ``sknano/version.py`` module.

    The generated module records the short/full version strings, the git
    revision, the stable version and the release flag, so installed copies
    of the package can report exactly what they were built from.

    Parameters
    ----------
    filename : str, optional
        Path of the version module to (over)write.
    """
    # NOTE: the ``version = full_version`` line must be indented under the
    # ``if not release:`` guard — without the indent the generated module
    # raises an IndentationError when imported.
    cnt = """
# THIS FILE IS GENERATED FROM SCIKIT-NANO SETUP.PY
short_version = '%(version)s'
version = '%(version)s'
full_version = '%(full_version)s'
git_revision = '%(git_revision)s'
release = %(isrelease)s
stable_version = '%(stable_version)s'
if not release:
    version = full_version
"""
    FULLVERSION, GIT_REVISION = get_version_info()
    # Context manager replaces the old explicit try/finally open/close pair.
    with open(filename, 'w') as fh:
        fh.write(cnt % {'version': VERSION,
                        'full_version': FULLVERSION,
                        'git_revision': GIT_REVISION,
                        'isrelease': str(ISRELEASED),
                        'stable_version': STABLEVERSION})
def configuration(parent_package='', top_path=None):
    """numpy.distutils configuration hook: registers the ``sknano``
    subpackage and reads the version from the generated version file."""
    from numpy.distutils.misc_util import Configuration
    config = Configuration(None, parent_package, top_path)
    config.set_options(ignore_setup_xxx_py=True,
                       assume_default_configuration=True,
                       delegate_options_to_subpackages=True,
                       quiet=True)
    config.add_subpackage('sknano')
    # sknano/version.py is (re)generated by write_version_py() before this
    # hook runs — see setup_package().
    config.get_version('sknano/version.py')
    return config
def setup_package():
    """Main entry point: regenerate the version file, compute the
    numpy/scipy requirements, build the metadata dict and call setup()."""
    # Rewrite the version file everytime
    write_version_py()
    # Figure out whether to add ``*_requires = ['numpy>=`min version`',
    # 'scipy>=`min version`']``. We don't want to do that unconditionally,
    # because we risk updating an installed numpy/scipy which fails too often.
    # Just if the minimum version is not installed, we may give it a try.
    build_requires = []
    try:
        import numpy
        # Keep only (major, minor) for the comparison below.
        numpy_version = \
            tuple(
                list(map(int, numpy.version.short_version.split('.')[:3]))[:2])
        if numpy_version < (1, 9):
            raise RuntimeError
    except (AttributeError, ImportError, RuntimeError):
        build_requires += ['numpy==1.10.1']
    install_requires = build_requires[:]
    try:
        import scipy
        scipy_version = \
            tuple(
                list(map(int, scipy.version.short_version.split('.')[:3]))[:2])
        if scipy_version < (0, 14):
            raise RuntimeError
    except (AttributeError, ImportError, RuntimeError):
        install_requires += ['scipy==0.16.1']
    # # Add six module to install_requires (used in numpydoc git submodule)
    # install_requires += ['six>=1.9']
    # # Add future module to install requires
    # install_requires += ['future>=0.14.3']
    install_requires += ['monty>=0.7.0', 'pymatgen>=3.2.4']
    # DISTNAME, AUTHOR, etc. are module-level constants defined near the
    # top of this setup script.
    metadata = dict(
        name=DISTNAME,
        author=AUTHOR,
        author_email=AUTHOR_EMAIL,
        maintainer=MAINTAINER,
        maintainer_email=MAINTAINER_EMAIL,
        description=DESCRIPTION,
        long_description=LONG_DESCRIPTION,
        url=URL,
        download_url=DOWNLOAD_URL,
        license=LICENSE,
        keywords=KEYWORDS,
        classifiers=[_f for _f in CLASSIFIERS.split('\n') if _f],
        platforms=["Windows", "Linux", "Solaris", "Mac OS-X", "Unix"],
        test_suite='nose.collector',
        setup_requires=build_requires,
        install_requires=install_requires,
        extras_require={
            'plotting': ['matplotlib>=1.4.3', 'palettable>=2.1.1']
        },
        entry_points={
            'console_scripts': [
                'analyze_structure = sknano.scripts.analyze_structure:main',
                'nanogen = sknano.scripts.nanogen:main',
                'nanogenui = sknano.scripts.nanogenui:main',
                'sknano = sknano.scripts.sknano:main'],
        },
        cmdclass={'clean': CleanCommand},
        zip_safe=False, # the package can run out of an .egg file
        include_package_data=True,
    )
    if len(sys.argv) >= 2 and \
            ('--help' in sys.argv[1:] or sys.argv[1]
            in ('--help-commands', 'egg_info', '--version', 'clean')):
        # For these actions, NumPy/SciPy are not required.
        # They are required to succeed without them when, for example,
        # pip is used to install Scipy when Numpy is not yet present in
        # the system.
        try:
            from setuptools import setup
        except ImportError:
            from distutils.core import setup
        FULLVERSION, GIT_REVISION = get_version_info()
        metadata['version'] = FULLVERSION
    else:
        # Full build path: numpy.distutils drives the configuration hook.
        from numpy.distutils.core import setup
        metadata['configuration'] = configuration
    setup(**metadata)
# Standard script entry point.
if __name__ == '__main__':
    setup_package()
| bsd-2-clause |
poppingtonic/BayesDB | bayesdb/tests/experiments/estimate_the_full_joint_dist.py | 2 | 6142 | #
# Copyright (c) 2010-2014, MIT Probabilistic Computing Project
#
# Lead Developers: Jay Baxter and Dan Lovell
# Authors: Jay Baxter, Dan Lovell, Baxter Eaves, Vikash Mansinghka
# Research Leads: Vikash Mansinghka, Patrick Shafto
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import matplotlib
matplotlib.use('Agg')
from bayesdb.client import Client
import experiment_utils as eu
import random
import numpy
import pylab
import time
import os
def run_experiment(argin):
    """Run the estimate-the-full-joint experiment.

    Generates a synthetic table, holds out its last row, then for each
    model configuration ('cc' crosscat, 'crp' mixture, 'nb' naive Bayes)
    runs ``num_iters`` analysis steps, estimating after each step the
    probability of each held-out cell and its squared error against the
    true density.

    Parameters: ``argin`` is a dict of experiment settings (see
    gen_parser() for the keys). Returns a dict of per-iteration mean
    probabilities and MSEs per configuration, plus the config itself.
    """
    num_iters = argin["num_iters"]
    num_chains = argin["num_chains"]
    num_rows = argin["num_rows"]
    num_cols = argin["num_cols"]
    num_views = argin["num_views"]
    num_clusters = argin["num_clusters"]
    separation = argin["separation"]
    seed = argin["seed"]
    ct_kernel = argin["ct_kernel"]
    if seed > 0:
        random.seed(seed)
    # Per-view/column settings expected by the data generator.
    argin['cctypes'] = ['continuous']*num_cols
    argin['separation'] = [argin['separation']]*num_views
    # have to generate synthetic data
    filename = "exp_estimate_joint_ofile.csv"
    table_name = 'exp_estimate_joint'
    # generate starting data
    T_o, structure = eu.gen_data(filename, argin, save_csv=True)
    # generate a new csv with bottom row removed (held-out data)
    data_filename = 'exp_estimate_joint.csv'
    T_h = eu.gen_held_out_data(filename, data_filename, 1)
    # get the column names
    with open(filename, 'r') as f:
        csv_header = f.readline()
    col_names = csv_header.split(',')
    col_names[-1] = col_names[-1].strip()
    # set up a dict for the different config data
    result = dict()
    # True density of each held-out cell (last row of the original table),
    # used as the reference when computing squared errors below.
    true_held_out_p = []
    for col in range(num_cols):
        x = T_o[-1,col]
        logp = eu.get_true_logp(numpy.array([x]), col, structure)
        true_held_out_p.append(numpy.exp(logp))
    # start a client
    client = Client()
    # do analyses
    for config in ['cc', 'crp', 'nb']:
        config_string = eu.config_map[config]
        table = table_name + '-' + config
        # drop old btable, create a new one with the new data and init models
        client('DROP BTABLE %s;' % table, yes=True)
        client('CREATE BTABLE %s FROM %s;' % (table, data_filename))
        client('INITIALIZE %i MODELS FOR %s %s;' % (num_chains, table, config_string))
        these_ps = numpy.zeros(num_iters)
        these_ps_errors = numpy.zeros(num_iters)
        for i in range(num_iters):
            # One analysis step per iteration; ct_kernel selects MH vs default.
            if ct_kernel == 1:
                client('ANALYZE %s FOR 1 ITERATIONS WITH MH KERNEL WAIT;' % table )
            else:
                client('ANALYZE %s FOR 1 ITERATIONS WAIT;' % table )
            # impute each index in indices and calculate the squared error
            mean_p = []
            mean_p_error = []
            for col in range(0,num_cols):
                col_name = col_names[col]
                x = T_o[-1,col]
                out = client('SELECT PROBABILITY OF %s=%f from %s;' % (col_name, x, table), pretty=False, pandas_output=False)
                p = out[0]['data'][0][1]
                mean_p.append(p)
                mean_p_error.append( (true_held_out_p[col]-p)**2.0 )
            these_ps[i] = numpy.mean(mean_p)
            these_ps_errors[i] = numpy.mean(mean_p_error)
        key_str_p = 'mean_held_out_p_' + config
        key_str_error = 'mean_error_' + config
        result[key_str_p] = these_ps
        result[key_str_error] = these_ps_errors
    # Repackage under the names expected by the plotting utilities.
    retval = dict()
    retval['MSE_naive_bayes_indexer'] = result['mean_error_nb']
    retval['MSE_crp_mixture_indexer'] = result['mean_error_crp']
    retval['MSE_crosscat_indexer'] = result['mean_error_cc']
    retval['MEAN_P_naive_bayes_indexer'] = result['mean_held_out_p_nb']
    retval['MEAN_P_crp_mixture_indexer'] = result['mean_held_out_p_crp']
    retval['MEAN_P_crosscat_indexer'] = result['mean_held_out_p_cc']
    retval['config'] = argin
    return retval
def gen_parser():
    """Build the command-line argument parser for this experiment.

    Returns
    -------
    argparse.ArgumentParser
        Parser whose defaults mirror the experiment's standard settings.
    """
    # Import locally: at module level ``argparse`` is only imported inside
    # the ``__main__`` guard below, so without this import the function
    # raised NameError when the module was imported and gen_parser() called
    # from other code.
    import argparse
    parser = argparse.ArgumentParser()
    parser.add_argument('--num_iters', default=100, type=int)
    parser.add_argument('--num_chains', default=20, type=int)
    parser.add_argument('--num_rows', default=300, type=int)
    parser.add_argument('--num_cols', default=20, type=int)
    parser.add_argument('--num_clusters', default=8, type=int)
    parser.add_argument('--num_views', default=4, type=int)
    parser.add_argument('--separation', default=.9, type=float) # separation (0-1) between clusters
    parser.add_argument('--seed', default=0, type=int)
    parser.add_argument('--ct_kernel', default=0, type=int) # 0 for gibbs, 1 for MH
    parser.add_argument('--no_plots', action='store_true')
    return parser
# Script entry point: parse CLI args, run the experiment through
# ExperimentRunner, then optionally render one plot per result.
if __name__ == "__main__":
    import argparse
    import experiment_runner.experiment_utils as eru
    from experiment_runner.ExperimentRunner import ExperimentRunner, propagate_to_s3
    parser = gen_parser()
    args = parser.parse_args()
    argsdict = eu.parser_args_to_dict(args)
    generate_plots = not argsdict['no_plots']
    results_filename = 'estimate_the_full_joint_results'
    dirname_prefix = 'estimate_the_full_joint'
    # storage_type='fs' keeps results on the local filesystem.
    er = ExperimentRunner(run_experiment, dirname_prefix=dirname_prefix, bucket_str='experiment_runner', storage_type='fs')
    er.do_experiments([argsdict])
    if generate_plots:
        for id in er.frame.index:
            result = er._get_result(id)
            this_dirname = eru._generate_dirname(dirname_prefix, 10, result['config'])
            filename_img = os.path.join(dirname_prefix, this_dirname, results_filename+'.png')
            eu.plot_estimate_the_full_joint(result, filename=filename_img)
            pass
        pass
| apache-2.0 |
chrishavlin/nyc_taxi_viz | src/taxi_main.py | 1 | 12620 | """
taxi_main.py
module for loading the raw csv taxi files.
Copyright (C) 2016 Chris Havlin, <https://chrishavlin.wordpress.com>
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
The database is NOT distributed with the code here.
Data source:
NYC Taxi & Limousine Commision, TLC Trip Record Data
<http://www.nyc.gov/html/tlc/html/about/trip_record_data.shtml>
"""
"""--------------
Import libraries:
-----------------"""
import numpy as np
import time,os
import matplotlib.pyplot as plt
from matplotlib import cm
import taxi_plotmod as tpm
import datetime as dt
"""---------
Functions
------------"""
def read_all_variables(f,there_is_a_header,VarImportList):
    """
    reads in the raw data from a single file

    input:
        f                   file object
        there_is_a_header   logical flag
        VarImportList       a list of strings identifying which
                            data to read in and save

        possible variables: 'pickup_time_hr','dist_mi','speed_mph','psgger','fare',
                            'tips','payment_type','pickup_lon','pickup_lat','drop_lon',
                            'drop_lat','elapsed_time_min'

    output:
        Vars        a 2D array, each row is a single taxi
                    pickup instance, each column is a different variable
        Var_list    a list of strings where the index of each
                    entry corresponds to the column of Vars
        Date        a 1D datetime64[D] array of pickup dates, one per row

    NOTE(review): a raw record is assumed to have exactly 19 comma-separated
    fields (the TLC yellow-cab schema); shorter/longer lines are dropped.
    """
    # First pass: count the number of data lines so the arrays can be
    # pre-allocated (the file is rewound afterwards).
    # count number of lines
    indx=0
    for line in f:
        indx=indx+1
    if there_is_a_header:
        indx = indx-1
    Nlines = indx
    # Initialize Variable Array and List
    N_VarImport=len(VarImportList)
    Date=np.empty(Nlines,dtype='datetime64[D]')
    Vars=np.zeros((indx,N_VarImport))
    Var_list=[None] * N_VarImport
    # Go back to start of file, loop again to read variables
    f.seek(0)
    if there_is_a_header:
        headerline=f.readline()
    indx=0
    # loop over lines, store variables
    prevprog=0
    zero_lines=0
    for line in f:
        # Progress report every 5% for files with more than 500 lines.
        prog= round(float(indx) / float(Nlines-1) * 100)
        if prog % 5 == 0 and prog != prevprog and Nlines > 500:
            print ' ',int(prog),'% of file read ...'
            prevprog=prog
        line = line.rstrip()
        line = line.split(',')
        # var_indx tracks the output column; the order of the if-blocks
        # below fixes the column order recorded in Var_list.
        var_indx = 0
        if len(line) == 19:
            dates=line[1].split()[0] # the date string, "yyyy-mm-dd"
            #dates=dates.split('-')
            #dtim=dt.date(int(dates[0]),int(dates[1]),int(dates[2]))
            #Date.append(dtim)
            Date[indx]=np.datetime64(dates)
            if 'pickup_time_hr' in VarImportList:
                Vars[indx,var_indx]=datetime_string_to_time(line[1],'hr')
                Var_list[var_indx]='pickup_time_hr'
                var_indx=var_indx+1
            # Vars[indx,var_indx]=np.datetime64(dates)
            # Var_list[var_indx]='date'
            # var_indx=var_indx+1
            if 'dropoff_time_hr' in VarImportList:
                Vars[indx,var_indx]=datetime_string_to_time(line[2],'hr')
                Var_list[var_indx]='dropoff_time_hr'
                var_indx=var_indx+1
            if 'dist_mi' in VarImportList:
                Vars[indx,var_indx]=float(line[4]) # distance travelled [mi]
                Var_list[var_indx]='dist_mi'
                var_indx=var_indx+1
            if 'elapsed_time_min' in VarImportList:
                pickup=datetime_string_to_time(line[1],'hr')*60.0
                drop=datetime_string_to_time(line[2],'hr')*60.0
                # A dropoff time earlier than pickup means the trip crossed
                # midnight: add 24 hours to the dropoff.
                if drop >= pickup:
                    Vars[indx,var_indx]=drop - pickup
                elif drop < pickup:
                    #print 'whoops:',pickup/60,drop/60,(drop+24*60.-pickup)/60
                    Vars[indx,var_indx]=drop+24.0*60.0 - pickup
                Var_list[var_indx]='elapsed_time_min'
                var_indx=var_indx+1
            if 'speed_mph' in VarImportList:
                pickup=datetime_string_to_time(line[1],'min')
                drop=datetime_string_to_time(line[2],'min')
                dist=float(line[4]) # [mi]
                # Same midnight-crossing correction as above; zero-duration
                # trips are assigned zero speed to avoid dividing by zero.
                if drop > pickup:
                    speed=dist / ((drop - pickup)/60.0) # [mi/hr]
                elif drop < pickup:
                    dT=(drop+24.0*60.0 - pickup)/60.0
                    speed=dist / dT # [mi/hr]
                else:
                    speed=0
                Vars[indx,var_indx]=speed
                Var_list[var_indx]='speed_mph'
                var_indx=var_indx+1
            if 'pickup_lat' in VarImportList:
                Vars[indx,var_indx]=float(line[6])
                Var_list[var_indx]='pickup_lat'
                var_indx=var_indx+1
            if 'pickup_lon' in VarImportList:
                Vars[indx,var_indx]=float(line[5])
                Var_list[var_indx]='pickup_lon'
                var_indx=var_indx+1
            if 'drop_lat' in VarImportList:
                Vars[indx,var_indx]=float(line[10])
                Var_list[var_indx]='drop_lat'
                var_indx=var_indx+1
            if 'drop_lon' in VarImportList:
                Vars[indx,var_indx]=float(line[9])
                Var_list[var_indx]='drop_lon'
                var_indx=var_indx+1
            if 'psgger' in VarImportList:
                Vars[indx,var_indx]=float(line[3])
                Var_list[var_indx]='pssger'
                var_indx=var_indx+1
            if 'fare' in VarImportList:
                Vars[indx,var_indx]=float(line[12])
                Var_list[var_indx]='fare'
                var_indx=var_indx+1
            if 'tips' in VarImportList:
                Vars[indx,var_indx]=float(line[15])
                Var_list[var_indx]='tips'
                var_indx=var_indx+1
            if 'payment_type' in VarImportList:
                Vars[indx,var_indx]=float(line[11])
                Var_list[var_indx]='payment_type'
                var_indx=var_indx+1
            indx=indx+1
        else:
            zero_lines=zero_lines+1
    # remove zero lines, which will be padded at end
    if zero_lines>0:
        Vars=Vars[0:Nlines-zero_lines,:]
        Date=Date[0:Nlines-zero_lines]
    return Vars,Var_list,Date
def datetime_string_to_time(dt_string,time_units):
    """ converts datetime string to time-of-day in units of time_units

    dt_string should be in datetime format: "yyyy-mm-dd hh:mm:ss"
                                            "2016-04-18 18:31:43"
    time_units is one of 'hr', 'min' or 'sec'.

    Returns the time of day as a float in the requested units.

    Raises ValueError for an unrecognized time_units (previously an
    unknown unit crashed with UnboundLocalError on ``a``).
    """
    t_string=dt_string.split()[1] # remove the space, take the time string
    t_hms=t_string.split(':')     # split into hr, min, sec
    # unit conversion factors depending on time_units:
    if time_units == 'hr':
        a = [1.0, 1.0/60.0, 1.0/3600.0]
    elif time_units == 'min':
        a = [60.0, 1.0, 1.0/60.0]
    elif time_units == 'sec':
        a = [3600.0, 60.0, 1.0]
    else:
        raise ValueError(
            "time_units must be 'hr', 'min' or 'sec', got %r" % (time_units,))
    time_flt=float(t_hms[0])*a[0]+float(t_hms[1])*a[1]+float(t_hms[2])*a[2]
    return time_flt
def read_taxi_files(dir_base,Vars_To_Import):
    """ loops over all taxi files in a directory, stores them in memory

    input:
        dir_base        the directory to look for .csv taxi files
        Vars_To_Import  a list of strings identifying which data to read in
                        and save

        possible variables: 'pickup_time_hr','dist_mi','speed_mph','psgger','fare',
                            'tips','payment_type','pickup_lon','pickup_lat','drop_lon',
                            'drop_lat','elapsed_time_min'

    output:
        VarBig     a 2D array, each row is a single taxi pickup instance,
                   each column is a different variable. Data aggregated from
                   all files in the directory.
        Var_list   a list of strings where the index of each entry
                   corresponds to the column of VarBig
        Dates      1D datetime64[D] array of pickup dates, one per row

    NOTE(review): time.clock() is deprecated (removed in Python 3.8);
    consider time.perf_counter() if this module is ever ported to Python 3.
    """
    N_files=len(os.listdir(dir_base)) # number of files in directory
    ifile = 1 # file counter
    Elapsed_tot=0 # time counter
    #Dates=[]
    for fn in os.listdir(dir_base): # loop over directory contents
        if os.path.isfile(dir_base+fn): # is the current path object a file?
            flnm=dir_base + fn # construct the file name
            print 'Reading File ', ifile,' of ', N_files
            start = time.clock() # start timer
            fle = open(flnm, 'r') # open the file for reading
            # distribute current file to lat/lon bins:
            VarChunk,Var_list,DateChunk=read_all_variables(fle,True,Vars_To_Import)
            # First file initializes the aggregate arrays; subsequent files
            # are stacked/concatenated onto them.
            if ifile == 1:
                VarBig = VarChunk
                Dates=DateChunk#np.array([tuple(DateChunk)], dtype='datetime64[D]')
                print Dates.shape,DateChunk.shape,VarChunk.shape
                #Dates.extend(DateChunk)
            else:
                VarBig = np.vstack((VarBig,VarChunk))
                #DateChunk=np.array([tuple(DateChunk)],dtype='datetime64[D]')
                print Dates.shape,DateChunk.shape,VarChunk.shape
                Dates = np.concatenate((Dates,DateChunk))
                #Dates.extend(DateChunk)
            # Simple running estimate of time remaining.
            elapsed=(time.clock()-start) # elapsed time
            Elapsed_tot=Elapsed_tot+elapsed # cumulative elapsed
            MeanElapsed=Elapsed_tot/ifile # mean time per file
            Fls_left=N_files-(ifile) # files remaining
            time_left=Fls_left*MeanElapsed/60 # estimated time left
            print ' aggregation took %.1f sec' % elapsed
            print ' estimated time remaning: %.1f min' % time_left
            fle.close() # close current file
            ifile = ifile+1 # increment file counter
    return VarBig,Var_list,Dates
def write_gridded_file(write_dir,Var,VarCount,x,y,Varname):
    """ writes out the spatially binned data

    Creates ``write_dir`` if needed, then writes four CSV files:
    <Varname>.txt (binned values), <Varname>_Count.txt (bin counts) and
    <Varname>_x.txt / <Varname>_y.txt (bin coordinates).
    """
    if not os.path.exists(write_dir):
        os.makedirs(write_dir)
    # os.path.join is platform independent and tolerates a trailing slash,
    # unlike the previous manual "dir + '/' + name" concatenation.
    f_base = os.path.join(write_dir, Varname)
    np.savetxt(f_base + '.txt', Var, delimiter=',')
    np.savetxt(f_base + '_Count.txt', VarCount, delimiter=',')
    np.savetxt(f_base + '_x.txt', x, delimiter=',')
    np.savetxt(f_base + '_y.txt', y, delimiter=',')
def read_gridded_file(read_dir,Varname):
    """ reads in the spatially binned data

    Inverse of write_gridded_file: returns ``(Var, VarCount, x, y)`` read
    from the four CSV files under ``read_dir``.
    """
    # Platform-independent path construction (was manual '/' concatenation).
    f_base = os.path.join(read_dir, Varname)
    Var = np.loadtxt(f_base + '.txt', delimiter=',')
    VarCount = np.loadtxt(f_base + '_Count.txt', delimiter=',')
    x = np.loadtxt(f_base + '_x.txt', delimiter=',')
    y = np.loadtxt(f_base + '_y.txt', delimiter=',')
    return Var, VarCount, x, y
def write_taxi_count_speed(write_dir,V1,V1name,V2,V2name,V3,V3name):
    """ writes out the spatially binned data

    Writes each (array, name) pair to ``<write_dir>/<name>.txt`` as CSV,
    creating the directory first if necessary.
    """
    if not os.path.exists(write_dir):
        os.makedirs(write_dir)
    # Platform-independent paths instead of manual '/' concatenation.
    for arr, name in ((V1, V1name), (V2, V2name), (V3, V3name)):
        np.savetxt(os.path.join(write_dir, name + '.txt'), arr, delimiter=',')
def read_taxi_count_speed(read_dir,Varname):
    """ reads in the spatially binned data

    Returns the array stored at ``<read_dir>/<Varname>.txt``.
    """
    # Platform-independent path construction (was manual '/' concatenation).
    return np.loadtxt(os.path.join(read_dir, Varname + '.txt'), delimiter=',')
""" END OF FUNCTIONS """
if __name__ == '__main__':
    """ a basic example of reading, processing and plotting some taxi files """
    # the directory with the data
    dir_base = '../data_sub_sampled/'
    # choose which variables to import
    # possible variables: 'pickup_time_hr','dist_mi','speed_mph','psgger','fare',
    #                     'tips','payment_type','pickup_lon','pickup_lat','drop_lon',
    #                     'drop_lat','elapsed_time_min'
    Vars_To_Import = ['dist_mi', 'pickup_lon', 'pickup_lat']
    # read in all the data!
    # BUG FIX: read_taxi_files returns three values (VarBig, Var_list,
    # Dates); the original two-name unpacking raised "ValueError: too many
    # values to unpack" before any processing happened.
    VarBig, Var_list, Dates = read_taxi_files(dir_base, Vars_To_Import)
    # now bin the point data!
    DistCount, DistMean, Distx, Disty = tpm.map_proc(VarBig, Var_list, 'dist_mi', 0.1, 60, 'True', 600, 700)
    write_gridded_file('../data_products/', DistMean, DistCount, Distx, Disty, 'dist_mi')
    tpm.plt_map(DistCount, 1, 1000, Distx, Disty, True)
| gpl-3.0 |
ettm2012/MissionPlanner | Lib/site-packages/numpy/fft/fftpack.py | 59 | 39653 | """
Discrete Fourier Transforms
Routines in this module:
fft(a, n=None, axis=-1)
ifft(a, n=None, axis=-1)
rfft(a, n=None, axis=-1)
irfft(a, n=None, axis=-1)
hfft(a, n=None, axis=-1)
ihfft(a, n=None, axis=-1)
fftn(a, s=None, axes=None)
ifftn(a, s=None, axes=None)
rfftn(a, s=None, axes=None)
irfftn(a, s=None, axes=None)
fft2(a, s=None, axes=(-2,-1))
ifft2(a, s=None, axes=(-2, -1))
rfft2(a, s=None, axes=(-2,-1))
irfft2(a, s=None, axes=(-2, -1))
i = inverse transform
r = transform of purely real data
h = Hermite transform
n = n-dimensional transform
2 = 2-dimensional transform
(Note: 2D routines are just nD routines with different default
behavior.)
The underlying code for these functions is an f2c-translated and modified
version of the FFTPACK routines.
"""
# Public names, including the deprecated ``refft*`` spellings kept for
# backwards compatibility.
__all__ = ['fft','ifft', 'rfft', 'irfft', 'hfft', 'ihfft', 'rfftn',
           'irfftn', 'rfft2', 'irfft2', 'fft2', 'ifft2', 'fftn', 'ifftn',
           'refft', 'irefft','refftn','irefftn', 'refft2', 'irefft2']
from numpy.core import asarray, zeros, swapaxes, shape, conjugate, \
        take
import fftpack_lite as fftpack
# Module-level caches of FFTPACK twiddle-factor work arrays, keyed by
# transform length n. NOTE(review): unbounded — grows with each distinct n.
_fft_cache = {}
_real_fft_cache = {}
def _raw_fft(a, n=None, axis=-1, init_function=fftpack.cffti,
             work_function=fftpack.cfftf, fft_cache = _fft_cache ):
    """Internal driver shared by all 1-D transforms.

    Crops or zero-pads ``a`` to length ``n`` along ``axis``, fetches (or
    builds and caches) the FFTPACK work array for that length, moves the
    transform axis to the end (FFTPACK operates on the last axis), runs
    ``work_function`` and moves the axis back.
    """
    a = asarray(a)
    if n is None:
        n = a.shape[axis]
    if n < 1:
        raise ValueError("Invalid number of FFT data points (%d) specified." % n)
    # Reuse the precomputed twiddle-factor table for this length if we have
    # one; otherwise build and cache it.
    try:
        wsave = fft_cache[n]
    except(KeyError):
        wsave = init_function(n)
        fft_cache[n] = wsave
    if a.shape[axis] != n:
        s = list(a.shape)
        if s[axis] > n:
            # Crop: keep the first n entries along the transform axis.
            index = [slice(None)]*len(s)
            index[axis] = slice(0,n)
            a = a[index]
        else:
            # Pad: copy the input into the front of a zero array of length n.
            index = [slice(None)]*len(s)
            index[axis] = slice(0,s[axis])
            s[axis] = n
            z = zeros(s, a.dtype.char)
            z[index] = a
            a = z
    # FFTPACK transforms the last axis only, so swap the target axis there
    # and back around the call.
    if axis != -1:
        a = swapaxes(a, axis, -1)
    r = work_function(a, wsave)
    if axis != -1:
        r = swapaxes(r, axis, -1)
    return r
def fft(a, n=None, axis=-1):
    """
    Compute the one-dimensional discrete Fourier Transform.
    This function computes the one-dimensional *n*-point discrete Fourier
    Transform (DFT) with the efficient Fast Fourier Transform (FFT)
    algorithm [CT].
    Parameters
    ----------
    a : array_like
        Input array, can be complex.
    n : int, optional
        Length of the transformed axis of the output.
        If `n` is smaller than the length of the input, the input is cropped.
        If it is larger, the input is padded with zeros. If `n` is not given,
        the length of the input (along the axis specified by `axis`) is used.
    axis : int, optional
        Axis over which to compute the FFT. If not given, the last axis is
        used.
    Returns
    -------
    out : complex ndarray
        The truncated or zero-padded input, transformed along the axis
        indicated by `axis`, or the last one if `axis` is not specified.
    Raises
    ------
    IndexError
        if `axes` is larger than the last axis of `a`.
    See Also
    --------
    numpy.fft : for definition of the DFT and conventions used.
    ifft : The inverse of `fft`.
    fft2 : The two-dimensional FFT.
    fftn : The *n*-dimensional FFT.
    rfftn : The *n*-dimensional FFT of real input.
    fftfreq : Frequency bins for given FFT parameters.
    Notes
    -----
    FFT (Fast Fourier Transform) refers to a way the discrete Fourier
    Transform (DFT) can be calculated efficiently, by using symmetries in the
    calculated terms. The symmetry is highest when `n` is a power of 2, and
    the transform is therefore most efficient for these sizes.
    The DFT is defined, with the conventions used in this implementation, in
    the documentation for the `numpy.fft` module.
    References
    ----------
    .. [CT] Cooley, James W., and John W. Tukey, 1965, "An algorithm for the
            machine calculation of complex Fourier series," *Math. Comput.*
            19: 297-301.
    Examples
    --------
    >>> np.fft.fft(np.exp(2j * np.pi * np.arange(8) / 8))
    array([ -3.44505240e-16 +1.14383329e-17j,
             8.00000000e+00 -5.71092652e-15j,
             2.33482938e-16 +1.22460635e-16j,
             1.64863782e-15 +1.77635684e-15j,
             9.95839695e-17 +2.33482938e-16j,
             0.00000000e+00 +1.66837030e-15j,
             1.14383329e-17 +1.22460635e-16j,
            -1.64863782e-15 +1.77635684e-15j])
    >>> import matplotlib.pyplot as plt
    >>> t = np.arange(256)
    >>> sp = np.fft.fft(np.sin(t))
    >>> freq = np.fft.fftfreq(t.shape[-1])
    >>> plt.plot(freq, sp.real, freq, sp.imag)
    [<matplotlib.lines.Line2D object at 0x...>, <matplotlib.lines.Line2D object at 0x...>]
    >>> plt.show()
    In this example, real input has an FFT which is Hermitian, i.e., symmetric
    in the real part and anti-symmetric in the imaginary part, as described in
    the `numpy.fft` documentation.
    """
    # Delegate to the shared driver with the complex forward FFTPACK
    # routines and the complex-transform work-array cache.
    return _raw_fft(a, n, axis, fftpack.cffti, fftpack.cfftf, _fft_cache)
def ifft(a, n=None, axis=-1):
    """
    Compute the one-dimensional inverse discrete Fourier Transform.
    This function computes the inverse of the one-dimensional *n*-point
    discrete Fourier transform computed by `fft`.  In other words,
    ``ifft(fft(a)) == a`` to within numerical accuracy.
    For a general description of the algorithm and definitions,
    see `numpy.fft`.
    The input should be ordered in the same way as is returned by `fft`,
    i.e., ``a[0]`` should contain the zero frequency term,
    ``a[1:n/2+1]`` should contain the positive-frequency terms, and
    ``a[n/2+1:]`` should contain the negative-frequency terms, in order of
    decreasingly negative frequency. See `numpy.fft` for details.
    Parameters
    ----------
    a : array_like
        Input array, can be complex.
    n : int, optional
        Length of the transformed axis of the output.
        If `n` is smaller than the length of the input, the input is cropped.
        If it is larger, the input is padded with zeros. If `n` is not given,
        the length of the input (along the axis specified by `axis`) is used.
        See notes about padding issues.
    axis : int, optional
        Axis over which to compute the inverse DFT. If not given, the last
        axis is used.
    Returns
    -------
    out : complex ndarray
        The truncated or zero-padded input, transformed along the axis
        indicated by `axis`, or the last one if `axis` is not specified.
    Raises
    ------
    IndexError
        If `axes` is larger than the last axis of `a`.
    See Also
    --------
    numpy.fft : An introduction, with definitions and general explanations.
    fft : The one-dimensional (forward) FFT, of which `ifft` is the inverse
    ifft2 : The two-dimensional inverse FFT.
    ifftn : The n-dimensional inverse FFT.
    Notes
    -----
    If the input parameter `n` is larger than the size of the input, the input
    is padded by appending zeros at the end. Even though this is the common
    approach, it might lead to surprising results. If a different padding is
    desired, it must be performed before calling `ifft`.
    Examples
    --------
    >>> np.fft.ifft([0, 4, 0, 0])
    array([ 1.+0.j, 0.+1.j, -1.+0.j, 0.-1.j])
    Create and plot a band-limited signal with random phases:
    >>> import matplotlib.pyplot as plt
    >>> t = np.arange(400)
    >>> n = np.zeros((400,), dtype=complex)
    >>> n[40:60] = np.exp(1j*np.random.uniform(0, 2*np.pi, (20,)))
    >>> s = np.fft.ifft(n)
    >>> plt.plot(t, s.real, 'b-', t, s.imag, 'r--')
    [<matplotlib.lines.Line2D object at 0x...>, <matplotlib.lines.Line2D object at 0x...>]
    >>> plt.legend(('real', 'imaginary'))
    <matplotlib.legend.Legend object at 0x...>
    >>> plt.show()
    """
    a = asarray(a).astype(complex)
    if n is None:
        n = shape(a)[axis]
    # FFTPACK's backward transform is unnormalized, so divide by n to make
    # ifft(fft(a)) == a.
    return _raw_fft(a, n, axis, fftpack.cffti, fftpack.cfftb, _fft_cache) / n
def rfft(a, n=None, axis=-1):
    """
    Compute the one-dimensional discrete Fourier Transform for real input.
    This function computes the one-dimensional *n*-point discrete Fourier
    Transform (DFT) of a real-valued array by means of an efficient algorithm
    called the Fast Fourier Transform (FFT).
    Parameters
    ----------
    a : array_like
        Input array
    n : int, optional
        Number of points along transformation axis in the input to use.
        If `n` is smaller than the length of the input, the input is cropped.
        If it is larger, the input is padded with zeros. If `n` is not given,
        the length of the input (along the axis specified by `axis`) is used.
    axis : int, optional
        Axis over which to compute the FFT. If not given, the last axis is
        used.
    Returns
    -------
    out : complex ndarray
        The truncated or zero-padded input, transformed along the axis
        indicated by `axis`, or the last one if `axis` is not specified.
        The length of the transformed axis is ``n/2+1``.
    Raises
    ------
    IndexError
        If `axis` is larger than the last axis of `a`.
    See Also
    --------
    numpy.fft : For definition of the DFT and conventions used.
    irfft : The inverse of `rfft`.
    fft : The one-dimensional FFT of general (complex) input.
    fftn : The *n*-dimensional FFT.
    rfftn : The *n*-dimensional FFT of real input.
    Notes
    -----
    When the DFT is computed for purely real input, the output is
    Hermite-symmetric, i.e. the negative frequency terms are just the complex
    conjugates of the corresponding positive-frequency terms, and the
    negative-frequency terms are therefore redundant.  This function does not
    compute the negative frequency terms, and the length of the transformed
    axis of the output is therefore ``n/2+1``.
    When ``A = rfft(a)``, ``A[0]`` contains the zero-frequency term, which
    must be purely real due to the Hermite symmetry.
    If `n` is even, ``A[-1]`` contains the term for frequencies ``n/2`` and
    ``-n/2``, and must also be purely real. If `n` is odd, ``A[-1]``
    contains the term for frequency ``A[(n-1)/2]``, and is complex in the
    general case.
    If the input `a` contains an imaginary part, it is silently discarded.
    Examples
    --------
    >>> np.fft.fft([0, 1, 0, 0])
    array([ 1.+0.j, 0.-1.j, -1.+0.j, 0.+1.j])
    >>> np.fft.rfft([0, 1, 0, 0])
    array([ 1.+0.j, 0.-1.j, -1.+0.j])
    Notice how the final element of the `fft` output is the complex conjugate
    of the second element, for real input. For `rfft`, this symmetry is
    exploited to compute only the non-negative frequency terms.
    """
    # The float cast is what silently discards any imaginary part of the
    # input, as documented in the Notes above.
    a = asarray(a).astype(float)
    return _raw_fft(a, n, axis, fftpack.rffti, fftpack.rfftf, _real_fft_cache)
def irfft(a, n=None, axis=-1):
    """
    Compute the inverse of the n-point DFT for real input.
    This function computes the inverse of the one-dimensional *n*-point
    discrete Fourier Transform of real input computed by `rfft`.
    In other words, ``irfft(rfft(a), len(a)) == a`` to within numerical
    accuracy. (See Notes below for why ``len(a)`` is necessary here.)
    The input is expected to be in the form returned by `rfft`, i.e. the
    real zero-frequency term followed by the complex positive frequency terms
    in order of increasing frequency.  Since the discrete Fourier Transform of
    real input is Hermite-symmetric, the negative frequency terms are taken
    to be the complex conjugates of the corresponding positive frequency terms.
    Parameters
    ----------
    a : array_like
        The input array.
    n : int, optional
        Length of the transformed axis of the output.
        For `n` output points, ``n/2+1`` input points are necessary.  If the
        input is longer than this, it is cropped.  If it is shorter than this,
        it is padded with zeros.  If `n` is not given, it is determined from
        the length of the input (along the axis specified by `axis`).
    axis : int, optional
        Axis over which to compute the inverse FFT.
    Returns
    -------
    out : ndarray
        The truncated or zero-padded input, transformed along the axis
        indicated by `axis`, or the last one if `axis` is not specified.
        The length of the transformed axis is `n`, or, if `n` is not given,
        ``2*(m-1)`` where `m` is the length of the transformed axis of the
        input. To get an odd number of output points, `n` must be specified.
    Raises
    ------
    IndexError
        If `axis` is larger than the last axis of `a`.
    See Also
    --------
    numpy.fft : For definition of the DFT and conventions used.
    rfft : The one-dimensional FFT of real input, of which `irfft` is inverse.
    fft : The one-dimensional FFT.
    irfft2 : The inverse of the two-dimensional FFT of real input.
    irfftn : The inverse of the *n*-dimensional FFT of real input.
    Notes
    -----
    Returns the real valued `n`-point inverse discrete Fourier transform
    of `a`, where `a` contains the non-negative frequency terms of a
    Hermite-symmetric sequence. `n` is the length of the result, not the
    input.
    If you specify an `n` such that `a` must be zero-padded or truncated, the
    extra/removed values will be added/removed at high frequencies. One can
    thus resample a series to `m` points via Fourier interpolation by:
    ``a_resamp = irfft(rfft(a), m)``.
    Examples
    --------
    >>> np.fft.ifft([1, -1j, -1, 1j])
    array([ 0.+0.j, 1.+0.j, 0.+0.j, 0.+0.j])
    >>> np.fft.irfft([1, -1j, -1])
    array([ 0., 1., 0., 0.])
    Notice how the last term in the input to the ordinary `ifft` is the
    complex conjugate of the second term, and the output has zero imaginary
    part everywhere. When calling `irfft`, the negative frequencies are not
    specified, and the output array is purely real.
    """
    a = asarray(a).astype(complex)
    if n is None:
        # Default output length assumes the original signal had even length.
        n = (shape(a)[axis] - 1) * 2
    # The backward real transform is unnormalized; divide by n so that
    # irfft(rfft(a), len(a)) == a.
    return _raw_fft(a, n, axis, fftpack.rffti, fftpack.rfftb,
                    _real_fft_cache) / n
def hfft(a, n=None, axis=-1):
    """
    Compute the FFT of a signal whose spectrum has Hermitian symmetry.

    Parameters
    ----------
    a : array_like
        The input array.
    n : int, optional
        The length of the FFT.  If not given, ``2*(m - 1)`` is used, where
        ``m`` is the length of the input along `axis`.
    axis : int, optional
        The axis over which to compute the FFT, assuming Hermitian symmetry
        of the spectrum.  Default is the last axis.

    Returns
    -------
    out : ndarray
        The transformed input.

    See also
    --------
    rfft : Compute the one-dimensional FFT for real input.
    ihfft : The inverse of `hfft`.

    Notes
    -----
    `hfft`/`ihfft` are a pair analogous to `rfft`/`irfft`, but for the
    opposite case: here the signal is real in the frequency domain and has
    Hermite symmetry in the time domain.  As with `irfft`, you must supply
    the length of the result if it is to be odd:
    ``ihfft(hfft(a), len(a)) == a``, within numerical accuracy.

    Examples
    --------
    >>> signal = np.array([[1, 1.j], [-1.j, 2]])
    >>> np.fft.hfft(signal)
    array([[ 1.,  1.],
           [ 2., -2.]])
    """
    # A Hermitian-symmetric time signal has a purely real spectrum, so the
    # forward transform reduces to a scaled inverse real FFT of the
    # conjugated input.
    spectrum = asarray(a).astype(complex)
    if n is None:
        n = (shape(spectrum)[axis] - 1) * 2
    return irfft(conjugate(spectrum), n, axis) * n
def ihfft(a, n=None, axis=-1):
    """
    Compute the inverse FFT of a signal whose spectrum has Hermitian
    symmetry.

    Parameters
    ----------
    a : array_like
        Input array.
    n : int, optional
        Length of the inverse FFT.  Defaults to the input length along
        `axis`.
    axis : int, optional
        Axis over which to compute the inverse FFT, assuming Hermitian
        symmetry of the spectrum.  Default is the last axis.

    Returns
    -------
    out : ndarray
        The transformed input.

    See also
    --------
    hfft, irfft

    Notes
    -----
    `hfft`/`ihfft` are a pair analogous to `rfft`/`irfft`, but for the
    opposite case: here the signal is real in the frequency domain and has
    Hermite symmetry in the time domain.  As with `irfft`, you must supply
    the length of the result if it is to be odd:
    ``ihfft(hfft(a), len(a)) == a``, within numerical accuracy.
    """
    # The inverse of `hfft`: a real FFT of the (real) input, conjugated and
    # scaled by 1/n.
    samples = asarray(a).astype(float)
    if n is None:
        n = shape(samples)[axis]
    return conjugate(rfft(samples, n, axis)) / n
def _cook_nd_args(a, s=None, axes=None, invreal=0):
if s is None:
shapeless = 1
if axes is None:
s = list(a.shape)
else:
s = take(a.shape, axes)
else:
shapeless = 0
s = list(s)
if axes is None:
axes = range(-len(s), 0)
if len(s) != len(axes):
raise ValueError, "Shape and axes have different lengths."
if invreal and shapeless:
s[axes[-1]] = (s[axes[-1]] - 1) * 2
return s, axes
def _raw_fftnd(a, s=None, axes=None, function=fft):
    """Apply the 1-D transform `function` successively along each axis.

    Parameters
    ----------
    a : array_like
        Input array.
    s, axes : optional
        Shape and axes arguments, normalized via `_cook_nd_args`.
    function : callable, optional
        One-dimensional transform with signature ``function(a, n, axis)``;
        defaults to the forward complex FFT.

    Returns
    -------
    ndarray
        The input transformed along each requested axis in turn.
    """
    a = asarray(a)
    s, axes = _cook_nd_args(a, s, axes)
    # Transform the last listed axis first, working back to the first
    # (same order as the original, which built a list and reversed it).
    # Fixed: the original did `itl = range(...); itl.reverse()`, which
    # fails on Python 3 where range objects have no .reverse() method.
    for ii in reversed(range(len(axes))):
        a = function(a, n=s[ii], axis=axes[ii])
    return a
def fftn(a, s=None, axes=None):
    """
    Compute the N-dimensional discrete Fourier Transform.

    This function computes the *N*-dimensional discrete Fourier Transform
    over any number of axes in an *M*-dimensional array by means of the
    Fast Fourier Transform (FFT).

    Parameters
    ----------
    a : array_like
        Input array, can be complex.
    s : sequence of ints, optional
        Shape (length of each transformed axis) of the output
        (``s[0]`` refers to axis 0, ``s[1]`` to axis 1, etc.); this
        corresponds to ``n`` for ``fft(x, n)``.  Along any axis, if the
        given shape is smaller than that of the input, the input is
        cropped; if it is larger, the input is padded with zeros.  If `s`
        is not given, the shape of the input (along the axes specified by
        `axes`) is used.
    axes : sequence of ints, optional
        Axes over which to compute the FFT.  If not given, the last
        ``len(s)`` axes are used, or all axes if `s` is also not
        specified.  Repeated indices in `axes` mean that the transform
        over that axis is performed multiple times.

    Returns
    -------
    out : complex ndarray
        The truncated or zero-padded input, transformed along the axes
        indicated by `axes`, or by a combination of `s` and `a`, as
        explained in the parameters section above.

    Raises
    ------
    ValueError
        If `s` and `axes` have different length.
    IndexError
        If an element of `axes` is larger than the number of axes of `a`.

    See Also
    --------
    numpy.fft : Overall view of discrete Fourier transforms, with
        definitions and conventions used.
    ifftn : The inverse of `fftn`, the inverse *n*-dimensional FFT.
    fft : The one-dimensional FFT, with definitions and conventions used.
    rfftn : The *n*-dimensional FFT of real input.
    fft2 : The two-dimensional FFT.
    fftshift : Shifts zero-frequency terms to centre of array.

    Notes
    -----
    The output, analogously to `fft`, contains the term for zero frequency
    in the low-order corner of all axes, the positive frequency terms in
    the first half of all axes, the term for the Nyquist frequency in the
    middle of all axes and the negative frequency terms in the second half
    of all axes, in order of decreasingly negative frequency.  See
    `numpy.fft` for details, definitions and conventions used.

    Examples
    --------
    >>> a = np.mgrid[:3, :3, :3][0]
    >>> np.fft.fftn(a, axes=(1, 2))[1, 0, 0]
    (9+0j)
    """
    # Delegate to the generic n-dimensional driver with the forward
    # complex transform.
    return _raw_fftnd(a, s, axes, fft)
def ifftn(a, s=None, axes=None):
    """
    Compute the N-dimensional inverse discrete Fourier Transform.

    This function computes the inverse of the N-dimensional discrete
    Fourier Transform over any number of axes in an M-dimensional array by
    means of the Fast Fourier Transform (FFT).  In other words,
    ``ifftn(fftn(a)) == a`` to within numerical accuracy.  For a
    description of the definitions and conventions used, see `numpy.fft`.

    The input, analogously to `ifft`, should be ordered in the same way as
    is returned by `fftn`: the term for zero frequency in the low-order
    corner of all axes, positive frequency terms in the first half of all
    axes, the Nyquist-frequency term in the middle, and negative frequency
    terms in the second half, in order of decreasingly negative frequency.

    Parameters
    ----------
    a : array_like
        Input array, can be complex.
    s : sequence of ints, optional
        Shape (length of each transformed axis) of the output
        (``s[0]`` refers to axis 0, ``s[1]`` to axis 1, etc.); this
        corresponds to ``n`` for ``ifft(x, n)``.  Along any axis, a
        smaller shape crops the input and a larger one zero-pads it.  If
        `s` is not given, the shape of the input (along the axes specified
        by `axes`) is used.  See notes for issue on `ifft` zero padding.
    axes : sequence of ints, optional
        Axes over which to compute the IFFT.  If not given, the last
        ``len(s)`` axes are used, or all axes if `s` is also not
        specified.  Repeated indices in `axes` mean the inverse transform
        over that axis is performed multiple times.

    Returns
    -------
    out : complex ndarray
        The truncated or zero-padded input, transformed along the axes
        indicated by `axes`, or by a combination of `s` or `a`, as
        explained in the parameters section above.

    Raises
    ------
    ValueError
        If `s` and `axes` have different length.
    IndexError
        If an element of `axes` is larger than the number of axes of `a`.

    See Also
    --------
    numpy.fft : Overall view of discrete Fourier transforms, with
        definitions and conventions used.
    fftn : The forward *n*-dimensional FFT, of which `ifftn` is the
        inverse.
    ifft : The one-dimensional inverse FFT.
    ifft2 : The two-dimensional inverse FFT.
    ifftshift : Undoes `fftshift`, shifts zero-frequency terms to
        beginning of array.

    Notes
    -----
    Zero-padding, analogously with `ifft`, is performed by appending zeros
    to the input along the specified dimension.  Although this is the
    common approach, it might lead to surprising results.  If another form
    of zero padding is desired, it must be performed before `ifftn` is
    called.

    Examples
    --------
    >>> a = np.eye(4)
    >>> np.fft.ifftn(np.fft.fftn(a, axes=(0,)), axes=(1,))[0, 0]
    (1+0j)
    """
    # Delegate to the generic n-dimensional driver with the inverse
    # complex transform.
    return _raw_fftnd(a, s, axes, ifft)
def fft2(a, s=None, axes=(-2, -1)):
    """
    Compute the 2-dimensional discrete Fourier Transform.

    This function computes the *n*-dimensional discrete Fourier Transform
    over any axes in an *M*-dimensional array by means of the Fast Fourier
    Transform (FFT).  By default, the transform is computed over the last
    two axes of the input array, i.e., a 2-dimensional FFT.

    Parameters
    ----------
    a : array_like
        Input array, can be complex.
    s : sequence of ints, optional
        Shape (length of each transformed axis) of the output
        (``s[0]`` refers to axis 0, ``s[1]`` to axis 1, etc.); this
        corresponds to ``n`` for ``fft(x, n)``.  Along each axis, a
        smaller shape crops the input and a larger one zero-pads it.  If
        `s` is not given, the shape of the input (along the axes specified
        by `axes`) is used.
    axes : sequence of ints, optional
        Axes over which to compute the FFT.  If not given, the last two
        axes are used.  A repeated index in `axes` means the transform
        over that axis is performed multiple times.  A one-element
        sequence means that a one-dimensional FFT is performed.

    Returns
    -------
    out : complex ndarray
        The truncated or zero-padded input, transformed along the axes
        indicated by `axes`, or the last two axes if `axes` is not given.

    Raises
    ------
    ValueError
        If `s` and `axes` have different length, or `axes` not given and
        ``len(s) != 2``.
    IndexError
        If an element of `axes` is larger than the number of axes of `a`.

    See Also
    --------
    numpy.fft : Overall view of discrete Fourier transforms, with
        definitions and conventions used.
    ifft2 : The inverse two-dimensional FFT.
    fft : The one-dimensional FFT.
    fftn : The *n*-dimensional FFT.
    fftshift : Shifts zero-frequency terms to the center of the array.

    Notes
    -----
    `fft2` is just `fftn` with a different default for `axes`.  See `fftn`
    for details and a plotting example, and `numpy.fft` for definitions
    and conventions used.

    Examples
    --------
    >>> a = np.mgrid[:5, :5][0]
    >>> np.fft.fft2(a)[1, 0]
    (5+0j)
    """
    # Same machinery as `fftn`, restricted (by default) to the final two
    # axes.
    return _raw_fftnd(a, s, axes, fft)
def ifft2(a, s=None, axes=(-2, -1)):
    """
    Compute the 2-dimensional inverse discrete Fourier Transform.

    This function computes the inverse of the 2-dimensional discrete
    Fourier Transform over any number of axes in an M-dimensional array by
    means of the Fast Fourier Transform (FFT).  In other words,
    ``ifft2(fft2(a)) == a`` to within numerical accuracy.  By default, the
    inverse transform is computed over the last two axes of the input
    array.

    The input, analogously to `ifft`, should be ordered in the same way as
    is returned by `fft2`.

    Parameters
    ----------
    a : array_like
        Input array, can be complex.
    s : sequence of ints, optional
        Shape (length of each axis) of the output (``s[0]`` refers to axis
        0, ``s[1]`` to axis 1, etc.); this corresponds to ``n`` for
        ``ifft(x, n)``.  Along each axis, a smaller shape crops the input
        and a larger one zero-pads it.  If `s` is not given, the shape of
        the input (along the axes specified by `axes`) is used.  See notes
        for issue on `ifft` zero padding.
    axes : sequence of ints, optional
        Axes over which to compute the FFT.  If not given, the last two
        axes are used.  A repeated index in `axes` means the transform
        over that axis is performed multiple times.  A one-element
        sequence means that a one-dimensional FFT is performed.

    Returns
    -------
    out : complex ndarray
        The truncated or zero-padded input, transformed along the axes
        indicated by `axes`, or the last two axes if `axes` is not given.

    Raises
    ------
    ValueError
        If `s` and `axes` have different length, or `axes` not given and
        ``len(s) != 2``.
    IndexError
        If an element of `axes` is larger than the number of axes of `a`.

    See Also
    --------
    numpy.fft : Overall view of discrete Fourier transforms, with
        definitions and conventions used.
    fft2 : The forward 2-dimensional FFT, of which `ifft2` is the inverse.
    ifftn : The inverse of the *n*-dimensional FFT.
    fft : The one-dimensional FFT.
    ifft : The one-dimensional inverse FFT.

    Notes
    -----
    `ifft2` is just `ifftn` with a different default for `axes`.  See
    `ifftn` for details and a plotting example, and `numpy.fft` for
    definition and conventions used.  Zero-padding, analogously with
    `ifft`, is performed by appending zeros to the input along the
    specified dimension; if another form of zero padding is desired, it
    must be performed before `ifft2` is called.

    Examples
    --------
    >>> a = 4 * np.eye(4)
    >>> np.fft.ifft2(a)[0, 0]
    (1+0j)
    """
    # Same machinery as `ifftn`, restricted (by default) to the final two
    # axes.
    return _raw_fftnd(a, s, axes, ifft)
def rfftn(a, s=None, axes=None):
    """
    Compute the N-dimensional discrete Fourier Transform for real input.

    This function computes the N-dimensional discrete Fourier Transform
    over any number of axes in an M-dimensional real array by means of the
    Fast Fourier Transform (FFT).  By default, all axes are transformed,
    with the real transform performed over the last axis, while the
    remaining transforms are complex.

    Parameters
    ----------
    a : array_like
        Input array, taken to be real.
    s : sequence of ints, optional
        Shape (length along each transformed axis) to use from the input
        (``s[0]`` refers to axis 0, ``s[1]`` to axis 1, etc.).  The final
        element of `s` corresponds to ``n`` for ``rfft(x, n)``, while for
        the remaining axes it corresponds to ``n`` for ``fft(x, n)``.
        Along any axis, a smaller shape crops the input and a larger one
        zero-pads it.  If `s` is not given, the shape of the input (along
        the axes specified by `axes`) is used.
    axes : sequence of ints, optional
        Axes over which to compute the FFT.  If not given, the last
        ``len(s)`` axes are used, or all axes if `s` is also not
        specified.

    Returns
    -------
    out : complex ndarray
        The truncated or zero-padded input, transformed along the axes
        indicated by `axes`, or by a combination of `s` and `a`.  The
        length of the last axis transformed will be ``s[-1]//2+1``, while
        the remaining transformed axes will have lengths according to `s`,
        or unchanged from the input.

    Raises
    ------
    ValueError
        If `s` and `axes` have different length.
    IndexError
        If an element of `axes` is larger than the number of axes of `a`.

    See Also
    --------
    irfftn : The inverse of `rfftn`, i.e. the inverse of the
        n-dimensional FFT of real input.
    fft : The one-dimensional FFT, with definitions and conventions used.
    rfft : The one-dimensional FFT of real input.
    fftn : The n-dimensional FFT.
    rfft2 : The two-dimensional FFT of real input.

    Notes
    -----
    The transform for real input is performed over the last transformation
    axis, as by `rfft`, then the transform over the remaining axes is
    performed as by `fftn`.  The order of the output is as for `rfft` for
    the final transformation axis, and as for `fftn` for the remaining
    transformation axes.  See `fft` for details, definitions and
    conventions used.

    Examples
    --------
    >>> a = np.ones((2, 2, 2))
    >>> np.fft.rfftn(a)[0, 0, 0]
    (8+0j)
    """
    data = asarray(a).astype(float)
    s, axes = _cook_nd_args(data, s, axes)
    # Real-input transform along the final axis first ...
    out = rfft(data, s[-1], axes[-1])
    # ... then ordinary complex transforms along the remaining axes.
    for k in range(len(axes) - 1):
        out = fft(out, s[k], axes[k])
    return out
def rfft2(a, s=None, axes=(-2, -1)):
    """
    Compute the 2-dimensional FFT of a real array.

    Parameters
    ----------
    a : array
        Input array, taken to be real.
    s : sequence of ints, optional
        Shape of the FFT.
    axes : sequence of ints, optional
        Axes over which to compute the FFT.  Default is the last two axes.

    Returns
    -------
    out : ndarray
        The result of the real 2-D FFT.

    See Also
    --------
    rfftn : Compute the N-dimensional discrete Fourier Transform for real
        input.

    Notes
    -----
    This is really just `rfftn` with a different default for `axes`; see
    `rfftn` for more details.
    """
    # Thin wrapper: delegate straight to the n-dimensional real transform.
    return rfftn(a, s, axes)
def irfftn(a, s=None, axes=None):
    """
    Compute the inverse of the N-dimensional FFT of real input.

    This function computes the inverse of the N-dimensional discrete
    Fourier Transform for real input over any number of axes in an
    M-dimensional array by means of the Fast Fourier Transform (FFT).  In
    other words, ``irfftn(rfftn(a), a.shape) == a`` to within numerical
    accuracy.  (The ``a.shape`` is necessary like ``len(a)`` is for
    `irfft`, and for the same reason.)

    The input should be ordered in the same way as is returned by `rfftn`,
    i.e. as for `irfft` for the final transformation axis, and as for
    `ifftn` along all the other axes.

    Parameters
    ----------
    a : array_like
        Input array.
    s : sequence of ints, optional
        Shape (length of each transformed axis) of the output
        (``s[0]`` refers to axis 0, ``s[1]`` to axis 1, etc.).  `s` is
        also the number of input points used along this axis, except for
        the last axis, where ``s[-1]//2+1`` points of the input are used.
        Along any axis, a smaller `s` crops the input and a larger one
        zero-pads it.  If `s` is not given, the shape of the input (along
        the axes specified by `axes`) is used.
    axes : sequence of ints, optional
        Axes over which to compute the inverse FFT.  If not given, the
        last ``len(s)`` axes are used, or all axes if `s` is also not
        specified.  Repeated indices in `axes` mean the inverse transform
        over that axis is performed multiple times.

    Returns
    -------
    out : ndarray
        The truncated or zero-padded input, transformed along the axes
        indicated by `axes`, or by a combination of `s` or `a`.  The
        length of each transformed axis is as given by the corresponding
        element of `s`, or the length of the input in every axis except
        the last one if `s` is not given.  In the final transformed axis
        the length of the output when `s` is not given is ``2*(m-1)``
        where `m` is the length of the final transformed axis of the
        input.  To get an odd number of output points in the final axis,
        `s` must be specified.

    Raises
    ------
    ValueError
        If `s` and `axes` have different length.
    IndexError
        If an element of `axes` is larger than the number of axes of `a`.

    See Also
    --------
    rfftn : The forward n-dimensional FFT of real input, of which
        `irfftn` is the inverse.
    fft : The one-dimensional FFT, with definitions and conventions used.
    irfft : The inverse of the one-dimensional FFT of real input.
    irfft2 : The inverse of the two-dimensional FFT of real input.

    Notes
    -----
    See `fft` for definitions and conventions used, and `rfft` for
    definitions and conventions used for real input.

    Examples
    --------
    >>> a = np.zeros((3, 2, 2))
    >>> a[0, 0, 0] = 3 * 2 * 2
    >>> np.fft.irfftn(a)[0, 0, 0]
    1.0
    """
    spec = asarray(a).astype(complex)
    s, axes = _cook_nd_args(spec, s, axes, invreal=1)
    # Complex inverse transforms over all but the last axis ...
    for k in range(len(axes) - 1):
        spec = ifft(spec, s[k], axes[k])
    # ... then the real-output inverse transform along the final axis.
    return irfft(spec, s[-1], axes[-1])
def irfft2(a, s=None, axes=(-2, -1)):
    """
    Compute the 2-dimensional inverse FFT of a real array.

    Parameters
    ----------
    a : array_like
        The input array.
    s : sequence of ints, optional
        Shape of the inverse FFT.
    axes : sequence of ints, optional
        The axes over which to compute the inverse fft.  Default is the
        last two axes.

    Returns
    -------
    out : ndarray
        The result of the inverse real 2-D FFT.

    See Also
    --------
    irfftn : Compute the inverse of the N-dimensional FFT of real input.

    Notes
    -----
    This is really `irfftn` with a different default for `axes`; see
    `irfftn` for more details.
    """
    # Thin wrapper: delegate straight to the n-dimensional inverse real
    # transform.
    return irfftn(a, s, axes)
# Deprecated names
# Backwards-compatible aliases for the old "refft" naming scheme.
# `numpy.deprecate` wraps each function so that calling the alias emits a
# DeprecationWarning pointing at the replacement name.
from numpy import deprecate
refft = deprecate(rfft, 'refft', 'rfft')
irefft = deprecate(irfft, 'irefft', 'irfft')
refft2 = deprecate(rfft2, 'refft2', 'rfft2')
irefft2 = deprecate(irfft2, 'irefft2', 'irfft2')
refftn = deprecate(rfftn, 'refftn', 'rfftn')
irefftn = deprecate(irfftn, 'irefftn', 'irfftn')
| gpl-3.0 |
maximus009/kaggle-galaxies | predict_augmented_npy_maxout2048_extradense_pysexgen1_dup.py | 7 | 9736 | """
Load an analysis file and redo the predictions on the validation set / test set,
this time with augmented data and averaging. Store them as numpy files.
"""
import numpy as np
# import pandas as pd
import theano
import theano.tensor as T
import layers
import cc_layers
import custom
import load_data
import realtime_augmentation as ra
import time
import csv
import os
import cPickle as pickle
# --- Prediction configuration -------------------------------------------
# Mini-batch size fed to the compiled theano function per call.
BATCH_SIZE = 32 # 16
# Input images have 3 colour channels.
NUM_INPUT_FEATURES = 3
CHUNK_SIZE = 8000 # 10000 # this should be a multiple of the batch size
# ANALYSIS_PATH = "analysis/try_convnet_cc_multirot_3x69r45_untied_bias.pkl"
# Pickled analysis file holding the trained parameter values to restore.
ANALYSIS_PATH = "analysis/final/try_convnet_cc_multirotflip_3x69r45_maxout2048_extradense_pysexgen1_dup.pkl"
DO_VALID = True # disable this to not bother with the validation set evaluation
DO_TEST = True # disable this to not generate predictions on the testset
# Output filenames are derived from the analysis filename (.pkl -> .npy.gz).
target_filename = os.path.basename(ANALYSIS_PATH).replace(".pkl", ".npy.gz")
target_path_valid = os.path.join("predictions/final/augmented/valid", target_filename)
target_path_test = os.path.join("predictions/final/augmented/test", target_filename)
print "Loading model data etc."
# The analysis pickle stores the learned parameter values (and recorded
# losses) from training; np.load handles the pickled file here.
analysis = np.load(ANALYSIS_PATH)
# Two input representations: a plain 69x69 view and one rotated by 45
# degrees (both downsampled with factor 3.0).
input_sizes = [(69, 69), (69, 69)]
ds_transforms = [
    ra.build_ds_transform(3.0, target_size=input_sizes[0]),
    ra.build_ds_transform(3.0, target_size=input_sizes[1]) + ra.build_augmentation_transform(rotation=45)]
num_input_representations = len(ds_transforms)
# split training data into training + a small validation set
num_train = load_data.num_train
num_valid = num_train // 10 # integer division
num_train -= num_valid
num_test = load_data.num_test
valid_ids = load_data.train_ids[num_train:]
train_ids = load_data.train_ids[:num_train]
test_ids = load_data.test_ids
# Positional index arrays into the train/test image sets.
train_indices = np.arange(num_train)
valid_indices = np.arange(num_train, num_train+num_valid)
test_indices = np.arange(num_test)
# Ground-truth targets for the held-out validation slice (37 outputs per
# galaxy -- presumably the competition's probability columns; confirm
# against solutions_train.npy).
y_valid = np.load("data/solutions_train.npy")[num_train:]
print "Build model"
# --- Network definition --------------------------------------------------
# `layers`, `cc_layers` and `custom` are project modules; the layer
# semantics below are inferred from their names -- confirm against those
# modules.  Two input branches (plain and 45-degree-rotated views):
l0 = layers.Input2DLayer(BATCH_SIZE, NUM_INPUT_FEATURES, input_sizes[0][0], input_sizes[0][1])
l0_45 = layers.Input2DLayer(BATCH_SIZE, NUM_INPUT_FEATURES, input_sizes[1][0], input_sizes[1][1])
# Slice each input into rotated/flipped parts so one conv stack processes
# all views.
l0r = layers.MultiRotSliceLayer([l0, l0_45], part_size=45, include_flip=True)
l0s = cc_layers.ShuffleBC01ToC01BLayer(l0r)
# Convolutional feature extractor (cuda-convnet wrappers).
l1a = cc_layers.CudaConvnetConv2DLayer(l0s, n_filters=32, filter_size=6, weights_std=0.01, init_bias_value=0.1, dropout=0.0, partial_sum=1, untie_biases=True)
l1 = cc_layers.CudaConvnetPooling2DLayer(l1a, pool_size=2)
l2a = cc_layers.CudaConvnetConv2DLayer(l1, n_filters=64, filter_size=5, weights_std=0.01, init_bias_value=0.1, dropout=0.0, partial_sum=1, untie_biases=True)
l2 = cc_layers.CudaConvnetPooling2DLayer(l2a, pool_size=2)
l3a = cc_layers.CudaConvnetConv2DLayer(l2, n_filters=128, filter_size=3, weights_std=0.01, init_bias_value=0.1, dropout=0.0, partial_sum=1, untie_biases=True)
l3b = cc_layers.CudaConvnetConv2DLayer(l3a, n_filters=128, filter_size=3, pad=0, weights_std=0.1, init_bias_value=0.1, dropout=0.0, partial_sum=1, untie_biases=True)
l3 = cc_layers.CudaConvnetPooling2DLayer(l3b, pool_size=2)
l3s = cc_layers.ShuffleC01BToBC01Layer(l3)
j3 = layers.MultiRotMergeLayer(l3s, num_views=4) # 2) # merge convolutional parts
# Dense head: two maxout-2048 layers (DenseLayer 4096 + feature max-pool
# over pairs) followed by the 37-way output.
l4a = layers.DenseLayer(j3, n_outputs=4096, weights_std=0.001, init_bias_value=0.01, dropout=0.5, nonlinearity=layers.identity)
l4b = layers.FeatureMaxPoolingLayer(l4a, pool_size=2, feature_dim=1, implementation='reshape')
l4c = layers.DenseLayer(l4b, n_outputs=4096, weights_std=0.001, init_bias_value=0.01, dropout=0.5, nonlinearity=layers.identity)
l4 = layers.FeatureMaxPoolingLayer(l4c, pool_size=2, feature_dim=1, implementation='reshape')
# l5 = layers.DenseLayer(l4, n_outputs=37, weights_std=0.01, init_bias_value=0.0, dropout=0.5, nonlinearity=custom.clip_01) # nonlinearity=layers.identity)
l5 = layers.DenseLayer(l4, n_outputs=37, weights_std=0.01, init_bias_value=0.1, dropout=0.5, nonlinearity=layers.identity)
# l6 = layers.OutputLayer(l5, error_measure='mse')
l6 = custom.OptimisedDivGalaxyOutputLayer(l5) # this incorporates the constraints on the output (probabilities sum to one, weighting, etc.)
# Shared variables hold one chunk of input data on the GPU; the compiled
# function indexes batch `idx` out of them.
xs_shared = [theano.shared(np.zeros((1,1,1,1), dtype=theano.config.floatX)) for _ in xrange(num_input_representations)]
idx = T.lscalar('idx')
givens = {
    l0.input_var: xs_shared[0][idx*BATCH_SIZE:(idx+1)*BATCH_SIZE],
    l0_45.input_var: xs_shared[1][idx*BATCH_SIZE:(idx+1)*BATCH_SIZE],
}
# Deterministic forward pass (dropout disabled) for prediction.
compute_output = theano.function([idx], l6.predictions(dropout_active=False), givens=givens)
print "Load model parameters"
layers.set_param_values(l6, analysis['param_values'])
print "Create generators"
# set here which transforms to use to make predictions
# 3 zoom levels x 10 rotation angles x 2 (plain/flipped) = 60 transforms;
# predictions are later averaged over all of them.
augmentation_transforms = []
for zoom in [1 / 1.2, 1.0, 1.2]:
    for angle in np.linspace(0, 360, 10, endpoint=False):
        augmentation_transforms.append(ra.build_augmentation_transform(rotation=angle, zoom=zoom))
        augmentation_transforms.append(ra.build_augmentation_transform(rotation=(angle + 180), zoom=zoom, shear=180)) # flipped
print " %d augmentation transforms." % len(augmentation_transforms)
# Generators yield fixed (non-random) augmented chunks; buffered_gen_mp
# prefetches them in a separate process.
augmented_data_gen_valid = ra.realtime_fixed_augmented_data_gen(valid_indices, 'train', augmentation_transforms=augmentation_transforms, chunk_size=CHUNK_SIZE, target_sizes=input_sizes, ds_transforms=ds_transforms, processor_class=ra.LoadAndProcessFixedPysexGen1CenteringRescaling)
valid_gen = load_data.buffered_gen_mp(augmented_data_gen_valid, buffer_size=1)
augmented_data_gen_test = ra.realtime_fixed_augmented_data_gen(test_indices, 'test', augmentation_transforms=augmentation_transforms, chunk_size=CHUNK_SIZE, target_sizes=input_sizes, ds_transforms=ds_transforms, processor_class=ra.LoadAndProcessFixedPysexGen1CenteringRescaling)
test_gen = load_data.buffered_gen_mp(augmented_data_gen_test, buffer_size=1)
# Rough progress estimates: total augmented samples / chunk size.
approx_num_chunks_valid = int(np.ceil(num_valid * len(augmentation_transforms) / float(CHUNK_SIZE)))
approx_num_chunks_test = int(np.ceil(num_test * len(augmentation_transforms) / float(CHUNK_SIZE)))
print "Approximately %d chunks for the validation set" % approx_num_chunks_valid
print "Approximately %d chunks for the test set" % approx_num_chunks_test
if DO_VALID:
    print
    print "VALIDATION SET"
    print "Compute predictions"
    predictions_list = []
    start_time = time.time()
    for e, (chunk_data, chunk_length) in enumerate(valid_gen):
        print "Chunk %d" % (e + 1)
        xs_chunk = chunk_data
        # need to transpose the chunks to move the 'channels' dimension up
        xs_chunk = [x_chunk.transpose(0, 3, 1, 2) for x_chunk in xs_chunk]
        print " load data onto GPU"
        for x_shared, x_chunk in zip(xs_shared, xs_chunk):
            x_shared.set_value(x_chunk)
        num_batches_chunk = int(np.ceil(chunk_length / float(BATCH_SIZE)))
        # make predictions, don't forget to cut off the zeros at the end
        predictions_chunk_list = []
        for b in xrange(num_batches_chunk):
            if b % 1000 == 0:
                print " batch %d/%d" % (b + 1, num_batches_chunk)
            predictions = compute_output(b)
            predictions_chunk_list.append(predictions)
        predictions_chunk = np.vstack(predictions_chunk_list)
        predictions_chunk = predictions_chunk[:chunk_length] # cut off zeros / padding
        print " compute average over transforms"
        # Each source image appears len(augmentation_transforms) times in a
        # row; average those rows to get one 37-vector per image.
        predictions_chunk_avg = predictions_chunk.reshape(-1, len(augmentation_transforms), 37).mean(1)
        predictions_list.append(predictions_chunk_avg)
        time_since_start = time.time() - start_time
        print " %s since start" % load_data.hms(time_since_start)
    all_predictions = np.vstack(predictions_list)
    print "Write predictions to %s" % target_path_valid
    load_data.save_gz(target_path_valid, all_predictions)
    print "Evaluate"
    # Compare the training-time validation loss against the augmented-
    # averaging RMSE computed here.
    rmse_valid = analysis['losses_valid'][-1]
    rmse_augmented = np.sqrt(np.mean((y_valid - all_predictions)**2))
    print " MSE (last iteration):\t%.6f" % rmse_valid
    print " MSE (augmented):\t%.6f" % rmse_augmented
if DO_TEST:
    print
    print "TEST SET"
    print "Compute predictions"
    # Same chunked predict-and-average procedure as for the validation
    # set, but over the test generator and without an evaluation step.
    predictions_list = []
    start_time = time.time()
    for e, (chunk_data, chunk_length) in enumerate(test_gen):
        print "Chunk %d" % (e + 1)
        xs_chunk = chunk_data
        # need to transpose the chunks to move the 'channels' dimension up
        xs_chunk = [x_chunk.transpose(0, 3, 1, 2) for x_chunk in xs_chunk]
        print " load data onto GPU"
        for x_shared, x_chunk in zip(xs_shared, xs_chunk):
            x_shared.set_value(x_chunk)
        num_batches_chunk = int(np.ceil(chunk_length / float(BATCH_SIZE)))
        # make predictions, don't forget to cut off the zeros at the end
        predictions_chunk_list = []
        for b in xrange(num_batches_chunk):
            if b % 1000 == 0:
                print " batch %d/%d" % (b + 1, num_batches_chunk)
            predictions = compute_output(b)
            predictions_chunk_list.append(predictions)
        predictions_chunk = np.vstack(predictions_chunk_list)
        predictions_chunk = predictions_chunk[:chunk_length] # cut off zeros / padding
        print " compute average over transforms"
        predictions_chunk_avg = predictions_chunk.reshape(-1, len(augmentation_transforms), 37).mean(1)
        predictions_list.append(predictions_chunk_avg)
        time_since_start = time.time() - start_time
        print " %s since start" % load_data.hms(time_since_start)
    all_predictions = np.vstack(predictions_list)
    print "Write predictions to %s" % target_path_test
    load_data.save_gz(target_path_test, all_predictions)
    print "Done!"
| bsd-3-clause |
hdmetor/scikit-learn | examples/plot_multilabel.py | 87 | 4279 | # Authors: Vlad Niculae, Mathieu Blondel
# License: BSD 3 clause
"""
=========================
Multilabel classification
=========================
This example simulates a multi-label document classification problem. The
dataset is generated randomly based on the following process:
- pick the number of labels: n ~ Poisson(n_labels)
- n times, choose a class c: c ~ Multinomial(theta)
- pick the document length: k ~ Poisson(length)
- k times, choose a word: w ~ Multinomial(theta_c)
In the above process, rejection sampling is used to make sure that n is more
than 2, and that the document length is never zero. Likewise, we reject classes
which have already been chosen. The documents that are assigned to both
classes are plotted surrounded by two colored circles.
The classification is performed by projecting to the first two principal
components found by PCA and CCA for visualisation purposes, followed by using
the :class:`sklearn.multiclass.OneVsRestClassifier` metaclassifier using two
SVCs with linear kernels to learn a discriminative model for each class.
Note that PCA is used to perform an unsupervised dimensionality reduction,
while CCA is used to perform a supervised one.
Note: in the plot, "unlabeled samples" does not mean that we don't know the
labels (as in semi-supervised learning) but that the samples simply do *not*
have a label.
"""
# Echo the module docstring when the example is run as a script.
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn.datasets import make_multilabel_classification
from sklearn.multiclass import OneVsRestClassifier
from sklearn.svm import SVC
from sklearn.preprocessing import LabelBinarizer
from sklearn.decomposition import PCA
from sklearn.cross_decomposition import CCA
def plot_hyperplane(clf, min_x, max_x, linestyle, label):
    """Draw the separating line of a fitted linear binary classifier.

    The line w0*x + w1*y + b = 0 is plotted on the current matplotlib
    axes using ``linestyle`` and added to the legend as ``label``.
    """
    coef = clf.coef_[0]
    intercept = clf.intercept_[0]
    slope = -coef[0] / coef[1]
    # extend slightly past [min_x, max_x] so the line spans the whole plot
    xs = np.linspace(min_x - 5, max_x + 5)
    ys = slope * xs - intercept / coef[1]
    plt.plot(xs, ys, linestyle, label=label)
def plot_subfigure(X, Y, subplot, title, transform):
    """Project X to 2D (PCA or CCA), fit a one-vs-rest linear SVC on the
    projection, and draw samples plus both class boundaries into subplot
    ``subplot`` of a 2x2 grid.

    Raises ValueError for any ``transform`` other than "pca" or "cca".
    """
    if transform == "pca":
        # unsupervised projection to the first two principal components
        X = PCA(n_components=2).fit_transform(X)
    elif transform == "cca":
        # supervised projection: CCA uses the labels Y
        X = CCA(n_components=2).fit(X, Y).transform(X)
    else:
        raise ValueError
    min_x = np.min(X[:, 0])
    max_x = np.max(X[:, 0])
    min_y = np.min(X[:, 1])
    max_y = np.max(X[:, 1])
    classif = OneVsRestClassifier(SVC(kernel='linear'))
    classif.fit(X, Y)
    plt.subplot(2, 2, subplot)
    plt.title(title)
    # indices of samples carrying label 0 / label 1 (a sample may have both)
    zero_class = np.where(Y[:, 0])
    one_class = np.where(Y[:, 1])
    plt.scatter(X[:, 0], X[:, 1], s=40, c='gray')
    # members of each class get a hollow circle; dual-labeled points get both
    plt.scatter(X[zero_class, 0], X[zero_class, 1], s=160, edgecolors='b',
                facecolors='none', linewidths=2, label='Class 1')
    plt.scatter(X[one_class, 0], X[one_class, 1], s=80, edgecolors='orange',
                facecolors='none', linewidths=2, label='Class 2')
    plot_hyperplane(classif.estimators_[0], min_x, max_x, 'k--',
                    'Boundary\nfor class 1')
    plot_hyperplane(classif.estimators_[1], min_x, max_x, 'k-.',
                    'Boundary\nfor class 2')
    plt.xticks(())
    plt.yticks(())
    plt.xlim(min_x - .5 * max_x, max_x + .5 * max_x)
    plt.ylim(min_y - .5 * max_y, max_y + .5 * max_y)
    if subplot == 2:
        # axis labels and legend only once, on the upper-right panel
        plt.xlabel('First principal component')
        plt.ylabel('Second principal component')
        plt.legend(loc="upper left")
plt.figure(figsize=(8, 6))
# Dataset 1: allow_unlabeled=True, so some samples carry no label at all
X, Y = make_multilabel_classification(n_classes=2, n_labels=1,
                                      allow_unlabeled=True,
                                      return_indicator=True,
                                      random_state=1)
plot_subfigure(X, Y, 1, "With unlabeled samples + CCA", "cca")
plot_subfigure(X, Y, 2, "With unlabeled samples + PCA", "pca")
# Dataset 2: every sample has at least one label
X, Y = make_multilabel_classification(n_classes=2, n_labels=1,
                                      allow_unlabeled=False,
                                      return_indicator=True,
                                      random_state=1)
plot_subfigure(X, Y, 3, "Without unlabeled samples + CCA", "cca")
plot_subfigure(X, Y, 4, "Without unlabeled samples + PCA", "pca")
# margins: left, bottom, right, top, wspace, hspace
plt.subplots_adjust(.04, .02, .97, .94, .09, .2)
plt.show()
| bsd-3-clause |
ammarkhann/FinalSeniorCode | lib/python2.7/site-packages/pandas/tests/plotting/common.py | 6 | 19480 | #!/usr/bin/env python
# coding: utf-8
import pytest
import os
import warnings
from pandas import DataFrame, Series
from pandas.compat import zip, iteritems
from pandas.util._decorators import cache_readonly
from pandas.core.dtypes.api import is_list_like
import pandas.util.testing as tm
from pandas.util.testing import (ensure_clean,
assert_is_valid_plot_return_object)
import numpy as np
from numpy import random
import pandas.plotting as plotting
from pandas.plotting._tools import _flatten
"""
This is a common base class used for various plotting tests
"""
tm._skip_module_if_no_mpl()
def _skip_if_no_scipy_gaussian_kde():
try:
from scipy.stats import gaussian_kde # noqa
except ImportError:
pytest.skip("scipy version doesn't support gaussian_kde")
def _ok_for_gaussian_kde(kind):
if kind in ['kde', 'density']:
try:
from scipy.stats import gaussian_kde # noqa
except ImportError:
return False
return True
class TestPlotBase(object):
    """Base class for pandas plotting test suites.

    Provides matplotlib version flags, shared fixture data and reusable
    assertion helpers (legend labels, colors, tick props, axes shape, ...).
    """
    def setup_method(self, method):
        import matplotlib as mpl
        mpl.rcdefaults()
        # feature flags for matplotlib version-dependent behaviour
        self.mpl_le_1_2_1 = plotting._compat._mpl_le_1_2_1()
        self.mpl_ge_1_3_1 = plotting._compat._mpl_ge_1_3_1()
        self.mpl_ge_1_4_0 = plotting._compat._mpl_ge_1_4_0()
        self.mpl_ge_1_5_0 = plotting._compat._mpl_ge_1_5_0()
        self.mpl_ge_2_0_0 = plotting._compat._mpl_ge_2_0_0()
        self.mpl_ge_2_0_1 = plotting._compat._mpl_ge_2_0_1()
        if self.mpl_ge_1_4_0:
            self.bp_n_objects = 7
        else:
            self.bp_n_objects = 8
        if self.mpl_ge_1_5_0:
            # 1.5 added PolyCollections to legend handler
            # so we have twice as many items.
            self.polycollection_factor = 2
        else:
            self.polycollection_factor = 1
        if self.mpl_ge_2_0_0:
            self.default_figsize = (6.4, 4.8)
        else:
            self.default_figsize = (8.0, 6.0)
        self.default_tick_position = 'left' if self.mpl_ge_2_0_0 else 'default'
        # common test data
        from pandas import read_csv
        base = os.path.join(os.path.dirname(curpath()), os.pardir)
        path = os.path.join(base, 'tests', 'data', 'iris.csv')
        self.iris = read_csv(path)
        n = 100
        with tm.RNGContext(42):
            gender = np.random.choice(['Male', 'Female'], size=n)
            classroom = np.random.choice(['A', 'B', 'C'], size=n)
        self.hist_df = DataFrame({'gender': gender,
                                  'classroom': classroom,
                                  'height': random.normal(66, 4, size=n),
                                  'weight': random.normal(161, 32, size=n),
                                  'category': random.randint(4, size=n)})
        self.tdf = tm.makeTimeDataFrame()
        self.hexbin_df = DataFrame({"A": np.random.uniform(size=20),
                                    "B": np.random.uniform(size=20),
                                    "C": np.arange(20) + np.random.uniform(
                                        size=20)})
    def teardown_method(self, method):
        # clean up after each test
        tm.close()
    @cache_readonly
    def plt(self):
        """Lazily imported ``matplotlib.pyplot`` module."""
        import matplotlib.pyplot as plt
        return plt
    @cache_readonly
    def colorconverter(self):
        """Lazily imported ``matplotlib.colors.colorConverter``."""
        import matplotlib.colors as colors
        return colors.colorConverter
    def _check_legend_labels(self, axes, labels=None, visible=True):
        """
        Check each axes has expected legend labels

        Parameters
        ----------
        axes : matplotlib Axes object, or its list-like
        labels : list-like
            expected legend labels
        visible : bool
            expected legend visibility. labels are checked only when visible is
            True
        """
        if visible and (labels is None):
            raise ValueError('labels must be specified when visible is True')
        axes = self._flatten_visible(axes)
        for ax in axes:
            if visible:
                assert ax.get_legend() is not None
                self._check_text_labels(ax.get_legend().get_texts(), labels)
            else:
                assert ax.get_legend() is None
    def _check_data(self, xp, rs):
        """
        Check each axes has identical lines

        Parameters
        ----------
        xp : matplotlib Axes object
        rs : matplotlib Axes object
        """
        xp_lines = xp.get_lines()
        rs_lines = rs.get_lines()
        def check_line(xpl, rsl):
            xpdata = xpl.get_xydata()
            rsdata = rsl.get_xydata()
            tm.assert_almost_equal(xpdata, rsdata)
        assert len(xp_lines) == len(rs_lines)
        [check_line(xpl, rsl) for xpl, rsl in zip(xp_lines, rs_lines)]
        tm.close()
    def _check_visible(self, collections, visible=True):
        """
        Check each artist is visible or not

        Parameters
        ----------
        collections : matplotlib Artist or its list-like
            target Artist or its list or collection
        visible : bool
            expected visibility
        """
        from matplotlib.collections import Collection
        if not isinstance(collections,
                          Collection) and not is_list_like(collections):
            collections = [collections]
        for patch in collections:
            assert patch.get_visible() == visible
    def _get_colors_mapped(self, series, colors):
        """Map ``colors`` onto the unique values of ``series`` and return
        one color per element of ``series``."""
        unique = series.unique()
        # unique and colors length can be differed
        # depending on slice value
        mapped = dict(zip(unique, colors))
        return [mapped[v] for v in series.values]
    def _check_colors(self, collections, linecolors=None, facecolors=None,
                      mapping=None):
        """
        Check each artist has expected line colors and face colors

        Parameters
        ----------
        collections : list-like
            list or collection of target artist
        linecolors : list-like which has the same length as collections
            list of expected line colors
        facecolors : list-like which has the same length as collections
            list of expected face colors
        mapping : Series
            Series used for color grouping key
            used for andrew_curves, parallel_coordinates, radviz test
        """
        from matplotlib.lines import Line2D
        from matplotlib.collections import (
            Collection, PolyCollection, LineCollection
        )
        conv = self.colorconverter
        if linecolors is not None:
            if mapping is not None:
                linecolors = self._get_colors_mapped(mapping, linecolors)
                linecolors = linecolors[:len(collections)]
            assert len(collections) == len(linecolors)
            for patch, color in zip(collections, linecolors):
                if isinstance(patch, Line2D):
                    result = patch.get_color()
                    # Line2D may contains string color expression
                    result = conv.to_rgba(result)
                elif isinstance(patch, (PolyCollection, LineCollection)):
                    result = tuple(patch.get_edgecolor()[0])
                else:
                    result = patch.get_edgecolor()
                expected = conv.to_rgba(color)
                assert result == expected
        if facecolors is not None:
            if mapping is not None:
                facecolors = self._get_colors_mapped(mapping, facecolors)
                facecolors = facecolors[:len(collections)]
            assert len(collections) == len(facecolors)
            for patch, color in zip(collections, facecolors):
                if isinstance(patch, Collection):
                    # returned as list of np.array
                    result = patch.get_facecolor()[0]
                else:
                    result = patch.get_facecolor()
                if isinstance(result, np.ndarray):
                    result = tuple(result)
                expected = conv.to_rgba(color)
                assert result == expected
    def _check_text_labels(self, texts, expected):
        """
        Check each text has expected labels

        Parameters
        ----------
        texts : matplotlib Text object, or its list-like
            target text, or its list
        expected : str or list-like which has the same length as texts
            expected text label, or its list
        """
        if not is_list_like(texts):
            assert texts.get_text() == expected
        else:
            labels = [t.get_text() for t in texts]
            assert len(labels) == len(expected)
            for l, e in zip(labels, expected):
                assert l == e
    def _check_ticks_props(self, axes, xlabelsize=None, xrot=None,
                           ylabelsize=None, yrot=None):
        """
        Check each axes has expected tick properties

        Parameters
        ----------
        axes : matplotlib Axes object, or its list-like
        xlabelsize : number
            expected xticks font size
        xrot : number
            expected xticks rotation
        ylabelsize : number
            expected yticks font size
        yrot : number
            expected yticks rotation
        """
        from matplotlib.ticker import NullFormatter
        axes = self._flatten_visible(axes)
        for ax in axes:
            if xlabelsize or xrot:
                if isinstance(ax.xaxis.get_minor_formatter(), NullFormatter):
                    # If minor ticks has NullFormatter, rot / fontsize are not
                    # retained
                    labels = ax.get_xticklabels()
                else:
                    labels = ax.get_xticklabels() + ax.get_xticklabels(
                        minor=True)
                for label in labels:
                    if xlabelsize is not None:
                        tm.assert_almost_equal(label.get_fontsize(),
                                               xlabelsize)
                    if xrot is not None:
                        tm.assert_almost_equal(label.get_rotation(), xrot)
            if ylabelsize or yrot:
                if isinstance(ax.yaxis.get_minor_formatter(), NullFormatter):
                    labels = ax.get_yticklabels()
                else:
                    labels = ax.get_yticklabels() + ax.get_yticklabels(
                        minor=True)
                for label in labels:
                    if ylabelsize is not None:
                        tm.assert_almost_equal(label.get_fontsize(),
                                               ylabelsize)
                    if yrot is not None:
                        tm.assert_almost_equal(label.get_rotation(), yrot)
    def _check_ax_scales(self, axes, xaxis='linear', yaxis='linear'):
        """
        Check each axes has expected scales

        Parameters
        ----------
        axes : matplotlib Axes object, or its list-like
        xaxis : {'linear', 'log'}
            expected xaxis scale
        yaxis : {'linear', 'log'}
            expected yaxis scale
        """
        axes = self._flatten_visible(axes)
        for ax in axes:
            assert ax.xaxis.get_scale() == xaxis
            assert ax.yaxis.get_scale() == yaxis
    def _check_axes_shape(self, axes, axes_num=None, layout=None,
                          figsize=None):
        """
        Check expected number of axes is drawn in expected layout

        Parameters
        ----------
        axes : matplotlib Axes object, or its list-like
        axes_num : number
            expected number of axes. Unnecessary axes should be set to
            invisible.
        layout : tuple
            expected layout, (expected number of rows , columns)
        figsize : tuple
            expected figsize. default is matplotlib default
        """
        if figsize is None:
            figsize = self.default_figsize
        visible_axes = self._flatten_visible(axes)
        if axes_num is not None:
            assert len(visible_axes) == axes_num
            for ax in visible_axes:
                # check something drawn on visible axes
                assert len(ax.get_children()) > 0
        if layout is not None:
            result = self._get_axes_layout(_flatten(axes))
            assert result == layout
        tm.assert_numpy_array_equal(
            visible_axes[0].figure.get_size_inches(),
            np.array(figsize, dtype=np.float64))
    def _get_axes_layout(self, axes):
        """Infer the (rows, columns) layout from the axes' positions."""
        x_set = set()
        y_set = set()
        for ax in axes:
            # check axes coordinates to estimate layout
            points = ax.get_position().get_points()
            x_set.add(points[0][0])
            y_set.add(points[0][1])
        return (len(y_set), len(x_set))
    def _flatten_visible(self, axes):
        """
        Flatten axes, and filter only visible

        Parameters
        ----------
        axes : matplotlib Axes object, or its list-like
        """
        axes = _flatten(axes)
        axes = [ax for ax in axes if ax.get_visible()]
        return axes
    def _check_has_errorbars(self, axes, xerr=0, yerr=0):
        """
        Check axes has expected number of errorbars

        Parameters
        ----------
        axes : matplotlib Axes object, or its list-like
        xerr : number
            expected number of x errorbar
        yerr : number
            expected number of y errorbar
        """
        axes = self._flatten_visible(axes)
        for ax in axes:
            containers = ax.containers
            xerr_count = 0
            yerr_count = 0
            for c in containers:
                has_xerr = getattr(c, 'has_xerr', False)
                has_yerr = getattr(c, 'has_yerr', False)
                if has_xerr:
                    xerr_count += 1
                if has_yerr:
                    yerr_count += 1
            assert xerr == xerr_count
            assert yerr == yerr_count
    def _check_box_return_type(self, returned, return_type, expected_keys=None,
                               check_ax_title=True):
        """
        Check box returned type is correct

        Parameters
        ----------
        returned : object to be tested, returned from boxplot
        return_type : str
            return_type passed to boxplot
        expected_keys : list-like, optional
            group labels in subplot case. If not passed,
            the function checks assuming boxplot uses single ax
        check_ax_title : bool
            Whether to check the ax.title is the same as expected_key
            Intended to be checked by calling from ``boxplot``.
            Normal ``plot`` doesn't attach ``ax.title``, it must be disabled.
        """
        from matplotlib.axes import Axes
        types = {'dict': dict, 'axes': Axes, 'both': tuple}
        if expected_keys is None:
            # should be fixed when the returning default is changed
            if return_type is None:
                return_type = 'dict'
            assert isinstance(returned, types[return_type])
            if return_type == 'both':
                assert isinstance(returned.ax, Axes)
                assert isinstance(returned.lines, dict)
        else:
            # should be fixed when the returning default is changed
            if return_type is None:
                for r in self._flatten_visible(returned):
                    assert isinstance(r, Axes)
                return
            assert isinstance(returned, Series)
            assert sorted(returned.keys()) == sorted(expected_keys)
            for key, value in iteritems(returned):
                assert isinstance(value, types[return_type])
                # check returned dict has correct mapping
                if return_type == 'axes':
                    if check_ax_title:
                        assert value.get_title() == key
                elif return_type == 'both':
                    if check_ax_title:
                        assert value.ax.get_title() == key
                    assert isinstance(value.ax, Axes)
                    assert isinstance(value.lines, dict)
                elif return_type == 'dict':
                    line = value['medians'][0]
                    axes = line.axes if self.mpl_ge_1_5_0 else line.get_axes()
                    if check_ax_title:
                        assert axes.get_title() == key
                else:
                    raise AssertionError
    def _check_grid_settings(self, obj, kinds, kws={}):
        # Make sure plot defaults to rcParams['axes.grid'] setting, GH 9792
        import matplotlib as mpl
        def is_grid_on():
            # True when either axis of the current axes draws grid lines
            xoff = all(not g.gridOn
                       for g in self.plt.gca().xaxis.get_major_ticks())
            yoff = all(not g.gridOn
                       for g in self.plt.gca().yaxis.get_major_ticks())
            return not (xoff and yoff)
        spndx = 1
        for kind in kinds:
            if not _ok_for_gaussian_kde(kind):
                continue
            # rc grid off, no grid kwarg -> grid stays off
            self.plt.subplot(1, 4 * len(kinds), spndx)
            spndx += 1
            mpl.rc('axes', grid=False)
            obj.plot(kind=kind, **kws)
            assert not is_grid_on()
            # rc grid on, grid=False kwarg -> kwarg wins, grid off
            self.plt.subplot(1, 4 * len(kinds), spndx)
            spndx += 1
            mpl.rc('axes', grid=True)
            obj.plot(kind=kind, grid=False, **kws)
            assert not is_grid_on()
            if kind != 'pie':
                # rc grid on, no grid kwarg -> grid on
                self.plt.subplot(1, 4 * len(kinds), spndx)
                spndx += 1
                mpl.rc('axes', grid=True)
                obj.plot(kind=kind, **kws)
                assert is_grid_on()
                # rc grid off, grid=True kwarg -> kwarg wins, grid on
                self.plt.subplot(1, 4 * len(kinds), spndx)
                spndx += 1
                mpl.rc('axes', grid=False)
                obj.plot(kind=kind, grid=True, **kws)
                assert is_grid_on()
    def _maybe_unpack_cycler(self, rcParams, field='color'):
        """
        Compat layer for MPL 1.5 change to color cycle

        Before: plt.rcParams['axes.color_cycle'] -> ['b', 'g', 'r'...]
        After : plt.rcParams['axes.prop_cycle'] -> cycler(...)
        """
        if self.mpl_ge_1_5_0:
            cyl = rcParams['axes.prop_cycle']
            colors = [v[field] for v in cyl]
        else:
            colors = rcParams['axes.color_cycle']
        return colors
def _check_plot_works(f, filterwarnings='always', **kwargs):
    """Call plotting function ``f(**kwargs)`` and verify it produces a valid
    plot return object, works again on a second subplot (best effort), and
    yields a figure that can be saved to disk.  Returns the last value
    returned by ``f``.
    """
    import matplotlib.pyplot as plt
    ret = None
    with warnings.catch_warnings():
        warnings.simplefilter(filterwarnings)
        try:
            try:
                # reuse an explicitly passed figure, else the current one
                fig = kwargs['figure']
            except KeyError:
                fig = plt.gcf()
            plt.clf()
            ax = kwargs.get('ax', fig.add_subplot(211))  # noqa
            ret = f(**kwargs)
            assert_is_valid_plot_return_object(ret)
            try:
                kwargs['ax'] = fig.add_subplot(212)
                ret = f(**kwargs)
            except Exception:
                # not every plot function accepts an ``ax`` kwarg; best effort
                pass
            else:
                assert_is_valid_plot_return_object(ret)
            # the figure must also survive a round-trip to disk
            with ensure_clean(return_filelike=True) as path:
                plt.savefig(path)
        finally:
            tm.close(fig)
    return ret
def curpath():
    """Return the absolute path of the directory containing this module."""
    return os.path.dirname(os.path.abspath(__file__))
| mit |
BiaDarkia/scikit-learn | examples/ensemble/plot_forest_importances.py | 168 | 1793 | """
=========================================
Feature importances with forests of trees
=========================================
This examples shows the use of forests of trees to evaluate the importance of
features on an artificial classification task. The red bars are the feature
importances of the forest, along with their inter-trees variability.
As expected, the plot suggests that 3 features are informative, while the
remaining are not.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn.datasets import make_classification
from sklearn.ensemble import ExtraTreesClassifier
# Build a classification task using 3 informative features
X, y = make_classification(n_samples=1000,
                           n_features=10,
                           n_informative=3,
                           n_redundant=0,
                           n_repeated=0,
                           n_classes=2,
                           random_state=0,
                           shuffle=False)
# Build a forest and compute the feature importances
forest = ExtraTreesClassifier(n_estimators=250,
                              random_state=0)
forest.fit(X, y)
importances = forest.feature_importances_
# inter-tree variability: std of each feature's importance across the trees
std = np.std([tree.feature_importances_ for tree in forest.estimators_],
             axis=0)
indices = np.argsort(importances)[::-1]  # feature indices, most important first
# Print the feature ranking
print("Feature ranking:")
for f in range(X.shape[1]):
    print("%d. feature %d (%f)" % (f + 1, indices[f], importances[indices[f]]))
# Plot the feature importances of the forest
plt.figure()
plt.title("Feature importances")
plt.bar(range(X.shape[1]), importances[indices],
        color="r", yerr=std[indices], align="center")
plt.xticks(range(X.shape[1]), indices)
plt.xlim([-1, X.shape[1]])
plt.show()
| bsd-3-clause |
Adai0808/scikit-learn | sklearn/linear_model/ransac.py | 191 | 14261 | # coding: utf-8
# Author: Johannes Schönberger
#
# License: BSD 3 clause
import numpy as np
from ..base import BaseEstimator, MetaEstimatorMixin, RegressorMixin, clone
from ..utils import check_random_state, check_array, check_consistent_length
from ..utils.random import sample_without_replacement
from ..utils.validation import check_is_fitted
from .base import LinearRegression
_EPSILON = np.spacing(1)
def _dynamic_max_trials(n_inliers, n_samples, min_samples, probability):
"""Determine number trials such that at least one outlier-free subset is
sampled for the given inlier/outlier ratio.
Parameters
----------
n_inliers : int
Number of inliers in the data.
n_samples : int
Total number of samples in the data.
min_samples : int
Minimum number of samples chosen randomly from original data.
probability : float
Probability (confidence) that one outlier-free sample is generated.
Returns
-------
trials : int
Number of trials.
"""
inlier_ratio = n_inliers / float(n_samples)
nom = max(_EPSILON, 1 - probability)
denom = max(_EPSILON, 1 - inlier_ratio ** min_samples)
if nom == 1:
return 0
if denom == 1:
return float('inf')
return abs(float(np.ceil(np.log(nom) / np.log(denom))))
class RANSACRegressor(BaseEstimator, MetaEstimatorMixin, RegressorMixin):
    """RANSAC (RANdom SAmple Consensus) algorithm.

    RANSAC is an iterative algorithm for the robust estimation of parameters
    from a subset of inliers from the complete data set. More information can
    be found in the general documentation of linear models.

    A detailed description of the algorithm can be found in the documentation
    of the ``linear_model`` sub-package.

    Read more in the :ref:`User Guide <RansacRegression>`.

    Parameters
    ----------
    base_estimator : object, optional
        Base estimator object which implements the following methods:

         * `fit(X, y)`: Fit model to given training data and target values.
         * `score(X, y)`: Returns the mean accuracy on the given test data,
           which is used for the stop criterion defined by `stop_score`.
           Additionally, the score is used to decide which of two equally
           large consensus sets is chosen as the better one.

        If `base_estimator` is None, then
        ``base_estimator=sklearn.linear_model.LinearRegression()`` is used for
        target values of dtype float.

        Note that the current implementation only supports regression
        estimators.

    min_samples : int (>= 1) or float ([0, 1]), optional
        Minimum number of samples chosen randomly from original data. Treated
        as an absolute number of samples for `min_samples >= 1`, treated as a
        relative number `ceil(min_samples * X.shape[0]`) for
        `min_samples < 1`. This is typically chosen as the minimal number of
        samples necessary to estimate the given `base_estimator`. By default a
        ``sklearn.linear_model.LinearRegression()`` estimator is assumed and
        `min_samples` is chosen as ``X.shape[1] + 1``.

    residual_threshold : float, optional
        Maximum residual for a data sample to be classified as an inlier.
        By default the threshold is chosen as the MAD (median absolute
        deviation) of the target values `y`.

    is_data_valid : callable, optional
        This function is called with the randomly selected data before the
        model is fitted to it: `is_data_valid(X, y)`. If its return value is
        False the current randomly chosen sub-sample is skipped.

    is_model_valid : callable, optional
        This function is called with the estimated model and the randomly
        selected data: `is_model_valid(model, X, y)`. If its return value is
        False the current randomly chosen sub-sample is skipped.
        Rejecting samples with this function is computationally costlier than
        with `is_data_valid`. `is_model_valid` should therefore only be used if
        the estimated model is needed for making the rejection decision.

    max_trials : int, optional
        Maximum number of iterations for random sample selection.

    stop_n_inliers : int, optional
        Stop iteration if at least this number of inliers are found.

    stop_score : float, optional
        Stop iteration if score is greater equal than this threshold.

    stop_probability : float in range [0, 1], optional
        RANSAC iteration stops if at least one outlier-free set of the training
        data is sampled in RANSAC. This requires to generate at least N
        samples (iterations)::

            N >= log(1 - probability) / log(1 - e**m)

        where the probability (confidence) is typically set to high value such
        as 0.99 (the default) and e is the current fraction of inliers w.r.t.
        the total number of samples.

    residual_metric : callable, optional
        Metric to reduce the dimensionality of the residuals to 1 for
        multi-dimensional target values ``y.shape[1] > 1``. By default the sum
        of absolute differences is used::

            lambda dy: np.sum(np.abs(dy), axis=1)

    random_state : integer or numpy.RandomState, optional
        The generator used to initialize the centers.  If an integer is
        given, it fixes the seed. Defaults to the global numpy random
        number generator.

    Attributes
    ----------
    estimator_ : object
        Best fitted model (copy of the `base_estimator` object).

    n_trials_ : int
        Number of random selection trials until one of the stop criteria is
        met. It is always ``<= max_trials``.

    inlier_mask_ : bool array of shape [n_samples]
        Boolean mask of inliers classified as ``True``.

    References
    ----------
    .. [1] http://en.wikipedia.org/wiki/RANSAC
    .. [2] http://www.cs.columbia.edu/~belhumeur/courses/compPhoto/ransac.pdf
    .. [3] http://www.bmva.org/bmvc/2009/Papers/Paper355/Paper355.pdf
    """
    def __init__(self, base_estimator=None, min_samples=None,
                 residual_threshold=None, is_data_valid=None,
                 is_model_valid=None, max_trials=100,
                 stop_n_inliers=np.inf, stop_score=np.inf,
                 stop_probability=0.99, residual_metric=None,
                 random_state=None):
        # sklearn convention: __init__ only stores parameters, no validation
        self.base_estimator = base_estimator
        self.min_samples = min_samples
        self.residual_threshold = residual_threshold
        self.is_data_valid = is_data_valid
        self.is_model_valid = is_model_valid
        self.max_trials = max_trials
        self.stop_n_inliers = stop_n_inliers
        self.stop_score = stop_score
        self.stop_probability = stop_probability
        self.residual_metric = residual_metric
        self.random_state = random_state
    def fit(self, X, y):
        """Fit estimator using RANSAC algorithm.

        Parameters
        ----------
        X : array-like or sparse matrix, shape [n_samples, n_features]
            Training data.

        y : array-like, shape = [n_samples] or [n_samples, n_targets]
            Target values.

        Returns
        -------
        self : object
            Fitted estimator, with ``estimator_``, ``n_trials_`` and
            ``inlier_mask_`` attributes set.

        Raises
        ------
        ValueError
            If no valid consensus set could be found. This occurs if
            `is_data_valid` and `is_model_valid` return False for all
            `max_trials` randomly chosen sub-samples.
        """
        X = check_array(X, accept_sparse='csr')
        y = check_array(y, ensure_2d=False)
        check_consistent_length(X, y)
        if self.base_estimator is not None:
            base_estimator = clone(self.base_estimator)
        else:
            base_estimator = LinearRegression()
        if self.min_samples is None:
            # assume linear model by default
            min_samples = X.shape[1] + 1
        elif 0 < self.min_samples < 1:
            # fractional min_samples: interpreted relative to n_samples
            min_samples = np.ceil(self.min_samples * X.shape[0])
        elif self.min_samples >= 1:
            if self.min_samples % 1 != 0:
                raise ValueError("Absolute number of samples must be an "
                                 "integer value.")
            min_samples = self.min_samples
        else:
            raise ValueError("Value for `min_samples` must be scalar and "
                             "positive.")
        if min_samples > X.shape[0]:
            raise ValueError("`min_samples` may not be larger than number "
                             "of samples ``X.shape[0]``.")
        if self.stop_probability < 0 or self.stop_probability > 1:
            raise ValueError("`stop_probability` must be in range [0, 1].")
        if self.residual_threshold is None:
            # MAD (median absolute deviation)
            residual_threshold = np.median(np.abs(y - np.median(y)))
        else:
            residual_threshold = self.residual_threshold
        if self.residual_metric is None:
            residual_metric = lambda dy: np.sum(np.abs(dy), axis=1)
        else:
            residual_metric = self.residual_metric
        random_state = check_random_state(self.random_state)
        try:  # Not all estimator accept a random_state
            base_estimator.set_params(random_state=random_state)
        except ValueError:
            pass
        n_inliers_best = 0
        # score_best is replaced by the first accepted candidate; candidates
        # are ranked on inlier count first, so this initial value is never
        # actually compared against a real score
        score_best = np.inf
        inlier_mask_best = None
        X_inlier_best = None
        y_inlier_best = None
        # number of data samples
        n_samples = X.shape[0]
        sample_idxs = np.arange(n_samples)
        # NOTE(review): n_samples is already set two lines above; this
        # re-assignment is redundant
        n_samples, _ = X.shape
        for self.n_trials_ in range(1, self.max_trials + 1):
            # choose random sample set
            subset_idxs = sample_without_replacement(n_samples, min_samples,
                                                     random_state=random_state)
            X_subset = X[subset_idxs]
            y_subset = y[subset_idxs]
            # check if random sample set is valid
            if (self.is_data_valid is not None
                    and not self.is_data_valid(X_subset, y_subset)):
                continue
            # fit model for current random sample set
            base_estimator.fit(X_subset, y_subset)
            # check if estimated model is valid
            if (self.is_model_valid is not None and not
                    self.is_model_valid(base_estimator, X_subset, y_subset)):
                continue
            # residuals of all data for current random sample model
            y_pred = base_estimator.predict(X)
            diff = y_pred - y
            if diff.ndim == 1:
                diff = diff.reshape(-1, 1)
            residuals_subset = residual_metric(diff)
            # classify data into inliers and outliers
            inlier_mask_subset = residuals_subset < residual_threshold
            n_inliers_subset = np.sum(inlier_mask_subset)
            # less inliers -> skip current random sample
            if n_inliers_subset < n_inliers_best:
                continue
            if n_inliers_subset == 0:
                raise ValueError("No inliers found, possible cause is "
                    "setting residual_threshold ({0}) too low.".format(
                    self.residual_threshold))
            # extract inlier data set
            inlier_idxs_subset = sample_idxs[inlier_mask_subset]
            X_inlier_subset = X[inlier_idxs_subset]
            y_inlier_subset = y[inlier_idxs_subset]
            # score of inlier data set
            score_subset = base_estimator.score(X_inlier_subset,
                                                y_inlier_subset)
            # same number of inliers but worse score -> skip current random
            # sample
            if (n_inliers_subset == n_inliers_best
                    and score_subset < score_best):
                continue
            # save current random sample as best sample
            n_inliers_best = n_inliers_subset
            score_best = score_subset
            inlier_mask_best = inlier_mask_subset
            X_inlier_best = X_inlier_subset
            y_inlier_best = y_inlier_subset
            # break if sufficient number of inliers or score is reached
            if (n_inliers_best >= self.stop_n_inliers
                    or score_best >= self.stop_score
                    or self.n_trials_
                       >= _dynamic_max_trials(n_inliers_best, n_samples,
                                              min_samples,
                                              self.stop_probability)):
                break
        # if none of the iterations met the required criteria
        if inlier_mask_best is None:
            raise ValueError(
                "RANSAC could not find valid consensus set, because"
                " either the `residual_threshold` rejected all the samples or"
                " `is_data_valid` and `is_model_valid` returned False for all"
                " `max_trials` randomly ""chosen sub-samples. Consider "
                "relaxing the ""constraints.")
        # estimate final model using all inliers
        base_estimator.fit(X_inlier_best, y_inlier_best)
        self.estimator_ = base_estimator
        self.inlier_mask_ = inlier_mask_best
        return self
    def predict(self, X):
        """Predict using the estimated model.

        This is a wrapper for `estimator_.predict(X)`.

        Parameters
        ----------
        X : numpy array of shape [n_samples, n_features]

        Returns
        -------
        y : array, shape = [n_samples] or [n_samples, n_targets]
            Returns predicted values.
        """
        check_is_fitted(self, 'estimator_')
        return self.estimator_.predict(X)
    def score(self, X, y):
        """Returns the score of the prediction.

        This is a wrapper for `estimator_.score(X, y)`.

        Parameters
        ----------
        X : numpy array or sparse matrix of shape [n_samples, n_features]
            Training data.

        y : array, shape = [n_samples] or [n_samples, n_targets]
            Target values.

        Returns
        -------
        z : float
            Score of the prediction.
        """
        check_is_fitted(self, 'estimator_')
        return self.estimator_.score(X, y)
| bsd-3-clause |
markovianlabs/pychain | src/mcmc.py | 1 | 6943 | """
Markov Chain Monte Carlo (MCMC) method using Metropolis-Hastings algorithm.
Copyright: MarkovianLabs
"""
import numpy as np
import matplotlib.pyplot as plt
from sys import exit
#----------------------------------------------------------
__author__ = ("Irshad Mohammed <creativeishu@gmail.com>")
#----------------------------------------------------------
class MCMC(object):
    """
    Implements a single MCMC chain using the Metropolis-Hastings algorithm.

    Parameters
    ----------
    TargetAcceptedPoints (Integer): Targeted number of accepted points.
    NumberOfParams (Integer): Total number of model parameters.
    Mins (1d array): Minimum value of each model parameter.
    Maxs (1d array): Maximum value of each model parameter.
    SDs (1d array): Expected standard deviation of each model parameter.
    alpha (Float): Scaling for chain steps.
    write2file (Boolean): Whether to write the output to a file.
        Default is False.
    outputfilename (String): Name of the output file.
    randomseed (Integer): Random seed.
    debug (Boolean): If True, print per-step diagnostics.
    EstimateCovariance (Boolean): If True, re-estimate the proposal
        covariance from the first CovNum accepted "good" points.
    CovNum (Integer): Number of points used for the covariance estimate.
    goodchi2 (Float): Chi-square threshold below which a point counts as
        "good" (triggers shrinking of the proposal covariance and is used
        when collecting covariance samples).
    """
    def __init__(self, TargetAcceptedPoints=10000, \
                 NumberOfParams=2, Mins=[0.0,-1.0], Maxs=[2.0,1.0], SDs=[1.0,1.0], alpha=1.0,\
                 write2file=False, outputfilename='chain.mcmc', randomseed=250192, debug=False,\
                 EstimateCovariance=True, CovNum=100, goodchi2=35.0):
        """
        Instantiates the class.
        """
        # NOTE(review): the list defaults (Mins, Maxs, SDs) are mutable and
        # shared between calls; they are only read here (converted with
        # np.array below), but callers should still pass their own lists.
        np.random.seed(randomseed)
        if not (NumberOfParams == len(Mins) and \
                NumberOfParams==len(Maxs) and NumberOfParams==len(SDs)):
            print "Length of Mins, Maxs and SDs should be same as NumberOfParams"
            exit()
        self.write2file=write2file
        self.outputfilename=outputfilename
        self.TargetAcceptedPoints = TargetAcceptedPoints
        self.NumberOfParams = NumberOfParams
        self.mins = np.array(Mins)
        self.maxs = np.array(Maxs)
        self.SD = np.array(SDs)
        self.alpha = alpha
        # Deliberately inflated (x100) diagonal proposal covariance to start;
        # it is shrunk to alpha*diag(SD**2) once a step with chi2 < goodchi2
        # is first seen (see MainChain).
        self.CovMat = 100.0 * self.alpha*np.diag(self.SD**2)
        self.debug = debug
        self.EstimateCovariance = EstimateCovariance
        self.CovNum = CovNum
        self.goodchi2 = goodchi2
    #----------------------------------------------------------
    def FirstStep(self):
        """
        Initiates the chain.

        Returns
        -------
        A numpy array with a uniformly random initial value for each
        parameter, drawn inside [mins, maxs].
        """
        return self.mins + \
            np.random.uniform(size=self.NumberOfParams)*\
            (self.maxs - self.mins)
    #----------------------------------------------------------
    def NextStep(self,Oldstep):
        """
        Generates the next proposed step for the chain.

        Parameters
        ----------
        Oldstep (1d array): Current values of the parameters.

        Returns
        -------
        A numpy array with the proposed values, drawn from a multivariate
        normal centred on Oldstep with covariance self.CovMat; proposals
        outside [mins, maxs] are redrawn until one falls inside the box.
        """
        NS = np.random.multivariate_normal(Oldstep,self.CovMat)
        while np.any(NS<self.mins) or np.any(NS>self.maxs):
            NS = np.random.multivariate_normal(Oldstep,self.CovMat)
        return NS
    #----------------------------------------------------------
    def MetropolisHastings(self,Oldchi2,Newchi2):
        """
        Determines acceptance of the new step based on the current step.

        Parameters
        ----------
        Oldchi2 (Float): Chi-square of the current step.
        Newchi2 (Float): Chi-square of the new step.

        Returns
        -------
        True if the new step is accepted, False otherwise.
        """
        # likelihoodratio >= 1 (always accepted) whenever Newchi2 <= Oldchi2;
        # otherwise accept with probability exp(-(Newchi2-Oldchi2)/2).
        likelihoodratio = np.exp(-(Newchi2-Oldchi2)/2)
        if likelihoodratio < np.random.uniform():
            return False
        else:
            return True
    #----------------------------------------------------------
    def chisquare(self, Params):
        """
        Computes the chi-square of the parameters.

        NOTE(review): this base implementation ignores the parameter
        values and simply draws a random chi-square variate with
        len(Params) degrees of freedom -- it is a placeholder, presumably
        meant to be overridden with a real likelihood.

        Parameters
        ----------
        Params (1d array): Values of the parameters.

        Returns
        -------
        Value of the chi-square.
        """
        return np.random.chisquare(df=len(Params))
    #----------------------------------------------------------
    def MainChain(self):
        """
        Runs the chain until TargetAcceptedPoints points are accepted.

        Returns
        -------
        Acceptance rate (accepted points / total proposals).
        """
        # Initialising multiplicity and accepted number of points.
        multiplicity = 0
        acceptedpoints = 0
        icov = 0
        OneTimeUpdateCov = True
        # Preparing output file
        if self.write2file:
            outfile = open(self.outputfilename,'w')
            writestring = '%1.6f \t'*self.NumberOfParams
        # Initialising the chain
        OldStep = self.FirstStep()
        Oldchi2 = self.chisquare(OldStep)
        Bestchi2 = Oldchi2
        EstCovList = []
        # Chain starts here...
        i=0
        while True:
            i += 1
            if acceptedpoints == self.TargetAcceptedPoints:
                break
            # Progress report every 1000 proposals.
            if (i%1000 == 0):
                print
                print "Step: %i \t AcceptedPoints: %i \t TargetAcceptedPoints: %i"%(i, acceptedpoints, self.TargetAcceptedPoints)
                print
            multiplicity += 1
            # Generating next step and its chi-square
            NewStep = self.NextStep(OldStep)
            Newchi2 = self.chisquare(NewStep)
            if self.debug:
                strFormat = self.NumberOfParams * '{:10f} '
                print "Step Number: %i \t Accepted Points: %i"%(i, acceptedpoints)
                print 'Old: ', Oldchi2, strFormat.format(*OldStep)
                print 'New: ', Newchi2, strFormat.format(*NewStep)
                print
            # Checking if it is to be accepted.
            GoodPoint = self.MetropolisHastings(Oldchi2,Newchi2)
            # Once the chain first reaches a "good" chi-square, shrink the
            # initially inflated proposal covariance. Done exactly once.
            if Newchi2<self.goodchi2 and OneTimeUpdateCov:
                self.CovMat = self.alpha*np.diag(self.SD**2)
                OneTimeUpdateCov = False
            if GoodPoint:
                # Updating number of accepted points.
                acceptedpoints += 1
                multiplicity = 0
                # Updating the old step.
                OldStep = NewStep
                Oldchi2 = Newchi2
                # Estimating Covariance: collect accepted "good" points.
                if self.EstimateCovariance and icov<self.CovNum and Newchi2<self.goodchi2:
                    icov += 1
                    EstCovList.append(NewStep)
                    print "Estimating Covariance: %i of %i points"%(icov, self.CovNum)
                # Updating Covariance: replace the diagonal proposal with
                # the empirical covariance of the collected points (once).
                if self.EstimateCovariance and icov==self.CovNum and Newchi2<self.goodchi2:
                    print "Covariance estimated, now updating..."
                    EstCovList = np.array(EstCovList)
                    self.CovMat = np.cov(np.transpose(EstCovList))
                    print "Estimated Covariance Matrix: "
                    print self.CovMat
                    print
                    self.EstimateCovariance = False
                # Updating best chi-square so far in the chain.
                if Newchi2<Bestchi2:
                    strFormat = self.NumberOfParams * '{:10f} '
                    Bestchi2=Newchi2
                    print "Best Chi-square so far: ", i, '\t', acceptedpoints, '\t', Bestchi2, strFormat.format(*NewStep)
                # Writing accepted steps into the output file
                if self.write2file:
                    print >>outfile, '%i \t'%i, '%1.6f \t'%Newchi2,'%i \t'%multiplicity,\
                        writestring%tuple(NewStep)
            else:
                continue
        # Writing Best chi-square of the full chain and the acceptance ratio.
        print "Best chi square: %1.5f"%Bestchi2
        print "Acceptance Ratio: %1.5f"%(float(acceptedpoints)/i)
        return float(acceptedpoints)/i
#==============================================================================
if __name__=="__main__":
    # Smoke line only: identify the module when executed as a script
    # (Python 2 print statement).
    print "Class: Markov Chain Monte Carlo (MCMC) method using Metropolis-Hastings algorithm."
| mit |
Barmaley-exe/scikit-learn | examples/plot_digits_pipe.py | 250 | 1809 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=========================================================
Pipelining: chaining a PCA and a logistic regression
=========================================================
The PCA does an unsupervised dimensionality reduction, while the logistic
regression does the prediction.
We use a GridSearchCV to set the dimensionality of the PCA
"""
print(__doc__)

# Code source: Gaël Varoquaux
# Modified for documentation by Jaques Grobler
# License: BSD 3 clause

import numpy as np
import matplotlib.pyplot as plt

from sklearn import linear_model, decomposition, datasets
from sklearn.pipeline import Pipeline
from sklearn.grid_search import GridSearchCV

# Build the two-stage pipeline: unsupervised PCA feeding a logistic
# regression classifier.
logistic = linear_model.LogisticRegression()

pca = decomposition.PCA()
pipe = Pipeline(steps=[('pca', pca), ('logistic', logistic)])

digits = datasets.load_digits()
X_digits = digits.data
y_digits = digits.target

###############################################################################
# Plot the PCA spectrum (explained variance per component).
pca.fit(X_digits)

plt.figure(1, figsize=(4, 3))
plt.clf()
plt.axes([.2, .2, .7, .7])
plt.plot(pca.explained_variance_, linewidth=2)
plt.axis('tight')
plt.xlabel('n_components')
plt.ylabel('explained_variance_')

###############################################################################
# Prediction: grid-search over the number of PCA components and the
# logistic-regression regularisation strength C.
n_components = [20, 40, 64]
Cs = np.logspace(-4, 4, 3)

# Parameters of pipelines can be set using '__' separated parameter names:
estimator = GridSearchCV(pipe,
                         dict(pca__n_components=n_components,
                              logistic__C=Cs))
estimator.fit(X_digits, y_digits)

# Mark the chosen n_components on the spectrum plot.
plt.axvline(estimator.best_estimator_.named_steps['pca'].n_components,
            linestyle=':', label='n_components chosen')
plt.legend(prop=dict(size=12))
plt.show()
| bsd-3-clause |
RuthAngus/chronometer | chronometer/fit_dispersion.py | 1 | 2000 | import numpy as np
from action_age_evolution import calc_dispersion
import emcee
import corner
import matplotlib.pyplot as plt
# Global matplotlib styling for this module's figures; applied once at
# import time via rcParams.update below.
plotpar = {'axes.labelsize': 18,
           'font.size': 10,
           'legend.fontsize': 15,
           'xtick.labelsize': 18,
           'ytick.labelsize': 18,
           'text.usetex': True}
plt.rcParams.update(plotpar)
def lnprob(pars, x, y, yerr):
    """Log-posterior of the dispersion model.

    Parameters
    ----------
    pars : sequence of 4 floats
        (ln sz0, ln t1, beta, ln hsz) -- the first, second and fourth
        entries are log-parameters and are exponentiated before being
        passed to the model.
    x : array
        Times at which the dispersion is evaluated.
    y, yerr : array
        Observed dispersions and their uncertainties.

    Returns
    -------
    float
        Gaussian log-likelihood plus the (flat, bounded) log-prior.
    """
    # Evaluate the prior first: outside the bounds the posterior is -inf
    # regardless of the data, and skipping the model evaluation avoids
    # np.exp overflow for wildly out-of-range walker positions.
    lp = lnprior(pars)
    if not np.isfinite(lp):
        return -np.inf
    lnsz0, lnt1, beta, lnhsz = pars
    model = calc_dispersion([np.exp(lnsz0), np.exp(lnt1), beta, np.exp(lnhsz)], x)
    # Gaussian chi-square log-likelihood.
    return lp + np.sum(-.5*((model - y)/yerr)**2)
def lnprior(pars):
    """Flat (improper) log-prior over a finite box in parameter space.

    Returns 0.0 when every parameter lies strictly inside its bounds
    (|ln sz0| < 20, |ln t1| < 20, |beta| < 100, |ln hsz| < 20), and
    -inf otherwise.
    """
    lnsz0, lnt1, beta, lnhsz = pars
    in_bounds = (
        abs(lnsz0) < 20
        and abs(lnt1) < 20
        and abs(beta) < 100
        and abs(lnhsz) < 20
    )
    return 0. if in_bounds else -np.inf
if __name__ == "__main__":
    # Demonstration driver: build synthetic dispersion "data", run emcee on
    # the vertical component, and save a corner plot of the posterior.
    time = np.linspace(0, 14, 100)
    # Evolution-model parameters (several of these -- sr0, tm, R0, Rc, hsr,
    # hr, sr, sz -- are set up but not used below; presumably kept for the
    # radial-dispersion variant).
    sz0 = 50.
    sr0 = 50.
    t1 = .1
    tm = 10.
    beta = .33
    R0 = 1.
    Rc = 1.
    hsz = 9.
    hsr = 9.
    solar_radius = 8.
    hr = 2.68/solar_radius

    # Today
    sr = 34.
    sz = 25.1

    # Initial guesses in (log sz0, log t1, beta, log hsz) space.
    zpar_init = np.array([np.log(sz0), np.log(t1), beta, np.log(hsz)])
    rpar_init = np.array([np.log(sr0), np.log(t1), beta, np.log(hsz)])

    # Synthetic "observations": the model evaluated at slightly offset
    # parameters, with 10% errors assumed below.
    sigma_z = calc_dispersion([sz0 + 5, t1, beta + .2, hsz], time)
    sigma_r = calc_dispersion([sr0 + 5, t1, beta + .2, hsz], time)

    print(lnprob(zpar_init, time, sigma_z, sigma_z*.1))

    x, y, yerr = time, sigma_z, sigma_z*.1
    ndim, nwalkers, nsteps = len(zpar_init), 24, 10000
    # Walkers start in a tiny Gaussian ball around the initial guess.
    p0 = [1e-4*np.random.rand(ndim) + zpar_init for i in range(nwalkers)]
    sampler = emcee.EnsembleSampler(nwalkers, ndim, lnprob, args=[x, y, yerr])

    # Short burn-in, then the production run from the burn-in end point.
    pos, _, _ = sampler.run_mcmc(p0, 500)
    sampler.reset()
    sampler.run_mcmc(pos, nsteps)
    flat = np.reshape(sampler.chain, (nwalkers*nsteps, ndim))
    # flat[:, :2] = np.exp(flat[:, :2])
    # flat[:, 3:] = np.exp(flat[:, 3:])
    labels = ["$\ln \sigma_{z0}$", "$t_1$", "$\\beta$", "$\sigma_{Hz}$"]
    fig = corner.corner(flat, labels=labels)
    fig.savefig("zcorner")
| mit |
spallavolu/scikit-learn | sklearn/datasets/tests/test_mldata.py | 384 | 5221 | """Test functionality of mldata fetching utilities."""
import os
import shutil
import tempfile
import scipy as sp
from sklearn import datasets
from sklearn.datasets import mldata_filename, fetch_mldata
from sklearn.utils.testing import assert_in
from sklearn.utils.testing import assert_not_in
from sklearn.utils.testing import mock_mldata_urlopen
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import with_setup
from sklearn.utils.testing import assert_array_equal
tmpdir = None
def setup_tmpdata():
    """Create a fresh temporary data home with an empty 'mldata' cache dir."""
    global tmpdir
    tmpdir = tempfile.mkdtemp()
    cache_dir = os.path.join(tmpdir, 'mldata')
    os.makedirs(cache_dir)
def teardown_tmpdata():
    """Remove the temporary data home created by setup_tmpdata, if any."""
    if tmpdir is None:
        return
    shutil.rmtree(tmpdir)
def test_mldata_filename():
    """mldata_filename must slugify dataset names the way mldata.org does."""
    expected = {
        'datasets-UCI iris': 'datasets-uci-iris',
        'news20.binary': 'news20binary',
        'book-crossing-ratings-1.0': 'book-crossing-ratings-10',
        'Nile Water Level': 'nile-water-level',
        'MNIST (original)': 'mnist-original',
    }
    for name, slug in expected.items():
        assert_equal(mldata_filename(name), slug)
@with_setup(setup_tmpdata, teardown_tmpdata)
def test_download():
    """Test that fetch_mldata is able to download and cache a data set."""
    _urlopen_ref = datasets.mldata.urlopen
    # Monkeypatch urlopen so no network access happens; the mock serves a
    # 150x4 data matrix plus a label column for dataset name 'mock'.
    datasets.mldata.urlopen = mock_mldata_urlopen({
        'mock': {
            'label': sp.ones((150,)),
            'data': sp.ones((150, 4)),
        },
    })
    try:
        mock = fetch_mldata('mock', data_home=tmpdir)
        for n in ["COL_NAMES", "DESCR", "target", "data"]:
            assert_in(n, mock)

        assert_equal(mock.target.shape, (150,))
        assert_equal(mock.data.shape, (150, 4))

        # Unknown dataset names must surface the HTTP error.
        assert_raises(datasets.mldata.HTTPError,
                      fetch_mldata, 'not_existing_name')
    finally:
        # Always restore the real urlopen, even when assertions fail.
        datasets.mldata.urlopen = _urlopen_ref
@with_setup(setup_tmpdata, teardown_tmpdata)
def test_fetch_one_column():
    """A single-column dataset is exposed as 'data' with no 'target'."""
    _urlopen_ref = datasets.mldata.urlopen
    try:
        dataname = 'onecol'
        # create fake data set in cache
        x = sp.arange(6).reshape(2, 3)
        datasets.mldata.urlopen = mock_mldata_urlopen({dataname: {'x': x}})

        dset = fetch_mldata(dataname, data_home=tmpdir)
        for n in ["COL_NAMES", "DESCR", "data"]:
            assert_in(n, dset)
        assert_not_in("target", dset)

        assert_equal(dset.data.shape, (2, 3))
        assert_array_equal(dset.data, x)

        # transposing the data array disabled: shape comes back transposed
        dset = fetch_mldata(dataname, transpose_data=False, data_home=tmpdir)
        assert_equal(dset.data.shape, (3, 2))
    finally:
        # Restore the real urlopen regardless of the outcome.
        datasets.mldata.urlopen = _urlopen_ref
@with_setup(setup_tmpdata, teardown_tmpdata)
def test_fetch_multiple_column():
    """Multi-column sets: data/target selection by default, order, number
    and name must all work."""
    _urlopen_ref = datasets.mldata.urlopen
    try:
        # create fake data set in cache
        x = sp.arange(6).reshape(2, 3)
        y = sp.array([1, -1])
        z = sp.arange(12).reshape(4, 3)

        # by default: columns named 'label'/'data' map to target/data,
        # anything else is kept under its own name.
        dataname = 'threecol-default'
        datasets.mldata.urlopen = mock_mldata_urlopen({
            dataname: (
                {
                    'label': y,
                    'data': x,
                    'z': z,
                },
                ['z', 'data', 'label'],
            ),
        })

        dset = fetch_mldata(dataname, data_home=tmpdir)
        for n in ["COL_NAMES", "DESCR", "target", "data", "z"]:
            assert_in(n, dset)
        assert_not_in("x", dset)
        assert_not_in("y", dset)

        assert_array_equal(dset.data, x)
        assert_array_equal(dset.target, y)
        # extra columns are transposed on load
        assert_array_equal(dset.z, z.T)

        # by order: without 'label'/'data' names, columns are assigned by
        # their position in the ordering.
        dataname = 'threecol-order'
        datasets.mldata.urlopen = mock_mldata_urlopen({
            dataname: ({'y': y, 'x': x, 'z': z},
                       ['y', 'x', 'z']), })

        dset = fetch_mldata(dataname, data_home=tmpdir)
        for n in ["COL_NAMES", "DESCR", "target", "data", "z"]:
            assert_in(n, dset)
        assert_not_in("x", dset)
        assert_not_in("y", dset)

        assert_array_equal(dset.data, x)
        assert_array_equal(dset.target, y)
        assert_array_equal(dset.z, z.T)

        # by number: explicit target_name / data_name as column indices.
        dataname = 'threecol-number'
        datasets.mldata.urlopen = mock_mldata_urlopen({
            dataname: ({'y': y, 'x': x, 'z': z},
                       ['z', 'x', 'y']),
        })

        dset = fetch_mldata(dataname, target_name=2, data_name=0,
                            data_home=tmpdir)
        for n in ["COL_NAMES", "DESCR", "target", "data", "x"]:
            assert_in(n, dset)
        assert_not_in("y", dset)
        assert_not_in("z", dset)

        assert_array_equal(dset.data, z)
        assert_array_equal(dset.target, y)

        # by name: explicit target_name / data_name as column names.
        dset = fetch_mldata(dataname, target_name='y', data_name='z',
                            data_home=tmpdir)
        for n in ["COL_NAMES", "DESCR", "target", "data", "x"]:
            assert_in(n, dset)
        assert_not_in("y", dset)
        assert_not_in("z", dset)
    finally:
        # Restore the real urlopen regardless of the outcome.
        datasets.mldata.urlopen = _urlopen_ref
| bsd-3-clause |
robbymeals/scikit-learn | sklearn/linear_model/tests/test_passive_aggressive.py | 121 | 6117 | import numpy as np
import scipy.sparse as sp
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_array_almost_equal, assert_array_equal
from sklearn.utils.testing import assert_raises
from sklearn.base import ClassifierMixin
from sklearn.utils import check_random_state
from sklearn.datasets import load_iris
from sklearn.linear_model import PassiveAggressiveClassifier
from sklearn.linear_model import PassiveAggressiveRegressor
# Shared fixture: iris shuffled with a fixed seed so every test sees the
# same reproducible ordering; X_csr is a sparse copy for the sparse paths.
iris = load_iris()
random_state = check_random_state(12)
indices = np.arange(iris.data.shape[0])
random_state.shuffle(indices)
X = iris.data[indices]
y = iris.target[indices]
X_csr = sp.csr_matrix(X)
class MyPassiveAggressive(ClassifierMixin):
    """Straightforward reference implementation of the Passive-Aggressive
    update rules, used to cross-check the sklearn estimators."""

    def __init__(self, C=1.0, epsilon=0.01, loss="hinge",
                 fit_intercept=True, n_iter=1, random_state=None):
        self.C = C
        self.epsilon = epsilon
        self.loss = loss
        self.fit_intercept = fit_intercept
        self.n_iter = n_iter

    def fit(self, X, y):
        """Run n_iter passes of per-sample PA updates over (X, y)."""
        n_samples, n_features = X.shape
        self.w = np.zeros(n_features, dtype=np.float64)
        self.b = 0.0

        for t in range(self.n_iter):
            for i in range(n_samples):
                p = self.project(X[i])
                # Hinge-style losses are for classification;
                # epsilon-insensitive losses for regression.
                if self.loss in ("hinge", "squared_hinge"):
                    loss = max(1 - y[i] * p, 0)
                else:
                    loss = max(np.abs(p - y[i]) - self.epsilon, 0)

                sqnorm = np.dot(X[i], X[i])

                # PA-I (capped step) for the linear losses, PA-II for the
                # squared losses.
                if self.loss in ("hinge", "epsilon_insensitive"):
                    step = min(self.C, loss / sqnorm)
                elif self.loss in ("squared_hinge",
                                   "squared_epsilon_insensitive"):
                    step = loss / (sqnorm + 1.0 / (2 * self.C))

                # Step direction: label sign for classification, residual
                # sign for regression.
                if self.loss in ("hinge", "squared_hinge"):
                    step *= y[i]
                else:
                    step *= np.sign(y[i] - p)

                self.w += step * X[i]
                if self.fit_intercept:
                    self.b += step

    def project(self, X):
        """Return the decision value(s) w.X + b."""
        return np.dot(X, self.w) + self.b
def test_classifier_accuracy():
    # Dense and sparse input, with and without an intercept, must all reach
    # ~80% training accuracy on shuffled iris.
    for data in (X, X_csr):
        for fit_intercept in (True, False):
            clf = PassiveAggressiveClassifier(C=1.0, n_iter=30,
                                              fit_intercept=fit_intercept,
                                              random_state=0)
            clf.fit(data, y)
            score = clf.score(data, y)
            assert_greater(score, 0.79)
def test_classifier_partial_fit():
    # 30 partial_fit passes should reach the same accuracy bar as a single
    # fit with n_iter=30.
    classes = np.unique(y)
    for data in (X, X_csr):
        clf = PassiveAggressiveClassifier(C=1.0,
                                          fit_intercept=True,
                                          random_state=0)
        for t in range(30):
            clf.partial_fit(data, y, classes)
        score = clf.score(data, y)
        assert_greater(score, 0.79)
def test_classifier_refit():
    # Classifier can be retrained on different labels and features.
    clf = PassiveAggressiveClassifier().fit(X, y)
    assert_array_equal(clf.classes_, np.unique(y))

    # Refit with one fewer feature and string labels; classes_ must update.
    clf.fit(X[:, :-1], iris.target_names[y])
    assert_array_equal(clf.classes_, iris.target_names)
def test_classifier_correctness():
    # sklearn's classifier must match the reference MyPassiveAggressive
    # weights (to 2 decimals) on a binarized iris problem, for both losses
    # and both input formats. shuffle=False keeps the sample order aligned
    # with the reference loop.
    y_bin = y.copy()
    y_bin[y != 1] = -1

    for loss in ("hinge", "squared_hinge"):

        clf1 = MyPassiveAggressive(C=1.0,
                                   loss=loss,
                                   fit_intercept=True,
                                   n_iter=2)
        clf1.fit(X, y_bin)

        for data in (X, X_csr):
            clf2 = PassiveAggressiveClassifier(C=1.0,
                                               loss=loss,
                                               fit_intercept=True,
                                               n_iter=2, shuffle=False)
            clf2.fit(data, y_bin)

            assert_array_almost_equal(clf1.w, clf2.coef_.ravel(), decimal=2)
def test_classifier_undefined_methods():
    """Probability/transform accessors must be undefined on the classifier."""
    clf = PassiveAggressiveClassifier()
    for meth in ("predict_proba", "predict_log_proba", "transform"):
        # Attribute access itself must raise, so pass getattr directly.
        assert_raises(AttributeError, getattr, clf, meth)
def test_regressor_mse():
    # Regressor trained on +/-1 targets: training MSE must stay below 1.7
    # for dense and sparse input, with and without an intercept.
    y_bin = y.copy()
    y_bin[y != 1] = -1

    for data in (X, X_csr):
        for fit_intercept in (True, False):
            reg = PassiveAggressiveRegressor(C=1.0, n_iter=50,
                                             fit_intercept=fit_intercept,
                                             random_state=0)
            reg.fit(data, y_bin)
            pred = reg.predict(data)
            assert_less(np.mean((pred - y_bin) ** 2), 1.7)
def test_regressor_partial_fit():
    # 50 partial_fit passes should reach the same MSE bar as a single fit
    # with n_iter=50.
    y_bin = y.copy()
    y_bin[y != 1] = -1

    for data in (X, X_csr):
        reg = PassiveAggressiveRegressor(C=1.0,
                                         fit_intercept=True,
                                         random_state=0)
        for t in range(50):
            reg.partial_fit(data, y_bin)
        pred = reg.predict(data)
        assert_less(np.mean((pred - y_bin) ** 2), 1.7)
def test_regressor_correctness():
    # sklearn's regressor must match the reference MyPassiveAggressive
    # weights (to 2 decimals) for both epsilon-insensitive losses and both
    # input formats. shuffle=False keeps sample order aligned.
    y_bin = y.copy()
    y_bin[y != 1] = -1

    for loss in ("epsilon_insensitive", "squared_epsilon_insensitive"):

        reg1 = MyPassiveAggressive(C=1.0,
                                   loss=loss,
                                   fit_intercept=True,
                                   n_iter=2)
        reg1.fit(X, y_bin)

        for data in (X, X_csr):
            reg2 = PassiveAggressiveRegressor(C=1.0,
                                              loss=loss,
                                              fit_intercept=True,
                                              n_iter=2, shuffle=False)
            reg2.fit(data, y_bin)

            assert_array_almost_equal(reg1.w, reg2.coef_.ravel(), decimal=2)
def test_regressor_undefined_methods():
    """The transform accessor must be undefined on the regressor."""
    reg = PassiveAggressiveRegressor()
    for meth in ("transform",):
        # Attribute access itself must raise, so pass getattr directly.
        assert_raises(AttributeError, getattr, reg, meth)
| bsd-3-clause |
public-ink/public-ink | server/appengine/lib/matplotlib/offsetbox.py | 10 | 54984 | """
The OffsetBox is a simple container artist. The child artist are meant
to be drawn at a relative position to its parent. The [VH]Packer,
DrawingArea and TextArea are derived from the OffsetBox.
The [VH]Packer automatically adjust the relative postisions of their
children, which should be instances of the OffsetBox. This is used to
align similar artists together, e.g., in legend.
The DrawingArea can contain any Artist as a child. The
DrawingArea has a fixed width and height. The position of children
relative to the parent is fixed. The TextArea is contains a single
Text instance. The width and height of the TextArea instance is the
width and height of the its child text.
"""
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import six
from six.moves import xrange, zip
import warnings
import matplotlib.transforms as mtransforms
import matplotlib.artist as martist
import matplotlib.text as mtext
import matplotlib.path as mpath
import numpy as np
from matplotlib.transforms import Bbox, BboxBase, TransformedBbox
from matplotlib.font_manager import FontProperties
from matplotlib.patches import FancyBboxPatch, FancyArrowPatch
from matplotlib import rcParams
from matplotlib import docstring
#from bboximage import BboxImage
from matplotlib.image import BboxImage
from matplotlib.patches import bbox_artist as mbbox_artist
from matplotlib.text import _AnnotationBase
DEBUG = False
# for debuging use
def bbox_artist(*args, **kwargs):
    """Debug helper: forward to `.patches.bbox_artist` only when DEBUG is on."""
    if not DEBUG:
        return
    mbbox_artist(*args, **kwargs)
# _get_packed_offsets() and _get_aligned_offsets() are coded assuming
# that we are packing boxes horizontally. But same function will be
# used with vertical packing.
def _get_packed_offsets(wd_list, total, sep, mode="fixed"):
"""
Geiven a list of (width, xdescent) of each boxes, calculate the
total width and the x-offset positions of each items according to
*mode*. xdescent is analagous to the usual descent, but along the
x-direction. xdescent values are currently ignored.
*wd_list* : list of (width, xdescent) of boxes to be packed.
*sep* : spacing between boxes
*total* : Intended total length. None if not used.
*mode* : packing mode. 'fixed', 'expand', or 'equal'.
"""
w_list, d_list = list(zip(*wd_list))
# d_list is currently not used.
if mode == "fixed":
offsets_ = np.add.accumulate([0] + [w + sep for w in w_list])
offsets = offsets_[:-1]
if total is None:
total = offsets_[-1] - sep
return total, offsets
elif mode == "expand":
if len(w_list) > 1:
sep = (total - sum(w_list)) / (len(w_list) - 1.)
else:
sep = 0.
offsets_ = np.add.accumulate([0] + [w + sep for w in w_list])
offsets = offsets_[:-1]
return total, offsets
elif mode == "equal":
maxh = max(w_list)
if total is None:
total = (maxh + sep) * len(w_list)
else:
sep = float(total) / (len(w_list)) - maxh
offsets = np.array([(maxh + sep) * i for i in range(len(w_list))])
return total, offsets
else:
raise ValueError("Unknown mode : %s" % (mode,))
def _get_aligned_offsets(hd_list, height, align="baseline"):
"""
Given a list of (height, descent) of each boxes, align the boxes
with *align* and calculate the y-offsets of each boxes.
total width and the offset positions of each items according to
*mode*. xdescent is analogous to the usual descent, but along the
x-direction. xdescent values are currently ignored.
*hd_list* : list of (width, xdescent) of boxes to be aligned.
*sep* : spacing between boxes
*height* : Intended total length. None if not used.
*align* : align mode. 'baseline', 'top', 'bottom', or 'center'.
"""
if height is None:
height = max([h for h, d in hd_list])
if align == "baseline":
height_descent = max([h - d for h, d in hd_list])
descent = max([d for h, d in hd_list])
height = height_descent + descent
offsets = [0. for h, d in hd_list]
elif align in ["left", "top"]:
descent = 0.
offsets = [d for h, d in hd_list]
elif align in ["right", "bottom"]:
descent = 0.
offsets = [height - h + d for h, d in hd_list]
elif align == "center":
descent = 0.
offsets = [(height - h) * .5 + d for h, d in hd_list]
else:
raise ValueError("Unknown Align mode : %s" % (align,))
return height, descent, offsets
class OffsetBox(martist.Artist):
    """
    The OffsetBox is a simple container artist.  Its children are meant
    to be drawn at a relative position to the parent.
    """
    def __init__(self, *args, **kwargs):
        super(OffsetBox, self).__init__(*args, **kwargs)

        # Clipping has not been implemented in the OffsetBox family, so
        # disable the clip flag for consistency. It can always be turned back
        # on to zero effect.
        self.set_clip_on(False)

        self._children = []
        self._offset = (0, 0)

    def __getstate__(self):
        state = martist.Artist.__getstate__(self)

        # pickle cannot save instancemethods, so wrap a callable offset in
        # a picklable helper here (undone in __setstate__).
        from .cbook import _InstanceMethodPickler
        import inspect
        offset = state['_offset']
        if inspect.ismethod(offset):
            state['_offset'] = _InstanceMethodPickler(offset)
        return state

    def __setstate__(self, state):
        self.__dict__ = state
        from .cbook import _InstanceMethodPickler
        # Undo the pickling wrapper applied in __getstate__.
        if isinstance(self._offset, _InstanceMethodPickler):
            self._offset = self._offset.get_instancemethod()
        self.stale = True

    def set_figure(self, fig):
        """
        Set the `~matplotlib.figure.Figure` for this artist and all of its
        children.
        """
        martist.Artist.set_figure(self, fig)
        for c in self.get_children():
            c.set_figure(fig)

    @martist.Artist.axes.setter
    def axes(self, ax):
        # TODO deal with this better
        martist.Artist.axes.fset(self, ax)
        # Propagate the axes to every child as well.
        for c in self.get_children():
            if c is not None:
                c.axes = ax

    def contains(self, mouseevent):
        # Delegate hit-testing to the children; report the first hit.
        for c in self.get_children():
            a, b = c.contains(mouseevent)
            if a:
                return a, b
        return False, {}

    def set_offset(self, xy):
        """
        Set the offset.

        Accepts an (x, y) tuple, or a callable
        ``f(width, height, xdescent, ydescent, renderer)`` returning such
        a tuple, evaluated at draw time (see `get_offset`).
        """
        self._offset = xy
        self.stale = True

    def get_offset(self, width, height, xdescent, ydescent, renderer):
        """
        Return the offset as an (x, y) pair, given the extent of the box
        and the renderer (needed when the offset is a callable).
        """
        if six.callable(self._offset):
            return self._offset(width, height, xdescent, ydescent, renderer)
        else:
            return self._offset

    def set_width(self, width):
        """
        Set the width of the box (float).
        """
        self.width = width
        self.stale = True

    def set_height(self, height):
        """
        Set the height of the box (float).
        """
        self.height = height
        self.stale = True

    def get_visible_children(self):
        """
        Return a list of the visible child artists.
        """
        return [c for c in self._children if c.get_visible()]

    def get_children(self):
        """
        Return a list of the child artists.
        """
        return self._children

    def get_extent_offsets(self, renderer):
        # Abstract hook: subclasses return (width, height, xdescent,
        # ydescent, list of per-child (x, y) offsets).
        raise Exception("")

    def get_extent(self, renderer):
        """
        Return the (width, height, xdescent, ydescent) of the box.
        """
        w, h, xd, yd, offsets = self.get_extent_offsets(renderer)
        return w, h, xd, yd

    def get_window_extent(self, renderer):
        '''
        Return the bounding box in display space.
        '''
        w, h, xd, yd, offsets = self.get_extent_offsets(renderer)
        px, py = self.get_offset(w, h, xd, yd, renderer)
        return mtransforms.Bbox.from_bounds(px - xd, py - yd, w, h)

    def draw(self, renderer):
        """
        Update the location of the children if necessary and draw them
        to the given *renderer*.
        """
        width, height, xdescent, ydescent, offsets = self.get_extent_offsets(
            renderer)
        px, py = self.get_offset(width, height, xdescent, ydescent, renderer)
        for c, (ox, oy) in zip(self.get_visible_children(), offsets):
            c.set_offset((px + ox, py + oy))
            c.draw(renderer)
        bbox_artist(self, renderer, fill=False, props=dict(pad=0.))
        self.stale = False
class PackerBase(OffsetBox):
    """Common base for `VPacker` and `HPacker`: stores the packing options."""

    def __init__(self, pad=None, sep=None, width=None, height=None,
                 align=None, mode=None,
                 children=None):
        """
        Parameters
        ----------
        pad : float, optional
            Boundary pad, in points (scaled with the renderer dpi at draw
            time).
        sep : float, optional
            Spacing between items, in points.
        width : float, optional
        height : float, optional
            Width and height of the container box, in pixels; calculated
            if `None`.
        align : str, optional
            Alignment of boxes. Can be one of ``top``, ``bottom``,
            ``left``, ``right``, ``center`` and ``baseline``.
        mode : str, optional
            Packing mode.
        children : list, optional
            The child boxes to pack.
        """
        super(PackerBase, self).__init__()
        self.pad = pad
        self.sep = sep
        self.width = width
        self.height = height
        self.align = align
        self.mode = mode
        self._children = children
class VPacker(PackerBase):
    """
    The VPacker has its children packed vertically. It automatically
    adjusts the relative positions of children at draw time.
    """
    def __init__(self, pad=None, sep=None, width=None, height=None,
                 align="baseline", mode="fixed",
                 children=None):
        """
        Parameters
        ----------
        pad : float, optional
            Boundary pad, in points (scaled with the renderer dpi at draw
            time).
        sep : float, optional
            Spacing between items, in points.
        width : float, optional
        height : float, optional
            Width and height of the container box, in pixels; calculated
            if `None`.
        align : str, optional
            Alignment of boxes.
        mode : str, optional
            Packing mode.
        """
        super(VPacker, self).__init__(pad, sep, width, height,
                                      align, mode,
                                      children)

    def get_extent_offsets(self, renderer):
        """
        Update the offsets of the children and return the extents of the box.
        """
        dpicor = renderer.points_to_pixels(1.)
        pad = self.pad * dpicor
        sep = self.sep * dpicor

        # Propagate our width to any child packer in "expand" mode before
        # measuring it.
        if self.width is not None:
            for c in self.get_visible_children():
                if isinstance(c, PackerBase) and c.mode == "expand":
                    c.set_width(self.width)

        whd_list = [c.get_extent(renderer)
                    for c in self.get_visible_children()]
        # Replace ydescent with the ascent measured from the top (h - yd)
        # for the top-down packing arithmetic below.
        whd_list = [(w, h, xd, (h - yd)) for w, h, xd, yd in whd_list]

        wd_list = [(w, xd) for w, h, xd, yd in whd_list]
        width, xdescent, xoffsets = _get_aligned_offsets(wd_list,
                                                         self.width,
                                                         self.align)

        pack_list = [(h, yd) for w, h, xd, yd in whd_list]
        height, yoffsets_ = _get_packed_offsets(pack_list, self.height,
                                                sep, self.mode)

        # yoffsets_ is a numpy array, so adding the list is elementwise.
        yoffsets = yoffsets_ + [yd for w, h, xd, yd in whd_list]
        ydescent = height - yoffsets[0]
        # Flip from top-down packing coordinates to y-up drawing
        # coordinates, measured relative to the box's descent.
        yoffsets = height - yoffsets

        yoffsets = yoffsets - ydescent

        return width + 2 * pad, height + 2 * pad, \
            xdescent + pad, ydescent + pad, \
            list(zip(xoffsets, yoffsets))
class HPacker(PackerBase):
    """
    The HPacker has its children packed horizontally. It automatically
    adjusts the relative positions of children at draw time.
    """
    def __init__(self, pad=None, sep=None, width=None, height=None,
                 align="baseline", mode="fixed",
                 children=None):
        """
        Parameters
        ----------
        pad : float, optional
            Boundary pad, in points (scaled with the renderer dpi at draw
            time).
        sep : float, optional
            Spacing between items, in points.
        width : float, optional
        height : float, optional
            Width and height of the container box, in pixels; calculated
            if `None`.
        align : str
            Alignment of boxes.
        mode : str
            Packing mode.
        """
        super(HPacker, self).__init__(pad, sep, width, height,
                                      align, mode, children)

    def get_extent_offsets(self, renderer):
        """
        Update the offsets of the children and return the extents of the box.
        """
        dpicor = renderer.points_to_pixels(1.)
        pad = self.pad * dpicor
        sep = self.sep * dpicor

        whd_list = [c.get_extent(renderer)
                    for c in self.get_visible_children()]

        if not whd_list:
            # No visible children: just the padding.
            return 2 * pad, 2 * pad, pad, pad, []

        # NOTE(review): this height is recomputed by _get_aligned_offsets
        # below, so these branches only matter for their side-effect-free
        # intermediate value -- confirm before relying on it.
        if self.height is None:
            height_descent = max([h - yd for w, h, xd, yd in whd_list])
            ydescent = max([yd for w, h, xd, yd in whd_list])
            height = height_descent + ydescent
        else:
            height = self.height - 2 * pad  # width w/o pad

        hd_list = [(h, yd) for w, h, xd, yd in whd_list]
        height, ydescent, yoffsets = _get_aligned_offsets(hd_list,
                                                          self.height,
                                                          self.align)

        pack_list = [(w, xd) for w, h, xd, yd in whd_list]
        width, xoffsets_ = _get_packed_offsets(pack_list, self.width,
                                               sep, self.mode)

        # xoffsets_ is a numpy array, so adding the list is elementwise.
        xoffsets = xoffsets_ + [xd for w, h, xd, yd in whd_list]

        # The box's xdescent is taken from its first child.
        xdescent = whd_list[0][2]
        xoffsets = xoffsets - xdescent

        return width + 2 * pad, height + 2 * pad, \
            xdescent + pad, ydescent + pad, \
            list(zip(xoffsets, yoffsets))
class PaddedBox(OffsetBox):
    """
    Decorate a single child box with padding and an optional frame
    (a `.FancyBboxPatch` drawn behind the child).
    """
    def __init__(self, child, pad=None, draw_frame=False, patch_attrs=None):
        """
        Parameters
        ----------
        child : `OffsetBox`
            The box to pad.
        pad : float, optional
            Boundary pad, in points; scaled with the renderer dpi at draw
            time (unlike *width* and *height*, which are in pixels).
        draw_frame : bool, optional
            Whether to draw the frame patch.
        patch_attrs : dict, optional
            Attribute updates applied to the frame patch.
        """
        super(PaddedBox, self).__init__()

        self.pad = pad
        self._children = [child]

        self.patch = FancyBboxPatch(
            xy=(0.0, 0.0), width=1., height=1.,
            facecolor='w', edgecolor='k',
            mutation_scale=1,  # self.prop.get_size_in_points(),
            snap=True
        )

        self.patch.set_boxstyle("square", pad=0)

        if patch_attrs is not None:
            self.patch.update(patch_attrs)

        self._drawFrame = draw_frame

    def get_extent_offsets(self, renderer):
        """
        Return the extents of the box and the (single) child offset.
        """
        dpicor = renderer.points_to_pixels(1.)
        pad = self.pad * dpicor

        w, h, xd, yd = self._children[0].get_extent(renderer)

        return w + 2 * pad, h + 2 * pad, \
            xd + pad, yd + pad, \
            [(0, 0)]

    def draw(self, renderer):
        """
        Update the location of the child if necessary, then draw the frame
        and the child with the given *renderer*.
        """
        width, height, xdescent, ydescent, offsets = self.get_extent_offsets(
            renderer)

        px, py = self.get_offset(width, height, xdescent, ydescent, renderer)

        for c, (ox, oy) in zip(self.get_visible_children(), offsets):
            c.set_offset((px + ox, py + oy))

        # Frame first so it sits behind the child.
        self.draw_frame(renderer)

        for c in self.get_visible_children():
            c.draw(renderer)

        self.stale = False

    def update_frame(self, bbox, fontsize=None):
        # Fit the frame patch to *bbox*; *fontsize* rescales the box-style
        # mutation.
        self.patch.set_bounds(bbox.x0, bbox.y0,
                              bbox.width, bbox.height)

        if fontsize:
            self.patch.set_mutation_scale(fontsize)
        self.stale = True

    def draw_frame(self, renderer):
        # update the location and size of the frame patch
        bbox = self.get_window_extent(renderer)
        self.update_frame(bbox)

        if self._drawFrame:
            self.patch.draw(renderer)
class DrawingArea(OffsetBox):
    """
    The DrawingArea can contain any Artist as a child. The DrawingArea
    has a fixed width and height. The position of children relative to
    the parent is fixed. The children can be clipped at the
    boundaries of the parent.
    """
    def __init__(self, width, height, xdescent=0.,
                 ydescent=0., clip=False):
        """
        Parameters
        ----------
        width, height : float
            Width and height of the container box.
        xdescent, ydescent : float, optional
            Descent of the box in the x- and y-direction.
        clip : bool, optional
            Whether to clip the children to the box boundary.
        """
        super(DrawingArea, self).__init__()

        self.width = width
        self.height = height
        self.xdescent = xdescent
        self.ydescent = ydescent
        self._clip_children = clip

        # Translation in display space, applied after the dpi scaling
        # (see get_transform).
        self.offset_transform = mtransforms.Affine2D()
        self.offset_transform.clear()
        self.offset_transform.translate(0, 0)
        self.dpi_transform = mtransforms.Affine2D()

    @property
    def clip_children(self):
        """
        If the children of this DrawingArea should be clipped
        by the DrawingArea bounding box.
        """
        return self._clip_children

    @clip_children.setter
    def clip_children(self, val):
        self._clip_children = bool(val)
        self.stale = True

    def get_transform(self):
        """
        Return the :class:`~matplotlib.transforms.Transform` applied
        to the children: dpi scaling followed by the offset translation.
        """
        return self.dpi_transform + self.offset_transform

    def set_transform(self, t):
        """
        set_transform is ignored.
        """
        pass

    def set_offset(self, xy):
        """
        Set the offset of the container.

        Accepts an (x, y) coordinate tuple in display units.
        """
        self._offset = xy

        self.offset_transform.clear()
        self.offset_transform.translate(xy[0], xy[1])
        self.stale = True

    def get_offset(self):
        """
        Return the offset of the container.
        """
        return self._offset

    def get_window_extent(self, renderer):
        '''
        Return the bounding box in display space.
        '''
        w, h, xd, yd = self.get_extent(renderer)
        ox, oy = self.get_offset()

        return mtransforms.Bbox.from_bounds(ox - xd, oy - yd, w, h)

    def get_extent(self, renderer):
        """
        Return the (width, height, xdescent, ydescent) of the box,
        scaled from points to pixels.
        """
        dpi_cor = renderer.points_to_pixels(1.)
        return self.width * dpi_cor, self.height * dpi_cor, \
            self.xdescent * dpi_cor, self.ydescent * dpi_cor

    def add_artist(self, a):
        'Add any :class:`~matplotlib.artist.Artist` to the container box'
        self._children.append(a)
        if not a.is_transform_set():
            a.set_transform(self.get_transform())
        if self.axes is not None:
            a.axes = self.axes
        fig = self.figure
        if fig is not None:
            a.set_figure(fig)

    def draw(self, renderer):
        """
        Draw the children, applying the dpi scaling for this renderer.
        """

        dpi_cor = renderer.points_to_pixels(1.)
        self.dpi_transform.clear()
        self.dpi_transform.scale(dpi_cor, dpi_cor)

        # At this point the DrawingArea has a transform
        # to the display space so the path created is
        # good for clipping children
        tpath = mtransforms.TransformedPath(
            mpath.Path([[0, 0], [0, self.height],
                        [self.width, self.height],
                        [self.width, 0]]),
            self.get_transform())
        for c in self._children:
            # Only force a clip path on children that have none of their own.
            if self._clip_children and not (c.clipbox or c._clippath):
                c.set_clip_path(tpath)
            c.draw(renderer)

        bbox_artist(self, renderer, fill=False, props=dict(pad=0.))
        self.stale = False
class TextArea(OffsetBox):
    """
    The TextArea contains a single Text instance. The text is
    placed at (0,0) with baseline+left alignment. The width and height
    of the TextArea instance is the width and height of its child
    text.
    """

    def __init__(self, s,
                 textprops=None,
                 multilinebaseline=None,
                 minimumdescent=True,
                 ):
        """
        Parameters
        ----------
        s : str
            a string to be displayed.

        textprops : `~matplotlib.font_manager.FontProperties`, optional

        multilinebaseline : bool, optional
            If `True`, baseline for multiline text is adjusted so that
            it is (approximately) center-aligned with singleline
            text.

        minimumdescent : bool, optional
            If `True`, the box has a minimum descent of "p".
        """
        if textprops is None:
            textprops = {}

        # Default to baseline alignment so the text sits at (0, 0).
        if "va" not in textprops:
            textprops["va"] = "baseline"

        self._text = mtext.Text(0, 0, s, **textprops)

        OffsetBox.__init__(self)

        self._children = [self._text]

        # offset_transform places the box; _baseline_transform applies the
        # extra vertical shift computed in get_extent() for multiline text.
        self.offset_transform = mtransforms.Affine2D()
        self.offset_transform.clear()
        self.offset_transform.translate(0, 0)
        self._baseline_transform = mtransforms.Affine2D()
        self._text.set_transform(self.offset_transform +
                                 self._baseline_transform)

        self._multilinebaseline = multilinebaseline
        self._minimumdescent = minimumdescent

    def set_text(self, s):
        "Set the text of this area as a string."
        self._text.set_text(s)
        self.stale = True

    def get_text(self):
        "Returns the string representation of this area's text"
        return self._text.get_text()

    def set_multilinebaseline(self, t):
        """
        Set multilinebaseline.

        If True, baseline for multiline text is
        adjusted so that it is (approximately) center-aligned with
        singleline text.
        """
        self._multilinebaseline = t
        self.stale = True

    def get_multilinebaseline(self):
        """
        get multilinebaseline.
        """
        return self._multilinebaseline

    def set_minimumdescent(self, t):
        """
        Set minimumdescent.

        If True, extent of the single line text is adjusted so that
        it has minimum descent of "p"
        """
        self._minimumdescent = t
        self.stale = True

    def get_minimumdescent(self):
        """
        get minimumdescent.
        """
        return self._minimumdescent

    def set_transform(self, t):
        """
        set_transform is ignored.
        """
        # The text transform is fixed to offset + baseline transforms.
        pass

    def set_offset(self, xy):
        """
        set offset of the container.

        Accept : tuple of x,y coordinates in display units.
        """
        self._offset = xy

        self.offset_transform.clear()
        self.offset_transform.translate(xy[0], xy[1])
        self.stale = True

    def get_offset(self):
        """
        return offset of the container.
        """
        return self._offset

    def get_window_extent(self, renderer):
        '''
        get the bounding box in display space.
        '''
        w, h, xd, yd = self.get_extent(renderer)
        ox, oy = self.get_offset()  # w, h, xd, yd)
        return mtransforms.Bbox.from_bounds(ox - xd, oy - yd, w, h)

    def get_extent(self, renderer):
        """
        Return width, height, xdescent, ydescent of the laid-out text,
        updating the baseline transform as a side effect.
        """
        clean_line, ismath = self._text.is_math_text(self._text._text)

        # "lp" samples both an ascender and a descender, giving the
        # reference height (h_) and descent (d_) of a single text line.
        _, h_, d_ = renderer.get_text_width_height_descent(
            "lp", self._text._fontproperties, ismath=False)

        bbox, info, d = self._text._get_layout(renderer)
        w, h = bbox.width, bbox.height

        line = info[-1][0]  # last line

        self._baseline_transform.clear()

        if len(info) > 1 and self._multilinebaseline:
            # Shift so the multiline block appears (approximately)
            # center-aligned with single-line text.
            d_new = 0.5 * h - 0.5 * (h_ - d_)
            self._baseline_transform.translate(0, d - d_new)
            d = d_new

        else:  # single line
            h_d = max(h_ - d_, h - d)

            if self.get_minimumdescent():
                # to have a minimum descent, i.e., "l" and "p" have same
                # descents.
                d = max(d, d_)

            h = h_d + d

        return w, h, 0., d

    def draw(self, renderer):
        """
        Draw the children
        """
        self._text.draw(renderer)

        bbox_artist(self, renderer, fill=False, props=dict(pad=0.))
        self.stale = False
class AuxTransformBox(OffsetBox):
    """
    Offset Box with the aux_transform. Its children will be
    transformed with the aux_transform first then will be
    offsetted. The absolute coordinate of the aux_transform is meaningless
    as it will be automatically adjusted so that the lower-left corner
    of the bounding box of children will be set to (0,0) before the
    offset transform.

    It is similar to drawing area, except that the extent of the box
    is not predetermined but calculated from the window extent of its
    children. Furthermore, the extent of the children will be
    calculated in the transformed coordinate.
    """
    def __init__(self, aux_transform):
        self.aux_transform = aux_transform
        OffsetBox.__init__(self)

        self.offset_transform = mtransforms.Affine2D()
        self.offset_transform.clear()
        self.offset_transform.translate(0, 0)

        # ref_offset_transform is used to make the offset_transform
        # always reference the lower-left corner of the bbox of its
        # children.
        self.ref_offset_transform = mtransforms.Affine2D()
        self.ref_offset_transform.clear()

    def add_artist(self, a):
        'Add any :class:`~matplotlib.artist.Artist` to the container box'
        self._children.append(a)
        a.set_transform(self.get_transform())
        self.stale = True

    def get_transform(self):
        """
        Return the :class:`~matplotlib.transforms.Transform` applied
        to the children
        """
        # Order matters: children are first transformed by aux_transform,
        # then normalized to the lower-left corner, then offset.
        return self.aux_transform + \
            self.ref_offset_transform + \
            self.offset_transform

    def set_transform(self, t):
        """
        set_transform is ignored.
        """
        pass

    def set_offset(self, xy):
        """
        set offset of the container.

        Accept : tuple of x,y coordinates in display units.
        """
        self._offset = xy

        self.offset_transform.clear()
        self.offset_transform.translate(xy[0], xy[1])
        self.stale = True

    def get_offset(self):
        """
        return offset of the container.
        """
        return self._offset

    def get_window_extent(self, renderer):
        '''
        get the bounding box in display space.
        '''
        w, h, xd, yd = self.get_extent(renderer)
        ox, oy = self.get_offset()  # w, h, xd, yd)
        return mtransforms.Bbox.from_bounds(ox - xd, oy - yd, w, h)

    def get_extent(self, renderer):
        """
        Return width, height, xdescent, ydescent of the union of the
        children's window extents (in the aux-transformed coordinates).
        """
        # clear the offset transforms
        _off = self.offset_transform.to_values()  # to be restored later
        self.ref_offset_transform.clear()
        self.offset_transform.clear()
        # calculate the extent
        bboxes = [c.get_window_extent(renderer) for c in self._children]
        ub = mtransforms.Bbox.union(bboxes)
        # adjust ref_offset_transform so children start at (0, 0)
        self.ref_offset_transform.translate(-ub.x0, -ub.y0)
        # restore offset transform
        mtx = self.offset_transform.matrix_from_values(*_off)
        self.offset_transform.set_matrix(mtx)

        return ub.width, ub.height, 0., 0.

    def draw(self, renderer):
        """
        Draw the children
        """
        for c in self._children:
            c.draw(renderer)

        bbox_artist(self, renderer, fill=False, props=dict(pad=0.))
        self.stale = False
class AnchoredOffsetbox(OffsetBox):
    """
    An offset box placed according to the legend location
    loc. AnchoredOffsetbox has a single child. When multiple children
    are needed, use another OffsetBox class to enclose them. By default,
    the offset box is anchored against its parent axes. You may
    explicitly specify the bbox_to_anchor.
    """
    zorder = 5  # zorder of the legend

    def __init__(self, loc,
                 pad=0.4, borderpad=0.5,
                 child=None, prop=None, frameon=True,
                 bbox_to_anchor=None,
                 bbox_transform=None,
                 **kwargs):
        """
        loc is a string or an integer specifying the legend location.
        The valid location codes are::

            'upper right'  : 1,
            'upper left'   : 2,
            'lower left'   : 3,
            'lower right'  : 4,
            'right'        : 5,
            'center left'  : 6,
            'center right' : 7,
            'lower center' : 8,
            'upper center' : 9,
            'center'       : 10,

        pad : pad around the child for drawing a frame. given in
            fraction of fontsize.
        borderpad : pad between offsetbox frame and the bbox_to_anchor,
        child : OffsetBox instance that will be anchored.
        prop : font property. This is only used as a reference for paddings.
        frameon : draw a frame box if True.
        bbox_to_anchor : bbox to anchor. Use self.axes.bbox if None.
        bbox_transform : with which the bbox_to_anchor will be transformed.
        """
        super(AnchoredOffsetbox, self).__init__(**kwargs)

        self.set_bbox_to_anchor(bbox_to_anchor, bbox_transform)
        self.set_child(child)
        self.loc = loc
        self.borderpad = borderpad
        self.pad = pad

        if prop is None:
            self.prop = FontProperties(size=rcParams["legend.fontsize"])
        elif isinstance(prop, dict):
            self.prop = FontProperties(**prop)
            # A dict without an explicit size falls back to the rc default.
            if "size" not in prop:
                self.prop.set_size(rcParams["legend.fontsize"])
        else:
            self.prop = prop

        # Placeholder frame patch; its real bounds are set lazily in
        # draw() / update_frame() once the final location is known.
        self.patch = FancyBboxPatch(
            xy=(0.0, 0.0), width=1., height=1.,
            facecolor='w', edgecolor='k',
            mutation_scale=self.prop.get_size_in_points(),
            snap=True
        )
        self.patch.set_boxstyle("square", pad=0)
        self._drawFrame = frameon

    def set_child(self, child):
        "set the child to be anchored"
        self._child = child
        if child is not None:
            child.axes = self.axes
        self.stale = True

    def get_child(self):
        "return the child"
        return self._child

    def get_children(self):
        "return the list of children"
        return [self._child]

    def get_extent(self, renderer):
        """
        return the extent of the artist. The extent of the child
        added with the pad is returned
        """
        w, h, xd, yd = self.get_child().get_extent(renderer)
        fontsize = renderer.points_to_pixels(self.prop.get_size_in_points())
        pad = self.pad * fontsize

        return w + 2 * pad, h + 2 * pad, xd + pad, yd + pad

    def get_bbox_to_anchor(self):
        """
        return the bbox that the legend will be anchored
        """
        if self._bbox_to_anchor is None:
            return self.axes.bbox
        else:
            transform = self._bbox_to_anchor_transform
            if transform is None:
                return self._bbox_to_anchor
            else:
                return TransformedBbox(self._bbox_to_anchor,
                                       transform)

    def set_bbox_to_anchor(self, bbox, transform=None):
        """
        set the bbox that the child will be anchored.

        *bbox* can be a Bbox instance, a list of [left, bottom, width,
        height], or a list of [left, bottom] where the width and
        height will be assumed to be zero. The bbox will be
        transformed to display coordinate by the given transform.
        """
        if bbox is None or isinstance(bbox, BboxBase):
            self._bbox_to_anchor = bbox
        else:
            try:
                l = len(bbox)
            except TypeError:
                raise ValueError("Invalid argument for bbox : %s" % str(bbox))

            # A bare (left, bottom) point anchors with zero extent.
            if l == 2:
                bbox = [bbox[0], bbox[1], 0, 0]

            self._bbox_to_anchor = Bbox.from_bounds(*bbox)

        self._bbox_to_anchor_transform = transform
        self.stale = True

    def get_window_extent(self, renderer):
        '''
        get the bounding box in display space.
        '''
        # The offset func depends on the renderer dpi, so refresh it first.
        self._update_offset_func(renderer)
        w, h, xd, yd = self.get_extent(renderer)
        ox, oy = self.get_offset(w, h, xd, yd, renderer)
        return Bbox.from_bounds(ox - xd, oy - yd, w, h)

    def _update_offset_func(self, renderer, fontsize=None):
        """
        Update the offset func which depends on the dpi of the
        renderer (because of the padding).
        """
        if fontsize is None:
            fontsize = renderer.points_to_pixels(
                self.prop.get_size_in_points())

        def _offset(w, h, xd, yd, renderer, fontsize=fontsize, self=self):
            # Anchor a (w, h) box inside bbox_to_anchor (honouring
            # borderpad) and return the offset of the box's origin.
            bbox = Bbox.from_bounds(0, 0, w, h)
            borderpad = self.borderpad * fontsize
            bbox_to_anchor = self.get_bbox_to_anchor()

            x0, y0 = self._get_anchored_bbox(self.loc,
                                             bbox,
                                             bbox_to_anchor,
                                             borderpad)
            return x0 + xd, y0 + yd

        self.set_offset(_offset)

    def update_frame(self, bbox, fontsize=None):
        """Resize the frame patch to *bbox*, optionally rescaling its style."""
        self.patch.set_bounds(bbox.x0, bbox.y0,
                              bbox.width, bbox.height)

        if fontsize:
            self.patch.set_mutation_scale(fontsize)

    def draw(self, renderer):
        "draw the artist"

        if not self.get_visible():
            return

        fontsize = renderer.points_to_pixels(self.prop.get_size_in_points())
        self._update_offset_func(renderer, fontsize)

        if self._drawFrame:
            # update the location and size of the legend
            bbox = self.get_window_extent(renderer)
            self.update_frame(bbox, fontsize)
            self.patch.draw(renderer)

        width, height, xdescent, ydescent = self.get_extent(renderer)

        px, py = self.get_offset(width, height, xdescent, ydescent, renderer)

        self.get_child().set_offset((px, py))
        self.get_child().draw(renderer)
        self.stale = False

    def _get_anchored_bbox(self, loc, bbox, parentbbox, borderpad):
        """
        return the position of the bbox anchored at the parentbbox
        with the loc code, with the borderpad.
        """
        assert loc in range(1, 11)  # called only internally

        BEST, UR, UL, LL, LR, R, CL, CR, LC, UC, C = list(xrange(11))

        # Map the numeric legend loc code to a compass anchor point.
        anchor_coefs = {UR: "NE",
                        UL: "NW",
                        LL: "SW",
                        LR: "SE",
                        R: "E",
                        CL: "W",
                        CR: "E",
                        LC: "S",
                        UC: "N",
                        C: "C"}

        c = anchor_coefs[loc]

        # Shrink the parent bbox by borderpad, then anchor inside it.
        container = parentbbox.padded(-borderpad)
        anchored_box = bbox.anchored(c, container=container)
        return anchored_box.x0, anchored_box.y0
class AnchoredText(AnchoredOffsetbox):
    """
    AnchoredOffsetbox with Text.
    """
    def __init__(self, s, loc, pad=0.4, borderpad=0.5, prop=None, **kwargs):
        """
        Parameters
        ----------
        s : string
            Text.

        loc : str
            Location code.

        pad : float, optional
            Pad between the text and the frame as fraction of the font
            size.

        borderpad : float, optional
            Pad between the frame and the axes (or *bbox_to_anchor*).

        prop : `matplotlib.font_manager.FontProperties`
            Font properties.

        Notes
        -----
        Other keyword parameters of `AnchoredOffsetbox` are also
        allowed.
        """

        if prop is None:
            prop = {}
        # Alignment is controlled by the anchoring machinery; explicit
        # alignment keys in *prop* would silently conflict with it.
        # Iterating the dict directly replaces the former
        # six.iterkeys()/list() round-trip with identical behavior.
        badkwargs = ('ha', 'horizontalalignment', 'va', 'verticalalignment')
        if set(badkwargs) & set(prop):
            warnings.warn("Mixing horizontalalignment or verticalalignment "
                          "with AnchoredText is not supported.")

        self.txt = TextArea(s, textprops=prop,
                            minimumdescent=False)
        fp = self.txt._text.get_fontproperties()
        super(AnchoredText, self).__init__(loc, pad=pad, borderpad=borderpad,
                                           child=self.txt,
                                           prop=fp,
                                           **kwargs)
class OffsetImage(OffsetBox):
    """
    An OffsetBox holding a single image (a `BboxImage`) scaled by a
    zoom factor and optionally corrected for the renderer dpi.
    """

    def __init__(self, arr,
                 zoom=1,
                 cmap=None,
                 norm=None,
                 interpolation=None,
                 origin=None,
                 filternorm=1,
                 filterrad=4.0,
                 resample=False,
                 dpi_cor=True,
                 **kwargs
                 ):
        """
        Parameters
        ----------
        arr : array-like
            The image data.
        zoom : float, optional
            Zoom factor applied to the image extent.
        dpi_cor : bool, optional
            If True, the extent is corrected for the renderer dpi.

        Remaining parameters are forwarded to `BboxImage`.
        """
        OffsetBox.__init__(self)
        self._dpi_cor = dpi_cor

        # The bbox is passed as a callable so the image always tracks the
        # current window extent of this box at draw time.
        self.image = BboxImage(bbox=self.get_window_extent,
                               cmap=cmap,
                               norm=norm,
                               interpolation=interpolation,
                               origin=origin,
                               filternorm=filternorm,
                               filterrad=filterrad,
                               resample=resample,
                               **kwargs
                               )

        self._children = [self.image]

        self.set_zoom(zoom)
        self.set_data(arr)

    def set_data(self, arr):
        """Set the image array and mark the box stale."""
        self._data = np.asarray(arr)
        self.image.set_data(self._data)
        self.stale = True

    def get_data(self):
        """Return the image data array."""
        return self._data

    def set_zoom(self, zoom):
        """Set the zoom factor applied to the image extent."""
        self._zoom = zoom
        self.stale = True

    def get_zoom(self):
        """Return the zoom factor."""
        return self._zoom

    def get_offset(self):
        """
        return offset of the container.
        """
        return self._offset

    def get_children(self):
        return [self.image]

    def get_window_extent(self, renderer):
        '''
        get the bounding box in display space.
        '''
        w, h, xd, yd = self.get_extent(renderer)
        ox, oy = self.get_offset()
        return mtransforms.Bbox.from_bounds(ox - xd, oy - yd, w, h)

    def get_extent(self, renderer):
        """Return width, height, xdescent, ydescent in pixels."""
        # Correct for the renderer dpi unless disabled at construction.
        dpi_cor = renderer.points_to_pixels(1.) if self._dpi_cor else 1.

        zoom = self.get_zoom()
        data = self.get_data()
        ny, nx = data.shape[:2]
        w, h = dpi_cor * nx * zoom, dpi_cor * ny * zoom

        return w, h, 0, 0

    def draw(self, renderer):
        """
        Draw the image child.
        """
        self.image.draw(renderer)
        self.stale = False
class AnnotationBbox(martist.Artist, _AnnotationBase):
    """
    Annotation-like class, but with offsetbox instead of Text.
    """

    zorder = 3

    def __str__(self):
        return "AnnotationBbox(%g,%g)" % (self.xy[0], self.xy[1])

    @docstring.dedent_interpd
    def __init__(self, offsetbox, xy,
                 xybox=None,
                 xycoords='data',
                 boxcoords=None,
                 frameon=True, pad=0.4,  # BboxPatch
                 annotation_clip=None,
                 box_alignment=(0.5, 0.5),
                 bboxprops=None,
                 arrowprops=None,
                 fontsize=None,
                 **kwargs):
        """
        *offsetbox* : OffsetBox instance

        *xycoords* : same as Annotation but can be a tuple of two
            strings which are interpreted as x and y coordinates.

        *boxcoords* : similar to textcoords as Annotation but can be a
            tuple of two strings which are interpreted as x and y
            coordinates.

        *box_alignment* : a tuple of two floats for a vertical and
            horizontal alignment of the offset box w.r.t. the *boxcoords*.
            The lower-left corner is (0, 0) and upper-right corner is (1, 1).

        other parameters are identical to that of Annotation.
        """
        martist.Artist.__init__(self, **kwargs)
        _AnnotationBase.__init__(self,
                                 xy,
                                 xycoords=xycoords,
                                 annotation_clip=annotation_clip)

        self.offsetbox = offsetbox

        self.arrowprops = arrowprops

        # Sets self.prop, used below for the frame's mutation scale.
        self.set_fontsize(fontsize)

        # The box defaults to sitting directly on the annotated point.
        if xybox is None:
            self.xybox = xy
        else:
            self.xybox = xybox

        if boxcoords is None:
            self.boxcoords = xycoords
        else:
            self.boxcoords = boxcoords

        if arrowprops is not None:
            # relpos is consumed here; the rest goes to FancyArrowPatch.
            self._arrow_relpos = self.arrowprops.pop("relpos", (0.5, 0.5))
            self.arrow_patch = FancyArrowPatch((0, 0), (1, 1),
                                               **self.arrowprops)
        else:
            self._arrow_relpos = None
            self.arrow_patch = None

        #self._fw, self._fh = 0., 0. # for alignment
        self._box_alignment = box_alignment

        # frame
        self.patch = FancyBboxPatch(
            xy=(0.0, 0.0), width=1., height=1.,
            facecolor='w', edgecolor='k',
            mutation_scale=self.prop.get_size_in_points(),
            snap=True
        )
        self.patch.set_boxstyle("square", pad=pad)
        if bboxprops:
            self.patch.set(**bboxprops)
        self._drawFrame = frameon

    @property
    def xyann(self):
        # Alias of xybox, mirroring Annotation's API.
        return self.xybox

    @xyann.setter
    def xyann(self, xyann):
        self.xybox = xyann
        self.stale = True

    @property
    def anncoords(self):
        # Alias of boxcoords, mirroring Annotation's API.
        return self.boxcoords

    @anncoords.setter
    def anncoords(self, coords):
        self.boxcoords = coords
        self.stale = True

    def contains(self, event):
        """Return whether *event* hits the offsetbox child."""
        t, tinfo = self.offsetbox.contains(event)
        #if self.arrow_patch is not None:
        #    a,ainfo=self.arrow_patch.contains(event)
        #    t = t or a

        # self.arrow_patch is currently not checked as this can be a line - JJ

        return t, tinfo

    def get_children(self):
        children = [self.offsetbox, self.patch]
        if self.arrow_patch:
            children.append(self.arrow_patch)
        return children

    def set_figure(self, fig):
        # Propagate the figure to the owned artists before self.
        if self.arrow_patch is not None:
            self.arrow_patch.set_figure(fig)
        self.offsetbox.set_figure(fig)
        martist.Artist.set_figure(self, fig)

    def set_fontsize(self, s=None):
        """
        set fontsize in points
        """
        if s is None:
            s = rcParams["legend.fontsize"]

        self.prop = FontProperties(size=s)
        self.stale = True

    def get_fontsize(self, s=None):
        """
        return fontsize in points
        """
        return self.prop.get_size_in_points()

    def update_positions(self, renderer):
        """
        Update the pixel positions of the annotated point and the text.
        """
        xy_pixel = self._get_position_xy(renderer)
        self._update_position_xybox(renderer, xy_pixel)

        mutation_scale = renderer.points_to_pixels(self.get_fontsize())
        self.patch.set_mutation_scale(mutation_scale)

        if self.arrow_patch:
            self.arrow_patch.set_mutation_scale(mutation_scale)

    def _update_position_xybox(self, renderer, xy_pixel):
        """
        Update the pixel positions of the annotation text and the arrow
        patch.
        """

        x, y = self.xybox
        if isinstance(self.boxcoords, tuple):
            # Separate coordinate systems for x and y.
            xcoord, ycoord = self.boxcoords
            x1, y1 = self._get_xy(renderer, x, y, xcoord)
            x2, y2 = self._get_xy(renderer, x, y, ycoord)
            ox0, oy0 = x1, y2
        else:
            ox0, oy0 = self._get_xy(renderer, x, y, self.boxcoords)

        # Align the box on (ox0, oy0) according to box_alignment.
        w, h, xd, yd = self.offsetbox.get_extent(renderer)

        _fw, _fh = self._box_alignment
        self.offsetbox.set_offset((ox0 - _fw * w + xd, oy0 - _fh * h + yd))

        # update patch position
        bbox = self.offsetbox.get_window_extent(renderer)
        #self.offsetbox.set_offset((ox0-_fw*w, oy0-_fh*h))
        self.patch.set_bounds(bbox.x0, bbox.y0,
                              bbox.width, bbox.height)

        x, y = xy_pixel

        ox1, oy1 = x, y

        if self.arrowprops:
            x0, y0 = x, y

            d = self.arrowprops.copy()

            # Use FancyArrowPatch if self.arrowprops has "arrowstyle" key.

            # adjust the starting point of the arrow relative to
            # the textbox.
            # TODO : Rotation needs to be accounted.
            relpos = self._arrow_relpos

            ox0 = bbox.x0 + bbox.width * relpos[0]
            oy0 = bbox.y0 + bbox.height * relpos[1]

            # The arrow will be drawn from (ox0, oy0) to (ox1,
            # oy1). It will be first clipped by patchA and patchB.
            # Then it will be shrinked by shirnkA and shrinkB
            # (in points). If patch A is not set, self.bbox_patch
            # is used.

            self.arrow_patch.set_positions((ox0, oy0), (ox1, oy1))
            fs = self.prop.get_size_in_points()
            mutation_scale = d.pop("mutation_scale", fs)
            mutation_scale = renderer.points_to_pixels(mutation_scale)
            self.arrow_patch.set_mutation_scale(mutation_scale)

            patchA = d.pop("patchA", self.patch)
            self.arrow_patch.set_patchA(patchA)

    def draw(self, renderer):
        """
        Draw the :class:`Annotation` object to the given *renderer*.
        """

        if renderer is not None:
            self._renderer = renderer
        if not self.get_visible():
            return

        # Skip drawing entirely when the annotated point is clipped away.
        xy_pixel = self._get_position_xy(renderer)
        if not self._check_xy(renderer, xy_pixel):
            return

        self.update_positions(renderer)

        if self.arrow_patch is not None:
            if self.arrow_patch.figure is None and self.figure is not None:
                self.arrow_patch.figure = self.figure
            self.arrow_patch.draw(renderer)

        if self._drawFrame:
            self.patch.draw(renderer)

        self.offsetbox.draw(renderer)
        self.stale = False
class DraggableBase(object):
    """
    helper code for a draggable artist (legend, offsetbox)
    The derived class must override following two method.

      def saveoffset(self):
          pass

      def update_offset(self, dx, dy):
          pass

    *saveoffset* is called when the object is picked for dragging and it is
    meant to save reference position of the artist.

    *update_offset* is called during the dragging. dx and dy is the pixel
     offset from the point where the mouse drag started.

    Optionally you may override following two methods.

      def artist_picker(self, artist, evt):
          return self.ref_artist.contains(evt)

      def finalize_offset(self):
          pass

    *artist_picker* is a picker method that will be
    used. *finalize_offset* is called when the mouse is released. In
    current implementation of DraggableLegend and DraggableAnnotation,
    *update_offset* places the artists simply in display
    coordinates. And *finalize_offset* recalculates their position in
    the normalized axes coordinate and sets a relevant attribute.
    """

    def __init__(self, ref_artist, use_blit=False):
        self.ref_artist = ref_artist
        self.got_artist = False

        self.canvas = self.ref_artist.figure.canvas
        # Blitting is only used if both requested and supported.
        self._use_blit = use_blit and self.canvas.supports_blit

        # Pick/release callbacks live for the object's lifetime; the
        # motion callback (self._c1) is connected only while dragging.
        c2 = self.canvas.mpl_connect('pick_event', self.on_pick)
        c3 = self.canvas.mpl_connect('button_release_event', self.on_release)

        ref_artist.set_picker(self.artist_picker)
        self.cids = [c2, c3]

    def on_motion(self, evt):
        # Full redraw on every motion event (non-blit path).
        if self.got_artist:
            dx = evt.x - self.mouse_x
            dy = evt.y - self.mouse_y
            self.update_offset(dx, dy)
            self.canvas.draw()

    def on_motion_blit(self, evt):
        # Fast path: restore the saved background and blit only the artist.
        if self.got_artist:
            dx = evt.x - self.mouse_x
            dy = evt.y - self.mouse_y
            self.update_offset(dx, dy)
            self.canvas.restore_region(self.background)
            self.ref_artist.draw(self.ref_artist.figure._cachedRenderer)
            self.canvas.blit(self.ref_artist.figure.bbox)

    def on_pick(self, evt):
        if evt.artist == self.ref_artist:

            self.mouse_x = evt.mouseevent.x
            self.mouse_y = evt.mouseevent.y
            self.got_artist = True

            if self._use_blit:
                # Animate the artist so it is excluded from the background
                # snapshot, then capture the canvas without it.
                self.ref_artist.set_animated(True)
                self.canvas.draw()
                self.background = self.canvas.copy_from_bbox(
                    self.ref_artist.figure.bbox)
                self.ref_artist.draw(self.ref_artist.figure._cachedRenderer)
                self.canvas.blit(self.ref_artist.figure.bbox)
                self._c1 = self.canvas.mpl_connect('motion_notify_event',
                                                   self.on_motion_blit)
            else:
                self._c1 = self.canvas.mpl_connect('motion_notify_event',
                                                   self.on_motion)
            self.save_offset()

    def on_release(self, event):
        if self.got_artist:
            self.finalize_offset()
            self.got_artist = False
            # Stop receiving motion events until the next pick.
            self.canvas.mpl_disconnect(self._c1)

            if self._use_blit:
                self.ref_artist.set_animated(False)

    def disconnect(self):
        """disconnect the callbacks"""
        for cid in self.cids:
            self.canvas.mpl_disconnect(cid)
        # The motion callback only exists if a drag ever started.
        try:
            c1 = self._c1
        except AttributeError:
            pass
        else:
            self.canvas.mpl_disconnect(c1)

    def artist_picker(self, artist, evt):
        return self.ref_artist.contains(evt)

    def save_offset(self):
        pass

    def update_offset(self, dx, dy):
        pass

    def finalize_offset(self):
        pass
class DraggableOffsetBox(DraggableBase):
    """Make an `OffsetBox` draggable with the mouse."""

    def __init__(self, ref_artist, offsetbox, use_blit=False):
        DraggableBase.__init__(self, ref_artist, use_blit=use_blit)
        self.offsetbox = offsetbox

    def save_offset(self):
        """Record the box position (in pixels) at the start of a drag."""
        box = self.offsetbox
        renderer = box.figure._cachedRenderer
        w, h, xd, yd = box.get_extent(renderer)
        ox, oy = box.get_offset(w, h, xd, yd, renderer)
        self.offsetbox_x, self.offsetbox_y = ox, oy
        # Pin the box at its current location as a plain coordinate.
        box.set_offset((ox, oy))

    def update_offset(self, dx, dy):
        """Move the box by (dx, dy) pixels relative to the drag origin."""
        self.offsetbox.set_offset((self.offsetbox_x + dx,
                                   self.offsetbox_y + dy))

    def get_loc_in_canvas(self):
        """Return the lower-left corner of the box in canvas pixels."""
        box = self.offsetbox
        renderer = box.figure._cachedRenderer
        w, h, xd, yd = box.get_extent(renderer)
        ox, oy = box._offset
        return ox - xd, oy - yd
class DraggableAnnotation(DraggableBase):
    """Make an annotation draggable with the mouse."""

    def __init__(self, annotation, use_blit=False):
        DraggableBase.__init__(self, annotation, use_blit=use_blit)
        self.annotation = annotation

    def save_offset(self):
        """Store the annotation position in display coordinates."""
        annotation = self.annotation
        self.ox, self.oy = annotation.get_transform().transform(
            annotation.xyann)

    def update_offset(self, dx, dy):
        """Shift the annotation by (dx, dy) pixels from the drag origin."""
        annotation = self.annotation
        trans = annotation.get_transform()
        annotation.xyann = trans.inverted().transform(
            (self.ox + dx, self.oy + dy))
if __name__ == "__main__":
    import matplotlib.pyplot as plt

    # Demo: place two images on an axes -- one directly via OffsetImage,
    # one wrapped in an AnnotationBbox with a frame and an arrow.
    fig = plt.figure(1)
    fig.clf()
    ax = plt.subplot(121)

    #txt = ax.text(0.5, 0.5, "Test", size=30, ha="center", color="w")
    kwargs = dict()

    # 16x16 greyscale test image with values in [0, 1).
    a = np.arange(256).reshape(16, 16) / 256.
    myimage = OffsetImage(a,
                          zoom=2,
                          norm=None,
                          origin=None,
                          **kwargs
                          )
    ax.add_artist(myimage)
    # Position is given in display (pixel) units.
    myimage.set_offset((100, 100))

    myimage2 = OffsetImage(a,
                           zoom=2,
                           norm=None,
                           origin=None,
                           **kwargs
                           )
    ann = AnnotationBbox(myimage2, (0.5, 0.5),
                         xybox=(30, 30),
                         xycoords='data',
                         boxcoords="offset points",
                         frameon=True, pad=0.4,  # BboxPatch
                         bboxprops=dict(boxstyle="round", fc="y"),
                         fontsize=None,
                         arrowprops=dict(arrowstyle="->"),
                         )

    ax.add_artist(ann)

    plt.draw()
    plt.show()
"""vis - Visualisation and plotting tools"""
from typing import Union, Sequence, Optional, List, Tuple
import sys
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from matplotlib.collections import RegularPolyCollection
from matplotlib.colors import LinearSegmentedColormap, ListedColormap
from .calc import stats, chunk_range, free_energy
from .calc import dist1d as calc_dist1D
from .io import read_multi, read_plumed
__all__ = ['fast', 'dist1D', 'dist2D', 'hexplot', 'histogram', 'dihedral',
'history', 'interactive', 'metai', 'rmsd', 'convergence']
def fast(filename: str,
         step: int=1,
         columns: Union[Sequence[int], Sequence[str], str, None]=None,
         start: int=0,
         stop: int=sys.maxsize,
         stat: bool=True,
         plot: bool=True) -> None:
    """
    Plot first column with every other column and show statistical information.

    Parameters
    ----------
    filename : Plumed file to read.
    step : Reads every step-th line instead of the whole file.
    columns : Column numbers or field names to read from file.
    start : Starting point in lines from beginning of file,
        including commented lines.
    stop : Stopping point in lines from beginning of file,
        including commented lines.
    stat : Show statistical information.
    plot : Plot Information.

    """
    if columns is not None:
        if isinstance(columns, str):
            columns = [columns]
        else:
            # Copy so the caller's sequence is not mutated by insert() below.
            columns = list(columns)
        if 'time' not in columns:
            columns.insert(0, 'time')

    data = read_multi(
        filename,
        columns=columns,
        step=step,
        start=start,
        stop=stop,
        dataframe=True
    )

    # read_multi() can return several 'time' columns (presumably one per
    # input file -- TODO confirm); keep only the first one.
    if len(data['time'].values.shape) > 1:
        time = data['time'].values[:, 0]
        data = data.drop(['time'], axis=1)
        data['time'] = time

    if stat:
        for s in stats(data.columns, data.values):
            print(s)

    if plot:
        fig = plt.figure(figsize=(16, 3 * len(data.columns)))
        i = 0
        for col in data.columns:
            if col == 'time':
                continue
            i += 1
            ax = fig.add_subplot(len(data.columns) // 2 + 1, 2, i)
            ax.plot(data['time'], data[col])
            ax.set_xlabel('time')
            ax.set_ylabel(col)
def hexplot(
        ax: plt.Axes,
        grid: np.ndarray,
        data: np.ndarray,
        hex_size: float=11.5,
        cmap: str='viridis'
) -> plt.Axes:
    """
    Plot grid and data on a hexagon grid. Useful for SOMs.

    Parameters
    ----------
    ax : Axes to plot on.
    grid : Array of (x, y) tuples.
    data : Array of len(grid) with datapoint.
    hex_size : Radius in points determining the hexagon size.
    cmap : Colormap to use for colouring.

    Returns
    -------
    ax : Axes with hexagon plot.

    """
    # One hexagon per grid point, coloured by the associated data value.
    hexagons = RegularPolyCollection(
        numsides=6,
        sizes=(2 * np.pi * hex_size ** 2,),
        edgecolors=(0, 0, 0, 0),
        transOffset=ax.transData,
        offsets=grid,
        array=data,
        cmap=plt.get_cmap(cmap),
    )

    # Scale the plot properly and hide the axes decorations.
    ax.add_collection(hexagons, autolim=True)
    xs, ys = grid[:, 0], grid[:, 1]
    ax.set_xlim(xs.min() - 0.75, xs.max() + 0.75)
    ax.set_ylim(ys.min() - 0.75, ys.max() + 0.75)
    ax.axis('off')
    return ax
def dihedral(cvdata: pd.DataFrame,
             cmap: Optional[ListedColormap]=None) -> plt.Figure:
    """
    Plot dihedral angle data as an eventplot.

    Parameters
    ----------
    cvdata : Dataframe with angle data only.
    cmap : Colormap to use. If None, a custom periodic colormap is
        built so that -pi and +pi map to the same colour.

    Returns
    -------
    fig : Figure with drawn events.

    """
    # Custom periodic colormap
    if cmap is None:

        # Define some colors
        blue = '#2971B1'
        red = '#B92732'
        white = '#F7F6F6'
        black = '#3B3B3B'

        # Define the colormap: endpoints share the same colour (black) so
        # periodic angle data wraps smoothly.
        periodic = LinearSegmentedColormap.from_list(
            'periodic',
            [black, red, red, white, blue, blue, black],
            N=2560,
            gamma=1
        )
        cmap = periodic

    # Setup plot: one row (strip) per angle column.
    fig, axes = plt.subplots(figsize=(16, 32), nrows=cvdata.shape[1])
    fig.subplots_adjust(top=0.95, bottom=0.01, left=0.2, right=0.99, hspace=0)
    for ax, col in zip(axes, cvdata.columns):

        # No interpolation because we want discrete lines
        ax.imshow(np.atleast_2d(cvdata[col]), aspect='auto',
                  cmap=cmap, interpolation='none')

        # Create labels, placed in figure coordinates left of each strip.
        pos = list(ax.get_position().bounds)
        x_text = pos[0] - 0.01
        y_text = pos[1] + pos[3]/2.
        fig.text(x_text, y_text, col, va='center', ha='right', fontsize=10)

        # Remove clutter
        ax.set_xticks([])
        ax.set_yticks([])
        ax.set_xticklabels([])
        ax.set_yticklabels([])

    return fig
def history(cvdata: pd.DataFrame) -> None:
    """
    Plot CV history as a 2D scatter plot.

    Parameters
    ----------
    cvdata : Dataframe with time as first column and CV values for the rest.

    """
    fig = plt.figure(figsize=(16, 3 * len(cvdata.columns) // 3))
    # Use an explicit 1-based counter for the subplot index: the previous
    # enumerate index produced an invalid index 0 (and gaps) whenever
    # 'time' was not the first column.
    nplot = 0
    for col in cvdata.columns:
        if col == 'time':
            continue
        nplot += 1
        ax = fig.add_subplot(len(cvdata.columns) // 3 + 1, 3, nplot)
        ax.plot(cvdata['time'], cvdata[col], 'o')
        ax.set_xlabel('time')
        ax.set_ylabel(col)
    plt.tight_layout()
def histogram(cvdata: pd.DataFrame,
              cv_min: Optional[Sequence[float]]=None,
              cv_max: Optional[Sequence[float]]=None,
              time: Optional[float]=None,
              nchunks: int=3,
              nbins: int=50) -> None:
    """
    Plots histograms of CVs in predefined chunks.

    Parameters
    ----------
    cvdata : Dataframe with time as first column and CV values for the rest.
    cv_min : Minimum possible values for CVs.
    cv_max : Maximum possible values for CVs.
    time : If given, timespan of the first histogram,
        equal to other chunks otherwise.
    nchunks : Number of histograms to plot.
    nbins : Number of bins to use for the histogram.

    """
    if cv_min is None or cv_max is None:
        cv_min = [cvdata[cv].values.min() for cv in cvdata.columns]
        cv_max = [cvdata[cv].values.max() for cv in cvdata.columns]
    plot_chunks = nchunks
    chunks = chunk_range(cvdata['time'].values[0],
                         cvdata['time'].values[-2],
                         nchunks, time)
    fig = plt.figure(figsize=(16, 3 * len(cvdata.columns)))
    for i, col in enumerate(cvdata.columns):
        if col == 'time':
            continue
        # NOTE(review): cv_min/cv_max are indexed with i - 1, which only
        # lines up when 'time' is the first column -- confirm with callers.
        for j, time in enumerate(chunks):
            ax = fig.add_subplot(len(cvdata.columns),
                                 plot_chunks,
                                 nchunks * i + j + 1)
            # Each chunk histograms all data up to its end time.
            # np.histogram's 'normed' keyword was deprecated and later
            # removed from numpy; density=False is the exact equivalent.
            hist, bins = np.histogram(cvdata[cvdata['time'] < time][col],
                                      range=(cv_min[i - 1], cv_max[i - 1]),
                                      bins=nbins, density=False)
            center = (bins[:-1] + bins[1:]) / 2
            width = (abs(cv_min[i - 1]) + abs(cv_max[i - 1])) / nbins
            ax.bar(center, hist, width=width, align='center')
            ax.set_xlabel(col)
            ax.set_xlim(cv_min[i - 1], cv_max[i - 1])
    plt.tight_layout()
def convergence(
        hills: pd.DataFrame,
        summed_hills: pd.DataFrame,
        time: int,
        kbt: float,
        factor: float=1.0,
        constant: float=0.0
) -> plt.Figure:
    """
    Estimate convergence by comparing CV histograms and sum_hills output.
    Parameters
    ----------
    hills : Hills files to be passed to PLUMED, globbing allowed.
    summed_hills : Output from io.sum_hills().
    time : The minimum time to use from the histogram.
    kbt : k_B * T for the simulation as output by PLUMED.
    factor : Factor to rescale the FES.
    constant : Constant to translate the FES.
    Returns
    -------
    fig : Matplotlib figure.
    """
    # Histogram-based free-energy estimate from the late part of the run
    late_hills = hills[hills['time'] > time]
    distribution, edges = calc_dist1D(late_hills)
    fes = factor * free_energy(distribution, kbt) + constant
    # Align column names so both estimates can be plotted per CV
    summed_hills.columns = hills.columns.drop('time')
    # sum_hills binning can be inconsistent; trim to the histogram length
    if summed_hills.shape[0] > fes.shape[0]:
        summed_hills = summed_hills[:fes.shape[0]]
    n_cv = len(fes.columns)
    fig = plt.figure(figsize=(16, 4 * (n_cv // 2 + 1)))
    for panel, cv in enumerate(fes.columns, start=1):
        ax = fig.add_subplot(n_cv // 2 + 1, 2, panel)
        ax.plot(edges[cv], fes[cv], label='histogram')
        ax.plot(edges[cv], summed_hills[cv], label='sum_hills')
        ax.set_xlabel(cv)
        ax.legend()
    return fig
def dist1D(dist: pd.DataFrame,
           ranges: pd.DataFrame,
           grouper: Optional[str]=None,
           nx: Optional[int]=2,
           size: Optional[Tuple[int, int]]=(8, 6)) -> plt.Figure:
    """
    Plot 1D probability distributions.
    Parameters
    ----------
    dist : Multiindexed dataframe with force field as primary
           index and distributions as created by dist1D().
    ranges : Multiindexed dataframe with force field as primary
             index and edges as created by dist1D().
    grouper : Primary index to use for plotting multiple lines.
    nx : Number of plots per row.
    size : Relative size of each plot.
    Returns
    -------
    fig : matplotlib figure.
    """
    grouped = grouper is not None
    # Column set is identical across groups, so peek at the first group when
    # grouping is requested.
    if grouped:
        _, first_group = next(iter(dist.groupby(level=[grouper])))
        cols = first_group.columns
    else:
        cols = dist.columns
    n_panels = len(cols)
    n_cols, n_rows = nx, n_panels // nx + 1
    fig = plt.figure(figsize=(n_cols * size[0], n_rows * size[1]))
    for panel, cv in enumerate(cols, start=1):
        ax = fig.add_subplot(n_rows, n_cols, panel)
        ax.set_xlabel(cv)
        if grouped:
            # One line per group, drawn on shared axes
            pairs = zip(dist.groupby(level=[grouper]),
                        ranges.groupby(level=[grouper]))
            for (label, sub_dist), (_, sub_range) in pairs:
                ax.plot(sub_range[cv], sub_dist[cv], label=label, linewidth=2)
            ax.legend(loc=2, framealpha=0.75)
        else:
            ax.plot(ranges[cv], dist[cv], linewidth=2)
    return fig
def dist2D(dist: pd.DataFrame,
           ranges: pd.DataFrame,
           nlevels: int=16,
           nx: int=2,
           size: int=6,
           colorbar: bool=True,
           name: str='dist') -> plt.Figure:
    """
    Plot 2D probability distributions.
    Parameters
    ----------
    dist : Multiindexed dataframe with force field as primary
           index and distributions as created by dist2D().
           Column names are expected to encode the CV pair as 'cvx.cvy'.
    ranges : Multiindexed dataframe with force field as primary
             index and edges as created by dist1D().
    nlevels : Number of contour levels to use.
    nx : Number of plots per row.
    size : Relative size of each plot.
    colorbar : If true, will plot a colorbar.
    name : Name of the distribution.
    Returns
    -------
    fig : matplotlib figure.
    """
    # Setup plotting parameters
    nplots = dist.shape[1]
    xsize, ysize = nx, (nplots // nx) + 1
    cmap = plt.get_cmap('viridis')
    fig = plt.figure(figsize=(xsize * size, ysize * size))
    for i, k in enumerate(dist.keys()):
        # Get keys for both CVs (column name format 'cvx.cvy')
        kx, ky = k.split('.')
        # Prepare plotting grid (np.meshgrid doesn't work)
        # The unstacked distribution is (len(ky-bins), len(kx-bins)); the
        # edge vectors are broadcast/transposed to match that grid.
        X = np.broadcast_to(ranges[kx], dist[k].unstack().shape)
        Y = np.broadcast_to(ranges[ky], dist[k].unstack().shape).T
        Z = dist[k].unstack().values.T
        # Contour levels taking inf into account (free-energy surfaces can
        # contain +/-inf where the histogram is empty)
        levels = np.linspace(np.amin(Z[~np.isinf(Z)]),
                             np.amax(Z[~np.isinf(Z)]), nlevels)
        ax = fig.add_subplot(ysize, xsize, i + 1)
        cm = ax.contourf(X, Y, Z, cmap=cmap, levels=levels)
        ax.set_xlabel(kx)
        ax.set_ylabel(ky)
        ax.set_title(name)
        if colorbar:
            fig.colorbar(cm)
    return fig
def rmsd(rmsds: pd.DataFrame,
         aspect: Tuple[int, int]=(4, 6),
         nx: int=5) -> plt.Figure:
    """
    Plot RMSDs.
    Parameters
    ----------
    rmsds : Dataframe with force field as index and CVs as columns.
    aspect : Aspect ratio of individual plots.
    nx : Number of plots before wrapping to next row.
    Returns
    -------
    rmsd : Figure with RMSDs.
    """
    ny = len(rmsds.columns) // nx + 1
    fig = plt.figure(figsize=(nx * aspect[0], ny * aspect[1]))
    for i, col in enumerate(rmsds.columns):
        nbars = rmsds[col].shape[0]
        ax = fig.add_subplot(ny, nx, i + 1)
        ax.bar(np.arange(nbars), rmsds[col].values, linewidth=0)
        ax.set_title(col)
        ax.set_xticks(np.linspace(0.5, 0.5 + nbars - 1, nbars))
        ax.set_xticklabels(list(rmsds[col].keys()))
        # Chemical shifts ('CS' columns) are in ppm, couplings in Hz
        ax.set_ylabel('RMSD [{0}]'.format('ppm' if 'CS' in col else 'Hz'))
        # Rotate the tick labels of THIS subplot. (Previously plt.setp() was
        # applied to plt.gca() after the loop, which only rotated the labels
        # of the last subplot.)
        plt.setp(
            ax.get_xticklabels(),
            rotation=45,
            horizontalalignment='right'
        )
    plt.tight_layout()
    return fig
def metai(file: str,
          step: int=1,
          start: int=0,
          stop: int=sys.maxsize) -> None:
    """
    Plot metainference information.
    Parameters
    ----------
    file : Plumed file to read.
    step : Plot every step-th value.
    start : Start plotting from here.
    stop : Stop plotting here.
    """
    data = read_plumed(
        file,
        step=step,
        start=start,
        stop=stop
    )
    # Layout: two columns; three fixed panels (sigma_mean, sigma, kappa)
    # followed by one panel per remaining data column.
    ny, nx = len(data.columns) // 2 + 1, 2
    fig = plt.figure(figsize=(nx * 8, ny * 5))
    ax_sigmaMean = fig.add_subplot(ny, nx, 1)
    ax_sigma = fig.add_subplot(ny, nx, 2)
    ax_kappa = fig.add_subplot(ny, nx, 3)
    sc = 0  # number of sigmaMean columns found (one per data point)
    i = 4   # next free subplot slot for the remaining columns
    for col in data.columns:
        # 'sigmaMean' must be tested before 'sigma', since the former
        # contains the latter as a substring.
        if 'sigmaMean' in col:
            ax_sigmaMean.plot(data['time'], data[col], label=col.split('_')[1])
            sc += 1
        elif 'sigma' in col:
            ax_sigma.plot(data['time'], data[col], label=col.split('_')[1])
        elif 'time' not in col:
            ax = fig.add_subplot(ny, nx, i)
            i += 1
            ax.plot(data['time'], data[col])
            ax.set_xlabel('time')
            ax.set_ylabel(col)
    # Column names appear to follow '<name>.<field>_<index>'; the prefix of
    # the first data column is reused to build the sigma keys below --
    # assumes all metainference columns share that prefix (TODO confirm).
    name = data.columns[1].split('.')[0]
    for j in range(sc):
        # Effective force constant: kappa = 1 / (sigmaMean^2 + sigma^2)
        kappa = 1 / (data[name + '.sigmaMean_' + str(j)] ** 2 +
                     data[name + '.sigma_' + str(j)] ** 2)
        ax_kappa.plot(data['time'], kappa, label=j)
    ax_sigmaMean.set_xlabel('time')
    ax_sigmaMean.set_ylabel('sigma_mean')
    ax_sigmaMean.legend(loc=3, framealpha=0.75)
    ax_sigma.set_xlabel('time')
    ax_sigma.set_ylabel('sigma')
    ax_sigma.legend(loc=1, framealpha=0.75)
    ax_kappa.set_xlabel('time')
    ax_kappa.set_ylabel('kappa')
    ax_kappa.legend(loc=1, framealpha=0.75)
def interactive(file: str,
                x: Union[str, int]=0,
                y: Union[str, int, List[str], List[int]]=1,
                step: int=1,
                start: int=0,
                stop: int=sys.maxsize) -> None:
    """
    Plot values interactively.
    Parameters
    ----------
    file : Plumed file to read.
    x : x-axis to use for plotting, can be specified
        either as column index or field.
    y : y-axis to use for plotting, can be specified
        either as column index or field (or a list of either).
    step : Plot every step-th value.
    start : Start plotting from here.
    stop : Stop plotting here.
    """
    try:
        from bokeh.plotting import show, figure
        import bokeh.palettes as pal
    except ImportError:
        raise ImportError(
            'Interactive plotting requires Bokeh to be installed'
        )
    # Parses as ([x] + y) if y is a list, else ([x] + [y])
    cols = [x] + y if isinstance(y, list) else [x] + [y]
    fields, data = read_plumed(
        file,
        step=step,
        start=start,
        stop=stop,
        columns=cols,
        dataframe=False
    )
    # NOTE(review): the 'resize' tool and the line() 'legend' keyword were
    # removed/renamed ('legend_label') in newer Bokeh releases -- verify
    # against the Bokeh version actually in use.
    TOOLS = 'pan,wheel_zoom,box_zoom,resize,hover,reset,save'
    p = figure(tools=TOOLS, plot_width=960, plot_height=600)
    # Column 0 of 'data' is the x-axis; every other column becomes a line.
    # NOTE(review): pal.Spectral10 holds only 10 colours, so more than 9 y
    # series would raise an IndexError.
    for i in range(1, len(fields)):
        p.line(
            data[:, 0],
            data[:, i],
            legend=fields[i],
            line_color=pal.Spectral10[i],
            line_width=1.5
        )
    p.xaxis.axis_label = fields[0]
    p.yaxis.axis_label = fields[1]
    show(p)
| mit |
DTMilodowski/LiDAR_canopy | src/inventory_based_LAD_profiles.py | 1 | 40714 | # This contains code to estimate LAD profiles based on field measurements of
# tree height and crown dimensions, in addition to crown depth based on a
# regional allometric equation.
import numpy as np
from scipy import stats
pi=np.pi
# linear regression with confidence intervals and prediction intervals
def linear_regression(x_,y_,conf=0.95):
    """
    Ordinary least-squares fit y = m*x + c with analytical confidence and
    prediction intervals.

    Parameters
    ----------
    x_, y_ : sample arrays; pairs containing non-finite values are dropped.
    conf : confidence level for the intervals (default 0.95).

    Returns
    -------
    m, c : slope and intercept.
    r2 : coefficient of determination.
    p : two-sided p-value for a zero-slope null hypothesis.
    x_i, y_i : regular x grid (1000 steps across the data range) and the
        fitted line evaluated on it.
    CI_u, CI_l : upper/lower confidence-interval bounds at x_i.
    PI_u, PI_l : upper/lower prediction-interval bounds at x_i.
    """
    mask = np.all((np.isfinite(x_),np.isfinite(y_)),axis=0)
    x = x_[mask]
    y = y_[mask]
    # Least-squares fit y = m*x + c
    m, c, r, p, serr = stats.linregress(x,y)
    x_i = np.arange(x.min(),x.max(),(x.max()-x.min())/1000.)
    y_i = m*x_i + c
    PI,PI_u,PI_l = calculate_prediction_interval(x_i,x,y,m,c,conf)
    CI,CI_u,CI_l = calculate_confidence_interval(x_i,x,y,m,c,conf)
    # (The previous version also computed an in-sample MSE here but never
    # used or returned it; that dead code has been removed.)
    return m, c, r**2, p, x_i, y_i, CI_u, CI_l, PI_u, PI_l
# log-log regression with confidence intervals
def log_log_linear_regression(x,y,conf=0.95):
    """
    Fit a power law y = a * x**b by linear regression in log-log space.

    Returns the power-law scalar a, exponent b, the Baskerville (1972)
    back-transformation correction factor, r^2, the p-value, a log-spaced
    x grid and the upper/lower prediction-interval bounds on that grid.
    """
    finite = np.isfinite(x) & np.isfinite(y)
    lx = np.log(x[finite])
    ly = np.log(y[finite])
    # Straight-line fit in log space: log(y) = b*log(x) + log(a)
    slope, intercept, rval, pval, _stderr = stats.linregress(lx, ly)
    grid = np.arange(lx.min(), lx.max(), (lx.max() - lx.min()) / 1000.)
    _width, pi_hi, pi_lo = calculate_prediction_interval(grid, lx, ly,
                                                         slope, intercept, conf)
    # Correction factor for the bias introduced by fitting in log space
    residuals = ly - (slope * lx + intercept)
    correction = np.exp(np.mean(residuals ** 2) / 2)
    return (np.exp(intercept), slope, correction, rval ** 2, pval,
            np.exp(grid), np.exp(pi_hi), np.exp(pi_lo))
#=================================
# ANALYTICAL CONFIDENCE AND PREDICTION INTERVALS
# Calculate confidence intervals analytically (assumes normal distribution)
# x_i = x location at which to calculate the confidence interval
# x_obs = observed x values used to fit model
# y_obs = corresponding y values
# m = gradient
# c = constant
# conf = confidence interval
# returns dy - the confidence interval
def calculate_confidence_interval(x_i,x_obs,y_obs,m,c,conf):
    """
    Analytical confidence interval for the regression line y = m*x + c
    (assumes normally distributed residuals).

    Returns the half-width dy at x_i plus the upper and lower bounds.
    """
    alpha = 1. - conf
    n_obs = x_obs.size
    residuals = y_obs - (m * x_obs + c)
    # Standard error of the fit (n - 2 degrees of freedom)
    std_err = np.sqrt(np.sum(residuals ** 2) / (n_obs - 2))
    centre = x_obs.mean()
    spread = np.sum((x_obs - centre) ** 2)
    # Quantile of Student's t distribution for p = 1 - alpha/2
    t_quantile = stats.t.ppf(1. - alpha / 2., n_obs - 2)
    dy = t_quantile * std_err * np.sqrt(1 / float(n_obs)
                                        + ((x_i - centre) ** 2) / spread)
    y_hat = m * x_i + c
    return dy, y_hat + abs(dy), y_hat - abs(dy)
# Calculate prediction intervals analytically (assumes normal distribution)
# x_i = x location at which to calculate the prediction interval
# x_obs = observed x values used to fit model
# y_obs = corresponding y values
# m = gradient
# c = constant
# conf = confidence interval
# returns dy - the prediction interval
def calculate_prediction_interval(x_i,x_obs,y_obs,m,c,conf):
    """
    Analytical prediction interval for the regression line y = m*x + c
    (assumes normally distributed residuals). Differs from the confidence
    interval by the extra "+1" term for the scatter of a new observation.

    Returns the half-width dy at x_i plus the upper and lower bounds.
    """
    alpha = 1. - conf
    n_obs = x_obs.size
    residuals = y_obs - (m * x_obs + c)
    # Standard error of the fit (n - 2 degrees of freedom)
    std_err = np.sqrt(np.sum(residuals ** 2) / (n_obs - 2))
    centre = x_obs.mean()
    spread = np.sum((x_obs - centre) ** 2)
    # Quantile of Student's t distribution for p = 1 - alpha/2
    t_quantile = stats.t.ppf(1. - alpha / 2., n_obs - 2)
    dy = t_quantile * std_err * np.sqrt(1 + 1 / float(n_obs)
                                        + ((x_i - centre) ** 2) / spread)
    y_hat = m * x_i + c
    return dy, y_hat + abs(dy), y_hat - abs(dy)
# Calculate a prediction based on a linear regression model
# As above, but this time randomly sampling from prediction interval
# m = regression slope
# c = regression interval
def random_sample_from_regression_model_prediction_interval(x_i,x_obs,y_obs,m,c,array=False):
    """
    Draw a random prediction at x_i from the prediction interval of the
    regression model y = m*x + c, by sampling the t-quantile at random
    (intended for Monte-Carlo use).
    """
    keep = np.all((np.isfinite(x_obs), np.isfinite(y_obs)), axis=0)
    xo = x_obs[keep]
    yo = y_obs[keep]
    n_obs = xo.size
    residuals = yo - (m * xo + c)
    std_err = np.sqrt(np.sum(residuals ** 2) / (n_obs - 2))
    centre = xo.mean()
    # Random quantile from the t distribution (n - 2 dof for a 2-param fit)
    if array:
        t_draw = np.random.standard_t(n_obs - 2, size=x_i.size)
    else:
        t_draw = np.random.standard_t(n_obs - 2)
    offset = t_draw * std_err * np.sqrt(1 + 1 / float(n_obs)
                                        + ((x_i - centre) ** 2)
                                        / np.sum((xo - centre) ** 2))
    return m * x_i + c + offset
# as above, but using log-log space (i.e. power law functions)
# a = scalar
# b = exponent
def random_sample_from_powerlaw_prediction_interval(x_i,x_obs,y_obs,a,b,array=False):
    """
    Power-law (y = a * x**b) version of the random prediction sampler:
    draw in log-log space and back-transform.
    """
    log_draw = random_sample_from_regression_model_prediction_interval(
        np.log(x_i), np.log(x_obs), np.log(y_obs), b, np.log(a), array=array)
    return np.exp(log_draw)
#=================================
# BOOTSTRAP TOOLS
# Calculate prediction intervals through bootstrapping and resampling from residuals.
# The bootstrap model accounts for parameter uncertainty
# The residual resampling accounts for uncertainty in the residual - i.e. effects not
# accounted for by the regression model
# Inputs:
# - x_i = x location(s) at which to calculate the prediction interval
# This should be either a numpy array or single value. nD arrays will be
# converted to 1D arrays
# - x_obs = the observed x values used to fit model
# - y_obs = corresponding y values
# - conf = confidence interval, as fraction
# - niter = number of iterations over which to bootstrap
# - n_i = number of locations x_i (default is
# Returns:
# - ll and ul = the upper and lower bounds of the confidence interval
def calculate_prediction_interval_bootstrap_resampling_residuals(x_i,x_obs,y_obs,conf,niter):
    """
    Prediction interval via bootstrapping plus residual resampling. The
    bootstrap refit captures parameter uncertainty; the resampled residuals
    capture scatter the linear model does not explain.

    Parameters
    ----------
    x_i : location(s) at which to calculate the interval (scalar or array;
        nD arrays are flattened).
    x_obs, y_obs : observed values used to fit the model.
    conf : confidence level, as a fraction.
    niter : number of bootstrap iterations.

    Returns
    -------
    ll, ul : lower and upper bounds of the interval at x_i.
    """
    # (An unused in-function "from matplotlib import pyplot" import was
    # removed here -- it pulled in a heavy dependency for nothing.)
    # some fiddles to account for likely possible data types for x_i
    n=0
    if np.isscalar(x_i):
        n=1
    else:
        try:
            n=x_i.size # deal with numpy arrays
            if x_i.ndim > 1: # linearize multidimensional arrays
                x_i=x_i.reshape(n)
        except TypeError:
            print("Sorry, not a valid type for this function")
    y_i = np.full((n,niter),np.nan)
    # Bootstrapping
    for ii in range(0,niter):
        # resample observations (with replacement)
        # NOTE(review): the resample size is n (number of prediction points),
        # not x_obs.size as in a textbook bootstrap -- confirm this is
        # intended before changing it; the residual draw below must stay of
        # size n either way.
        ix = np.random.choice(x_obs.size, size=n,replace=True)
        x_boot = np.take(x_obs,ix)
        y_boot = np.take(y_obs,ix)
        # regression model
        m, c, r, p, serr = stats.linregress(x_boot,y_boot)
        # randomly sample from residuals with replacement
        res = np.random.choice((y_boot-(m*x_boot + c)),size = n,replace=True)
        # estimate y based on model and randomly sampled residuals
        y_i[:,ii] = m*x_i + c + res
    # confidence intervals simply derived from the distribution of y
    ll=np.percentile(y_i,100*(1-conf)/2.,axis=1)
    ul=np.percentile(y_i,100*(conf+(1-conf)/2.),axis=1)
    return ll,ul
# equivalent to above but for log-log space prediction
def calculate_powerlaw_prediction_interval_bootstrap_resampling_residuals(x_i,x_obs,y_obs,
                                                                        conf=.9,niter=1000):
    """
    Bootstrap prediction interval for a power-law fit: the interval is
    computed in log-log space and back-transformed.
    """
    lo, hi = calculate_prediction_interval_bootstrap_resampling_residuals(
        np.log(x_i), np.log(x_obs), np.log(y_obs), conf, niter)
    return np.exp(lo), np.exp(hi)
# Calculate a prediction based on a linear regression model
# As above, but this time randomly sampling from prediction interval
# calculated using random sampling from residuals.
# Note that this is intended to be used within a montecarlo framework
# m = regression slope
# c = regression interval
def random_sample_from_bootstrap_linear_regression_prediction_interval(x_i,x_obs,y_obs):
    """
    Draw a random prediction at x_i using a single bootstrap realisation:
    refit the regression on a resample of the observations, then add a
    randomly resampled residual (intended for Monte-Carlo use).
    """
    # some fiddles to account for likely possible data types for x_i
    n = 0
    if np.isscalar(x_i):
        n = 1
    else:
        try:
            n = x_i.size
            if x_i.ndim > 1:  # linearize multidimensional arrays
                x_i = x_i.reshape(n)
        except TypeError:
            print("Sorry, not a valid type for this function")
    # One bootstrap iteration: resample observations with replacement
    draw = np.random.choice(x_obs.size, size=n, replace=True)
    x_resampled = np.take(x_obs, draw)
    y_resampled = np.take(y_obs, draw)
    slope, intercept, _r, _p, _serr = stats.linregress(x_resampled, y_resampled)
    # Resample residuals with replacement and perturb the model prediction
    residuals = y_resampled - (slope * x_resampled + intercept)
    noise = np.random.choice(residuals, size=n, replace=True)
    return slope * x_i + intercept + noise
# as above but fitting relationship in log space
def random_sample_from_bootstrap_powerlaw_prediction_interval(x_i,x_obs,y_obs):
    """
    Power-law version of the bootstrap prediction sampler: draw in log-log
    space and back-transform.
    """
    log_draw = random_sample_from_bootstrap_linear_regression_prediction_interval(
        np.log(x_i), np.log(x_obs), np.log(y_obs))
    return np.exp(log_draw)
#================================
# INVENTORY BASED PROFILES
# This function reads in the crown allometry data from the database: Falster et al,. 2015; BAAD: a Biomass And Allometry Database for woody plants. Ecology, 96: 1445. doi: 10.1890/14-1889.1
def retrieve_crown_allometry(filename,conf=0.9):
    """
    Fit a height-to-crown-depth power law (D = a * H**b) in log-log space to
    the BAAD database records.

    Parameters
    ----------
    filename : path to the BAAD CSV export.
    conf : confidence level for the prediction interval.

    Returns
    -------
    a, b : power-law scalar and exponent.
    CF : Baskerville (1972) correction factor for the log-space fit.
    r2, p : coefficient of determination and p-value of the regression.
    H, D : height and crown-depth observations used in the fit.
    H_i : height grid (1000 log-spaced steps) for the interval curves.
    PI_u, PI_l : upper/lower prediction-interval bounds at H_i.
    """
    datatype = {'names': ('ID', 'Ref', 'Location', 'Lat', 'Long', 'Species', 'Family','Diameter','Height','CrownArea','CrownDepth'), 'formats': ('int_','S32','S256','f','f','S32','S32','f','f','f','f')}
    data = np.genfromtxt(filename, skip_header = 1, delimiter = ',',dtype=datatype)
    # Keep only records with both height and crown depth present
    mask = np.all((~np.isnan(data['Height']),~np.isnan(data['CrownDepth'])),axis=0)
    H = data['Height'][mask]
    D = data['CrownDepth'][mask]
    logH = np.log(H)
    logD = np.log(D)
    # regression to find power law exponents D = a.H^b
    b, loga, r, p, serr = stats.linregress(logH,logD)
    logH_i = np.arange(logH.min(),logH.max(),(logH.max()-logH.min())/1000.)
    PI,PI_upper,PI_lower = calculate_prediction_interval(logH_i,logH,logD,b,loga,conf=conf)
    # Back-transform the grid and the interval bounds to linear space
    H_i = np.exp(logH_i)
    PI_u = np.exp(PI_upper)
    PI_l = np.exp(PI_lower)
    model_logD = b*logH + loga
    error = logD-model_logD
    MSE = np.mean(error**2)
    CF = np.exp(MSE/2.) # Correction factor due to fitting regression in log-space (Baskerville, 1972)
    a = np.exp(loga)
    return a, b, CF, r**2, p, H, D, H_i, PI_u, PI_l
def load_BAAD_crown_allometry_data(filename):
    """
    Load diameter, height and crown-depth records from the BAAD database
    export (Falster et al. 2015).

    Parameters
    ----------
    filename : path to the BAAD CSV export.

    Returns
    -------
    DBH, H, D : stem diameter, tree height and crown depth for records where
        all three values are present. (Previously only Diameter and
        CrownDepth were checked, so NaN heights could slip through even
        though Height is returned.)
    """
    datatype = {'names': ('ID', 'Ref', 'Location', 'Lat', 'Long', 'Species', 'Family',
                          'Diameter','Height','CrownArea','CrownDepth'),
                'formats': ('int_','S32','S256','f','f','S32','S32','f','f','f','f')}
    data = np.genfromtxt(filename, skip_header = 1, delimiter = ',',dtype=datatype)
    # Require every returned field to be present for a record to be kept
    mask = np.all((~np.isnan(data['Diameter']),
                   ~np.isnan(data['Height']),
                   ~np.isnan(data['CrownDepth'])),axis=0)
    H = data['Height'][mask]
    D = data['CrownDepth'][mask]
    DBH = data['Diameter'][mask]
    return DBH, H, D
def load_BAAD_allometry_data(filename, filter_TropRF = True, filter_nodata = True,
                             filter_dbh = True,filter_status='None'):
    """
    Load the full BAAD allometry table and optionally filter it.

    Parameters
    ----------
    filename : path to the BAAD CSV export.
    filter_TropRF : keep only tropical rainforest ('TropRF') records.
    filter_nodata : drop records without both crown depth (cd) and height (ht).
    filter_dbh : keep records with dbh or dba >= 0.1, ht >= 2, or ht >= 5
        when both diameters are missing.
    filter_status : if not the sentinel string 'None', keep only records with
        this status code.

    Returns
    -------
    data : filtered numpy structured array (string columns are bytes).
    """
    datatype = ['S10','S100','f','f','S6','f','f', 'S200','f','S100','S100','S100',
                'S4','S4','S4','S100','f','f','f','f','f','f','f','f','f','f','f',
                'f','f','f','f','f','f','f','f','f','f','f','f','f','f','f','f','f',
                'f','f','f','f','f','f','f','f','f','f','f','f','f','f','f','f','f','f']
    data = np.genfromtxt(filename, names=True, delimiter = ',',dtype=datatype)
    if filter_TropRF:
        # String columns are bytes ('S' dtype), so the comparison must use a
        # bytes literal -- comparing against a str silently matches nothing
        # on Python 3.
        mask = data['vegetation']==b'TropRF'
        data = data[mask]
    if filter_nodata:
        mask = np.all((np.isfinite(data['cd']),np.isfinite(data['ht'])),axis=0)
        data = data[mask]
    if filter_dbh:
        mask = np.any((data['dbh']>=0.1,data['dba']>=0.1,data['ht']>=2,np.all((data['ht']>=5,np.isnan(data['dbh']),np.isnan(data['dba'])),axis=0)),axis=0)
        data = data[mask]
    if filter_status != 'None':
        # Encode str input so the comparison against the bytes column works
        status = filter_status.encode() if isinstance(filter_status, str) else filter_status
        mask = data['status']==status
        data=data[mask]
    return data
# Derive local allometric relationship between DBH and height -> fill gaps in census data
def load_crown_survey_data(census_file):
    """
    Load the field crown-survey CSV into a numpy structured array.

    Parameters
    ----------
    census_file : path to the census CSV file (one header row).

    Returns
    -------
    data : structured array with one record per surveyed stem; string
        columns are bytes ('S' dtypes) and missing numeric values are NaN.
    """
    datatype = {'names': ('plot','subplot','date','observers','tag','DBH','H_DBH','Height','flag','alive','C1','C2','subplotX','subplotY','density','spp','cmap_date','Xfield','Yfield','Zfield','DBH_field','Height_field','CrownArea','C3','dead_flag1','dead_flag2','dead_flag3','brokenlive_flag'), 'formats': ('S16','i8','S10','S32','i8','f','f','f','S8','i8','S132','S132','f','f','f','S64','S10','f','f','f','f','f','f','S132','i8','i8','i8','i8')}
    data = np.genfromtxt(census_file, skip_header = 1, delimiter = ',',dtype=datatype)
    return data
def calculate_allometric_equations_from_survey(data):
    """
    Fit local power-law allometries from the crown survey:
    height = a_ht * DBH**b_ht and crown area = a_A * DBH**b_A.

    Returns (a_ht, b_ht, CF_ht, a_A, b_A, CF_A), where CF_* is the
    Baskerville (1972) correction factor for back-transforming a fit made
    in log-log space.
    """
    def _power_law_fit(x, y):
        # Fit y = a * x**b by linear regression in log-log space
        logx = np.log(x)
        logy = np.log(y)
        slope, intercept, _r, _p, _serr = stats.linregress(logx, logy)
        residuals = logy - (slope * logx + intercept)
        correction = np.exp(np.mean(residuals ** 2) / 2)
        return np.exp(intercept), slope, correction

    # Height-vs-DBH allometry from field measurements
    valid = np.all((~np.isnan(data['DBH_field']),
                    ~np.isnan(data['Height_field'])), axis=0)
    a_ht, b_ht, CF_ht = _power_law_fit(data['DBH_field'][valid],
                                       data['Height_field'][valid])
    # Crown-area-vs-DBH allometry.
    # NOTE(review): the mask also requires the census 'Height' column to be
    # present even though it is not used in the fit -- confirm intent.
    valid = np.all((~np.isnan(data['CrownArea']),
                    ~np.isnan(data['DBH_field']),
                    ~np.isnan(data['Height'])), axis=0)
    a_A, b_A, CF_A = _power_law_fit(data['DBH_field'][valid],
                                    data['CrownArea'][valid])
    return a_ht, b_ht, CF_ht, a_A, b_A, CF_A
# Apply power law allometric models to estimate crown depths from heights
def calculate_crown_dimensions(DBH,Ht,Area, a_ht, b_ht, CF_ht, a_area, b_area, CF_area, a_depth, b_depth, CF_depth):
    """
    Gap-fill tree heights and crown areas with local power-law allometries,
    then estimate crown depth from height.

    Parameters
    ----------
    DBH, Ht, Area : per-tree diameter, height and crown area; Ht and Area
        may contain NaN where unmeasured.
    a_*, b_*, CF_* : power-law scalar, exponent and Baskerville correction
        factor for the height, crown-area and crown-depth allometries.

    Returns
    -------
    Ht, Area, Depth : arrays restricted to trees where all three values are
        finite. The caller's input arrays are left unmodified.
    """
    # Work on copies so the caller's arrays are not gap-filled in place
    # (the previous version mutated Ht and Area as a side effect).
    Ht = np.array(Ht, dtype=float)
    Area = np.array(Area, dtype=float)
    # Gapfill heights with the local height-vs-DBH allometry
    mask = np.isnan(Ht)
    Ht[mask] = CF_ht*a_ht*DBH[mask]**b_ht
    # Gapfill crown areas with the local area-vs-DBH allometry
    mask = np.isnan(Area)
    Area[mask] = CF_area*a_area*DBH[mask]**b_area
    # Apply canopy depth model
    Depth = CF_depth*a_depth*Ht**b_depth
    # Remove any remaining nodata values (brought forward from input data)
    mask = np.all((~np.isnan(Depth),~np.isnan(Ht),~np.isnan(Area)),axis=0)
    return Ht[mask], Area[mask], Depth[mask]
# As above, but randomly sampling from prediction intervals for gapfilling
# Note that allometries are taken from local data (ref1_...) except for crown depth
# which comes from an allometic database (ref2_...).
def calculate_crown_dimensions_mc(DBH,Ht,Area,ref1_DBH,ref1_Ht,ref1_Area,ref2_DBH,ref2_Ht,ref2_D, a_ht, b_ht, a_area, b_area, a_depth, b_depth):
    """
    Monte-Carlo version of calculate_crown_dimensions: gaps are filled by
    sampling from the allometry prediction intervals rather than the mean
    fit. Local (ref1_*) data back the height and crown-area allometries;
    the regional database (ref2_*) backs the height-to-crown-depth one.
    NOTE(review): ref2_DBH is accepted but not used -- kept for interface
    compatibility.
    """
    # Fill missing heights from the local height-vs-DBH allometry
    no_height = np.isnan(Ht)
    Ht[no_height] = random_sample_from_powerlaw_prediction_interval(
        DBH[no_height], ref1_DBH, ref1_Ht, b_ht, a_ht, array=True)
    # Fill missing crown areas from the local area-vs-DBH allometry
    no_area = np.isnan(Area)
    Area[no_area] = random_sample_from_powerlaw_prediction_interval(
        DBH[no_area], ref1_DBH, ref1_Area, b_area, a_area, array=True)
    # Crown depth sampled for every tree from the regional height allometry
    Depth = random_sample_from_powerlaw_prediction_interval(
        Ht, ref2_Ht, ref2_D, b_depth, a_depth, array=True)
    # Drop any record still carrying non-finite values
    keep = np.all((np.isfinite(Depth), np.isfinite(Ht), np.isfinite(Area)), axis=0)
    return Ht[keep], Area[keep], Depth[keep]
# a, b, c = principal axes of the ellipse. Initially assume circular horizontal plane i.e. a = b
# z0 = vertical position of origin of ellipse
def construct_ellipses_for_subplot(Ht, Area, Depth):
    """
    Build crown ellipsoids from tree height, crown area and crown depth.
    The horizontal cross-section is assumed circular (a = b), the vertical
    semi-axis is half the crown depth, and the ellipsoid centre sits half a
    crown depth below the tree top.

    Returns (a, b, c, z0): the three semi-axes and the centre height.
    """
    semi_horizontal = np.sqrt(Area / np.pi)
    semi_vertical = Depth / 2.
    centre_height = Ht - semi_vertical
    return semi_horizontal, semi_horizontal.copy(), semi_vertical, centre_height
# Retrieve canopy profiles based on an ellipsoidal lollipop model
# The provided ht_u vector contains the upper boundary of the canopy layers
def calculate_LAD_profiles_ellipsoid(canopy_layers, a, b, c, z0, plot_area, leafA_per_unitV=1.):
    """
    Canopy volume / leaf-area-density profile for ellipsoidal ("lollipop")
    crowns.

    Parameters
    ----------
    canopy_layers : upper boundaries of the (uniformly spaced) canopy layers.
    a, b, c : semi-axes of each crown ellipsoid (c is the vertical one).
    z0 : vertical position of each ellipsoid centre.
    plot_area : ground area of the plot.
    leafA_per_unitV : leaf area per unit crown volume.

    Returns
    -------
    LAD, CanopyV : leaf area density and crown volume per layer.
    """
    layer_thickness = np.abs(canopy_layers[1]-canopy_layers[0])
    N_layers = canopy_layers.size
    CanopyV = np.zeros(N_layers)
    pi=np.pi
    zeros = np.zeros(a.size)
    # Formula for volume of ellipsoidal cap: V = pi*a*b*x**2*(3c-x)/(3*c**2)
    # where x is the vertical distance from the top of the ellipsoid along
    # axis c; a layer's volume is the difference between two caps.
    for i in range(0,N_layers):
        ht_u = canopy_layers[i]
        ht_l = ht_u-layer_thickness
        mask = np.all((z0+c>=ht_l,z0-c<=ht_u),axis=0)
        x1 = np.max((z0+c-ht_u,zeros),axis=0)
        x2 = np.min((z0+c-ht_l,2*c),axis=0)
        CanopyV[i]+= np.sum(pi/3.*a[mask]*b[mask]/c[mask]**2 *(x2[mask]**2.*(3.*c[mask]-x2[mask]) - x1[mask]**2.*(3.*c[mask]-x1[mask])))
    # Sanity check: layer volumes should sum to the closed-form ellipsoid
    # volume total (4/3*pi*a*b*c). Only report on failure -- the previous
    # unconditional debug print was removed for consistency with
    # calculate_LAD_profiles_generic.
    TestV = np.nansum(4*pi*a*b*c/3)
    precision_requirement = 10**-8
    if CanopyV.sum() <= TestV - precision_requirement:
        print("Issue - sanity check fail: ", CanopyV.sum(), TestV)
    LAD = CanopyV*leafA_per_unitV/plot_area
    return LAD, CanopyV
# An alternative model providing more generic canopy shapes - currently assume radial symmetry around trunk. The crown
# volume in a given layer is determined by the volume of revolution of the function r = a*D^b
def calculate_LAD_profiles_generic(canopy_layers, Area, D, Ht, beta, plot_area, leafA_per_unitV=1.):
    """
    Canopy volume profile for a generic, radially symmetric crown shape:
    each crown is the solid of revolution of r = alpha * d**beta, where d is
    the depth below the tree top and alpha is fixed so that r(D) matches the
    measured crown radius.

    Returns (LAD, CanopyV): leaf area density and crown volume per layer.
    """
    crown_radius = np.sqrt(Area / np.pi)
    dz = np.abs(canopy_layers[1] - canopy_layers[0])
    n_layers = canopy_layers.size
    CanopyV = np.zeros(n_layers)
    floor = np.zeros(Ht.size)
    # Volume of revolution between depths d1 and d2 below the tree top:
    # V = pi*(r_max/D**beta)**2/(2*beta+1) * (d2**(2*beta+1) - d1**(2*beta+1))
    for layer in range(n_layers):
        top = canopy_layers[layer]
        bottom = top - dz
        in_layer = np.all((Ht >= bottom, Ht - D <= top), axis=0)
        d_upper = np.max((Ht - top, floor), axis=0)
        d_lower = np.min((Ht - bottom, D), axis=0)
        CanopyV[layer] += np.sum(
            np.pi * (crown_radius[in_layer] / D[in_layer] ** beta) ** 2 / (2 * beta + 1)
            * (d_lower[in_layer] ** (2 * beta + 1) - d_upper[in_layer] ** (2 * beta + 1.)))
    # Sanity check against the closed-form total crown volume
    TestV = np.nansum(np.pi * D * crown_radius ** 2 / (2 * beta + 1.))
    precision_requirement = 10 ** -8
    if CanopyV.sum() <= TestV - precision_requirement:
        print("Issue - sanity check fail: ", CanopyV.sum(), TestV)
    LAD = CanopyV * leafA_per_unitV / plot_area
    return LAD, CanopyV
def calculate_LAD_profiles_generic_mc(canopy_layers, Area, D, Ht, beta_min, beta_max,
                                      plot_area, leafA_per_unitV=1.):
    """
    Monte-Carlo variant of calculate_LAD_profiles_generic: the crown-shape
    exponent beta is drawn independently for every tree from a uniform
    distribution on [beta_min, beta_max].

    Parameters
    ----------
    canopy_layers : upper boundaries of the (uniformly spaced) canopy layers.
    Area, D, Ht : per-tree crown area, crown depth and tree height.
    beta_min, beta_max : bounds of the uniform distribution for beta.
    plot_area : ground area of the plot.
    leafA_per_unitV : leaf area per unit crown volume.

    Returns
    -------
    LAD, CanopyV : leaf area density and crown volume per layer.
    """
    r_max = np.sqrt(Area/np.pi)
    layer_thickness = np.abs(canopy_layers[1]-canopy_layers[0])
    N_layers = canopy_layers.size
    CanopyV = np.zeros(N_layers)
    pi=np.pi
    zeros = np.zeros(Ht.size)
    # Clamp crown depth to tree height on a copy, so the caller's array is
    # not modified in place
    D = D.copy()
    D[D>Ht]=Ht[D>Ht]
    # Formula for volume of revolution of power law function r = alpha*D^beta:
    # V = pi*(r_max/D_max^beta)^2/(2*beta+1) * (D2^(2beta+1) - D1^(2beta+1))
    # where alpha = (r_max/D_max^beta)^2
    n_trees = Ht.size
    # Per-tree beta drawn from U(beta_min, beta_max). A leftover debug line
    # ("beta[:] = 0.6") previously overrode every draw with 0.6, defeating
    # the beta_min/beta_max interface; it has been removed.
    beta=np.random.rand(n_trees)*(beta_max-beta_min)+beta_min
    for i in range(0,N_layers):
        ht_u = canopy_layers[i]
        ht_l = ht_u-layer_thickness
        mask = np.all((Ht>=ht_l,Ht-D<=ht_u),axis=0)
        d1 = np.max((Ht-ht_u,zeros),axis=0)
        d2 = np.min((Ht-ht_l,D),axis=0)
        CanopyV[i]+= np.sum( pi*(r_max[mask]/D[mask]**beta[mask])**2/(2*beta[mask]+1) * (d2[mask]**(2*beta[mask]+1) - d1[mask]**(2*beta[mask]+1.)) )
    # Sanity check against the closed-form total crown volume
    TestV = np.nansum(pi*D*r_max**2/(2*beta+1.))
    precision_requirement = 10**-8
    if CanopyV.sum() <= TestV - precision_requirement:
        print("Issue - sanity check fail: ", CanopyV.sum(),TestV)
    LAD = CanopyV*leafA_per_unitV/plot_area
    return LAD, CanopyV
#---------------------------------------
# SMALL SAFE PLOTS DATA
# some code to load in the survey data on small stems from the small SAFE plots.
# Numbers of stems are given as a per-metre density
# SAFE small plots are 25 m x 25 m
def load_SAFE_small_plot_data(filename, plot_area=625.):
    """
    Load the small-stem survey from the 25 m x 25 m SAFE plots and bin stems
    < 10 cm DBH into 0.5 cm diameter classes.

    Parameters
    ----------
    filename : path to the survey CSV file.
    plot_area : plot area in m^2 (default 25 m x 25 m = 625 m^2).

    Returns
    -------
    block_dict : dictionary keyed by block ID (bytes); each entry holds
        'dbh' (bin centres), 'stem_density' (mean stems per m^2 per bin
        across plots) and 'height' (mean measured height per bin).
    """
    datatype = {'names': ('Block', 'Plot', 'TreeID', 'DBH', 'Height', 'CrownRadius'), 'formats': ('S3','i8','S8','f16','f16','f16')}
    data = np.genfromtxt(filename, skip_header = 1, delimiter = ',',dtype=datatype)
    block_dict = {}
    blocks = np.unique(data['Block'])
    # some basic params
    bin_width = 0.5
    for bb in range(0,blocks.size):
        mask = data['Block']==blocks[bb]
        plots = np.unique(data['Plot'][mask])
        DBH = np.arange(0.,10.,bin_width)+bin_width/2.
        n_stems = np.zeros((DBH.size,plots.size))
        sum_height = np.zeros((DBH.size,plots.size))
        n_height = np.zeros((DBH.size,plots.size))
        # (Unused sum_area/n_area accumulators removed.)
        for pp in range(0,plots.size):
            mask2 = np.all((data['Block']==blocks[bb],data['Plot']==plots[pp]),axis=0)
            n_trees = mask2.sum()
            # loop through trees and only look at trees < 10 cm DBH
            for tt in range(n_trees):
                dbh = data['DBH'][mask2][tt]
                if dbh<10.:
                    # Bin index must be an int: float indices raise an
                    # IndexError with modern numpy (the census loaders below
                    # already cast).
                    ii = int(np.floor(dbh/bin_width))
                    n_stems[ii,pp]+=1.
                    ht = data['Height'][mask2][tt]
                    if np.isfinite(ht):
                        sum_height[ii,pp]+=ht
                        n_height[ii,pp]+=1
        # Per-plot mean height per bin; empty bins give NaN (0/0), which is
        # expected, so silence the warnings.
        with np.errstate(invalid='ignore', divide='ignore'):
            mean_height = sum_height/n_height
        crown_geometry = {}
        crown_geometry['dbh'] = DBH[1:]
        crown_geometry['stem_density'] = np.mean(n_stems,axis=1)[1:]/plot_area
        crown_geometry['height'] = np.mean(mean_height,axis=1)[1:]
        block_dict[blocks[bb]]= crown_geometry
    return block_dict
# some code to get crown dimensions for stem size distributions
def calculate_crown_dimensions_small_plots(DBH,Ht,stem_density,a_ht, b_ht, CF_ht,
                                           a_area, b_area, CF_area,
                                           a_depth, b_depth, CF_depth):
    """
    Crown dimensions for binned stem-size distributions: drop empty bins,
    gap-fill missing heights from the local height-vs-DBH allometry, and
    derive crown area and depth from allometry.

    Returns (Ht, Area, Depth, stem_density); the first three are filtered to
    bins where all values are available.
    """
    # Drop bins that contain no stems (fancy indexing makes copies, so the
    # caller's arrays are untouched)
    populated = stem_density > 0
    Ht = Ht[populated]
    DBH = DBH[populated]
    stem_density = stem_density[populated]
    # Gap-fill missing heights with the local power-law allometry
    missing_ht = np.isnan(Ht)
    Ht[missing_ht] = CF_ht * a_ht * DBH[missing_ht] ** b_ht
    # Crown area and depth always come from allometry for these binned stems
    Area = CF_area * a_area * DBH ** b_area
    Depth = CF_depth * a_depth * Ht ** b_depth
    # Drop any remaining nodata carried forward from the inputs
    keep = ~(np.isnan(Depth) | np.isnan(Ht) | np.isnan(Area))
    return Ht[keep], Area[keep], Depth[keep], stem_density
# calculate the crown profiles from the stem size distributions
def calculate_LAD_profiles_from_stem_size_distributions(canopy_layers, Area, D, Ht,
                                                        stem_density, beta, leafA_per_unitV=1.):
    """
    Leaf-area-density profile for binned stem-size distributions, using the
    generic crown shape (solid of revolution of r = alpha * d**beta), with
    each bin weighted by its stem density. Since stem_density is an areal
    density, the returned LAD is already per unit ground area.
    """
    crown_radius = np.sqrt(Area / np.pi)
    dz = np.abs(canopy_layers[1] - canopy_layers[0])
    profile = np.zeros(canopy_layers.size)
    floor = np.zeros(Ht.size)
    # Volume of revolution between depths d1 and d2 below the tree top:
    # V = pi*(r_max/D**beta)**2/(2*beta+1) * (d2**(2*beta+1) - d1**(2*beta+1))
    for layer, top in enumerate(canopy_layers):
        bottom = top - dz
        in_layer = np.all((Ht >= bottom, Ht - D <= top), axis=0)
        d_upper = np.max((Ht - top, floor), axis=0)
        d_lower = np.min((Ht - bottom, D), axis=0)
        profile[layer] += np.sum(
            np.pi * (crown_radius[in_layer] / D[in_layer] ** beta) ** 2 / (2 * beta + 1)
            * (d_lower[in_layer] ** (2 * beta + 1) - d_upper[in_layer] ** (2 * beta + 1))
            * stem_density[in_layer])
    return profile * leafA_per_unitV
# as above for ellipsoidal geometry
def calculate_LAD_profiles_ellipsoid_from_stem_size_distributions(canopy_layers,
                                                                  Area, D, Ht, stem_density, leafA_per_unitV=1.):
    """
    Leaf-area-density profile for binned stem-size distributions assuming
    ellipsoidal crowns, with each bin weighted by its stem density. Since
    stem_density is an areal density, the returned LAD is already per unit
    ground area.
    """
    # Ellipsoid semi-axes: circular horizontal cross-section, vertical
    # semi-axis half the crown depth, centre half a depth below the top
    ax_a = np.sqrt(Area / np.pi)
    ax_b = ax_a.copy()
    ax_c = D / 2.
    centre_z = Ht - ax_c
    dz = np.abs(canopy_layers[1] - canopy_layers[0])
    profile = np.zeros(canopy_layers.size)
    floor = np.zeros(ax_a.size)
    # Ellipsoidal cap volume: V = pi/3 * a*b/c**2 * x**2 * (3c - x), with x
    # measured down from the crown top; a slice is the difference of two caps
    for layer, top in enumerate(canopy_layers):
        bottom = top - dz
        in_layer = np.all((centre_z + ax_c >= bottom, centre_z - ax_c <= top), axis=0)
        x_upper = np.max((centre_z + ax_c - top, floor), axis=0)
        x_lower = np.min((centre_z + ax_c - bottom, 2 * ax_c), axis=0)
        profile[layer] += np.sum(
            np.pi / 3. * ax_a[in_layer] * ax_b[in_layer] / ax_c[in_layer] ** 2
            * (x_lower[in_layer] ** 2. * (3. * ax_c[in_layer] - x_lower[in_layer])
               - x_upper[in_layer] ** 2. * (3. * ax_c[in_layer] - x_upper[in_layer]))
            * stem_density[in_layer])
    return profile * leafA_per_unitV
#=====================================================================================================================
# Load in data for SAFE detailed subplot census - all trees >2 cm -
# only interested in a subset of the fields
def load_SAFE_small_stem_census(filename, sp_area=20.**2, N_subplots = 25):
    """
    Load the detailed SAFE subplot census (all stems > 2 cm) and bin stems
    < 10 cm DBH into 0.5 cm classes per subplot.

    Parameters
    ----------
    filename : path to the census CSV file.
    sp_area : subplot area in m^2.
    N_subplots : number of subplots per plot; subplots missing from the file
        are filled with the across-subplot average distribution.

    Returns
    -------
    plot_dict : dictionary keyed by plot ID (bytes); each entry holds 'dbh'
        (bin centres) and 'stem_density' (stems per m^2, bins x subplots).
    """
    datatype = {'names': ('Plot', 'Subplot', 'Date', 'Obs','tag', 'DBH_ns',
                          'DBH_ew', 'H_POM', 'Height', 'Flag', 'Notes'),
                'formats': ('S8','i8','S10','S64','int_','f',
                            'f','f','f','S16','S32')}
    data = np.genfromtxt(filename, skip_header = 1, usecols=np.arange(0,11),
                         delimiter = ',',dtype=datatype)
    data['DBH_ns']/=10. # convert from mm to cm
    data['DBH_ew']/=10. # convert from mm to cm
    # loop through data & remove lianas
    N = data['Plot'].size
    mask = np.ones(N,dtype='bool')
    for i in range(0,N):
        if(b'liana' in data['Notes'][i]):
            mask[i] = False
        elif(b'Liana' in data['Notes'][i]):
            mask[i] = False
    # Remove lianas and dead trees. The flag column is bytes ('S' dtype), so
    # the comparisons must use bytes literals -- on Python 3 comparing to a
    # str matched nothing, so dead/liana records were never dropped. The
    # Flag dtype is also widened from 'S4' so the longer flag strings are
    # not truncated before comparison.
    data = data[mask]
    data = data[data['Flag']!=b'dead']
    data = data[data['Flag']!=b'dead, broken']
    data = data[data['Flag']!=b'liana']
    plot_dict = {}
    plots = np.unique(data['Plot'])
    for pp in range(plots.size):
        plot_data = data[data['Plot']==plots[pp]]
        subplots = np.unique(plot_data['Subplot'])
        # some basic params
        bin_width = 0.5
        DBH = np.arange(0.,10.,bin_width)+bin_width/2.
        n_stems_i = np.zeros((DBH.size,subplots.size))
        n_stems = np.zeros((DBH.size,N_subplots))
        stem_dict = {}
        sp_present = np.zeros(N_subplots,dtype='bool')
        for ss in range(0,subplots.size):
            sp_present[subplots[ss]-1] = True
            mask = plot_data['Subplot']==subplots[ss]
            n_trees = mask.sum()
            sp_data = plot_data[mask]
            # loop through trees and only look at trees < 10 cm DBH
            for tt in range(n_trees):
                # stem diameter = mean of the two perpendicular measurements
                dbh = (sp_data['DBH_ns'][tt]+sp_data['DBH_ew'][tt])/2.
                if dbh<10.:
                    ii = np.floor(dbh/bin_width).astype('int')
                    n_stems_i[ii,ss]+=1.
                    n_stems[ii,subplots[ss]-1]+=1.
        # Fill subplots absent from the census with the average distribution
        average = np.mean(n_stems_i,axis=1)
        for ss in range(0,N_subplots):
            if ~sp_present[ss]:
                n_stems[:,ss] = average
        stem_dict['dbh'] = DBH[1:]
        stem_dict['stem_density'] = n_stems[1:,:]/sp_area
        plot_dict[plots[pp]]=stem_dict
    return plot_dict
#=====================================================================================================================
# Load in data for Danum detailed census - every subplot censused
def load_Danum_stem_census(filename, sp_area=20.**2):
    """Load the Danum detailed stem census and bin stems into DBH classes.

    Every subplot is censused, and each tree record carries up to four stem
    diameters (DBH1-DBH4, in mm; unused stem slots hold 0 or are blank).

    Parameters
    ----------
    filename : str
        csv census file with one header row.
    sp_area : float
        subplot area in m^2 (default 20 m x 20 m).

    Returns
    -------
    dict
        {'dbh': 0.5 cm bin centres in cm (first bin dropped),
         'stem_density': stems per m^2 per bin per subplot}.
    """
    datatype = {'names': ('Plot', 'Subplot', 'Date', 'x', 'y', 'tag', 'spp',
                          'Nstem', 'DBH1', 'Codes', 'Notes', 'DBH2', 'DBH3',
                          'DBH4'),
                'formats': ('S6', 'int_', 'S10', 'int_', 'int_', 'S6', 'S6',
                            'int_', 'f', 'S8', 'S32', 'f', 'f', 'f')}
    data = np.genfromtxt(filename, skip_header=1, delimiter=',', dtype=datatype)
    stem_cols = ('DBH1', 'DBH2', 'DBH3', 'DBH4')
    for col in stem_cols:
        data[col] /= 10.  # convert from mm to cm
    subplots = np.unique(data['Subplot'])
    N_subplots = subplots.size
    # histogram parameters: 0.5 cm bins spanning 0-10 cm, bin centres
    bin_width = 0.5
    DBH = np.arange(0., 10., bin_width) + bin_width / 2.
    # NOTE: columns are addressed with subplot_id - 1, so subplot IDs are
    # assumed to be exactly 1..N_subplots (true for this fully censused plot)
    n_stems = np.zeros((DBH.size, N_subplots))
    for ss in range(0, N_subplots):
        sp_data = data[data['Subplot'] == subplots[ss]]
        # Accumulate every recorded stem (main stem plus up to three extra);
        # previously this was four copy-pasted loops, one per stem column.
        for col in stem_cols:
            dbh = sp_data[col]
            # keep real measurements < 10 cm DBH; absent stem slots are 0/NaN
            dbh = dbh[(dbh > 0) & (dbh < 10.)]
            idx = np.floor(dbh / bin_width).astype('int')
            n_stems[:, subplots[ss] - 1] += np.bincount(idx, minlength=DBH.size)
    stem_dict = {}
    # drop the first bin (stems < 0.5 cm) from the reported distribution
    stem_dict['dbh'] = DBH[1:]
    stem_dict['stem_density'] = n_stems[1:, :] / sp_area
    return stem_dict
# calculate crown geometries based on stem distributions only
def calculate_crown_dimensions_for_stem_distributions(DBH,stem_density,a_ht, b_ht, CF_ht,
                                                      a_area, b_area, CF_area,
                                                      a_depth, b_depth, CF_depth):
    """Estimate crown geometry for binned stem distributions using power-law
    allometries of the form CF * a * X**b.

    Parameters
    ----------
    DBH, stem_density : arrays
        DBH bin centres (cm) and the stem density in each bin.
    a_ht, b_ht, CF_ht : float
        height-vs-DBH power-law coefficient, exponent and correction factor.
    a_area, b_area, CF_area : float
        crown-area-vs-DBH allometry parameters.
    a_depth, b_depth, CF_depth : float
        crown-depth-vs-height allometry parameters.

    Returns
    -------
    Ht, Area, Depth, stem_density : arrays of equal length
        crown dimensions and stem density for the retained bins.
    """
    # Get rid of bins with no trees
    occupied = stem_density > 0
    DBH = DBH[occupied]
    stem_density = stem_density[occupied]
    # Apply the allometric models: heights, crown areas, then canopy depth
    Ht = CF_ht * a_ht * DBH ** b_ht
    Area = CF_area * a_area * DBH ** b_area
    Depth = CF_depth * a_depth * Ht ** b_depth
    # Remove any nodata values produced by the allometries.
    # BUGFIX: the NaN mask is now also applied to stem_density so that the
    # four returned arrays always have matching lengths (previously
    # stem_density was returned unmasked).
    mask = np.all((~np.isnan(Depth), ~np.isnan(Ht), ~np.isnan(Area)), axis=0)
    return Ht[mask], Area[mask], Depth[mask], stem_density[mask]
#=============================================================================================================================
# 3-dimensional crown models
# create 3D model of individual crown within a specified environment.
# Voxels are included if the centre is contained within the crown.
# This imposes the following constraints:
# - The voxel is in crown if:
# 1) 0 <= z' <= Zmax
# 2) r <= Rmax/Zmax^beta * z'^beta;
# where r = sqrt((x-x0)**2+(y-y0)**2)
#
# Note that we do not have data on crowns for trees outwith each plot
# that overlap into the plot area. To compensate, we include in our
# plot-level estimates the full crown volume within each tree inside
# the plot, even if these overlap outside the plot bounds. This assumes
# that overlap from trees outwith the plot more or less equals overlap
# from trees inside the plot beyond the plot footprint.
#
# Input variables:
# - canopy_matrix (the three dimensional matrix representing the canopy space - dimensions x,y,z)
# - x,y,z (vectors containing the centre coordinates of each voxel
# - x0,y0 (horizontal coordinates of stem)
# - Z0 (the depth from the top of the domain to the tree top
# - Zmax (the maximum crown depth)
# - Rmax (the maximum radius of the tree)
# - beta (the exponent controlling the canopy morphology)
# Returns:
# - 0 (the canopy_matrix array is updated with the new crown)
def generate_3D_crown(canopy_matrix, x, y, z, x0, y0, Z0, Zmax, Rmax, beta):
    """Rasterise a single tree crown into the voxel canopy model.

    A voxel centre is inside the crown when its depth below the tree top,
    z' = z - Z0, satisfies 0 <= z' <= Zmax and its horizontal distance from
    the stem satisfies r <= Rmax * (z'/Zmax)**beta.  Selected voxels in
    canopy_matrix (dimensions y, x, z) are set to 1 in place.

    Returns 0.
    """
    xm, ym, zm = np.meshgrid(x, y, z)
    radial = np.sqrt((xm - x0)**2 + (ym - y0)**2)
    below_top = zm >= Z0
    above_base = zm <= Zmax + Z0
    within_taper = radial <= ((zm - Z0)**beta) * Rmax / (Zmax**beta)
    canopy_matrix[below_top & above_base & within_taper] = 1
    return 0
#return crown
# alternative is to use ellipsoid crowns, a similar approach has been used by other
# researchers. Voxels can readily be assigned to ellipsoidal crowns based on the
# inequality:
# 1 >= ((x-x0)/a)^2 + ((y-y0)/b)^2 + ((z-z0)/c)^2
# where:
# - x,y,z = coordinates of voxel
# - x0,y0 = trunk location in x and y
# - z0 = Ht-CrownDepth/2
# - a=b = radius of crown, R
# - c = CrownDepth/2
#
# Input variables:
# - canopy_matrix (the three dimensional matrix representing the canopy space - dimensions x,y,z)
# - x,y,z (vectors containing the centre coordinates of each voxel
# - x0,y0 (horizontal coordinates of stem)
# - H (the height of the top of the tree)
# - D (the maximum crown depth)
# - R (the maximum radius of the tree crown)
# Returns:
# - 0 (the canopy_matrix array is updated with the new crown)
def generate_3D_ellipsoid_crown(canopy_matrix, xm, ym, zm, x0, y0, H, D, R):
    """Add one ellipsoidal tree crown to the voxel canopy model.

    A voxel lies inside the crown when
    ((x-x0)/R)**2 + ((y-y0)/R)**2 + ((z-z0)/(D/2))**2 <= 1,
    with the crown centred vertically at z0 = H - D/2.  Voxel counts in
    canopy_matrix are incremented in place, so overlapping crowns
    accumulate; callers clip to 1 where a presence/absence canopy is
    wanted.

    Parameters
    ----------
    canopy_matrix : 3-D array, updated in place
    xm, ym, zm : coordinate grids matching canopy_matrix (from np.meshgrid)
    x0, y0 : stem location
    H, D, R : tree height, crown depth and crown radius
    """
    half_depth = D / 2.
    z0 = H - half_depth
    u = (xm - x0) / R
    v = (ym - y0) / R
    w = (zm - z0) / half_depth
    canopy_matrix += (u**2 + v**2 + w**2 <= 1)
#
# 3-D canopy
# This function creates 3D crown model by aggregating individual crowns
# constructed using the above function. It takes in the following input
# variables:
# - x,y,z (vectors containing the centre coordinates of each voxel)
# - x0,y0 (vectors indicating the relative horizontal coordinates of the
# surveyed stems)
# - Z0 (vector containing the depth from the top of the domain to the each
# tree top
# - Zmax (vector containing the maximum crown depth for each tree surveyed)
# - Rmax (vector containing the maximum radius of the tree)
# - beta (vector containing the exponents controlling the canopy morphology)
# - plot_mask (an optional mask that accounts for non-square plot geometries)
# - buffer (an optional argument that by default is zero, but should be
# increased so that it is sufficient to account for crown overlap
def generate_3D_canopy(x, y, z, x0, y0, Z0, Zmax, Rmax, beta):
    """Aggregate individual crowns (see generate_3D_crown) into a plot canopy.

    Parameters
    ----------
    x, y, z : arrays
        voxel-centre coordinate vectors defining the model domain.
    x0, y0 : arrays
        horizontal stem coordinates for each surveyed tree.
    Z0, Zmax, Rmax, beta : arrays
        per-tree depth to the tree top, maximum crown depth, maximum crown
        radius and crown-shape exponent.

    Returns
    -------
    canopy : 3-D array (y, x, z)
        voxels flagged 1 where occupied by at least one crown.
    """
    n_trees = x0.size
    canopy = np.zeros((y.size, x.size, z.size), dtype='float')
    # Rasterise each tree in turn; generate_3D_crown takes the coordinate
    # vectors and builds its own meshgrid.
    # BUGFIX: the undefined names xm, ym, zm were previously passed here,
    # raising a NameError on the first call.
    for tt in range(0, n_trees):
        generate_3D_crown(canopy, x, y, z,
                          x0[tt], y0[tt], Z0[tt], Zmax[tt], Rmax[tt], beta[tt])
    return canopy
# alternative function that this time uses ellipsoid crowns
# It takes in the following input variables:
# - x,y,z (vectors containing the centre coordinates of each voxel)
# - x0,y0 (vectors indicating the relative horizontal coordinates of the
# surveyed stems)
# - H (vector containing the Heights of each tree)
# - D (vector containing the crown depth for each tree surveyed)
# - R (vector containing the maximum radius of the tree)
# - plot_mask (an optional mask that accounts for non-square plot geometries)
# - buffer (an optional argument that by default is zero, but should be
# increased so that it is sufficient to account for crown overlap
def generate_3D_ellipsoid_canopy(x, y, z, x0, y0, H, D, R):
    """Build a presence/absence canopy model from ellipsoidal tree crowns.

    Parameters
    ----------
    x, y, z : arrays
        voxel-centre coordinate vectors defining the model domain.
    x0, y0 : arrays
        horizontal stem coordinates for each tree.
    H, D, R : arrays
        per-tree height, crown depth and crown radius.

    Returns
    -------
    canopy : 3-D array (y, x, z)
        voxels flagged 1 where occupied by at least one crown.
    """
    n_trees = x0.size
    dx = x[1] - x[0]  # voxel spacing (retained from original; unused below)
    dy = y[1] - y[0]
    canopy = np.zeros((y.size, x.size, z.size), dtype='float')
    xm, ym, zm = np.meshgrid(x, y, z)
    # Accumulate each crown into the voxel grid, reporting progress
    # every 50 trees.
    for tree in range(0, n_trees):
        if tree % 50 == 0:
            print('\tprocessing tree %i from %i' % (tree, n_trees))
        generate_3D_ellipsoid_crown(canopy, xm, ym, zm,
                                    x0[tree], y0[tree],
                                    H[tree], D[tree], R[tree])
    # collapse overlap counts down to a simple occupancy flag
    canopy[canopy > 1] = 1
    return canopy
#===============================================================================
# MONTE CARLO ROUTINES FOR CROWN CONSTRUCTION
# First version samples from the error distribution simulated from the
# allometric relationships underpinning the modelled crown geometry (ellipsoid)
def calculate_crown_volume_profiles_mc(x,y,z,x0,y0,Ht,DBH,Area,
                                       a_ht,b_ht,a_A,b_A,a_D,b_D,
                                       field_data,BAAD_data,n_iter=10):
    """
    Monte Carlo estimate of vertical crown-volume profiles.

    On each of `n_iter` iterations, gaps (NaNs) in the measured heights and
    crown areas are sampled from the prediction interval of the power-law
    allometries fitted to `field_data`, crown depth is sampled from the
    BAAD height-depth allometry, an ellipsoid canopy is rasterised with
    generate_3D_ellipsoid_canopy, and the voxel counts are collapsed to one
    vertical profile per iteration.

    NOTE(review): `Ht` and `Area` are filled in place, so the NaN gaps are
    resampled only on the first iteration; later iterations reuse those
    fills -- confirm this is intended.  The /10.**4 divisor presumably
    normalises by a 1 ha (10^4 m^2) plot area -- verify against callers.
    """
    profiles = np.zeros((n_iter,z.size))
    for ii in range(0,n_iter):
        print('iteration %i out of %i' % (ii,n_iter))
        # now get field inventory estimate
        # Note that we only deal with the 1ha plot level estimates as errors relating stem based
        # vs. area based are problematic at subplot level
        Ht[np.isnan(Ht)] = random_sample_from_powerlaw_prediction_interval(DBH[np.isnan(Ht)],
                                            field_data['DBH_field'],field_data['Height_field'],
                                            a_ht,b_ht,array=True)
        Area[np.isnan(Area)] = random_sample_from_powerlaw_prediction_interval(DBH[np.isnan(Area)],
                                            field_data['DBH_field'],field_data['CrownArea'],
                                            a_A,b_A,array=True)
        # crown depth is always sampled from the BAAD height-depth allometry
        Depth = random_sample_from_powerlaw_prediction_interval(Ht,BAAD_data['Ht'],BAAD_data['D'],
                                            a_D,b_D,array=True)
        Rmax = np.sqrt(Area/np.pi)
        crown_model = generate_3D_ellipsoid_canopy(x,y,z,x0,y0,Ht,Depth,Rmax)
        # collapse the 3-D voxel model to a vertical profile
        profiles[ii,:] = np.sum(np.sum(crown_model,axis=1),axis=0)/10.**4
    return profiles
# second version adds measurement error. Errors are two part lists or arrays
# indicating bias and random error (expressed as an estimated fraction)
def calculate_crown_volume_profiles_mc_with_measurement_error(x,y,z,x0,y0,Ht_,DBH_,Area_,
                                                              a_ht,b_ht,a_A,b_A,a_D,b_D,
                                                              error,field_data,BAAD_data,n_iter=10):
    """
    As calculate_crown_volume_profiles_mc, but additionally perturbs the
    field measurements with simulated measurement error on every iteration
    before sampling the allometries.

    `error` maps 'Ht', 'DBH' and 'Area' to two-element sequences
    (bias, random standard deviation), both expressed as fractions of the
    measured value.

    Bugfixes relative to the original version:
    - error vectors are sized from the inputs Ht_/DBH_/Area_ (Ht/DBH/Area
      were previously referenced before assignment -> NameError);
    - err_Ht is applied (was the undefined name err_Ht_);
    - the perturbed crown areas are stored in Area (they previously
      overwrote DBH, leaving Area undefined);
    - the depth allometry uses a_D, b_D (were the undefined names a, b);
    - generate_3D_ellipsoid_canopy is called directly (the 'field.' module
      prefix does not exist here).
    """
    profiles = np.zeros((n_iter,z.size))
    for ii in range(0,n_iter):
        # combine random and systematic errors to the observations as fractions
        err_Ht = (np.random.normal(scale = error['Ht'][1],size = Ht_.size)+error['Ht'][0])
        err_DBH = (np.random.normal(scale = error['DBH'][1],size = DBH_.size)+error['DBH'][0])
        err_Area = (np.random.normal(scale = error['Area'][1],size = Area_.size)+error['Area'][0])
        # apply errors to fresh copies so the originals survive between iterations
        DBH = DBH_*(1+err_DBH)
        Ht = Ht_*(1+err_Ht)
        Area = Area_*(1+err_Area)
        # Randomly sample allometrics from simulated error distribution
        Ht[np.isnan(Ht)] = random_sample_from_powerlaw_prediction_interval(DBH[np.isnan(Ht)],
                                            field_data['DBH_field'],field_data['Height_field'],
                                            a_ht,b_ht,array=True)
        Area[np.isnan(Area)] = random_sample_from_powerlaw_prediction_interval(DBH[np.isnan(Area)],
                                            field_data['DBH_field'],field_data['CrownArea'],
                                            a_A,b_A,array=True)
        Depth = random_sample_from_powerlaw_prediction_interval(Ht,BAAD_data['Ht'],BAAD_data['D'],
                                            a_D,b_D,array=True)
        Rmax = np.sqrt(Area/np.pi)
        crown_model = generate_3D_ellipsoid_canopy(x,y,z,x0,y0,Ht,Depth,Rmax)
        # collapse the 3-D voxel model to a vertical profile
        profiles[ii,:] = np.sum(np.sum(crown_model,axis=1),axis=0)/10.**4
    return profiles
| gpl-3.0 |
gfyoung/numpy | numpy/lib/npyio.py | 2 | 84419 | from __future__ import division, absolute_import, print_function
import sys
import os
import re
import itertools
import warnings
import weakref
from operator import itemgetter, index as opindex
import numpy as np
from . import format
from ._datasource import DataSource
from numpy.core.multiarray import packbits, unpackbits
from numpy.core.overrides import array_function_dispatch
from numpy.core._internal import recursive
from ._iotools import (
LineSplitter, NameValidator, StringConverter, ConverterError,
ConverterLockError, ConversionWarning, _is_string_like,
has_nested_fields, flatten_dtype, easy_dtype, _decode_line
)
from numpy.compat import (
asbytes, asstr, asunicode, asbytes_nested, bytes, basestring, unicode,
os_fspath, os_PathLike
)
from numpy.core.numeric import pickle
if sys.version_info[0] >= 3:
from collections.abc import Mapping
else:
from future_builtins import map
from collections import Mapping
def loads(*args, **kwargs):
    """
    Deprecated alias for `pickle.loads`.

    Deprecated in NumPy 1.15.0 (2017-12-10); emits a DeprecationWarning
    and forwards all arguments unchanged.
    """
    message = "np.loads is deprecated, use pickle.loads instead"
    warnings.warn(message, DeprecationWarning, stacklevel=2)
    return pickle.loads(*args, **kwargs)
# Names exported via ``from numpy.lib.npyio import *``.
__all__ = [
    'savetxt', 'loadtxt', 'genfromtxt', 'ndfromtxt', 'mafromtxt',
    'recfromtxt', 'recfromcsv', 'load', 'loads', 'save', 'savez',
    'savez_compressed', 'packbits', 'unpackbits', 'fromregex', 'DataSource'
    ]
class BagObj(object):
    """
    BagObj(obj)

    Convert attribute look-ups to getitems on the object passed in.

    Any attribute access ``bag.key`` is translated into ``obj['key']``;
    a missing key surfaces as an ``AttributeError`` rather than a
    ``KeyError``, so the wrapper behaves like a normal object for
    introspection and tab-completion.

    Parameters
    ----------
    obj : class instance
        Object on which attribute look-up is performed.
    """

    def __init__(self, obj):
        # Hold only a weak proxy so that an NpzFile wrapped in a BagObj
        # can still be collected by reference counting.
        self._obj = weakref.proxy(obj)

    def __getattribute__(self, key):
        # Bypass this class's own attribute machinery to reach the wrapped
        # object, then forward the attribute name as an item lookup.
        wrapped = object.__getattribute__(self, '_obj')
        try:
            return wrapped[key]
        except KeyError:
            raise AttributeError(key)

    def __dir__(self):
        """
        Enables dir(bagobj) to list the files in an NpzFile.

        This also enables tab-completion in an interpreter or IPython.
        """
        wrapped = object.__getattribute__(self, '_obj')
        return list(wrapped.keys())
def zipfile_factory(file, *args, **kwargs):
    """
    Create a ZipFile.

    The `file` argument may be an open file-like object, a str, or a
    pathlib.Path; path-like inputs are converted with `os_fspath`.  Zip64
    extensions are always enabled, and any extra `args`/`kwargs` are
    forwarded to the `zipfile.ZipFile` constructor.
    """
    # zipfile depends on gzip, an optional component of the so-called
    # standard library, so the import is kept local.
    import zipfile

    if not hasattr(file, 'read'):
        # Not an open file-like object: treat it as a path.
        file = os_fspath(file)
    kwargs['allowZip64'] = True  # always permit archives larger than 4 GB
    return zipfile.ZipFile(file, *args, **kwargs)
class NpzFile(Mapping):
    """
    NpzFile(fid)

    A dictionary-like object with lazy-loading of files in the zipped
    archive provided on construction.

    `NpzFile` is used to load files in the NumPy ``.npz`` data archive
    format. It assumes that files in the archive have a ``.npy`` extension,
    other files are ignored.

    The arrays and file strings are lazily loaded on either
    getitem access using ``obj['key']`` or attribute lookup using
    ``obj.f.key``. A list of all files (without ``.npy`` extensions) can
    be obtained with ``obj.files`` and the ZipFile object itself using
    ``obj.zip``.

    Attributes
    ----------
    files : list of str
        List of all files in the archive with a ``.npy`` extension.
    zip : ZipFile instance
        The ZipFile object initialized with the zipped archive.
    f : BagObj instance
        An object on which attribute can be performed as an alternative
        to getitem access on the `NpzFile` instance itself.
    allow_pickle : bool, optional
        Allow loading pickled data. Default: True
    pickle_kwargs : dict, optional
        Additional keyword arguments to pass on to pickle.load.
        These are only useful when loading object arrays saved on
        Python 2 when using Python 3.

    Parameters
    ----------
    fid : file or str
        The zipped archive to open. This is either a file-like object
        or a string containing the path to the archive.
    own_fid : bool, optional
        Whether NpzFile should close the file handle.
        Requires that `fid` is a file-like object.

    Examples
    --------
    >>> from tempfile import TemporaryFile
    >>> outfile = TemporaryFile()
    >>> x = np.arange(10)
    >>> y = np.sin(x)
    >>> np.savez(outfile, x=x, y=y)
    >>> outfile.seek(0)

    >>> npz = np.load(outfile)
    >>> isinstance(npz, np.lib.io.NpzFile)
    True
    >>> npz.files
    ['y', 'x']
    >>> npz['x']  # getitem access
    array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
    >>> npz.f.x  # attribute lookup
    array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])

    """

    def __init__(self, fid, own_fid=False, allow_pickle=True,
                 pickle_kwargs=None):
        # Import is postponed to here since zipfile depends on gzip, an
        # optional component of the so-called standard library.
        _zip = zipfile_factory(fid)
        self._files = _zip.namelist()
        self.files = []
        self.allow_pickle = allow_pickle
        self.pickle_kwargs = pickle_kwargs
        # Expose archive members under their names without the '.npy'
        # suffix; _files keeps the raw member names for lookup.
        for x in self._files:
            if x.endswith('.npy'):
                self.files.append(x[:-4])
            else:
                self.files.append(x)
        self.zip = _zip
        self.f = BagObj(self)
        # Only take ownership of the handle when asked to, so callers can
        # keep managing their own file objects.
        if own_fid:
            self.fid = fid
        else:
            self.fid = None

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_value, traceback):
        self.close()

    def close(self):
        """
        Close the file.

        """
        if self.zip is not None:
            self.zip.close()
            self.zip = None
        if self.fid is not None:
            self.fid.close()
            self.fid = None
        self.f = None  # break reference cycle

    def __del__(self):
        self.close()

    # Implement the Mapping ABC
    def __iter__(self):
        return iter(self.files)

    def __len__(self):
        return len(self.files)

    def __getitem__(self, key):
        # FIXME: This seems like it will copy strings around
        # more than is strictly necessary. The zipfile
        # will read the string and then
        # the format.read_array will copy the string
        # to another place in memory.
        # It would be better if the zipfile could read
        # (or at least uncompress) the data
        # directly into the array memory.
        member = False
        if key in self._files:
            member = True
        elif key in self.files:
            member = True
            key += '.npy'
        if member:
            # Sniff the magic prefix to distinguish a serialized array
            # from a plain (non-.npy) archive member.
            bytes = self.zip.open(key)
            magic = bytes.read(len(format.MAGIC_PREFIX))
            bytes.close()
            if magic == format.MAGIC_PREFIX:
                bytes = self.zip.open(key)
                return format.read_array(bytes,
                                         allow_pickle=self.allow_pickle,
                                         pickle_kwargs=self.pickle_kwargs)
            else:
                return self.zip.read(key)
        else:
            raise KeyError("%s is not a file in the archive" % key)

    if sys.version_info.major == 3:
        # deprecate the python 2 dict apis that we supported by accident in
        # python 3. We forgot to implement itervalues() at all in earlier
        # versions of numpy, so no need to deprecated it here.

        def iteritems(self):
            # Numpy 1.15, 2018-02-20
            warnings.warn(
                "NpzFile.iteritems is deprecated in python 3, to match the "
                "removal of dict.itertems. Use .items() instead.",
                DeprecationWarning, stacklevel=2)
            return self.items()

        def iterkeys(self):
            # Numpy 1.15, 2018-02-20
            warnings.warn(
                "NpzFile.iterkeys is deprecated in python 3, to match the "
                "removal of dict.iterkeys. Use .keys() instead.",
                DeprecationWarning, stacklevel=2)
            return self.keys()
def load(file, mmap_mode=None, allow_pickle=True, fix_imports=True,
         encoding='ASCII'):
    """
    Load arrays or pickled objects from ``.npy``, ``.npz`` or pickled files.

    Parameters
    ----------
    file : file-like object, string, or pathlib.Path
        The file to read. File-like objects must support the
        ``seek()`` and ``read()`` methods. Pickled files require that the
        file-like object support the ``readline()`` method as well.
    mmap_mode : {None, 'r+', 'r', 'w+', 'c'}, optional
        If not None, then memory-map the file, using the given mode (see
        `numpy.memmap` for a detailed description of the modes). A
        memory-mapped array is kept on disk. However, it can be accessed
        and sliced like any ndarray. Memory mapping is especially useful
        for accessing small fragments of large files without reading the
        entire file into memory.
    allow_pickle : bool, optional
        Allow loading pickled object arrays stored in npy files. Reasons for
        disallowing pickles include security, as loading pickled data can
        execute arbitrary code. If pickles are disallowed, loading object
        arrays will fail.
        Default: True
    fix_imports : bool, optional
        Only useful when loading Python 2 generated pickled files on Python 3,
        which includes npy/npz files containing object arrays. If `fix_imports`
        is True, pickle will try to map the old Python 2 names to the new names
        used in Python 3.
    encoding : str, optional
        What encoding to use when reading Python 2 strings. Only useful when
        loading Python 2 generated pickled files in Python 3, which includes
        npy/npz files containing object arrays. Values other than 'latin1',
        'ASCII', and 'bytes' are not allowed, as they can corrupt numerical
        data. Default: 'ASCII'

    Returns
    -------
    result : array, tuple, dict, etc.
        Data stored in the file. For ``.npz`` files, the returned instance
        of NpzFile class must be closed to avoid leaking file descriptors.

    Raises
    ------
    IOError
        If the input file does not exist or cannot be read.
    ValueError
        The file contains an object array, but allow_pickle=False given.

    See Also
    --------
    save, savez, savez_compressed, loadtxt
    memmap : Create a memory-map to an array stored in a file on disk.
    lib.format.open_memmap : Create or load a memory-mapped ``.npy`` file.

    Notes
    -----
    - If the file contains pickle data, then whatever object is stored
      in the pickle is returned.
    - If the file is a ``.npy`` file, then a single array is returned.
    - If the file is a ``.npz`` file, then a dictionary-like object is
      returned, containing ``{filename: array}`` key-value pairs, one for
      each file in the archive.
    - If the file is a ``.npz`` file, the returned value supports the
      context manager protocol in a similar fashion to the open function::

        with load('foo.npz') as data:
            a = data['a']

      The underlying file descriptor is closed when exiting the 'with'
      block.

    Examples
    --------
    Store data to disk, and load it again:

    >>> np.save('/tmp/123', np.array([[1, 2, 3], [4, 5, 6]]))
    >>> np.load('/tmp/123.npy')
    array([[1, 2, 3],
           [4, 5, 6]])

    Store compressed data to disk, and load it again:

    >>> a=np.array([[1, 2, 3], [4, 5, 6]])
    >>> b=np.array([1, 2])
    >>> np.savez('/tmp/123.npz', a=a, b=b)
    >>> data = np.load('/tmp/123.npz')
    >>> data['a']
    array([[1, 2, 3],
           [4, 5, 6]])
    >>> data['b']
    array([1, 2])
    >>> data.close()

    Mem-map the stored array, and then access the second row
    directly from disk:

    >>> X = np.load('/tmp/123.npy', mmap_mode='r')
    >>> X[1, :]
    memmap([4, 5, 6])

    """
    if encoding not in ('ASCII', 'latin1', 'bytes'):
        # The 'encoding' value for pickle also affects what encoding
        # the serialized binary data of NumPy arrays is loaded
        # in. Pickle does not pass on the encoding information to
        # NumPy. The unpickling code in numpy.core.multiarray is
        # written to assume that unicode data appearing where binary
        # should be is in 'latin1'. 'bytes' is also safe, as is 'ASCII'.
        #
        # Other encoding values can corrupt binary data, and we
        # purposefully disallow them. For the same reason, the errors=
        # argument is not exposed, as values other than 'strict'
        # result can similarly silently corrupt numerical data.
        raise ValueError("encoding must be 'ASCII', 'latin1', or 'bytes'")

    if sys.version_info[0] >= 3:
        pickle_kwargs = dict(encoding=encoding, fix_imports=fix_imports)
    else:
        # Nothing to do on Python 2
        pickle_kwargs = {}

    # TODO: Use contextlib.ExitStack once we drop Python 2
    if hasattr(file, 'read'):
        fid = file
        own_fid = False
    else:
        fid = open(os_fspath(file), "rb")
        own_fid = True

    try:
        # Code to distinguish from NumPy binary files and pickles.
        _ZIP_PREFIX = b'PK\x03\x04'
        _ZIP_SUFFIX = b'PK\x05\x06'  # empty zip files start with this
        N = len(format.MAGIC_PREFIX)
        magic = fid.read(N)
        # If the file size is less than N, we need to make sure not
        # to seek past the beginning of the file
        fid.seek(-min(N, len(magic)), 1)  # back-up
        if magic.startswith(_ZIP_PREFIX) or magic.startswith(_ZIP_SUFFIX):
            # zip-file (assume .npz)
            # Transfer file ownership to NpzFile
            ret = NpzFile(fid, own_fid=own_fid, allow_pickle=allow_pickle,
                          pickle_kwargs=pickle_kwargs)
            # NpzFile is now responsible for closing fid, so the finally
            # clause below must not close it again.
            own_fid = False
            return ret
        elif magic == format.MAGIC_PREFIX:
            # .npy file
            if mmap_mode:
                return format.open_memmap(file, mode=mmap_mode)
            else:
                return format.read_array(fid, allow_pickle=allow_pickle,
                                         pickle_kwargs=pickle_kwargs)
        else:
            # Try a pickle
            if not allow_pickle:
                raise ValueError("allow_pickle=False, but file does not contain "
                                 "non-pickled data")
            try:
                return pickle.load(fid, **pickle_kwargs)
            except Exception:
                raise IOError(
                    "Failed to interpret file %s as a pickle" % repr(file))
    finally:
        # Only close handles this function opened itself.
        if own_fid:
            fid.close()
def _save_dispatcher(file, arr, allow_pickle=None, fix_imports=None):
    # Only `arr` takes part in __array_function__ dispatch for np.save.
    return (arr,)
@array_function_dispatch(_save_dispatcher)
def save(file, arr, allow_pickle=True, fix_imports=True):
    """
    Save an array to a binary file in NumPy ``.npy`` format.

    Parameters
    ----------
    file : file, str, or pathlib.Path
        File or filename to which the data is saved. If file is a file-object,
        then the filename is unchanged. If file is a string or Path, a ``.npy``
        extension will be appended to the file name if it does not already
        have one.
    arr : array_like
        Array data to be saved.
    allow_pickle : bool, optional
        Allow saving object arrays using Python pickles. Reasons for disallowing
        pickles include security (loading pickled data can execute arbitrary
        code) and portability (pickled objects may not be loadable on different
        Python installations, for example if the stored objects require libraries
        that are not available, and not all pickled data is compatible between
        Python 2 and Python 3).
        Default: True
    fix_imports : bool, optional
        Only useful in forcing objects in object arrays on Python 3 to be
        pickled in a Python 2 compatible way. If `fix_imports` is True, pickle
        will try to map the new Python 3 names to the old module names used in
        Python 2, so that the pickle data stream is readable with Python 2.

    See Also
    --------
    savez : Save several arrays into a ``.npz`` archive
    savetxt, load

    Notes
    -----
    For a description of the ``.npy`` format, see :py:mod:`numpy.lib.format`.

    Examples
    --------
    >>> from tempfile import TemporaryFile
    >>> outfile = TemporaryFile()

    >>> x = np.arange(10)
    >>> np.save(outfile, x)

    >>> outfile.seek(0) # Only needed here to simulate closing & reopening file
    >>> np.load(outfile)
    array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])

    """
    own_fid = False
    if hasattr(file, 'read'):
        fid = file
    else:
        file = os_fspath(file)
        # Append the .npy extension so the file round-trips through np.load.
        if not file.endswith('.npy'):
            file = file + '.npy'
        fid = open(file, "wb")
        own_fid = True

    if sys.version_info[0] >= 3:
        pickle_kwargs = dict(fix_imports=fix_imports)
    else:
        # Nothing to do on Python 2
        pickle_kwargs = None

    try:
        arr = np.asanyarray(arr)
        format.write_array(fid, arr, allow_pickle=allow_pickle,
                           pickle_kwargs=pickle_kwargs)
    finally:
        # Only close handles this function opened itself.
        if own_fid:
            fid.close()
def _savez_dispatcher(file, *args, **kwds):
    # Every positional and keyword array participates in
    # __array_function__ dispatch for np.savez.
    for a in args:
        yield a
    for v in kwds.values():
        yield v
@array_function_dispatch(_savez_dispatcher)
def savez(file, *args, **kwds):
    """
    Save several arrays into a single file in uncompressed ``.npz`` format.

    If arguments are passed in with no keywords, the corresponding variable
    names, in the ``.npz`` file, are 'arr_0', 'arr_1', etc. If keyword
    arguments are given, the corresponding variable names, in the ``.npz``
    file will match the keyword names.

    Parameters
    ----------
    file : str or file
        Either the file name (string) or an open file (file-like object)
        where the data will be saved. If file is a string or a Path, the
        ``.npz`` extension will be appended to the file name if it is not
        already there.
    args : Arguments, optional
        Arrays to save to the file. Since it is not possible for Python to
        know the names of the arrays outside `savez`, the arrays will be saved
        with names "arr_0", "arr_1", and so on. These arguments can be any
        expression.
    kwds : Keyword arguments, optional
        Arrays to save to the file. Arrays will be saved in the file with the
        keyword names.

    Returns
    -------
    None

    See Also
    --------
    save : Save a single array to a binary file in NumPy format.
    savetxt : Save an array to a file as plain text.
    savez_compressed : Save several arrays into a compressed ``.npz`` archive

    Notes
    -----
    The ``.npz`` file format is a zipped archive of files named after the
    variables they contain. The archive is not compressed and each file
    in the archive contains one variable in ``.npy`` format. For a
    description of the ``.npy`` format, see :py:mod:`numpy.lib.format`.

    When opening the saved ``.npz`` file with `load` a `NpzFile` object is
    returned. This is a dictionary-like object which can be queried for
    its list of arrays (with the ``.files`` attribute), and for the arrays
    themselves.

    Examples
    --------
    >>> from tempfile import TemporaryFile
    >>> outfile = TemporaryFile()
    >>> x = np.arange(10)
    >>> y = np.sin(x)

    Using `savez` with \\*args, the arrays are saved with default names.

    >>> np.savez(outfile, x, y)
    >>> outfile.seek(0) # Only needed here to simulate closing & reopening file
    >>> npzfile = np.load(outfile)
    >>> npzfile.files
    ['arr_1', 'arr_0']
    >>> npzfile['arr_0']
    array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])

    Using `savez` with \\**kwds, the arrays are saved with the keyword names.

    >>> outfile = TemporaryFile()
    >>> np.savez(outfile, x=x, y=y)
    >>> outfile.seek(0)
    >>> npzfile = np.load(outfile)
    >>> npzfile.files
    ['y', 'x']
    >>> npzfile['x']
    array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])

    """
    # Delegate to the shared implementation with compression disabled.
    _savez(file, args, kwds, False)
def _savez_compressed_dispatcher(file, *args, **kwds):
    # Every positional and keyword array participates in
    # __array_function__ dispatch for np.savez_compressed.
    for a in args:
        yield a
    for v in kwds.values():
        yield v
@array_function_dispatch(_savez_compressed_dispatcher)
def savez_compressed(file, *args, **kwds):
    """
    Save several arrays into a single file in compressed ``.npz`` format.

    If keyword arguments are given, then filenames are taken from the keywords.
    If arguments are passed in with no keywords, then stored file names are
    arr_0, arr_1, etc.

    Parameters
    ----------
    file : str or file
        Either the file name (string) or an open file (file-like object)
        where the data will be saved. If file is a string or a Path, the
        ``.npz`` extension will be appended to the file name if it is not
        already there.
    args : Arguments, optional
        Arrays to save to the file. Since it is not possible for Python to
        know the names of the arrays outside `savez`, the arrays will be saved
        with names "arr_0", "arr_1", and so on. These arguments can be any
        expression.
    kwds : Keyword arguments, optional
        Arrays to save to the file. Arrays will be saved in the file with the
        keyword names.

    Returns
    -------
    None

    See Also
    --------
    numpy.save : Save a single array to a binary file in NumPy format.
    numpy.savetxt : Save an array to a file as plain text.
    numpy.savez : Save several arrays into an uncompressed ``.npz`` file format
    numpy.load : Load the files created by savez_compressed.

    Notes
    -----
    The ``.npz`` file format is a zipped archive of files named after the
    variables they contain. The archive is compressed with
    ``zipfile.ZIP_DEFLATED`` and each file in the archive contains one variable
    in ``.npy`` format. For a description of the ``.npy`` format, see
    :py:mod:`numpy.lib.format`.

    When opening the saved ``.npz`` file with `load` a `NpzFile` object is
    returned. This is a dictionary-like object which can be queried for
    its list of arrays (with the ``.files`` attribute), and for the arrays
    themselves.

    Examples
    --------
    >>> test_array = np.random.rand(3, 2)
    >>> test_vector = np.random.rand(4)
    >>> np.savez_compressed('/tmp/123', a=test_array, b=test_vector)
    >>> loaded = np.load('/tmp/123.npz')
    >>> print(np.array_equal(test_array, loaded['a']))
    True
    >>> print(np.array_equal(test_vector, loaded['b']))
    True

    """
    # Delegate to the shared implementation with compression enabled.
    _savez(file, args, kwds, True)
def _savez(file, args, kwds, compress, allow_pickle=True, pickle_kwargs=None):
    """
    Shared implementation of `savez` and `savez_compressed`: write each
    array in `args` (named arr_0, arr_1, ...) and `kwds` into a zip archive
    as a ``.npy`` member, optionally DEFLATE-compressed.
    """
    # Import is postponed to here since zipfile depends on gzip, an optional
    # component of the so-called standard library.
    import zipfile

    if not hasattr(file, 'read'):
        file = os_fspath(file)
        if not file.endswith('.npz'):
            file = file + '.npz'

    namedict = kwds
    # Positional arrays are assigned sequential arr_<i> names; a clash with
    # an explicit keyword of the same name is an error.
    for i, val in enumerate(args):
        key = 'arr_%d' % i
        if key in namedict.keys():
            raise ValueError(
                "Cannot use un-named variables and keyword %s" % key)
        namedict[key] = val

    if compress:
        compression = zipfile.ZIP_DEFLATED
    else:
        compression = zipfile.ZIP_STORED

    zipf = zipfile_factory(file, mode="w", compression=compression)

    if sys.version_info >= (3, 6):
        # Since Python 3.6 it is possible to write directly to a ZIP file.
        for key, val in namedict.items():
            fname = key + '.npy'
            val = np.asanyarray(val)
            # request Zip64 headers for members of 1 GiB and above
            force_zip64 = val.nbytes >= 2**30
            with zipf.open(fname, 'w', force_zip64=force_zip64) as fid:
                format.write_array(fid, val,
                                   allow_pickle=allow_pickle,
                                   pickle_kwargs=pickle_kwargs)
    else:
        # Stage arrays in a temporary file on disk, before writing to zip.
        # Import deferred for startup time improvement
        import tempfile
        # Since target file might be big enough to exceed capacity of a global
        # temporary directory, create temp file side-by-side with the target file.
        file_dir, file_prefix = os.path.split(file) if _is_string_like(file) else (None, 'tmp')
        fd, tmpfile = tempfile.mkstemp(prefix=file_prefix, dir=file_dir, suffix='-numpy.npy')
        os.close(fd)
        try:
            for key, val in namedict.items():
                fname = key + '.npy'
                fid = open(tmpfile, 'wb')
                try:
                    format.write_array(fid, np.asanyarray(val),
                                       allow_pickle=allow_pickle,
                                       pickle_kwargs=pickle_kwargs)
                    fid.close()
                    fid = None
                    zipf.write(tmpfile, arcname=fname)
                except IOError as exc:
                    raise IOError("Failed to write to %s: %s" % (tmpfile, exc))
                finally:
                    # fid is None once the write+close succeeded; otherwise
                    # make sure the staging handle is released.
                    if fid:
                        fid.close()
        finally:
            os.remove(tmpfile)

    zipf.close()
def _getconv(dtype):
""" Find the correct dtype converter. Adapted from matplotlib """
def floatconv(x):
x.lower()
if '0x' in x:
return float.fromhex(x)
return float(x)
typ = dtype.type
if issubclass(typ, np.bool_):
return lambda x: bool(int(x))
if issubclass(typ, np.uint64):
return np.uint64
if issubclass(typ, np.int64):
return np.int64
if issubclass(typ, np.integer):
return lambda x: int(float(x))
elif issubclass(typ, np.longdouble):
return np.longdouble
elif issubclass(typ, np.floating):
return floatconv
elif issubclass(typ, complex):
return lambda x: complex(asstr(x).replace('+-', '-'))
elif issubclass(typ, np.bytes_):
return asbytes
elif issubclass(typ, np.unicode_):
return asunicode
else:
return asstr
# Number of lines `loadtxt` parses per chunk before flushing them into the
# output array; module-level so tests can override it to exercise the
# chunked-resize code path with small inputs.
_loadtxt_chunksize = 50000
def loadtxt(fname, dtype=float, comments='#', delimiter=None,
            converters=None, skiprows=0, usecols=None, unpack=False,
            ndmin=0, encoding='bytes', max_rows=None):
    """
    Load data from a text file.
    Each row in the text file must have the same number of values.
    Parameters
    ----------
    fname : file, str, or pathlib.Path
        File, filename, or generator to read.  If the filename extension is
        ``.gz`` or ``.bz2``, the file is first decompressed. Note that
        generators should return byte strings for Python 3k.
    dtype : data-type, optional
        Data-type of the resulting array; default: float.  If this is a
        structured data-type, the resulting array will be 1-dimensional, and
        each row will be interpreted as an element of the array.  In this
        case, the number of columns used must match the number of fields in
        the data-type.
    comments : str or sequence of str, optional
        The characters or list of characters used to indicate the start of a
        comment. None implies no comments. For backwards compatibility, byte
        strings will be decoded as 'latin1'. The default is '#'.
    delimiter : str, optional
        The string used to separate values. For backwards compatibility, byte
        strings will be decoded as 'latin1'. The default is whitespace.
    converters : dict, optional
        A dictionary mapping column number to a function that will parse the
        column string into the desired value.  E.g., if column 0 is a date
        string: ``converters = {0: datestr2num}``.  Converters can also be
        used to provide a default value for missing data (but see also
        `genfromtxt`): ``converters = {3: lambda s: float(s.strip() or 0)}``.
        Default: None.
    skiprows : int, optional
        Skip the first `skiprows` lines; default: 0.
    usecols : int or sequence, optional
        Which columns to read, with 0 being the first. For example,
        ``usecols = (1,4,5)`` will extract the 2nd, 5th and 6th columns.
        The default, None, results in all columns being read.
        .. versionchanged:: 1.11.0
            When a single column has to be read it is possible to use
            an integer instead of a tuple. E.g ``usecols = 3`` reads the
            fourth column the same way as ``usecols = (3,)`` would.
    unpack : bool, optional
        If True, the returned array is transposed, so that arguments may be
        unpacked using ``x, y, z = loadtxt(...)``.  When used with a structured
        data-type, arrays are returned for each field.  Default is False.
    ndmin : int, optional
        The returned array will have at least `ndmin` dimensions.
        Otherwise mono-dimensional axes will be squeezed.
        Legal values: 0 (default), 1 or 2.
        .. versionadded:: 1.6.0
    encoding : str, optional
        Encoding used to decode the inputfile. Does not apply to input streams.
        The special value 'bytes' enables backward compatibility workarounds
        that ensures you receive byte arrays as results if possible and passes
        'latin1' encoded strings to converters. Override this value to receive
        unicode arrays and pass strings as input to converters.  If set to None
        the system default is used. The default value is 'bytes'.
        .. versionadded:: 1.14.0
    max_rows : int, optional
        Read `max_rows` lines of content after `skiprows` lines. The default
        is to read all the lines.
        .. versionadded:: 1.16.0
    Returns
    -------
    out : ndarray
        Data read from the text file.
    See Also
    --------
    load, fromstring, fromregex
    genfromtxt : Load data with missing values handled as specified.
    scipy.io.loadmat : reads MATLAB data files
    Notes
    -----
    This function aims to be a fast reader for simply formatted files.  The
    `genfromtxt` function provides more sophisticated handling of, e.g.,
    lines with missing values.
    .. versionadded:: 1.10.0
    The strings produced by the Python float.hex method can be used as
    input for floats.
    Examples
    --------
    >>> from io import StringIO   # StringIO behaves like a file object
    >>> c = StringIO(u"0 1\\n2 3")
    >>> np.loadtxt(c)
    array([[ 0.,  1.],
           [ 2.,  3.]])
    >>> d = StringIO(u"M 21 72\\nF 35 58")
    >>> np.loadtxt(d, dtype={'names': ('gender', 'age', 'weight'),
    ...                      'formats': ('S1', 'i4', 'f4')})
    array([('M', 21, 72.0), ('F', 35, 58.0)],
          dtype=[('gender', '|S1'), ('age', '<i4'), ('weight', '<f4')])
    >>> c = StringIO(u"1,0,2\\n3,0,4")
    >>> x, y = np.loadtxt(c, delimiter=',', usecols=(0, 2), unpack=True)
    >>> x
    array([ 1.,  3.])
    >>> y
    array([ 2.,  4.])
    """
    # Type conversions for Py3 convenience
    if comments is not None:
        if isinstance(comments, (basestring, bytes)):
            comments = [comments]
        comments = [_decode_line(x) for x in comments]
        # Compile regex for comments beforehand
        comments = (re.escape(comment) for comment in comments)
        regex_comments = re.compile('|'.join(comments))
    if delimiter is not None:
        delimiter = _decode_line(delimiter)
    user_converters = converters
    # 'bytes' is a backward-compat sentinel: read as latin1 and hand byte
    # strings to user converters (see `tobytes_first` below).
    if encoding == 'bytes':
        encoding = None
        byte_converters = True
    else:
        byte_converters = False
    if usecols is not None:
        # Allow usecols to be a single int or a sequence of ints
        try:
            usecols_as_list = list(usecols)
        except TypeError:
            usecols_as_list = [usecols]
        for col_idx in usecols_as_list:
            try:
                opindex(col_idx)
            except TypeError as e:
                e.args = (
                    "usecols must be an int or a sequence of ints but "
                    "it contains at least one element of type %s" %
                    type(col_idx),
                    )
                raise
        # Fall back to existing code
        usecols = usecols_as_list
    # `fown` records whether we opened the handle (and thus must close it).
    fown = False
    try:
        if isinstance(fname, os_PathLike):
            fname = os_fspath(fname)
        if _is_string_like(fname):
            fh = np.lib._datasource.open(fname, 'rt', encoding=encoding)
            fencoding = getattr(fh, 'encoding', 'latin1')
            fh = iter(fh)
            fown = True
        else:
            fh = iter(fname)
            fencoding = getattr(fname, 'encoding', 'latin1')
    except TypeError:
        raise ValueError('fname must be a string, file handle, or generator')
    # input may be a python2 io stream
    if encoding is not None:
        fencoding = encoding
    # we must assume local encoding
    # TODO emit portability warning?
    elif fencoding is None:
        import locale
        fencoding = locale.getpreferredencoding()
    # not to be confused with the flatten_dtype we import...
    # NOTE: `recursive` passes the decorated function to itself as `self`,
    # enabling recursion on nested dtypes without a module-level name.
    @recursive
    def flatten_dtype_internal(self, dt):
        """Unpack a structured data-type, and produce re-packing info."""
        if dt.names is None:
            # If the dtype is flattened, return.
            # If the dtype has a shape, the dtype occurs
            # in the list more than once.
            shape = dt.shape
            if len(shape) == 0:
                return ([dt.base], None)
            else:
                packing = [(shape[-1], list)]
                if len(shape) > 1:
                    for dim in dt.shape[-2::-1]:
                        packing = [(dim*packing[0][0], packing*dim)]
                return ([dt.base] * int(np.prod(dt.shape)), packing)
        else:
            types = []
            packing = []
            for field in dt.names:
                tp, bytes = dt.fields[field]
                flat_dt, flat_packing = self(tp)
                types.extend(flat_dt)
                # Avoid extra nesting for subarrays
                if tp.ndim > 0:
                    packing.extend(flat_packing)
                else:
                    packing.append((len(flat_dt), flat_packing))
            return (types, packing)
    @recursive
    def pack_items(self, items, packing):
        """Pack items into nested lists based on re-packing info."""
        if packing is None:
            return items[0]
        elif packing is tuple:
            return tuple(items)
        elif packing is list:
            return list(items)
        else:
            start = 0
            ret = []
            for length, subpacking in packing:
                ret.append(self(items[start:start+length], subpacking))
                start += length
            return tuple(ret)
    def split_line(line):
        """Chop off comments, strip, and split at delimiter. """
        line = _decode_line(line, encoding=encoding)
        if comments is not None:
            line = regex_comments.split(line, maxsplit=1)[0]
        line = line.strip('\r\n')
        if line:
            return line.split(delimiter)
        else:
            return []
    def read_data(chunk_size):
        """Parse each line, including the first.
        The file read, `fh`, is a global defined above.
        Parameters
        ----------
        chunk_size : int
            At most `chunk_size` lines are read at a time, with iteration
            until all lines are read.
        """
        X = []
        # `first_line` was consumed while probing for N; re-prepend it.
        line_iter = itertools.chain([first_line], fh)
        line_iter = itertools.islice(line_iter, max_rows)
        for i, line in enumerate(line_iter):
            vals = split_line(line)
            if len(vals) == 0:
                continue
            if usecols:
                vals = [vals[j] for j in usecols]
            if len(vals) != N:
                line_num = i + skiprows + 1
                raise ValueError("Wrong number of columns at line %d"
                                 % line_num)
            # Convert each value according to its column and store
            items = [conv(val) for (conv, val) in zip(converters, vals)]
            # Then pack it according to the dtype's nesting
            items = pack_items(items, packing)
            X.append(items)
            # NOTE: strict '>' means a yielded chunk may hold
            # chunk_size + 1 rows.
            if len(X) > chunk_size:
                yield X
                X = []
        if X:
            yield X
    try:
        # Make sure we're dealing with a proper dtype
        dtype = np.dtype(dtype)
        defconv = _getconv(dtype)
        # Skip the first `skiprows` lines
        for i in range(skiprows):
            next(fh)
        # Read until we find a line with some values, and use
        # it to estimate the number of columns, N.
        first_vals = None
        try:
            while not first_vals:
                first_line = next(fh)
                first_vals = split_line(first_line)
        except StopIteration:
            # End of lines reached
            first_line = ''
            first_vals = []
            warnings.warn('loadtxt: Empty input file: "%s"' % fname, stacklevel=2)
        N = len(usecols or first_vals)
        dtype_types, packing = flatten_dtype_internal(dtype)
        if len(dtype_types) > 1:
            # We're dealing with a structured array, each field of
            # the dtype matches a column
            converters = [_getconv(dt) for dt in dtype_types]
        else:
            # All fields have the same dtype
            converters = [defconv for i in range(N)]
            if N > 1:
                packing = [(N, tuple)]
        # By preference, use the converters specified by the user
        for i, conv in (user_converters or {}).items():
            if usecols:
                try:
                    i = usecols.index(i)
                except ValueError:
                    # Unused converter specified
                    continue
            if byte_converters:
                # converters may use decode to workaround numpy's old behaviour,
                # so encode the string again before passing to the user converter
                def tobytes_first(x, conv):
                    if type(x) is bytes:
                        return conv(x)
                    return conv(x.encode("latin1"))
                import functools
                converters[i] = functools.partial(tobytes_first, conv=conv)
            else:
                converters[i] = conv
        # A converter of `bytes` is a request to re-encode with the file
        # encoding rather than a real callable.
        converters = [conv if conv is not bytes else
                      lambda x: x.encode(fencoding) for conv in converters]
        # read data in chunks and fill it into an array via resize
        # over-allocating and shrinking the array later may be faster but is
        # probably not relevant compared to the cost of actually reading and
        # converting the data
        X = None
        for x in read_data(_loadtxt_chunksize):
            if X is None:
                X = np.array(x, dtype)
            else:
                nshape = list(X.shape)
                pos = nshape[0]
                nshape[0] += len(x)
                X.resize(nshape, refcheck=False)
                X[pos:, ...] = x
    finally:
        if fown:
            fh.close()
    if X is None:
        X = np.array([], dtype)
    # Multicolumn data are returned with shape (1, N, M), i.e.
    # (1, 1, M) for a single row - remove the singleton dimension there
    if X.ndim == 3 and X.shape[:2] == (1, 1):
        X.shape = (1, -1)
    # Verify that the array has at least dimensions `ndmin`.
    # Check correctness of the values of `ndmin`
    if ndmin not in [0, 1, 2]:
        raise ValueError('Illegal value of ndmin keyword: %s' % ndmin)
    # Tweak the size and shape of the arrays - remove extraneous dimensions
    if X.ndim > ndmin:
        X = np.squeeze(X)
    # and ensure we have the minimum number of dimensions asked for
    # - has to be in this order for the odd case ndmin=1, X.squeeze().ndim=0
    if X.ndim < ndmin:
        if ndmin == 1:
            X = np.atleast_1d(X)
        elif ndmin == 2:
            X = np.atleast_2d(X).T
    if unpack:
        if len(dtype_types) > 1:
            # For structured arrays, return an array for each field.
            return [X[field] for field in dtype.names]
        else:
            return X.T
    else:
        return X
def _savetxt_dispatcher(fname, X, fmt=None, delimiter=None, newline=None,
header=None, footer=None, comments=None,
encoding=None):
return (X,)
@array_function_dispatch(_savetxt_dispatcher)
def savetxt(fname, X, fmt='%.18e', delimiter=' ', newline='\n', header='',
            footer='', comments='# ', encoding=None):
    """
    Save an array to a text file.
    Parameters
    ----------
    fname : filename or file handle
        If the filename ends in ``.gz``, the file is automatically saved in
        compressed gzip format.  `loadtxt` understands gzipped files
        transparently.
    X : 1D or 2D array_like
        Data to be saved to a text file.
    fmt : str or sequence of strs, optional
        A single format (%10.5f), a sequence of formats, or a
        multi-format string, e.g. 'Iteration %d -- %10.5f', in which
        case `delimiter` is ignored. For complex `X`, the legal options
        for `fmt` are:
        * a single specifier, `fmt='%.4e'`, resulting in numbers formatted
          like `' (%s+%sj)' % (fmt, fmt)`
        * a full string specifying every real and imaginary part, e.g.
          `' %.4e %+.4ej %.4e %+.4ej %.4e %+.4ej'` for 3 columns
        * a list of specifiers, one per column - in this case, the real
          and imaginary part must have separate specifiers,
          e.g. `['%.3e + %.3ej', '(%.15e%+.15ej)']` for 2 columns
    delimiter : str, optional
        String or character separating columns.
    newline : str, optional
        String or character separating lines.
        .. versionadded:: 1.5.0
    header : str, optional
        String that will be written at the beginning of the file.
        .. versionadded:: 1.7.0
    footer : str, optional
        String that will be written at the end of the file.
        .. versionadded:: 1.7.0
    comments : str, optional
        String that will be prepended to the ``header`` and ``footer`` strings,
        to mark them as comments. Default: '# ',  as expected by e.g.
        ``numpy.loadtxt``.
        .. versionadded:: 1.7.0
    encoding : {None, str}, optional
        Encoding used to encode the outputfile. Does not apply to output
        streams. If the encoding is something other than 'bytes' or 'latin1'
        you will not be able to load the file in NumPy versions < 1.14. Default
        is 'latin1'.
        .. versionadded:: 1.14.0
    See Also
    --------
    save : Save an array to a binary file in NumPy ``.npy`` format
    savez : Save several arrays into an uncompressed ``.npz`` archive
    savez_compressed : Save several arrays into a compressed ``.npz`` archive
    Notes
    -----
    Further explanation of the `fmt` parameter
    (``%[flag]width[.precision]specifier``):
    flags:
        ``-`` : left justify
        ``+`` : Forces to precede result with + or -.
        ``0`` : Left pad the number with zeros instead of space (see width).
    width:
        Minimum number of characters to be printed. The value is not truncated
        if it has more characters.
    precision:
        - For integer specifiers (eg. ``d,i,o,x``), the minimum number of
          digits.
        - For ``e, E`` and ``f`` specifiers, the number of digits to print
          after the decimal point.
        - For ``g`` and ``G``, the maximum number of significant digits.
        - For ``s``, the maximum number of characters.
    specifiers:
        ``c`` : character
        ``d`` or ``i`` : signed decimal integer
        ``e`` or ``E`` : scientific notation with ``e`` or ``E``.
        ``f`` : decimal floating point
        ``g,G`` : use the shorter of ``e,E`` or ``f``
        ``o`` : signed octal
        ``s`` : string of characters
        ``u`` : unsigned decimal integer
        ``x,X`` : unsigned hexadecimal integer
    This explanation of ``fmt`` is not complete, for an exhaustive
    specification see [1]_.
    References
    ----------
    .. [1] `Format Specification Mini-Language
           <https://docs.python.org/library/string.html#format-specification-mini-language>`_,
           Python Documentation.
    Examples
    --------
    >>> x = y = z = np.arange(0.0,5.0,1.0)
    >>> np.savetxt('test.out', x, delimiter=',')   # X is an array
    >>> np.savetxt('test.out', (x,y,z))   # x,y,z equal sized 1D arrays
    >>> np.savetxt('test.out', x, fmt='%1.4e')   # use exponential notation
    """
    # Py3 conversions first
    if isinstance(fmt, bytes):
        fmt = asstr(fmt)
    delimiter = asstr(delimiter)
    class WriteWrap(object):
        """Convert to unicode in py2 or to bytes on bytestream inputs.
        """
        def __init__(self, fh, encoding):
            # fh: the wrapped handle; encoding: used when falling back
            # to byte output on a binary stream.
            self.fh = fh
            self.encoding = encoding
            self.do_write = self.first_write
        def close(self):
            self.fh.close()
        def write(self, v):
            self.do_write(v)
        def write_bytes(self, v):
            if isinstance(v, bytes):
                self.fh.write(v)
            else:
                self.fh.write(v.encode(self.encoding))
        def write_normal(self, v):
            self.fh.write(asunicode(v))
        def first_write(self, v):
            # Probe on the first write whether the underlying stream
            # accepts text; rebind `write` accordingly for later calls.
            try:
                self.write_normal(v)
                self.write = self.write_normal
            except TypeError:
                # input is probably a bytestream
                self.write_bytes(v)
                self.write = self.write_bytes
    own_fh = False
    if isinstance(fname, os_PathLike):
        fname = os_fspath(fname)
    if _is_string_like(fname):
        # datasource doesn't support creating a new file ...
        open(fname, 'wt').close()
        fh = np.lib._datasource.open(fname, 'wt', encoding=encoding)
        own_fh = True
        # need to convert str to unicode for text io output
        if sys.version_info[0] == 2:
            fh = WriteWrap(fh, encoding or 'latin1')
    elif hasattr(fname, 'write'):
        # wrap to handle byte output streams
        fh = WriteWrap(fname, encoding or 'latin1')
    else:
        raise ValueError('fname must be a string or file handle')
    try:
        X = np.asarray(X)
        # Handle 1-dimensional arrays
        if X.ndim == 0 or X.ndim > 2:
            raise ValueError(
                "Expected 1D or 2D array, got %dD array instead" % X.ndim)
        elif X.ndim == 1:
            # Common case -- 1d array of numbers
            if X.dtype.names is None:
                X = np.atleast_2d(X).T
                ncol = 1
            # Complex dtype -- each field indicates a separate column
            else:
                ncol = len(X.dtype.descr)
        else:
            ncol = X.shape[1]
        iscomplex_X = np.iscomplexobj(X)
        # `fmt` can be a string with multiple insertion points or a
        # list of formats.  E.g. '%10.5f\t%10d' or ('%10.5f', '$10d')
        if type(fmt) in (list, tuple):
            if len(fmt) != ncol:
                raise AttributeError('fmt has wrong shape. %s' % str(fmt))
            format = asstr(delimiter).join(map(asstr, fmt))
        elif isinstance(fmt, str):
            n_fmt_chars = fmt.count('%')
            error = ValueError('fmt has wrong number of %% formats: %s' % fmt)
            if n_fmt_chars == 1:
                # One specifier: replicate it per column (pairing real and
                # imaginary parts for complex data).
                if iscomplex_X:
                    fmt = [' (%s+%sj)' % (fmt, fmt), ] * ncol
                else:
                    fmt = [fmt, ] * ncol
                format = delimiter.join(fmt)
            elif iscomplex_X and n_fmt_chars != (2 * ncol):
                raise error
            elif ((not iscomplex_X) and n_fmt_chars != ncol):
                raise error
            else:
                format = fmt
        else:
            raise ValueError('invalid fmt: %r' % (fmt,))
        if len(header) > 0:
            header = header.replace('\n', '\n' + comments)
            fh.write(comments + header + newline)
        if iscomplex_X:
            for row in X:
                row2 = []
                for number in row:
                    row2.append(number.real)
                    row2.append(number.imag)
                s = format % tuple(row2) + newline
                fh.write(s.replace('+-', '-'))
        else:
            for row in X:
                try:
                    v = format % tuple(row) + newline
                except TypeError:
                    raise TypeError("Mismatch between array dtype ('%s') and "
                                    "format specifier ('%s')"
                                    % (str(X.dtype), format))
                fh.write(v)
        if len(footer) > 0:
            footer = footer.replace('\n', '\n' + comments)
            fh.write(comments + footer + newline)
    finally:
        # Only close handles this function opened itself.
        if own_fh:
            fh.close()
def fromregex(file, regexp, dtype, encoding=None):
    """
    Construct an array from a text file, using regular expression parsing.
    The returned array is always a structured array, and is constructed from
    all matches of the regular expression in the file. Groups in the regular
    expression are converted to fields of the structured array.
    Parameters
    ----------
    file : str or file
        File name or file object to read.
    regexp : str or regexp
        Regular expression used to parse the file.
        Groups in the regular expression correspond to fields in the dtype.
    dtype : dtype or list of dtypes
        Dtype for the structured array.
    encoding : str, optional
        Encoding used to decode the inputfile. Does not apply to input streams.
        .. versionadded:: 1.14.0
    Returns
    -------
    output : ndarray
        The output array, containing the part of the content of `file` that
        was matched by `regexp`. `output` is always a structured array.
    Raises
    ------
    TypeError
        When `dtype` is not a valid dtype for a structured array.
    See Also
    --------
    fromstring, loadtxt
    Notes
    -----
    Dtypes for structured arrays can be specified in several forms, but all
    forms specify at least the data type and field name. For details see
    `doc.structured_arrays`.
    Examples
    --------
    >>> f = open('test.dat', 'w')
    >>> f.write("1312 foo\\n1534  bar\\n444   qux")
    >>> f.close()
    >>> regexp = r"(\\d+)\\s+(...)"  # match [digits, whitespace, anything]
    >>> output = np.fromregex('test.dat', regexp,
    ...                       [('num', np.int64), ('key', 'S3')])
    >>> output
    array([(1312L, 'foo'), (1534L, 'bar'), (444L, 'qux')],
          dtype=[('num', '<i8'), ('key', '|S3')])
    >>> output['num']
    array([1312, 1534,  444], dtype=int64)
    """
    own_fh = False
    # Anything without a read() method is treated as a path and opened here;
    # `own_fh` marks that we are responsible for closing it.
    if not hasattr(file, "read"):
        file = np.lib._datasource.open(file, 'rt', encoding=encoding)
        own_fh = True
    try:
        if not isinstance(dtype, np.dtype):
            dtype = np.dtype(dtype)
        content = file.read()
        # Coerce pattern and content to the same str/bytes flavour so that
        # re can match them.  NOTE(review): `np.unicode` is a legacy alias
        # for `str`; removed in newer NumPy releases.
        if isinstance(content, bytes) and isinstance(regexp, np.unicode):
            regexp = asbytes(regexp)
        elif isinstance(content, np.unicode) and isinstance(regexp, bytes):
            regexp = asstr(regexp)
        if not hasattr(regexp, 'match'):
            regexp = re.compile(regexp)
        seq = regexp.findall(content)
        if seq and not isinstance(seq[0], tuple):
            # Only one group is in the regexp.
            # Create the new array as a single data-type and then
            #   re-interpret as a single-field structured array.
            newdtype = np.dtype(dtype[dtype.names[0]])
            output = np.array(seq, dtype=newdtype)
            output.dtype = dtype
        else:
            output = np.array(seq, dtype=dtype)
        return output
    finally:
        if own_fh:
            file.close()
#####--------------------------------------------------------------------------
#---- --- ASCII functions ---
#####--------------------------------------------------------------------------
def genfromtxt(fname, dtype=float, comments='#', delimiter=None,
skip_header=0, skip_footer=0, converters=None,
missing_values=None, filling_values=None, usecols=None,
names=None, excludelist=None, deletechars=None,
replace_space='_', autostrip=False, case_sensitive=True,
defaultfmt="f%i", unpack=None, usemask=False, loose=True,
invalid_raise=True, max_rows=None, encoding='bytes'):
"""
Load data from a text file, with missing values handled as specified.
Each line past the first `skip_header` lines is split at the `delimiter`
character, and characters following the `comments` character are discarded.
Parameters
----------
fname : file, str, pathlib.Path, list of str, generator
File, filename, list, or generator to read. If the filename
extension is `.gz` or `.bz2`, the file is first decompressed. Note
that generators must return byte strings in Python 3k. The strings
in a list or produced by a generator are treated as lines.
dtype : dtype, optional
Data type of the resulting array.
If None, the dtypes will be determined by the contents of each
column, individually.
comments : str, optional
The character used to indicate the start of a comment.
All the characters occurring on a line after a comment are discarded
delimiter : str, int, or sequence, optional
The string used to separate values. By default, any consecutive
whitespaces act as delimiter. An integer or sequence of integers
can also be provided as width(s) of each field.
skiprows : int, optional
`skiprows` was removed in numpy 1.10. Please use `skip_header` instead.
skip_header : int, optional
The number of lines to skip at the beginning of the file.
skip_footer : int, optional
The number of lines to skip at the end of the file.
converters : variable, optional
The set of functions that convert the data of a column to a value.
The converters can also be used to provide a default value
for missing data: ``converters = {3: lambda s: float(s or 0)}``.
missing : variable, optional
`missing` was removed in numpy 1.10. Please use `missing_values`
instead.
missing_values : variable, optional
The set of strings corresponding to missing data.
filling_values : variable, optional
The set of values to be used as default when the data are missing.
usecols : sequence, optional
Which columns to read, with 0 being the first. For example,
``usecols = (1, 4, 5)`` will extract the 2nd, 5th and 6th columns.
names : {None, True, str, sequence}, optional
If `names` is True, the field names are read from the first line after
the first `skip_header` lines. This line can optionally be proceeded
by a comment delimiter. If `names` is a sequence or a single-string of
comma-separated names, the names will be used to define the field names
in a structured dtype. If `names` is None, the names of the dtype
fields will be used, if any.
excludelist : sequence, optional
A list of names to exclude. This list is appended to the default list
['return','file','print']. Excluded names are appended an underscore:
for example, `file` would become `file_`.
deletechars : str, optional
A string combining invalid characters that must be deleted from the
names.
defaultfmt : str, optional
A format used to define default field names, such as "f%i" or "f_%02i".
autostrip : bool, optional
Whether to automatically strip white spaces from the variables.
replace_space : char, optional
Character(s) used in replacement of white spaces in the variables
names. By default, use a '_'.
case_sensitive : {True, False, 'upper', 'lower'}, optional
If True, field names are case sensitive.
If False or 'upper', field names are converted to upper case.
If 'lower', field names are converted to lower case.
unpack : bool, optional
If True, the returned array is transposed, so that arguments may be
unpacked using ``x, y, z = loadtxt(...)``
usemask : bool, optional
If True, return a masked array.
If False, return a regular array.
loose : bool, optional
If True, do not raise errors for invalid values.
invalid_raise : bool, optional
If True, an exception is raised if an inconsistency is detected in the
number of columns.
If False, a warning is emitted and the offending lines are skipped.
max_rows : int, optional
The maximum number of rows to read. Must not be used with skip_footer
at the same time. If given, the value must be at least 1. Default is
to read the entire file.
.. versionadded:: 1.10.0
encoding : str, optional
Encoding used to decode the inputfile. Does not apply when `fname` is
a file object. The special value 'bytes' enables backward compatibility
workarounds that ensure that you receive byte arrays when possible
and passes latin1 encoded strings to converters. Override this value to
receive unicode arrays and pass strings as input to converters. If set
to None the system default is used. The default value is 'bytes'.
.. versionadded:: 1.14.0
Returns
-------
out : ndarray
Data read from the text file. If `usemask` is True, this is a
masked array.
See Also
--------
numpy.loadtxt : equivalent function when no data is missing.
Notes
-----
* When spaces are used as delimiters, or when no delimiter has been given
as input, there should not be any missing data between two fields.
* When the variables are named (either by a flexible dtype or with `names`,
there must not be any header in the file (else a ValueError
exception is raised).
* Individual values are not stripped of spaces by default.
When using a custom converter, make sure the function does remove spaces.
References
----------
.. [1] NumPy User Guide, section `I/O with NumPy
<https://docs.scipy.org/doc/numpy/user/basics.io.genfromtxt.html>`_.
Examples
---------
>>> from io import StringIO
>>> import numpy as np
Comma delimited file with mixed dtype
>>> s = StringIO(u"1,1.3,abcde")
>>> data = np.genfromtxt(s, dtype=[('myint','i8'),('myfloat','f8'),
... ('mystring','S5')], delimiter=",")
>>> data
array((1, 1.3, 'abcde'),
dtype=[('myint', '<i8'), ('myfloat', '<f8'), ('mystring', '|S5')])
Using dtype = None
>>> s.seek(0) # needed for StringIO example only
>>> data = np.genfromtxt(s, dtype=None,
... names = ['myint','myfloat','mystring'], delimiter=",")
>>> data
array((1, 1.3, 'abcde'),
dtype=[('myint', '<i8'), ('myfloat', '<f8'), ('mystring', '|S5')])
Specifying dtype and names
>>> s.seek(0)
>>> data = np.genfromtxt(s, dtype="i8,f8,S5",
... names=['myint','myfloat','mystring'], delimiter=",")
>>> data
array((1, 1.3, 'abcde'),
dtype=[('myint', '<i8'), ('myfloat', '<f8'), ('mystring', '|S5')])
An example with fixed-width columns
>>> s = StringIO(u"11.3abcde")
>>> data = np.genfromtxt(s, dtype=None, names=['intvar','fltvar','strvar'],
... delimiter=[1,3,5])
>>> data
array((1, 1.3, 'abcde'),
dtype=[('intvar', '<i8'), ('fltvar', '<f8'), ('strvar', '|S5')])
"""
if max_rows is not None:
if skip_footer:
raise ValueError(
"The keywords 'skip_footer' and 'max_rows' can not be "
"specified at the same time.")
if max_rows < 1:
raise ValueError("'max_rows' must be at least 1.")
if usemask:
from numpy.ma import MaskedArray, make_mask_descr
# Check the input dictionary of converters
user_converters = converters or {}
if not isinstance(user_converters, dict):
raise TypeError(
"The input argument 'converter' should be a valid dictionary "
"(got '%s' instead)" % type(user_converters))
if encoding == 'bytes':
encoding = None
byte_converters = True
else:
byte_converters = False
# Initialize the filehandle, the LineSplitter and the NameValidator
own_fhd = False
try:
if isinstance(fname, os_PathLike):
fname = os_fspath(fname)
if isinstance(fname, basestring):
fhd = iter(np.lib._datasource.open(fname, 'rt', encoding=encoding))
own_fhd = True
else:
fhd = iter(fname)
except TypeError:
raise TypeError(
"fname must be a string, filehandle, list of strings, "
"or generator. Got %s instead." % type(fname))
split_line = LineSplitter(delimiter=delimiter, comments=comments,
autostrip=autostrip, encoding=encoding)
validate_names = NameValidator(excludelist=excludelist,
deletechars=deletechars,
case_sensitive=case_sensitive,
replace_space=replace_space)
# Skip the first `skip_header` rows
for i in range(skip_header):
next(fhd)
# Keep on until we find the first valid values
first_values = None
try:
while not first_values:
first_line = _decode_line(next(fhd), encoding)
if (names is True) and (comments is not None):
if comments in first_line:
first_line = (
''.join(first_line.split(comments)[1:]))
first_values = split_line(first_line)
except StopIteration:
# return an empty array if the datafile is empty
first_line = ''
first_values = []
warnings.warn('genfromtxt: Empty input file: "%s"' % fname, stacklevel=2)
# Should we take the first values as names ?
if names is True:
fval = first_values[0].strip()
if comments is not None:
if fval in comments:
del first_values[0]
# Check the columns to use: make sure `usecols` is a list
if usecols is not None:
try:
usecols = [_.strip() for _ in usecols.split(",")]
except AttributeError:
try:
usecols = list(usecols)
except TypeError:
usecols = [usecols, ]
nbcols = len(usecols or first_values)
# Check the names and overwrite the dtype.names if needed
if names is True:
names = validate_names([str(_.strip()) for _ in first_values])
first_line = ''
elif _is_string_like(names):
names = validate_names([_.strip() for _ in names.split(',')])
elif names:
names = validate_names(names)
# Get the dtype
if dtype is not None:
dtype = easy_dtype(dtype, defaultfmt=defaultfmt, names=names,
excludelist=excludelist,
deletechars=deletechars,
case_sensitive=case_sensitive,
replace_space=replace_space)
# Make sure the names is a list (for 2.5)
if names is not None:
names = list(names)
if usecols:
for (i, current) in enumerate(usecols):
# if usecols is a list of names, convert to a list of indices
if _is_string_like(current):
usecols[i] = names.index(current)
elif current < 0:
usecols[i] = current + len(first_values)
# If the dtype is not None, make sure we update it
if (dtype is not None) and (len(dtype) > nbcols):
descr = dtype.descr
dtype = np.dtype([descr[_] for _ in usecols])
names = list(dtype.names)
# If `names` is not None, update the names
elif (names is not None) and (len(names) > nbcols):
names = [names[_] for _ in usecols]
elif (names is not None) and (dtype is not None):
names = list(dtype.names)
# Process the missing values ...............................
# Rename missing_values for convenience
user_missing_values = missing_values or ()
if isinstance(user_missing_values, bytes):
user_missing_values = user_missing_values.decode('latin1')
# Define the list of missing_values (one column: one list)
missing_values = [list(['']) for _ in range(nbcols)]
# We have a dictionary: process it field by field
if isinstance(user_missing_values, dict):
# Loop on the items
for (key, val) in user_missing_values.items():
# Is the key a string ?
if _is_string_like(key):
try:
# Transform it into an integer
key = names.index(key)
except ValueError:
# We couldn't find it: the name must have been dropped
continue
# Redefine the key as needed if it's a column number
if usecols:
try:
key = usecols.index(key)
except ValueError:
pass
# Transform the value as a list of string
if isinstance(val, (list, tuple)):
val = [str(_) for _ in val]
else:
val = [str(val), ]
# Add the value(s) to the current list of missing
if key is None:
# None acts as default
for miss in missing_values:
miss.extend(val)
else:
missing_values[key].extend(val)
# We have a sequence : each item matches a column
elif isinstance(user_missing_values, (list, tuple)):
for (value, entry) in zip(user_missing_values, missing_values):
value = str(value)
if value not in entry:
entry.append(value)
# We have a string : apply it to all entries
elif isinstance(user_missing_values, basestring):
user_value = user_missing_values.split(",")
for entry in missing_values:
entry.extend(user_value)
# We have something else: apply it to all entries
else:
for entry in missing_values:
entry.extend([str(user_missing_values)])
# Process the filling_values ...............................
# Rename the input for convenience
user_filling_values = filling_values
if user_filling_values is None:
user_filling_values = []
# Define the default
filling_values = [None] * nbcols
# We have a dictionary : update each entry individually
if isinstance(user_filling_values, dict):
for (key, val) in user_filling_values.items():
if _is_string_like(key):
try:
# Transform it into an integer
key = names.index(key)
except ValueError:
# We couldn't find it: the name must have been dropped,
continue
# Redefine the key if it's a column number and usecols is defined
if usecols:
try:
key = usecols.index(key)
except ValueError:
pass
# Add the value to the list
filling_values[key] = val
# We have a sequence : update on a one-to-one basis
elif isinstance(user_filling_values, (list, tuple)):
n = len(user_filling_values)
if (n <= nbcols):
filling_values[:n] = user_filling_values
else:
filling_values = user_filling_values[:nbcols]
# We have something else : use it for all entries
else:
filling_values = [user_filling_values] * nbcols
# Initialize the converters ................................
if dtype is None:
# Note: we can't use a [...]*nbcols, as we would have 3 times the same
# ... converter, instead of 3 different converters.
converters = [StringConverter(None, missing_values=miss, default=fill)
for (miss, fill) in zip(missing_values, filling_values)]
else:
dtype_flat = flatten_dtype(dtype, flatten_base=True)
# Initialize the converters
if len(dtype_flat) > 1:
# Flexible type : get a converter from each dtype
zipit = zip(dtype_flat, missing_values, filling_values)
converters = [StringConverter(dt, locked=True,
missing_values=miss, default=fill)
for (dt, miss, fill) in zipit]
else:
# Set to a default converter (but w/ different missing values)
zipit = zip(missing_values, filling_values)
converters = [StringConverter(dtype, locked=True,
missing_values=miss, default=fill)
for (miss, fill) in zipit]
# Update the converters to use the user-defined ones
uc_update = []
for (j, conv) in user_converters.items():
# If the converter is specified by column names, use the index instead
if _is_string_like(j):
try:
j = names.index(j)
i = j
except ValueError:
continue
elif usecols:
try:
i = usecols.index(j)
except ValueError:
# Unused converter specified
continue
else:
i = j
# Find the value to test - first_line is not filtered by usecols:
if len(first_line):
testing_value = first_values[j]
else:
testing_value = None
if conv is bytes:
user_conv = asbytes
elif byte_converters:
# converters may use decode to workaround numpy's old behaviour,
# so encode the string again before passing to the user converter
def tobytes_first(x, conv):
if type(x) is bytes:
return conv(x)
return conv(x.encode("latin1"))
import functools
user_conv = functools.partial(tobytes_first, conv=conv)
else:
user_conv = conv
converters[i].update(user_conv, locked=True,
testing_value=testing_value,
default=filling_values[i],
missing_values=missing_values[i],)
uc_update.append((i, user_conv))
# Make sure we have the corrected keys in user_converters...
user_converters.update(uc_update)
# Fixme: possible error as following variable never used.
# miss_chars = [_.missing_values for _ in converters]
# Initialize the output lists ...
# ... rows
rows = []
append_to_rows = rows.append
# ... masks
if usemask:
masks = []
append_to_masks = masks.append
# ... invalid
invalid = []
append_to_invalid = invalid.append
# Parse each line
for (i, line) in enumerate(itertools.chain([first_line, ], fhd)):
values = split_line(line)
nbvalues = len(values)
# Skip an empty line
if nbvalues == 0:
continue
if usecols:
# Select only the columns we need
try:
values = [values[_] for _ in usecols]
except IndexError:
append_to_invalid((i + skip_header + 1, nbvalues))
continue
elif nbvalues != nbcols:
append_to_invalid((i + skip_header + 1, nbvalues))
continue
# Store the values
append_to_rows(tuple(values))
if usemask:
append_to_masks(tuple([v.strip() in m
for (v, m) in zip(values,
missing_values)]))
if len(rows) == max_rows:
break
if own_fhd:
fhd.close()
# Upgrade the converters (if needed)
if dtype is None:
for (i, converter) in enumerate(converters):
current_column = [itemgetter(i)(_m) for _m in rows]
try:
converter.iterupgrade(current_column)
except ConverterLockError:
errmsg = "Converter #%i is locked and cannot be upgraded: " % i
current_column = map(itemgetter(i), rows)
for (j, value) in enumerate(current_column):
try:
converter.upgrade(value)
except (ConverterError, ValueError):
errmsg += "(occurred line #%i for value '%s')"
errmsg %= (j + 1 + skip_header, value)
raise ConverterError(errmsg)
# Check that we don't have invalid values
nbinvalid = len(invalid)
if nbinvalid > 0:
nbrows = len(rows) + nbinvalid - skip_footer
# Construct the error message
template = " Line #%%i (got %%i columns instead of %i)" % nbcols
if skip_footer > 0:
nbinvalid_skipped = len([_ for _ in invalid
if _[0] > nbrows + skip_header])
invalid = invalid[:nbinvalid - nbinvalid_skipped]
skip_footer -= nbinvalid_skipped
#
# nbrows -= skip_footer
# errmsg = [template % (i, nb)
# for (i, nb) in invalid if i < nbrows]
# else:
errmsg = [template % (i, nb)
for (i, nb) in invalid]
if len(errmsg):
errmsg.insert(0, "Some errors were detected !")
errmsg = "\n".join(errmsg)
# Raise an exception ?
if invalid_raise:
raise ValueError(errmsg)
# Issue a warning ?
else:
warnings.warn(errmsg, ConversionWarning, stacklevel=2)
# Strip the last skip_footer data
if skip_footer > 0:
rows = rows[:-skip_footer]
if usemask:
masks = masks[:-skip_footer]
# Convert each value according to the converter:
# We want to modify the list in place to avoid creating a new one...
if loose:
rows = list(
zip(*[[conv._loose_call(_r) for _r in map(itemgetter(i), rows)]
for (i, conv) in enumerate(converters)]))
else:
rows = list(
zip(*[[conv._strict_call(_r) for _r in map(itemgetter(i), rows)]
for (i, conv) in enumerate(converters)]))
# Reset the dtype
data = rows
if dtype is None:
# Get the dtypes from the types of the converters
column_types = [conv.type for conv in converters]
# Find the columns with strings...
strcolidx = [i for (i, v) in enumerate(column_types)
if v == np.unicode_]
if byte_converters and strcolidx:
# convert strings back to bytes for backward compatibility
warnings.warn(
"Reading unicode strings without specifying the encoding "
"argument is deprecated. Set the encoding, use None for the "
"system default.",
np.VisibleDeprecationWarning, stacklevel=2)
def encode_unicode_cols(row_tup):
row = list(row_tup)
for i in strcolidx:
row[i] = row[i].encode('latin1')
return tuple(row)
try:
data = [encode_unicode_cols(r) for r in data]
except UnicodeEncodeError:
pass
else:
for i in strcolidx:
column_types[i] = np.bytes_
# Update string types to be the right length
sized_column_types = column_types[:]
for i, col_type in enumerate(column_types):
if np.issubdtype(col_type, np.character):
n_chars = max(len(row[i]) for row in data)
sized_column_types[i] = (col_type, n_chars)
if names is None:
# If the dtype is uniform (before sizing strings)
base = set([
c_type
for c, c_type in zip(converters, column_types)
if c._checked])
if len(base) == 1:
uniform_type, = base
(ddtype, mdtype) = (uniform_type, bool)
else:
ddtype = [(defaultfmt % i, dt)
for (i, dt) in enumerate(sized_column_types)]
if usemask:
mdtype = [(defaultfmt % i, bool)
for (i, dt) in enumerate(sized_column_types)]
else:
ddtype = list(zip(names, sized_column_types))
mdtype = list(zip(names, [bool] * len(sized_column_types)))
output = np.array(data, dtype=ddtype)
if usemask:
outputmask = np.array(masks, dtype=mdtype)
else:
# Overwrite the initial dtype names if needed
if names and dtype.names:
dtype.names = names
# Case 1. We have a structured type
if len(dtype_flat) > 1:
# Nested dtype, eg [('a', int), ('b', [('b0', int), ('b1', 'f4')])]
# First, create the array using a flattened dtype:
# [('a', int), ('b1', int), ('b2', float)]
# Then, view the array using the specified dtype.
if 'O' in (_.char for _ in dtype_flat):
if has_nested_fields(dtype):
raise NotImplementedError(
"Nested fields involving objects are not supported...")
else:
output = np.array(data, dtype=dtype)
else:
rows = np.array(data, dtype=[('', _) for _ in dtype_flat])
output = rows.view(dtype)
# Now, process the rowmasks the same way
if usemask:
rowmasks = np.array(
masks, dtype=np.dtype([('', bool) for t in dtype_flat]))
# Construct the new dtype
mdtype = make_mask_descr(dtype)
outputmask = rowmasks.view(mdtype)
# Case #2. We have a basic dtype
else:
# We used some user-defined converters
if user_converters:
ishomogeneous = True
descr = []
for i, ttype in enumerate([conv.type for conv in converters]):
# Keep the dtype of the current converter
if i in user_converters:
ishomogeneous &= (ttype == dtype.type)
if np.issubdtype(ttype, np.character):
ttype = (ttype, max(len(row[i]) for row in data))
descr.append(('', ttype))
else:
descr.append(('', dtype))
# So we changed the dtype ?
if not ishomogeneous:
# We have more than one field
if len(descr) > 1:
dtype = np.dtype(descr)
# We have only one field: drop the name if not needed.
else:
dtype = np.dtype(ttype)
#
output = np.array(data, dtype)
if usemask:
if dtype.names:
mdtype = [(_, bool) for _ in dtype.names]
else:
mdtype = bool
outputmask = np.array(masks, dtype=mdtype)
# Try to take care of the missing data we missed
names = output.dtype.names
if usemask and names:
for (name, conv) in zip(names, converters):
missing_values = [conv(_) for _ in conv.missing_values
if _ != '']
for mval in missing_values:
outputmask[name] |= (output[name] == mval)
# Construct the final array
if usemask:
output = output.view(MaskedArray)
output._mask = outputmask
if unpack:
return output.squeeze().T
return output.squeeze()
def ndfromtxt(fname, **kwargs):
    """
    Load ASCII data stored in a file and return it as a single array.

    Parameters
    ----------
    fname, kwargs : For a description of input parameters, see `genfromtxt`.

    See Also
    --------
    numpy.genfromtxt : generic function.
    """
    # Delegate to genfromtxt with masking forced off.
    return genfromtxt(fname, **dict(kwargs, usemask=False))
def mafromtxt(fname, **kwargs):
    """
    Load ASCII data stored in a text file and return a masked array.

    Parameters
    ----------
    fname, kwargs : For a description of input parameters, see `genfromtxt`.

    See Also
    --------
    numpy.genfromtxt : generic function to load ASCII data.
    """
    # Delegate to genfromtxt with masking forced on.
    return genfromtxt(fname, **dict(kwargs, usemask=True))
def recfromtxt(fname, **kwargs):
    """
    Load ASCII data from a file and return it in a record array.

    If ``usemask=False`` a standard `recarray` is returned,
    if ``usemask=True`` a MaskedRecords array is returned.

    Parameters
    ----------
    fname, kwargs : For a description of input parameters, see `genfromtxt`.

    See Also
    --------
    numpy.genfromtxt : generic function

    Notes
    -----
    By default, `dtype` is None, which means that the data-type of the output
    array will be determined from the data.
    """
    if "dtype" not in kwargs:
        kwargs["dtype"] = None
    data = genfromtxt(fname, **kwargs)
    if kwargs.get("usemask", False):
        from numpy.ma.mrecords import MaskedRecords
        return data.view(MaskedRecords)
    return data.view(np.recarray)
def recfromcsv(fname, **kwargs):
    """
    Load ASCII data stored in a comma-separated file.

    The returned array is a record array (if ``usemask=False``, see
    `recarray`) or a masked record array (if ``usemask=True``,
    see `ma.mrecords.MaskedRecords`).

    Parameters
    ----------
    fname, kwargs : For a description of input parameters, see `genfromtxt`.

    See Also
    --------
    numpy.genfromtxt : generic function to load ASCII data.

    Notes
    -----
    By default, `dtype` is None, which means that the data-type of the output
    array will be determined from the data.
    """
    # CSV-flavoured defaults: comma delimiter, header row gives the
    # (lowercased) field names, dtype inferred from the data.
    csv_defaults = {
        "case_sensitive": "lower",
        "names": True,
        "delimiter": ",",
        "dtype": None,
    }
    for key, value in csv_defaults.items():
        kwargs.setdefault(key, value)
    data = genfromtxt(fname, **kwargs)
    if kwargs.get("usemask", False):
        from numpy.ma.mrecords import MaskedRecords
        return data.view(MaskedRecords)
    return data.view(np.recarray)
| bsd-3-clause |
Shekharrajak/mubosym | mubosym/mubosym_core.py | 1 | 95748 | # -*- coding: utf-8 -*-
"""
all mubosym related core classes
================================
Created on Sun Mar 8 11:50:46 2015
@author: oliver
"""
from __future__ import print_function, absolute_import
import os.path,sys,time,copy
try:
import pandas as pd
no_pandas = False
except:
print( 'can not use pandas here' )
no_pandas = True
from sympy import symbols, lambdify, sign, re, acos, asin, sin, cos, Poly
from sympy.physics.mechanics import ( Vector, ReferenceFrame, Point, dynamicsymbols, outer,
RigidBody, KanesMethod, gradient)
from sympy.solvers import solve as sp_solve
#Vector.simp = True
from numpy import array, hstack, vstack, ones, zeros, linspace, pi, sqrt
from numpy.linalg import eig
from numpy.linalg import solve as np_solve
from scipy.linalg import solve as sc_solve #, lu_solve
#from scipy.sparse.linalg import factorized
from scipy.integrate import ode, odeint
import mubosym as mbs
from matplotlib import pyplot as plt
#########################################
from dbconnect import dbHandler
from symTools import list_to_world, worldData
#########################################
from interp1d_interface import interp
from one_body_force_model_interface import one_body_force_model
from simple_tire_model_interface import simple_tire_model
### Imports always on top...
from vpython_3d import animation
class ParameterError(Exception):
    """
    Exception raised when an object receives the wrong number of parameters.

    :param paras: the offending parameter list/expression as given by the caller
    :param n_soll: the number of parameters that was expected
    :param name: name of the object (joint, force, ...) that was being set up
    """
    def __init__(self, paras, n_soll, name):
        # Keep the raw inputs around for programmatic inspection.
        self.paras = paras
        self.n_soll = n_soll
        self.name = name
        self.msg = name+"... caused by: "+str(paras)+". Please give me "+str(n_soll)+" parameters"
        super(ParameterError, self).__init__(self.msg)
class InputError(Exception):
    """
    Exception raised when a textual input option is not recognized.

    :param input: the offending input string
    """
    def __init__(self, input):
        message = 'Wrong Input caused by: ' + input
        self.msg = message
        super(InputError, self).__init__(message)
IF = ReferenceFrame('IF') # Inertial reference frame, shared by the whole module
O = Point('O') # Global origin point
O.set_vel(IF, 0) # Origin's velocity is zero in the inertial frame
g, t = symbols('g t') # Gravity constant and time symbol used in all kinematics
class MBSframe(ReferenceFrame):
    """
    This class represents a moving frame. (sympy only provides rotating frames up to now)

    A sympy ``ReferenceFrame`` is extended with its own origin ``Point`` plus
    the symbolic position/velocity/acceleration vectors of that origin, so the
    frame can translate as well as rotate relative to the inertial frame IF.
    """
    def __init__(self,Name):
        global IF, O, g, t
        ReferenceFrame.__init__(self,Name)
        # Own origin point, initially coincident with the global origin O.
        self.Orig = Point('O_'+Name)
        self.Orig.set_pos(O, 0.*IF.x)
        # Euler-angle symbols; resolved to generalized coordinates via free_dict.
        self.phi, self.theta, self.psi = symbols('phi theta psi')
        # Symbolic kinematics of the origin, expressed in IF.
        self.pos = 0.*IF.x
        self.vel = 0.*IF.x
        self.acc = 0.*IF.x
        self.dicts = {}       # accumulated substitution dictionaries (see set_dicts)
        self.free_dict = {}   # maps angle symbols -> generalized coordinates
    def set_pos_vec_IF(self, vec):
        # Set the origin position (vector in IF); velocity and acceleration
        # follow by symbolic time differentiation in the inertial frame.
        self.pos = vec
        self.vel = vec.diff(t, IF)
        self.acc = self.vel.diff(t, IF)
        self.Orig.set_pos(O, vec)
        self.Orig.set_vel(IF, self.vel)
    def set_vel_vec_IF(self, vec):
        # Overwrite the origin velocity in IF without touching the position.
        self.Orig.set_vel(IF, vec)
        self.vel = vec
    def get_vel_vec_IF(self):
        return self.Orig.vel(IF)
    def set_pos_Pt(self, Pt):
        # Replace the origin by an existing sympy Point.
        self.Orig = Pt
    def get_pos_Pt(self):
        return self.Orig
    def get_pos_IF(self):
        # Origin position as a vector expressed in the inertial frame.
        return self.Orig.pos_from(O).express(IF)
    def get_pos_SELF(self):
        # Origin position expressed in this frame's own basis.
        return self.Orig.pos_from(O).express(self)
    def express_vec_in(self, vec):
        # Express *vec* in this frame, relative to this frame's origin.
        return vec.express(self, variables=True) - self.get_pos_SELF()
    def get_omega(self, frame):
        # Angular velocity of this frame as seen from *frame*.
        return self.ang_vel_in(frame)
    # Unit vectors of this frame, expressed in the inertial frame.
    def get_ex_IF(self):
        return self.x.express(IF)
    def get_ey_IF(self):
        return self.y.express(IF)
    def get_ez_IF(self):
        return self.z.express(IF)
    # Scalar IF-components of position / velocity / acceleration of the
    # origin, with the accumulated substitutions (self.dicts) applied.
    def px(self):
        return self.pos.dot(IF.x).subs(self.dicts)
    def py(self):
        return self.pos.dot(IF.y).subs(self.dicts)
    def pz(self):
        return self.pos.dot(IF.z).subs(self.dicts)
    def px_dt(self):
        return self.vel.dot(IF.x).subs(self.dicts)
    def py_dt(self):
        return self.vel.dot(IF.y).subs(self.dicts)
    def pz_dt(self):
        return self.vel.dot(IF.z).subs(self.dicts)
    def px_ddt(self):
        return self.acc.dot(IF.x).subs(self.dicts)
    def py_ddt(self):
        return self.acc.dot(IF.y).subs(self.dicts)
    def pz_ddt(self):
        return self.acc.dot(IF.z).subs(self.dicts)
    def set_freedoms_dict(self, free_dict):
        # Bind the angle symbols to the joint's generalized coordinates.
        self.free_dict = free_dict
    # The three accessors below return the generalized coordinate bound to the
    # respective angle symbol, or 0. when that angle is not a freedom.
    def get_phi(self):
        if self.phi in self.free_dict:
            return self.free_dict[self.phi]
        else:
            return 0.
    def get_theta(self):
        if self.theta in self.free_dict:
            return self.free_dict[self.theta]
        else:
            return 0.
    def get_psi(self):
        if self.psi in self.free_dict:
            return self.free_dict[self.psi]
        else:
            return 0.
    def set_dicts(self, dicts):
        # Merge a list of substitution dictionaries into the frame's own dict.
        for d in dicts:
            self.dicts.update(d)
class MBSbody(object):
    """
    One rigid body of the multibody world: mass/inertia data plus the moving
    frame and joint it is attached to. Most accessors are thin delegations
    to the body's MBSframe.
    """
    def __init__(self, n, name, mass, I, pos, vel, frame, joint, N_att, N_att_fixed, atm = ''):
        self.n = n                      # body index inside the world
        self.frame = frame              # MBSframe carrying the body kinematics
        self.joint = joint              # joint connecting this body to its parent
        self.vel = vel                  # symbolic velocity vector
        self.pos = pos                  # symbolic position vector
        self.name = name
        self.mass = mass
        self.I = I                      # inertia (Ixx, Iyy, Izz as set up by the world)
        self.N_att = N_att              # attachment frame (moving)
        self.N_att_fixed = N_att_fixed  # attachment frame (body-fixed)
        self.attached_to_marker = atm #string name of marker
        self.small_angles = []          # angle symbols flagged for small-angle approximation
        self.free_dict = {}             # angle symbol -> generalized coordinate
    def get_vel(self):
        return self.vel
    def get_vel_magnitude(self):
        return self.vel.magnitude()
    def get_pos(self):
        return self.pos
    def Pt(self):
        # sympy Point of the body frame's origin.
        return self.frame.get_pos_Pt()
    # Scalar IF-components of the body origin, delegated to the frame.
    def x(self):
        return self.frame.px()
    def y(self):
        return self.frame.py()
    def z(self):
        return self.frame.pz()
    def phi(self):
        # NOTE(review): MBSframe.phi is a sympy Symbol attribute, not a method;
        # calling it here likely raises TypeError — confirm intended behavior
        # (get_phi() below is the working accessor).
        return self.frame.phi()
    def x_dt(self):
        return self.frame.px_dt()
    def y_dt(self):
        return self.frame.py_dt()
    def z_dt(self):
        return self.frame.pz_dt()
    def x_ddt(self):
        return self.frame.px_ddt()
    def y_ddt(self):
        return self.frame.py_ddt()
    def z_ddt(self):
        return self.frame.pz_ddt()
    # Generalized coordinates bound to the Euler angles (0. if not free).
    def get_phi(self):
        return self.frame.get_phi()
    def get_theta(self):
        return self.frame.get_theta()
    def get_psi(self):
        return self.frame.get_psi()
    def get_frame(self):
        return self.frame
    def get_n(self):
        return self.n
    def get_N_att(self):
        return self.N_att
    def get_N_att_fixed(self):
        return self.N_att_fixed
    def get_mass(self):
        return self.mass
    def set_freedoms_dict(self, f_dict):
        # Propagate the symbol -> coordinate binding to the frame as well.
        self.frame.set_freedoms_dict(f_dict)
        self.free_dict = f_dict
    def set_dicts(self, dicts):
        self.frame.set_dicts(dicts)
    def set_small_angles(self, angle_list):
        self.small_angles = angle_list
    def get_small_angles(self):
        return self.small_angles
class MBScontrolSignal(object):
    """
    A named scalar output signal: a symbolic expression that is lambdified
    once and then evaluated numerically during the simulation.
    """
    def __init__(self, expr, name, unit):
        self.name = name
        self.expr = expr      # symbolic expression of the signal
        self.unit = unit      # unit string for display purposes
        self.lamb = None      # numeric callable, created by lambdify()
        self.value = 0.       # last numeric value (see calc_signal)
    def get_expr(self):
        return self.expr
    def lambdify(self, args):
        # Compile the expression into a fast numeric function of *args*.
        self.lamb = lambdify(args, self.expr)
    def get_signal(self, args):
        # Evaluate without caching the result.
        return self.lamb(*args)
    def subs_dict(self,mdict):
        # Substitute symbols (e.g. constants) before lambdification.
        self.expr = self.expr.subs(mdict)
    def calc_signal(self, args):
        # Evaluate and cache the result for later get_signal_value() calls.
        self.value = self.lamb(*args)
        return self.value
    def get_signal_value(self):
        return self.value
class MBSmarker(object):
    """
    A named marker: a frame rigidly attached to a body, used as an anchor
    for forces, constraints and visualization.
    """
    def __init__(self, name, frame, body_name):
        self.name = name
        self.frame = frame
        self.body_name = body_name #here the name is better
        self.dicts = {}
    def get_frame(self):
        return self.frame
    def get_body_name(self):
        return self.body_name
    def Pt(self):
        # sympy Point of the marker frame's origin.
        return self.frame.get_pos_Pt()
    # Scalar IF-components of the marker position / velocity / acceleration,
    # delegated to the underlying MBSframe.
    def x(self):
        return self.frame.px()
    def y(self):
        return self.frame.py()
    def z(self):
        return self.frame.pz()
    def x_dt(self):
        return self.frame.px_dt()
    def y_dt(self):
        return self.frame.py_dt()
    def z_dt(self):
        return self.frame.pz_dt()
    def x_ddt(self):
        return self.frame.px_ddt()
    def y_ddt(self):
        return self.frame.py_ddt()
    def z_ddt(self):
        return self.frame.pz_ddt()
    ############
    # TODO: define psi, theta phi output
    #
    def set_dicts(self, dicts):
        self.frame.set_dicts(dicts)
class MBSparameter(object):
    """
    A time-dependent model parameter: the symbol triple (value, first and
    second time derivative) together with the numeric functions that supply
    those values during integration.
    """
    def __init__(self, name, sym, sym_dt, sym_ddt, func, func_dt, func_ddt, diff_dict, const = 0.):
        self.name = name
        self.sym = sym            # symbol for the parameter value
        self.sym_dt = sym_dt      # symbol for its first time derivative
        self.sym_ddt = sym_ddt    # symbol for its second time derivative
        self.func = func          # numeric function t -> value
        self.func_dt = func_dt    # numeric function t -> d/dt value
        self.func_ddt = func_ddt  # numeric function t -> d2/dt2 value
        self.diff_dict = diff_dict  # substitution dict relating the derivative symbols
        self.c = const            # constant value used when the parameter is frozen
    def get_func(self):
        return self.func, self.func_dt, self.func_ddt
    def get_paras(self):
        return self.sym, self.sym_dt, self.sym_ddt
    def get_diff_dict(self):
        return self.diff_dict
    def set_constant(self, c):
        self.c = c
class MBSmodel(object):
    """
    Thin handle on an external force model: stores the model name and
    forwards signal registration to the wrapped model object.
    """
    def __init__(self, name, reference):
        self.name = name
        self.ref = reference
    def add_signal(self, expr):
        """Register *expr* as an output signal on the wrapped model."""
        self.ref.add_signal(expr)
class MBSjoint(object):
    """
    Description of one joint type: which of the six coordinates
    (x, y, z translation / phi, theta, psi rotation) are free, which are
    held constant, and in which order the rotations are applied.
    """
    def __init__(self, name):
        self.name = name
        # Coordinate symbols owned by this joint instance.
        self.x, self.y, self.z = symbols('x y z')
        self.phi, self.theta, self.psi = symbols('phi theta psi')
        # Defaults: rotate phi-theta-psi, translate along x, y, z.
        self.rot_order = [self.phi, self.theta, self.psi]
        self.trans = [self.x, self.y, self.z]
        self.rot_frame = 0
        self.trans_frame = 0
        self.free_list = []
        self.const_list = []
        # Mapping from angle symbol to its axis letter, used for c_string.
        self.correspondence = {self.phi: 'X', self.theta: 'Y', self.psi: 'Z'}
        self.c_string = 'XYZ'
        self.n_free = 0
    def define_rot_order(self, order):
        """Set the rotation order and refresh the axis string (e.g. 'YXZ')."""
        self.rot_order = order
        self.c_string = ''.join(self.correspondence[s] for s in order)
    def define_freedoms(self, free_list):
        """Declare the free coordinates of this joint."""
        self.free_list = free_list
        self.n_free = len(free_list)
    def define_constants(self, const_list):
        """Declare the coordinates kept constant (values supplied externally)."""
        self.const_list = const_list
##########################################################################
# define useful joints here ...
def _make_joint(name, free=(), const=(), rot_order=None,
                trans_frame=1, rot_frame=2):
    """
    Build one MBSjoint from attribute-name strings.

    :param name: joint type name (key into ``def_joints``)
    :param free: names of the free coordinate symbols (e.g. ``('psi',)``)
    :param const: names of the coordinates held constant
    :param rot_order: rotation order as symbol names, or None for the default
    :param trans_frame: frame index used for translations
    :param rot_frame: frame index used for rotations
    """
    j = MBSjoint(name)
    if rot_order is not None:
        j.define_rot_order([getattr(j, a) for a in rot_order])
    j.define_freedoms([getattr(j, a) for a in free])
    j.define_constants([getattr(j, a) for a in const])
    j.trans_frame = trans_frame
    j.rot_frame = rot_frame
    return j

# One row per joint type:
# (name, free, const, rot_order, trans_frame, rot_frame)
_JOINT_SPECS = [
    ('rod-1-cardanic-efficient', ('psi',), ('y', 'theta'), None, 1, 0),
    ('rod-1-cardanic', ('psi',), ('y', 'theta'), None, 1, 2),
    ('x-axes', ('x',), (), None, 1, 2),
    ('y-axes', ('y',), (), None, 1, 2),
    ('z-axes', ('z',), (), None, 1, 2),
    ('angle-rod', ('theta',), ('phi', 'y'), ('theta', 'phi', 'psi'), 1, 2),
    ('rod-zero-X', (), ('x',), None, 1, 2),
    ('rod-zero-Y', (), ('y',), None, 1, 2),
    ('rod-zero-Z', (), ('z',), None, 1, 2),
    ('rod-1-revolute', ('theta',), ('y',), ('theta', 'phi', 'psi'), 1, 2),
    ('free-3-translate-z-rotate', ('theta', 'x', 'y', 'z'), (), None, 0, 2),
    ('xz-plane', ('x', 'z'), (), None, 0, 0),
    ('xy-plane', ('x', 'y'), (), None, 0, 0),
    ('rod-2-cardanic', ('psi', 'theta'), ('y',), ('psi', 'theta', 'phi'), 1, 2),
    ('rod-2-revolute-scharnier', ('theta', 'phi'), ('y',), ('theta', 'phi', 'psi'), 1, 2),
    ('free-3-rotate', ('phi', 'theta', 'psi'), (), None, 1, 2),
    ('free-3-translate', ('x', 'y', 'z'), (), None, 0, 2),
    ('revolute-X', ('phi',), (), None, 1, 2),
    ('revolute-Y', ('theta',), (), None, 1, 2),
    ('revolute-Z', ('psi',), (), None, 1, 2),
    ('free-6', ('phi', 'theta', 'psi', 'x', 'y', 'z'), (), None, 0, 0),
]

joints = [_make_joint(*spec) for spec in _JOINT_SPECS]
joints_names = [oo.name for oo in joints]
def_joints = dict(zip(joints_names, joints))
######################################################################
class MBSio(object):
    """
    HDF5 (pandas HDFStore) persistence for simulation results of a MBSworld.

    In save mode (``save=True``) the result arrays of *MBworld* are written
    to *filename*; in read mode they are loaded from *filename* and attached
    to this object as attributes of the same names.

    :param filename: path of the HDF5 file to read or create
    :param MBworld: the simulated MBSworld (required when ``save=True``)
    :param save: write *MBworld* to disk if True, otherwise read from disk
    :param params: attribute names to load in read mode (the saved keys are
        fixed, see ``__save__``)
    """
    # NOTE: the mutable default below is never mutated, so it is safe; it is
    # kept as a list for backward compatibility with existing callers.
    def __init__(self, filename, MBworld=None, save=False,
                 params=['state', 'orient', 'con', 'e_kin', 'time', 'x_t', 'acc',
                         'e_pot', 'e_tot', 'e_rot', 'speed', 'model_signals_results']):
        if not save:
            self.__read__(filename, params)
        else:
            # bzip2 at max level keeps the result files small.
            self.store = pd.HDFStore(filename, complevel=9, complib='bzip2',
                                     fletcher32=True)
            self.__save__(params, MBworld)

    def __save__(self, params, MBworld):
        """
        Write the result arrays of *MBworld* into the open HDFStore.

        ``params`` is accepted for interface symmetry with ``__read__`` but
        the stored keys below are fixed.
        """
        self.store['state'] = pd.DataFrame(MBworld.state[:, :3], columns=['x', 'y', 'z'])  # 3d cartesian position
        # NOTE(review): 'eys_x' looks like a typo for 'ex_y'/'ey_x' but is part
        # of the on-disk schema — do not rename without a migration.
        self.store['orient'] = pd.DataFrame(MBworld.orient[:, :6], columns=['ex_x', 'ex_y', 'ex_z', 'eys_x', 'ey_y', 'ey_z'])  # frame vectors e_x, e_y
        self.store['con'] = pd.DataFrame(MBworld.con)  # 3d cartesian vector from-to (info)
        self.store['e_kin'] = pd.DataFrame(MBworld.e_kin)
        self.store['time'] = pd.DataFrame(MBworld.time)
        self.store['x_t'] = pd.DataFrame(MBworld.x_t)
        self.store['acc'] = pd.DataFrame(MBworld.acc)
        self.store['e_pot'] = pd.DataFrame(MBworld.e_pot)
        self.store['e_tot'] = pd.DataFrame(MBworld.e_tot)
        self.store['e_rot'] = pd.DataFrame(MBworld.e_rot)
        self.store['speed'] = pd.DataFrame(MBworld.speed)
        self.store['model_signals_results'] = pd.Series(MBworld.model_signals)
        # NOTE(review): the store is intentionally left open so callers can
        # keep using self.store; close it explicitly when done — confirm
        # whether closing here would break existing usage.
        # TODO: persist the visualisation arrays (vis_body_frames, vis_forces,
        # vis_frame_coords, vis_force_coords) once a stable layout is decided.

    def __read__(self, filename, params):
        """Load each key in *params* from *filename* as an attribute of self."""
        for par in params:
            setattr(self, par, pd.read_hdf(filename, par))

    def animate(self):
        """Placeholder for replaying a stored simulation (not implemented)."""
        pass
class MBSworld(object):
"""
All storages lists and dictionaries for a full multibody sim world. Keeps track of every frame, body, force and torque.
Includes also parameters and external model interface (e.g. tire).
"""
    def __init__(self, name = '', connect=False, force_db_setup=False):
        """
        Create an empty multibody world.

        :param str name: name of the world (used as the database key when connected)
        :param bool connect: if True, open a dbHandler on mbs.BASE_PATH+'/data'
        :param bool force_db_setup: if True, force a database setup even when an
            entry for *name* already exists
        """
        # setup the world
        global IF, O, g, t
        self.n_body = -1 # number of the actual body
        self.name = name
        self.mydb = None
        self.connect = connect
        if connect:
            self.mydb = dbHandler(mbs.BASE_PATH+'/data')
            # NOTE(review): has_key is the Python-2 dict API -- assumed that
            # dbHandler provides it itself; confirm for Python 3
            if not self.mydb.has_key(name) or force_db_setup:
                self.db_setup = True
            else:
                self.db_setup = False
        self.bodies = {} # name mapping
        self.parameters = [] #keep the order correct, initialized in kaneify
        self.parameters_diff = {} #helper dict, initialized in kaneify
        self.q = [] # generalized coordinates nested list (one sublist per body)
        self.u = [] # generalized velocities nested list
        self.a = [] # generalized accelerations nested list
        self.q_flat = [] # generalized coordinates flat list
        self.u_flat = [] # generalized velocities flat list
        self.a_flat = [] # generalized accelerations flat list
        self.f_ext_act = [] # the actually added external forces (needed scalar symbols)
        self.f_int_act = [] # the actually added internal forces (needed scalar symbols)
        self.m = [] # Mass of each body
        self.Ixx = [] # Inertia xx for each body
        self.Iyy = [] # Inertia yy for each body
        self.Izz = [] # Inertia zz for each body
        self.forces_ext_n = 0 #number of actual external forces (counting)
        self.forces_int_n = 0 #number of actual internal forces (counting)
        self.forces_models_n = 0 #number of model input functions
        self.f_int_expr = [] #to keep expression of all dofs results in one scalar var
        self.f_int_lamb = [] #same but lambdified, input always the full state vec (even if not all of it is used)
        self.f_int_func = [] #storage for the function handles (for the one scalar corresponding in f_int_lamb)
        self.f_ext_func = [] #to keep some func references (python function handles)
        self.f_t_models_sym = [] #general storage list for symbols for model forces
        self.models = [] #general storage list for model handlers
        self.models_obj = {} #storage dict for model objects
        self.f_models_lamb = []
        self.kl = None
        self.forces = [] # list of (point, force-vector) tuples, consumed later
        self.torques = [] # list of (frame, torque-vector) tuples, consumed later
        self.particles = [] # RigidBody objects, one per added body
        self.kindiffs = [] # kinematic differential equations (q' - u)
        self.kindiff_dict = {}
        self.accdiff = [] # acceleration differential equations (u' - a)
        self.accdiff_dict = {}
        self.body_frames = {} # body number -> body fixed center-of-mass frame
        self.body_list_sorted = []
        self.bodies_in_graphics = {}
        self.eq_constr = [] # geometric constraint expressions
        self.n_constr = [] # body numbers the constraints act on
        self.dof = 0 # total number of generated degrees of freedom
        # the mbs inertial frame: a copy of IF fixed at the origin O
        self.IF_mbs = MBSframe('IF_mbs')
        self.IF_mbs.orient(IF, 'Axis', [0.,IF.z])
        self.IF_mbs.set_pos_Pt(O)
        self.pot_energy_saver = []
        self.con_type = [] #just for grafics
        # the world itself is registered as pseudo body number 999
        self.body_frames[999] = self.IF_mbs
        self.bodies.update({'world':999})
        self.tau_check = 0.
        # new obj-paradigm starts here
        self.control_signals_obj = []
        self.bodies_obj = {} # name -> MBSbody
        self.marker_obj = {} # name -> MBSmarker (dynamic frames)
        self.marker_fixed_obj = {} # name -> MBSmarker (body fixed frames)
        self.param_obj = [] #name, pos, vel, frame, joint, ref_frame):
        self.bodies_obj.update({'world': MBSbody(999,'world', 0., [0.,0.,0.], self.IF_mbs.get_pos_IF(),self.IF_mbs.get_vel_vec_IF(),self.IF_mbs,'',self.IF_mbs,self.IF_mbs)})
        # default marker at the world origin
        self.add_marker('world_M0', 'world',0.,0.,0.)
def add_control_signal(self, expr, name = '', unit = ''):
self.control_signals_obj.append(MBScontrolSignal(expr, name, unit))
def add_parameter(self, new_para_name, fct_para, fct_para_dt , fct_para_ddt):
global IF, O, g, t
params_n = len(self.param_obj)
p = dynamicsymbols('para_'+str(params_n))
p_dt = dynamicsymbols('para_dt'+str(params_n))
p_ddt = dynamicsymbols('para_ddt'+str(params_n))
diff_dict = {p.diff(): p_dt, p_dt.diff(): p_ddt}
self.param_obj.append(MBSparameter(new_para_name,p,p_dt,p_ddt,fct_para,fct_para_dt,fct_para_ddt, diff_dict))
def add_parameter_expr(self, new_para_name, expression, const = {}):
global IF, O, g, t
params_n = len(self.param_obj)
p = dynamicsymbols('para_'+str(params_n))
p_dt = dynamicsymbols('para_dt'+str(params_n))
p_ddt = dynamicsymbols('para_ddt'+str(params_n))
diff_dict = {p.diff(): p_dt, p_dt.diff(): p_ddt}
expression = expression.subs(const)
dt0 = lambdify(t,expression)
dt1 = lambdify(t,expression.diff(t))
dt2 = lambdify(t,expression.diff(t).diff(t))
self.param_obj.append(MBSparameter(new_para_name,p,p_dt,p_ddt,dt0,dt1,dt2,diff_dict))
def exchange_parameter(self, para_name, fct_para, fct_para_dt , fct_para_ddt):
pobj = [ o for o in self.param_obj if o.name == para_name ][0]
pobj.func = dt0
pobj.func_dt = dt1
pobj.func_ddt = dt2
def exchange_parameter_expr(self, para_name, expression, const = {}):
global IF, O, g, t
expression = expression.subs(const)
dt0 = lambdify(t,expression)
dt1 = lambdify(t,expression.diff(t))
dt2 = lambdify(t,expression.diff(t).diff(t))
pobj = [ o for o in self.param_obj if o.name == para_name ][0]
pobj.func = dt0
pobj.func_dt = dt1
pobj.func_ddt = dt2
def add_rotating_marker_para(self, new_marker_name, str_n_related, para_name, vx, vy, vz, axes):
"""
Add a rotating marker framer with parameter related phi
:param str new_marker_name: the new marker name
:param str str_n_related: the reference fixed body frame
:param str para_name: the parameter name which is the rotation angle
:param float vx, vy, vz: the const. translation vector components in the related frame
:param str axes:
"""
try:
body = self.bodies_obj[str_n_related]
except:
raise InputError("no such body (make body first) %s" % str_n_related)
try:
phi = [x.sym for x in self.param_obj if x.name == para_name][0]
except:
raise InputError("no such parameter name %s" % str(para_name))
MF = MBSframe('MF_'+new_marker_name)
MF_fixed = MBSframe('MF_fixed_'+new_marker_name)
N_fixed = body.get_frame()
n_related = body.get_n()
if axes == 'Y':
MF.orient(N_fixed,'Body',[0.,phi,0.], 'XYZ')
MF_fixed.orient(N_fixed,'Body',[0.,phi,0.], 'XYZ')
elif axes == 'Z':
MF.orient(N_fixed,'Body',[0.,0.,phi], 'XYZ')
MF_fixed.orient(N_fixed,'Body',[0.,0.,phi], 'XYZ')
elif axes == 'X':
MF.orient(N_fixed,'Body',[phi, 0.,0.], 'XYZ')
MF_fixed.orient(N_fixed,'Body',[phi, 0.,0.], 'XYZ')
pos_0 = N_fixed.get_pos_IF()
pos = pos_0 + vx * N_fixed.x + vy * N_fixed.y + vz * N_fixed.z
vel = pos.diff(t, IF)
MF.set_pos_vec_IF(pos)
#MF.set_vel_vec_IF(vel)
MF_fixed.set_pos_vec_IF(pos)
#MF_fixed.set_vel_vec_IF(vel)
self.marker_obj.update({new_marker_name:MBSmarker(new_marker_name, MF, str_n_related)})
self.marker_fixed_obj.update({new_marker_name:MBSmarker(new_marker_name, MF_fixed, str_n_related)})
def add_moving_marker(self, new_marker_name, str_n_related, vx, vy, vz, vel, acc, axes):
"""
Add a moving marker framer via velocity and acceleration.
:param str new_marker_name: the name of the new marker
:param str str_n_related: the reference fixed body frame
:param float vx, vy, vz: is the const. translation vector in the related frame
:param float vel: the velocity of the marker
:param float acc: the acceleration of the marker
:param str axes: the axis where the velocity is oriented ('X', 'Y' or 'Z')
"""
global IF, O, g, t
try:
body = self.bodies_obj[str_n_related]
except:
raise InputError("no such body (make body first) %s" % str_n_related)
MF = MBSframe('MF_'+new_marker_name)
MF_fixed = MBSframe('MF_fixed_'+new_marker_name)
N_fixed = body.get_frame()
MF.orient(N_fixed,'Body',[0.,0.,0.], 'XYZ')
MF_fixed.orient(N_fixed,'Body',[0.,0.,0.], 'XYZ')
(kx,ky,kz) = dynamicsymbols('kx, ky, kz')
if axes == 'X':
kx = vx + vel * t + 0.5* acc * t*t
ky = vy
kz = vz
elif axes == 'Y':
kx = vx
ky = vy + vel * t + 0.5* acc * t*t
kz = vz
elif axes == 'Z':
kx = vx
ky = vy
kz = vz + vel * t + 0.5* acc * t*t
pos_0 = N_fixed.get_pos_IF()
pos = pos_0 + kx * N_fixed.x + ky * N_fixed.y + kz * N_fixed.z
vel = pos.diff(t, IF)
MF.set_pos_vec_IF(pos)
#MF.set_vel_vec_IF(vel)
MF_fixed.set_pos_vec_IF(pos)
#MF_fixed.set_vel_vec_IF(vel)
self.marker_obj.update({new_marker_name:MBSmarker(new_marker_name, MF, str_n_related)})
self.marker_fixed_obj.update({new_marker_name:MBSmarker(new_marker_name, MF_fixed, str_n_related)})
def add_moving_marker_para(self, new_marker_name, str_n_related, para_name, vx, vy, vz, axes):
"""
Add a moving marker framer via a parameter.
:param str new_marker_name: the name of the new marker
:param str str_n_related: the reference fixed body frame
:param str para_name: the name of the involved parameter (as position)
:param float vx, vy, vz: the const. translation vector components in the related frame
:param str axes: the axis where the parameter as velocity is oriented ('X', 'Y' or 'Z')
"""
global IF, O, g, t
try:
body = self.bodies_obj[str_n_related]
except:
raise InputError("no such body (make body first) %s" % str_n_related)
try:
phi = [x.sym for x in self.param_obj if x.name == para_name][0]
except:
raise InputError("no such parameter name %s" % str(para_name))
MF = MBSframe('MF_'+new_marker_name)
MF_fixed = MBSframe('MF_fixed_'+new_marker_name)
N_fixed = body.get_frame()
MF.orient(N_fixed,'Body',[0.,0.,0.], 'XYZ')
MF_fixed.orient(N_fixed,'Body',[0.,0.,0.], 'XYZ')
(kx,ky,kz) = dynamicsymbols('kx, ky, kz')
if axes == 'X':
kx = vx + phi
ky = vy
kz = vz
elif axes == 'Y':
kx = vx
ky = vy + phi
kz = vz
elif axes == 'Z':
kx = vx
ky = vy
kz = vz + phi
else:
raise InputError("please use 'X', 'Y' or 'Z' for axes")
pos_0 = N_fixed.get_pos_IF()
pos = pos_0 + kx * N_fixed.x + ky * N_fixed.y + kz * N_fixed.z
vel = pos.diff(t, IF)
MF.set_pos_vec_IF(pos)
#MF.set_vel_vec_IF(vel)
MF_fixed.set_pos_vec_IF(pos)
#MF_fixed.set_vel_vec_IF(vel)
self.marker_obj.update({new_marker_name:MBSmarker(new_marker_name, MF, str_n_related)})
self.marker_fixed_obj.update({new_marker_name:MBSmarker(new_marker_name, MF_fixed, str_n_related)})
def add_rotating_marker(self, new_marker_name, str_n_related, vx, vy, vz, omega, axes):
"""
Add a rotating marker framer with constant omega.
:param str new_marker_name: the name of the new marker
:param str str_n_related: the reference fixed body frame
:param float vx, vy, vz: the const. translation vector components in the related frame (float*3)
:param float omega: the angular velocity
:param str axes: the axis where the angular velocity is oriented ('X', 'Y' or 'Z')
"""
global IF, O, g, t
try:
body = self.bodies_obj[str_n_related]
except:
raise InputError("no such body (make body first) %s" % str_n_related)
MF = MBSframe('MF_'+new_marker_name)
MF_fixed = MBSframe('MF_fixed_'+new_marker_name)
N_fixed = body.get_frame()
if axes == 'Y':
MF.orient(N_fixed,'Body',[0.,omega*t,0.], 'XYZ')
MF_fixed.orient(N_fixed,'Body',[0.,omega*t,0.], 'XYZ')
elif axes == 'Z':
MF.orient(N_fixed,'Body',[0.,0.,omega*t], 'XYZ')
MF_fixed.orient(N_fixed,'Body',[0.,0.,omega*t], 'XYZ')
elif axes == 'X':
MF.orient(N_fixed,'Body',[omega*t, 0.,0.], 'XYZ')
MF_fixed.orient(N_fixed,'Body',[omega*t, 0.,0.], 'XYZ')
else:
raise InputError("as axis enter one of these: X, Y, or Z")
pos_0 = N_fixed.get_pos_IF()
pos = pos_0 + vx * N_fixed.x + vy * N_fixed.y + vz * N_fixed.z
vel = pos.diff(t, IF)
MF.set_pos_vec_IF(pos)
#MF.set_vel_vec_IF(vel)
MF_fixed.set_pos_vec_IF(pos)
#MF_fixed.set_vel_vec_IF(vel)
self.marker_obj.update({new_marker_name:MBSmarker(new_marker_name, MF, str_n_related)})
self.marker_fixed_obj.update({new_marker_name:MBSmarker(new_marker_name, MF_fixed, str_n_related)})
def add_marker(self, new_marker_name, str_n_related, vx, vy, vz, phix = 0., phiy = 0., phiz = 0.):
"""
Add a fixed marker framer related to a body frame. Marker frames are used to add joints or forces (they usually act between two of them or a body and a marker).
:param str new_marker_name: the name of the new marker
:param str str_n_related: the reference fixed body frame
:param float vx, vy, vz: the const. translation vector components in the related frame (float*3)
:param float phix,phiy,phiz: the const. rotation Eulerian angles for a new (body-fixed) orientation of the marker frame
"""
global IF, O, g, t
try:
body = self.bodies_obj[str_n_related]
except:
raise InputError("no such body (make body first) %s" % str_n_related)
MF = MBSframe('MF_'+new_marker_name)
MF_fixed = MBSframe('MF_fixed_'+new_marker_name)
N_fixed = body.get_frame()
MF.orient(N_fixed,'Body',[phix,phiy,phiz], 'XYZ')
MF_fixed.orient(N_fixed,'Body',[phix,phiy,phiz], 'XYZ')
pos_0 = N_fixed.get_pos_IF()
pos = pos_0 + vx * N_fixed.x + vy * N_fixed.y + vz * N_fixed.z
vel = pos.diff(t, IF)
MF.set_pos_vec_IF(pos)
#MF.set_vel_vec_IF(vel)
MF_fixed.set_pos_vec_IF(pos)
#MF_fixed.set_vel_vec_IF(vel)
self.marker_obj.update({new_marker_name:MBSmarker(new_marker_name, MF, str_n_related)})
self.marker_fixed_obj.update({new_marker_name:MBSmarker(new_marker_name, MF_fixed, str_n_related)})
def _interpretation_of_str_m_b(self,str_m_b):
"""
Relates the name string of a marker or body to an internal number and frame and a boolean which indicates the type (body or marker)
:param str_m_b: the name string of marker or body
"""
obj = None
if self.bodies_obj.has_key(str_m_b):
try:
obj = body = self.bodies_obj[str_m_b]
n = body.get_n() #self.bodies[str_m_b]
N_fixed_n = body.get_frame() #self.body_frames[n]
is_body = True
except:
raise InputError("body frame not existent for name %s" % str_m_b)
else:
try:
obj = marker = self.marker_fixed_obj[str_m_b]
b_name = marker.get_body_name()
n = self.bodies_obj[b_name].get_n()
N_fixed_n = marker.get_frame()
is_body = False
except:
raise InputError("marker frame not existent for name %s" % str_m_b)
return n, N_fixed_n, is_body, obj
    def add_body_3d(self, new_body_name, str_n_marker, mass, I , joint, parameters = [], graphics = True):
        """
        Core function to add a body to your mbs model. Express the pos and vel of the body in terms of q and u (here comes the joint crunching).
        Generalized coordinates q and u are often (not always) written in the rotated center of mass frame of the previous body.

        :param str new_body_name: the name of the new body (freely given by the user)
        :param str str_n_marker: the name string of the marker where the new body is related to (fixed on)
        :param float mass: the mass of the new body
        :param float*3 I: the inertia of the new body measured in the body symmetric center of mass frame (float*3 list) [Ixx, Iyy, Izz]
        :param str joint: the type of the joint: possible choices are (see code and examples). You can add whatever joint you want to. Here the degrees of freedom are generated.
        :param float*n parameters: the parameters to describe the joint fully (see code)
        :param str graphics: the choice of the graphics representative (ball, car, tire)
        :returns: the internal number of the new body
        """
        global IF, O, g, t
        if not str_n_marker in self.marker_obj:
            raise InputError("marker frame not existent for name %s" % str_n_marker)
        #print n_marker, n_att
        # remember the joint type for the graphics layer
        if graphics:
            self.con_type.append(joint)
        else:
            self.con_type.append('transparent')
        self.n_body += 1
        n_body = self.n_body # number of the actual body (starts at 0)
        # create correct number of symbols for the next body
        # (def_joints is assumed to be a module level table of joint
        # definitions -- defined outside this chunk, confirm)
        if joint in def_joints:
            jobj = def_joints[joint]
            d_free = jobj.n_free
            joint = 'general'
        else:
            raise InputError("no such joint name %s" % str(joint))
        # generalized coordinates / velocities / accelerations for this body
        self.q.append(dynamicsymbols('q'+str(n_body)+'x0:'+str(d_free)))
        self.u.append(dynamicsymbols('u'+str(n_body)+'x0:'+str(d_free)))
        self.a.append(dynamicsymbols('a'+str(n_body)+'x0:'+str(d_free)))
        self.dof += d_free
        self.m.append(mass)
        self.Ixx.append(I[0])
        self.Iyy.append(I[1])
        self.Izz.append(I[2])
        # add the center of mass point to the list of points
        Pt = Point('O_'+new_body_name)
        # we need previous cm_point to find origin of new frame
        N_att = self.marker_obj[str_n_marker].get_frame()
        vec_att = N_att.get_pos_IF()
        N_att_fixed = self.marker_fixed_obj[str_n_marker].get_frame()
        #create frame for each body on the center of mass and body fixed
        N_fixed = MBSframe('N_fixed_'+new_body_name)
        N_fixed.set_pos_Pt(Pt)
        self.body_frames[n_body] = N_fixed
        t_frame = vec_att
        #express the velocity and position in terms of the marker frame
        # not to forget the body fixed frame
        # if joint == 'rod-2-cardanic-old': # parameter[0]: length
        #     N_fixed.orient(N_att_fixed, 'Body', [self.q[n_body][0],self.q[n_body][1],0.], 'ZXY' )
        #     N_att.orient(N_att_fixed, 'Body', [self.q[n_body][0],self.q[n_body][1],0.], 'ZXY' )
        #     pos_pt = (-parameters[0]*N_att.y).express(IF, variables = True)+t_frame
        # elif joint == 'rod-2-revolute-scharnier-old': # parameter[0]: length
        #     N_fixed.orient(N_att_fixed, 'Body', [self.q[n_body][0],self.q[n_body][1],0.], 'YXZ' )
        #     N_att.orient(N_att_fixed, 'Body', [self.q[n_body][0],self.q[n_body][1],0.], 'YXZ' )
        #     pos_pt = (-parameters[0]*N_att.y).express(IF, variables = True)+t_frame
        if joint == 'general':
            # frame selector: 0 inertial, 1 marker (dynamic), 2 marker (body fixed)
            frames = {0:IF, 1:N_att, 2:N_att_fixed}
            print("General frames: ", IF, N_att, N_att_fixed)
            q_list = self.q[n_body]
            rot_order = []
            # map the joint's free symbols to this body's generalized coordinates
            free_dict = dict(zip(jobj.free_list, q_list))
            joint_parameters = dict(zip(jobj.const_list, parameters))
            # build the Euler rotation list: free coordinate, joint constant, or 0
            for s in jobj.rot_order:
                if s in jobj.free_list:
                    rot_order.append(free_dict[s])
                elif s in jobj.const_list:
                    rot_order.append(joint_parameters[s])
                else:
                    rot_order.append(0.)
            print("General rot: ", frames[jobj.rot_frame], rot_order, jobj.c_string)
            N_fixed.orient(frames[jobj.rot_frame], 'Body', rot_order, jobj.c_string)
            N_att.orient(frames[jobj.rot_frame], 'Body', rot_order, jobj.c_string)
            # translation part of the joint, expressed in the selected frame
            trans = jobj.x*frames[jobj.trans_frame].x + jobj.y*frames[jobj.trans_frame].y + jobj.z*frames[jobj.trans_frame].z
            trans = trans.subs(joint_parameters)
            trans = trans.subs(free_dict)
            # zero out translation symbols that are neither free nor constant
            for s in jobj.trans:
                if not s in jobj.free_list:
                    trans = trans.subs({s:0.})
            print ("General trans: ",trans)
            pos_pt = trans.express(IF, variables = True)+t_frame
        else:
            # we will never get here because we already checked for completeness
            # by looking in the containers of free0 - free6, but we never know, so again...
            raise InputError("no such joint name %s" % str(joint))
        vel_pt = pos_pt.diff(t, IF)
        # here we update the joint name to the body
        if joint == 'general':
            joint_name = jobj.name
        else:
            joint_name = joint
        self.bodies_obj.update({new_body_name:MBSbody(n_body,new_body_name, mass, I, pos_pt, vel_pt, N_fixed, joint_name, N_att, N_att_fixed, str_n_marker) })
        self.bodies_obj[new_body_name].set_freedoms_dict(free_dict)
        print( "body no:", n_body )
        print( "body has joint:", joint_name )
        print( "body pos:", pos_pt )
        print( "body vel:", vel_pt )
        #TODO check the coefficients and delete all small ones
        N_fixed.set_pos_vec_IF(pos_pt)
        # dyadic inertia tensor assembled in the body fixed frame
        Ixx = self.Ixx[n_body]*outer(N_fixed.x, N_fixed.x)
        Iyy = self.Iyy[n_body]*outer(N_fixed.y, N_fixed.y)
        Izz = self.Izz[n_body]*outer(N_fixed.z, N_fixed.z)
        I_full = Ixx + Iyy + Izz
        Pa = RigidBody('Bd' + str(n_body), Pt, N_fixed, self.m[n_body], (I_full, Pt))
        self.particles.append(Pa)
        # kinematic (q' = u) and acceleration (u' = a) differential equations
        for ii in range(d_free):
            self.kindiffs.append(self.q[n_body][ii].diff(t) - self.u[n_body][ii])
            self.accdiff.append(self.u[n_body][ii].diff(t) - self.a[n_body][ii])
        self.bodies.update({new_body_name:n_body})
        return n_body
def get_body(self, name):
return self.bodies_obj[name]
def get_model(self, name):
return self.models_obj[name]
def add_force(self, str_m_b_i, str_m_b_j, parameters = []):
"""
Interaction forces between body/marker i and j via spring damper element.
:param str str_m_b_i: name of the body/marker i
:param str str_m_b_j: name of the body/marker j
:param list parameters: stiffness, offset, damping-coefficient
"""
eps = 1e-2
if not len(parameters) == 3:
raise ParameterError(parameters, 2, "add_force")
i, N_fixed_i, _, body_i = self._interpretation_of_str_m_b(str_m_b_i)
j, N_fixed_j, _, body_j = self._interpretation_of_str_m_b(str_m_b_j)
Pt_i = N_fixed_i.get_pos_Pt()
Pt_j = N_fixed_j.get_pos_Pt()
r_ij = Pt_i.pos_from(Pt_j)
abs_r_ij = r_ij.magnitude()
vel = N_fixed_i.get_vel_vec_IF() - N_fixed_j.get_vel_vec_IF()
force = -parameters[0]*(r_ij-parameters[1]*r_ij.normalize()) - parameters[2]*vel
self.pot_energy_saver.append(0.5*parameters[0]*(abs_r_ij-parameters[1])**2)
self.forces.append((Pt_i,force))
self.forces.append((Pt_j,-force))
    def add_force_special(self, str_m_b, force_type, parameters = []):
        """
        Specialised forces on body (for shortcut input).

        :param str str_m_b: is the name string of the body
        :param str force_type: the type of force which is added
        :param list parameters: corresponding parameters to describe the force

        NOTE(review): every branch only computes `force`; the single append at
        the very end applies it to the body point Pt_n. Branches that also need
        a reaction on the attachment point append that themselves. An unknown
        *force_type* therefore raises a NameError on the final append -- confirm
        whether an explicit InputError is wanted instead.
        """
        global IF, O, g, t
        n, N_fixed_n, is_body, body = self._interpretation_of_str_m_b(str_m_b)
        if not is_body:
            raise InputError("only a body name here (no marker)")
        Pt_n = body.Pt()
        N_att = body.get_N_att()
        Pt_att = N_att.get_pos_Pt()
        #print ( N_att, Pt_att )
        if force_type == 'spring-axes':
            if len(parameters) == 0 :
                raise ParameterError(parameters, 1, "spring-axes")
            # spring along the body y axis acting on the first coordinate
            force = -parameters[0]*N_fixed_n.y*(self.q[n][0]-parameters[1])
            self.pot_energy_saver.append(0.5*parameters[0]*(self.q[n][0]-parameters[1])**2)
            self.forces.append((Pt_att,-force))
        if force_type == 'spring-damper-axes':
            if len(parameters) == 0 :
                raise ParameterError(parameters, 1, "spring-damper-axes")
            force = (-parameters[0]*(self.q[n][0]-parameters[1])-parameters[2]*self.u[n][0])*N_fixed_n.y
            #print force, Pt_att, Pt_n
            self.pot_energy_saver.append(0.5*parameters[0]*(self.q[n][0]-parameters[1])**2)
            self.forces.append((Pt_att,-force))
        if force_type == 'spring-y':
            if len(parameters) == 0 :
                raise ParameterError(parameters, 1, "spring-y")
            # spring along the inertial y axis (applied by the final append only)
            force = -parameters[0]*IF.y*self.q[n][0]
            self.pot_energy_saver.append(0.5*parameters[0]*self.q[n][0]**2)
        if force_type == 'grav':
            # gravity in negative inertial y direction
            force = -g*self.m[n]*IF.y
            self.pot_energy_saver.append(g*self.m[n]*body.y()) # old: self.get_pt_pos(n,IF,1))
        if force_type == 'spring-rod':
            if len(parameters) < 2:
                raise ParameterError(parameters, 2, "spring-rod")
            force = parameters[1]*(self.q[n][1]-parameters[0])*N_att.y
            self.pot_energy_saver.append(0.5*parameters[1]*(self.q[n][1]-parameters[0])**2)
            self.forces.append((Pt_att,-force))
        if force_type == 'spring-damper-rod':
            if len(parameters) < 3:
                raise ParameterError(parameters, 3, "spring-damper-rod")
            force = parameters[1]*(self.q[n][1]-parameters[0])*N_att.y+parameters[2]*self.u[n][1]*N_att.y
            self.pot_energy_saver.append(0.5*parameters[1]*(self.q[n][1]-parameters[0])**2)
            self.forces.append((Pt_att,-force))
        if force_type == 'spring-horizontal-plane':
            if len(parameters) == 0 : #parameters[0] = D, paramters[1] = y0
                raise ParameterError(parameters, 1, "spring-plane")
            # one-sided spring, active below the plane y = parameters[1]
            # (the sign() factor switches the stiffness off above the plane)
            y_body = N_fixed_n.get_pos_IF().dot(IF.y)-parameters[1]
            force = -parameters[0]*IF.y*y_body*(1.-re(sign(y_body))*0.5)
            self.pot_energy_saver.append(0.5*parameters[0]*y_body**2*(1.-re(sign(y_body)))*0.5)
        if force_type == 'spring-damper-horizontal-plane':
            if len(parameters) == 0 : #parameters[0] = D, paramters[1] = y0
                raise ParameterError(parameters, 1, "spring-plane")
            y_body = N_fixed_n.get_pos_IF().dot(IF.y)-parameters[1]
            v_body = y_body.diff()
            # NOTE(review): here sign() is inside the 0.5 factor, unlike the
            # undamped branch above -- confirm which form is intended
            force = IF.y*(-parameters[0]*y_body-parameters[2]*v_body)*(1.-re(sign(y_body)*0.5))#+f_norm)
            self.pot_energy_saver.append(0.5*parameters[0]*y_body**2*(1.-re(sign(y_body))*0.5))
        # apply the computed force to the body's center-of-mass point
        self.forces.append((Pt_n,force))
def add_torque_3d(self, str_m_b, torque_type, parameters = []):
global IF, O, g, t
n, N_fixed_n, _, body = self._interpretation_of_str_m_b(str_m_b)
N_fixed_m = body.get_N_att_fixed() #self.body_frames[m]
if torque_type == 'bending-stiffness-1':
print( 'stiffness' )
phi = self.q[n][0]
torque = -parameters[1]*( phi - parameters[0])*N_fixed_n.z # phi is always the first freedom
self.pot_energy_saver.append(0.5*parameters[1]*(phi-parameters[0])**2)
self.torques.append((N_fixed_n, torque))
self.torques.append((N_fixed_m, -torque))
elif torque_type == 'bending-stiffness-2':
print( 'stiffness-2' )
#r_vec = Pt_n.pos_from(Pt_m)
phi = -N_fixed_n.y.cross(N_att_fixed.y)
phi_m = asin(phi.magnitude())
#phi = phi.normalize()
torque = -parameters[1]*phi # phi is always the first freedom
self.pot_energy_saver.append(0.5*parameters[1]*phi_m**2)
self.torques.append((N_fixed_n, torque))
self.torques.append((N_fixed_m, -torque))
print( "TORQUE: ", phi, torque )
elif torque_type == 'rotation-stiffness-1':
print( 'rot-stiffness-1' )
phi = self.q[n][0]
torque = -parameters[0]*phi*N_fixed_m.y #-parameters[1]*phi_3
self.pot_energy_saver.append(0.5*parameters[0]*phi**2)
self.torques.append((N_fixed_n, torque))
self.torques.append((N_fixed_m, -torque))
print( "TORQUE: ", phi, "...", torque )
else:
raise InputError(torque_type)
def add_parameter_torque(self, str_m_b, str_m_b_ref, v, para_name):
"""
Add an external torque to a body in the direction v, the abs value is equal the value of the parameter (para_name)
:param str_m_b: a body name or a marker name
:param str_m_b_ref: a body name or a marker name as a reference where vel and omega is transmitted to the model and in which expressed the force and torque is acting
:param v: direction of the torque (not normalized)
:param para_name: name of the parameter
"""
global IF, O, g, t
try:
phi = [x.sym for x in self.param_obj if x.name == para_name][0]
except:
raise InputError("no such parameter name %s" % str(para_name))
n, N_fixed_n, _, body = self._interpretation_of_str_m_b(str_m_b)
m, N_fixed_m, _, _ = self._interpretation_of_str_m_b(str_m_b_ref)
n_vec = ( v[0] * N_fixed_m.x + v[1] * N_fixed_m.y + v[2] * N_fixed_m.z )
self.torques.append((N_fixed_n, n_vec*phi/sqrt(v[0]*v[0]+v[1]*v[1]+v[2]*v[2]) ))
def add_parameter_force(self, str_m_b, str_m_b_ref, v, para_name):
"""
Add an external force to a body in the direction v, the abs value is equal the value of the parameter (para_name)
:param str_m_b: a body name or a marker name
:param str_m_b_ref: a body name or a marker name as a reference where vel and omega is transmitted to the model and in which expressed the force and torque is acting
:param v: direction of the force (not normalized)
:param para_name: name of the parameter
"""
global IF, O, g, t
try:
phi = [x.sym for x in self.param_obj if x.name == para_name][0]
except:
raise InputError("no such parameter name %s" % str(para_name))
n, N_fixed_n, _, body = self._interpretation_of_str_m_b(str_m_b)
m, N_fixed_m, _, _ = self._interpretation_of_str_m_b(str_m_b_ref)
n_vec = v[0] * N_fixed_m.x + v[1] * N_fixed_m.y + v[2] * N_fixed_m.z
self.forces.append((N_fixed_n, n_vec*phi/sqrt(v[0]*v[0]+v[1]*v[1]+v[2]*v[2]) ))
    def add_one_body_force_model(self, model_name, str_m_b, str_m_b_ref, typ='tire', parameters = []):
        """
        Add an (external) model force/torque for one body: the force/torque is acting
        on the body if the parameter is a body string and on the marker if it is a marker string.

        :param str model_name: name under which the model object is stored
        :param str_m_b: a body name or a marker name
        :param str_m_b_ref: a body name or a marker name as a reference where vel and omega is transmitted to the model and in which expressed the force and torque is acting
        :param typ: the type of the model ('general' or 'tire'; the types can be extended easily, you are free to provide external models via the interface)
        :param parameters: a dict of parameters applied to the force model as initial input

        NOTE(review): when *typ* matches neither branch below, the trailing
        set_coordinate_trafo call hits the previously registered model (or
        raises IndexError if there is none) -- confirm intended behaviour.
        """
        global IF, O, g, t
        F = []
        T = []
        self.forces_models_n += 1
        n, N_fixed_n, _, body = self._interpretation_of_str_m_b(str_m_b)
        m, N_fixed_m, _, _ = self._interpretation_of_str_m_b(str_m_b_ref)
        # six fresh scalar symbols: model force and torque components
        F = dynamicsymbols('F_models'+str(n)+"x0:3")
        T = dynamicsymbols('T_models'+str(n)+"x0:3")
        self.f_t_models_sym = self.f_t_models_sym + F + T
        Pt_n = N_fixed_n.get_pos_Pt()
        # prepare body symbols
        # position in inertial coordinates
        pos = N_fixed_n.get_pos_IF()
        pos_x = pos.dot(IF.x)
        pos_y = pos.dot(IF.y)
        pos_z = pos.dot(IF.z)
        # velocity expressed in the reference frame
        vel = N_fixed_n.get_vel_vec_IF()
        vel_x = vel.dot(N_fixed_m.x)
        vel_y = vel.dot(N_fixed_m.y)
        vel_z = vel.dot(N_fixed_m.z)
        # angular velocity expressed in the reference frame
        omega = N_fixed_n.get_omega(IF)
        omega_x = omega.dot(N_fixed_m.x)
        omega_y = omega.dot(N_fixed_m.y)
        omega_z = omega.dot(N_fixed_m.z)
        # body z axis (normal vector) in inertial coordinates
        n_vec = N_fixed_n.z
        n_x = n_vec.dot(IF.x)
        n_y = n_vec.dot(IF.y)
        n_z = n_vec.dot(IF.z)
        #get the model and supply the trafos
        if typ == 'general':
            oo = one_body_force_model()
            self.models.append(oo)
            self.models_obj.update({model_name: MBSmodel(model_name, oo) })
        elif typ == 'tire':
            oo = simple_tire_model()
            self.models.append(oo)
            self.models_obj.update({model_name: MBSmodel(model_name, oo) })
        self.models[-1].set_coordinate_trafo([pos_x, pos_y, pos_z, n_x, n_y, n_z, vel_x, vel_y, vel_z, omega_x, omega_y, omega_z])
        # the last six symbols added above are this model's force/torque components
        force = self.f_t_models_sym[-6]*N_fixed_m.x + self.f_t_models_sym[-5]*N_fixed_m.y + self.f_t_models_sym[-4]*N_fixed_m.z
        torque = self.f_t_models_sym[-3]*N_fixed_m.x + self.f_t_models_sym[-2]*N_fixed_m.y + self.f_t_models_sym[-1]*N_fixed_m.z
        #print "TTT: ",torque
        self.forces.append((Pt_n,force))
        self.torques.append((N_fixed_n, torque))
def add_force_spline_r(self, str_m_b_i, str_m_b_j, filename, param = [0., 1.0]):
"""
Interaction forces between body/marker i and j via characteristic_line class interp (ind. variable is the distance of the two bodies)
:param str str_m_b_i: name of the body/marker i
:param str str_m_b_j: name of the body/marker j
"""
i, N_fixed_i, _, body = self._interpretation_of_str_m_b(str_m_b_i)
j, N_fixed_j, _, _ = self._interpretation_of_str_m_b(str_m_b_j)
Pt_i = N_fixed_i.get_pos_Pt()
Pt_j = N_fixed_j.get_pos_Pt()
r_ij = Pt_i.pos_from(Pt_j)
abs_r_ij = r_ij.magnitude()
self.f_int_act.append(dynamicsymbols('f_int'+str(i)+str(j)+'_'+str(self.forces_int_n)))
self.forces_int_n += 1
#one is sufficient as symbol
force = param[1]*self.f_int_act[-1]*r_ij/(abs_r_ij+1e-3)
kl = interp(filename)
self.f_int_expr.append(abs_r_ij-param[0])
self.f_int_func.append(kl.f_interp)
#actio = reactio !!
self.forces.append((Pt_i,force))
self.forces.append((Pt_j,-force))
def add_force_spline_v(self, str_m_b_i, str_m_b_j, filename, param = [1.0]):
"""
Interaction forces between body/marker i and j via characteristic_line class interp (ind. variable is the relative velocity of the two bodies)
:param str str_m_b_i: name of the body/marker i
:param str str_m_b_j: name of the body/marker j
"""
i, N_fixed_i, _, body = self._interpretation_of_str_m_b(str_m_b_i)
j, N_fixed_j, _, _ = self._interpretation_of_str_m_b(str_m_b_j)
Pt_i = N_fixed_i.get_pos_Pt()
Pt_j = N_fixed_j.get_pos_Pt()
r_ij = Pt_i.pos_from(Pt_j)
abs_r_ij = r_ij.magnitude()
abs_r_ij_dt = abs_r_ij.diff()
self.f_int_act.append(dynamicsymbols('f_int'+str(i)+str(j)+'_'+str(self.forces_int_n)))
self.forces_int_n += 1
#one is sufficient as symbol
force = param[0]*self.f_int_act[-1]*r_ij/abs_r_ij
kl = interp(filename)
self.f_int_expr.append(abs_r_ij_dt)
self.f_int_func.append(kl.f_interp)
#actio = reactio !!
self.forces.append((Pt_i,force))
self.forces.append((Pt_j,-force))
def add_force_ext(self, str_m_b, str_m_b_ref, vx, vy, vz, py_function_handler):
'''
Includes the symbol f_ext_n for body/marker str_m_b expressed in frame str_m_b_ref in direction vx, vy, vz
:param str str_m_b: name of the body/marker
:param str str_m_b_ref: existing reference frame
:param float vx, vy, vz: const. vector components giving the direction in the ref frame
'''
global IF, O, g, t
n, N_fixed_n, _, body = self._interpretation_of_str_m_b(str_m_b)
m, N_fixed_m, _, _ = self._interpretation_of_str_m_b(str_m_b_ref)
#old: Pt_n = self.body_frames[n].get_pos_Pt()
print( n, N_fixed_n )
Pt_n = N_fixed_n.get_pos_Pt()
self.f_ext_act.append(dynamicsymbols('f_ext'+str(n)))
f_vec = vx * self.f_ext_act[-1]*N_fixed_m.x.express(IF) +\
vy * self.f_ext_act[-1]*N_fixed_m.y.express(IF) +\
vz * self.f_ext_act[-1]*N_fixed_m.z.express(IF)
self.forces_ext_n += 1
self.forces.append((Pt_n,f_vec))
self.f_ext_func.append(py_function_handler)
def get_frame(self, str_m_b):
m, frame, _, _ = self._interpretation_of_str_m_b(str_m_b)
return frame
    def add_geometric_constaint(self, str_m_b, equ, str_m_b_ref, factor):
        """
        Function to add a geometric constraint (plane equation in the easiest form).

        :param str str_m_b: name of the body/marker for which the constraint is valid
        :param sympy-expression equ: is of form f(x,y,z)=0,
        :param str str_m_b_ref: reference frame
        :param float factor: the factor of the constraint force (perpendicular to the plane). If this is higher, the constraint is much better fulfilled, but with much longer integration time (since the corresponding eigenvalue gets bigger).

        NOTE(review): the misspelled method name ('constaint') is kept for
        backwards compatibility with existing callers.
        """
        global IF, O, g, t
        n, N_fixed_n, is_body, body = self._interpretation_of_str_m_b(str_m_b)
        m, frame_ref, _, _ = self._interpretation_of_str_m_b(str_m_b_ref)
        #frame_ref = IF
        # coordinate variables of the reference frame via frame indexing --
        # presumably the sympy ReferenceFrame varlist entries, confirm
        x = frame_ref[0]
        y = frame_ref[1]
        z = frame_ref[2]
        #################################
        #valid for planes
        #nx = equ.coeff(x,1)
        #ny = equ.coeff(y,1)
        #nz = equ.coeff(z,1)
        #nv = (nx*frame_ref.x + ny*frame_ref.y + nz*frame_ref.z).normalize()
        #################################
        #valid in general
        # unit normal of the constraint surface from the gradient of equ
        nv = gradient(equ, frame_ref).normalize()
        # side effect: the normalized plane expression is kept on the instance
        self.equ = nv.dot(frame_ref.x)*x + nv.dot(frame_ref.y)*y + nv.dot(frame_ref.z)*z
        Pt = N_fixed_n.get_pos_Pt()
        vec = Pt.pos_from(O).express(frame_ref, variables = True)
        x_p = vec.dot(IF.x)
        y_p = vec.dot(IF.y)
        z_p = vec.dot(IF.z)
        # constraint deviation at the body point and its time derivative
        d_c1 = equ.subs({x:x_p, y:y_p, z:z_p}) #.simplify()
        d_c2 = d_c1.diff(t)
        self.nv = nv = nv.subs({x:x_p, y:y_p, z:z_p})
        # stiffness of the penalty force
        C = 1000. * factor
        if is_body:
            # approximately critical damping for the body mass
            gamma = 2.*sqrt(self.m[n]*C)
        else:
            gamma = 2.*sqrt(C)
        #check for const. forces (e.g. grav) -> Projectiorator ...
        # cancel the normal component of forces already acting on this point;
        # iterates only over the initial length while appending, which is safe
        for ii in range(len(self.forces)):
            if self.forces[ii][0] == Pt:
                proj_force = - self.forces[ii][1].dot(nv)*nv
                self.forces.append((Pt, proj_force))
        #first add deviation force
        self.forces.append((Pt,(- C * d_c1 - gamma * d_c2)*factor*nv))
        #second project the inertia forces on the plane
        # Note:
        # here the number C_inf is in theory infinity to project correctly:
        # any large number can only be applied for states which are in accordance with the constraint
        # otherwise the constraint is not valid properly
        #alpha = symbols('alpha')
        #myn = -alpha*nv
        C_inf = 10.0 * factor
        proj_force = - C_inf * self.m[n]*d_c2.diff(t)*nv #- self.m[n]*acc.dot(nv)*nv
        #self.forces.append((Pt, myn))
        self.forces.append((Pt, proj_force))
        self.eq_constr.append(d_c1)
        self.n_constr.append(n)
def add_reflective_wall(self, str_m_b, equ, str_m_b_ref, c, gamma, s):
    """
    Function to add a reflective wall constraint (plane equation in the easiest form)

    :param str str_m_b: name of the body/marker
    :param sympy-expression equ: is of form f(x,y,z)=0,
    :param str str_m_b_ref: reference frame
    :param float c: the stiffness of the reflective wall
    :param float gamma: the damping (only perpendicular) of the wall
    :param float s: the direction of the force (one of (1,-1))
    """
    global IF, O, g, t
    n, N_fixed_n, _, body = self._interpretation_of_str_m_b(str_m_b)
    m, frame_ref, _, _ = self._interpretation_of_str_m_b(str_m_b_ref)
    # coordinate variables of the reference frame
    x = frame_ref[0]
    y = frame_ref[1]
    z = frame_ref[2]
    # plane normal from the linear coefficients (valid for planes only)
    nx = equ.coeff(x,1)
    ny = equ.coeff(y,1)
    nz = equ.coeff(z,1)
    nv = (nx*frame_ref.x + ny*frame_ref.y + nz*frame_ref.z).normalize()
    Pt = N_fixed_n.get_pos_Pt()
    vec = Pt.pos_from(O).express(frame_ref, variables = True)
    # NOTE(review): components taken w.r.t. IF while substituting into
    # frame_ref's variables — confirm frame_ref == IF is intended
    x_p = vec.dot(IF.x)
    y_p = vec.dot(IF.y)
    z_p = vec.dot(IF.z)
    d_c1 = equ.subs({x:x_p, y:y_p, z:z_p}) #.simplify()  # penetration depth
    d_c2 = d_c1.diff(t)  # penetration rate (damping term)
    # (1 - s*sign(d_c1)) switches the spring/damper force on only on the
    # penetrating side of the wall; s selects which side is "solid"
    self.forces.append((Pt,(-c * d_c1 - gamma * d_c2)*(1.-s*re(sign(d_c1)))*nv))
def add_damping(self, str_m_b, gamma):
    """
    Add damping for the body

    :param str str_m_b: name of the body
    :param float gamma: the damping coefficient (acting in all directions)
    :raises InputError: if *str_m_b* names a marker instead of a body
    """
    n, N_fixed_n, is_body, body = self._interpretation_of_str_m_b(str_m_b)
    if not is_body:
        # damping needs a body velocity; markers do not carry one
        # (error message fixed: was "Damping mus be add on the body")
        raise InputError("Damping must be added on a body")
    Pt = body.Pt() #self.body_frames[n].get_pos_Pt()
    # force opposing the body velocity in every direction
    damp = -gamma * body.get_vel()
    self.forces.append((Pt, damp))
def get_omega(self, n, frame, coord):
    '''
    Return one component of the angular velocity of body *n*.

    :param int n: body number (index into self.body_frames)
    :param frame: reference frame the angular velocity is expressed in
    :param int coord: axis index, 0..2 for the frame's x, y, z direction
    :raises IndexError: if coord is outside 0..2 (the previous if/elif
        chain left ``v`` undefined and raised a confusing NameError)
    '''
    omega = self.body_frames[n].get_omega(frame)
    # pick the unit vector of the requested axis
    v = (frame.x, frame.y, frame.z)[coord]
    omega_c = omega.express(frame, variables = True).dot(v)
    # replace coordinate time-derivatives by the generalized speeds
    return omega_c.subs(self.kindiff_dict)
#
# def correct_the_initial_state(self, m, x0):
# global IF, O, g, t
# #correct the initial state vector, dynamic var number m
# #TODO make it more general
# dynamic = self.q_flat + self.u_flat
# x00 = dict(zip(dynamic, x0))
# self.x00 = x00
# x,y = symbols('x y')
# if len(self.q[m]) == 1:
# x00[self.q[m][0]] = x
# equ = (self.d_c1.subs(self.const_dict)).subs(x00)
# x0[m] = sp_solve(equ)[0]
# x00 = dict(zip(dynamic, x0))
# m2 = m+self.dof
# x00[self.u[m][0]] = x
# equ = (self.d_c2.subs(self.const_dict)).subs(x00)
# x0[m2] = sp_solve(equ)[0]
# else:
# x00[self.q[m][0]] = 0.5
# x00[self.q[m][1]] = y
# self.equ_a = (self.d_c1.subs(self.const_dict)).subs(x00)
# x0[m] = 0.5
# x0[m+1] = sp_solve(self.equ_a,y)[0]
# return x0
def set_const_dict(self, const_dict):
    """Store the substitution dict of model constants (symbol -> value)."""
    self.const_dict = const_dict
def kaneify_lin(self, q_ind, u_ind, q_dep, u_dep, c_cons, u_cons, x_op, a_op):
    """
    Assemble Kane's equations with dependent coordinates/speeds and
    linearize them around the operating point.  Stores the linear system
    matrices self.A / self.B, prints the eigenvalues of A and keeps the
    numeric eigen-decomposition in self.eig.

    :param q_ind, u_ind: independent coordinates and speeds
    :param q_dep, u_dep: dependent coordinates and speeds
    :param c_cons: configuration (holonomic) constraint equations
    :param u_cons: velocity constraint equations
    :param dict x_op: operating point of the states
    :param dict a_op: operating point of the accelerations
    """
    global IF, O, g, t
    self.q_flat = q_ind #[ii for mi in self.q for ii in mi]
    self.u_flat = u_ind #[ii for mi in self.u for ii in mi]
    self.a_flat = [ii.diff() for ii in u_ind] #[ii for mi in self.a for ii in mi]
    #add external forces to the dynamic vector
    for oo in self.param_obj:
        self.parameters += oo.get_paras()
        self.parameters_diff.update(oo.get_diff_dict())
    self.freedoms = self.q_flat + self.u_flat + self.parameters
    self.dynamic = self.q_flat + self.u_flat + [t] + self.f_ext_act + \
        self.parameters + self.f_int_act + self.f_t_models_sym
    self.kane = KanesMethod(IF, q_ind=self.q_flat, u_ind=self.u_flat,
        q_dependent = q_dep, u_dependent = u_dep, configuration_constraints = c_cons,
        velocity_constraints = u_cons, kd_eqs=self.kindiffs)
    self.fr, self.frstar = self.kane.kanes_equations(self.forces+self.torques, self.particles)
    #print u_cons
    self.A, self.B, self.inp_vec = self.kane.linearize(op_point=x_op, A_and_B=True,
        new_method=True, simplify =True)
    self.A = self.A.subs(self.kindiff_dict)
    self.B = self.B.subs(self.kindiff_dict)
    # velocity constraints differentiated once -> acceleration constraints
    a_cons = [ ii.diff() for ii in u_cons]
    #too special TODO generalize...??
    # eliminate the dependent speeds from A and B via the velocity constraints
    for equ in u_cons:
        x = dynamicsymbols('x')  # NOTE(review): dead assignment — overwritten by sp_solve below
        for u in u_dep:
            x = sp_solve(equ, u)
            if len(x)>0:
                self.A = self.A.subs({u:x[0]})
                self.B = self.B.subs({u:x[0]})
    # same elimination for the dependent accelerations
    for equ in a_cons:
        x = dynamicsymbols('x')  # NOTE(review): dead assignment — overwritten by sp_solve below
        for u in u_dep:
            x = sp_solve(equ, u.diff())
            if len(x)>0:
                self.A = self.A.subs({u.diff():x[0]})
                self.B = self.B.subs({u.diff():x[0]})
    #generalize doit()
    # force evaluation of any unevaluated derivatives left in A
    for i in range(self.A.shape[0]):
        for j in range(self.A.shape[1]):
            self.A[i,j] = self.A[i,j].doit()
    self.A = self.A.subs(self.accdiff_dict)
    self.A = self.A.subs(x_op)
    self.A = self.A.subs(a_op)
    self.B = self.B.subs(self.accdiff_dict)
    self.B.simplify()
    self.A.simplify()
    self.A = self.A.subs(self.const_dict)
    # symbolic eigenvalues of the linearized system matrix
    self.ev = self.A.eigenvals()
    k = 1
    myEV = []
    print( "*****************************" )
    for e in self.ev.keys():
        myEV.append(e.evalf())
        print( k, ". EV: ", e.evalf() )
        k+= 1
    print( "*****************************" )
    # numeric eigen-decomposition of the (now fully numeric) A matrix
    ar = self.matrix_to_array(self.A, self.A.shape[0])
    self.eig = eig(ar)
def matrix_to_array(self, A, n):
    """
    Convert the top-left n x n part of a (sympy) matrix into a numpy array.

    :param A: matrix-like object supporting ``A[i, j]`` element access with
        entries convertible by float()
    :param int n: number of rows/columns to convert
    :return: numpy array of shape (n, n) with float entries

    Note: the previous implementation assembled the result element by
    element with hstack/vstack (quadratically many array copies) and
    returned a 0-d array for n == 1; this version always returns (n, n).
    """
    out = array([[float(A[i, j]) for j in range(n)] for i in range(n)])
    print( out )  # kept: progress/debug output, as before
    return out
def kaneify(self, simplify = False):
    """
    Assemble the full nonlinear equations of motion with Kane's method,
    apply all substitution dictionaries (kinematic differential equations,
    parameter derivatives, constants, small-angle approximations) and
    lambdify the mass matrix / forcing vector plus internal forces, force
    models and control signals.

    :param bool simplify: run sympy simplify() on M and F (can be very slow)
    """
    global IF, O, g, t
    print( "Assemble the equations of motion ..." )
    # NOTE(review): time.clock() was removed in Python 3.8 — this file
    # appears to target Python 2 (see .sort() on dict values below)
    tic = time.clock()
    self.q_flat = [ii for mi in self.q for ii in mi]
    self.u_flat = [ii for mi in self.u for ii in mi]
    self.a_flat = [ii for mi in self.a for ii in mi]
    #add external, internal forces to the dynamic vector + model forces + parameters
    for oo in self.param_obj:
        self.parameters += oo.get_paras()
        self.parameters_diff.update(oo.get_diff_dict())
    self.freedoms = self.q_flat + self.u_flat + [t] + self.parameters
    self.dynamic = self.q_flat + self.u_flat + [t] + self.f_ext_act + \
        self.parameters + self.f_int_act + self.f_t_models_sym
    #we need the accdiff_dict for forces and rod forces
    for ii in range(len(self.u_flat)):
        x = dynamicsymbols('x')  # NOTE(review): dead assignment — overwritten below
        x = sp_solve(self.accdiff[ii],self.u_flat[ii].diff(t))[0]
        self.accdiff_dict.update({self.u_flat[ii].diff(t): x })
        #strange occurrence of - sign in the linearized version
        self.accdiff_dict.update({self.u_flat[ii].diff(t).subs({self.u_flat[ii]:-self.u_flat[ii]}): -x })
    if self.connect and not self.db_setup:
        # reuse pre-assembled equations stored in the database
        print( "from the db ..." )
        wd = worldData()
        wd.put_str(self.mydb.get(self.name)[1])
        newWorld = list_to_world( wd.get_list() )
        self.M = newWorld.M
        self.F = newWorld.F
        self.kindiff_dict = newWorld.kindiff_dict
        # sanity check: stored equations must match the current state vector
        if not self.dynamic == newWorld.dynamic:
            print( self.dynamic )
            print( newWorld.dynamic )
            raise Exception
    else:
        print( "calc further (subs)..." )
        self.kane = KanesMethod(IF, q_ind=self.q_flat, u_ind=self.u_flat, kd_eqs=self.kindiffs)
        self.fr, self.frstar = self.kane.kanes_equations(self.forces+self.torques, self.particles)
        self.kindiff_dict = self.kane.kindiffdict()
        self.M = self.kane.mass_matrix_full.subs(self.kindiff_dict) # Substitute into the mass matrix
        self.F = self.kane.forcing_full.subs(self.kindiff_dict) # Substitute into the forcing vector
        ##########################################################
        self.M = self.M.subs(self.parameters_diff)
        self.F = self.F.subs(self.parameters_diff)
        self.M = self.M.subs(self.const_dict)
        self.F = self.F.subs(self.const_dict)
        ######################################################
        if len(self.n_constr) > 0:
            # geometric constraints: move acceleration-dependent terms out
            # of the forcing vector F into the mass matrix M
            self.F = self.F.subs(self.accdiff_dict)
            # NOTE(review): integer division under Python 2; a float under
            # Python 3 (range(i_off) would fail) — confirm target version
            i_off = len(self.F)/2
            for ii in range(i_off):
                for line in range(i_off):
                    jj = line + i_off
                    # coefficient of the acceleration a_flat[ii] in F[jj]
                    pxx = Poly(self.F[jj], self.a_flat[ii])
                    if len(pxx.coeffs()) > 1:
                        self.cxx = pxx.coeffs()[0]
                    else:
                        self.cxx = 0.
                    self.M[jj, ii+i_off] -= self.cxx
            # finally drop the (now absorbed) acceleration symbols from F
            accdiff_zero = {} #
            for k in self.a_flat:
                accdiff_zero.update({ k:0 })
            self.F = self.F.subs(accdiff_zero)
    ########################################################
    # db stuff
    if self.connect and self.db_setup:
        wd = worldData(self)
        self.mydb.put(self.name, wd.get_str())
    ########################################################
    if simplify:
        print( "start simplify ..." )
        self.M.simplify()
        self.F.simplify()
    ########################################################
    # calc simplifications due to small angles
    for oo in self.bodies_obj.values():
        small = oo.get_small_angles()
        if small:
            print("small: ", oo.name)
            # replace sin(a) -> a and cos(a) -> 1.0 for declared small angles
            sin_small = [sin(ii) for ii in small]
            cos_small = [cos(ii) for ii in small]
            sin_small_dict = dict(zip(sin_small, small))
            cos_small_dict = dict(zip(cos_small, [1.0]*len(small)))
            print (cos_small_dict, sin_small_dict)
            self.M = self.M.subs(sin_small_dict)
            self.M = self.M.subs(cos_small_dict)
            self.F = self.F.subs(sin_small_dict)
            self.F = self.F.subs(cos_small_dict)
    print( "equations now in ram... lambdify the M,F parts" )
    self.M_func = lambdify(self.dynamic, self.M) # Create a callable function to evaluate the mass matrix
    self.F_func = lambdify(self.dynamic, self.F) # Create a callable function to evaluate the forcing vector
    #lambdify the part forces (only with self.freedom)
    for expr in self.f_int_expr:
        expr = expr.subs(self.kindiff_dict)
        self.f_int_lamb.append(lambdify(self.q_flat + self.u_flat, expr))
    ########################################################
    #lambdify the models, include also some dicts
    for model in self.models:
        model.set_subs_dicts([self.kindiff_dict, self.parameters_diff])
        model.lambdify_trafo(self.freedoms)
        self.f_models_lamb.append(model.force_lam)
    for oo in self.control_signals_obj:
        oo.subs_dict(self.kindiff_dict)
        oo.lambdify(self.q_flat + self.u_flat)
    # NOTE(review): Python 2 idiom — dict.values() has no .sort() in Python 3
    nums = self.bodies.values()
    nums.sort()
    # keep the body objects sorted by number; the last entry is skipped
    # (presumably a sentinel — TODO confirm)
    for n in nums[:-1]:
        self.body_list_sorted.append([oo for oo in self.bodies_obj.values() if oo.get_n() == n][0])
    toc = time.clock()
    #######################################################
    # set all dicts to all frames
    d = [self.kindiff_dict, self.accdiff_dict, self.const_dict]
    for oo in self.bodies_obj.values():
        oo.set_dicts(d)
    for oo in self.marker_obj.values():
        oo.set_dicts(d)
    for oo in self.marker_fixed_obj.values():
        oo.set_dicts(d)
    print( "finished ... ", toc-tic )
def right_hand_side(self, x, t, args = []):
    """
    Evaluate dx/dt for scipy.integrate.odeint (state first, then time).

    :param x: current state vector (coordinates followed by speeds)
    :param float t: current time
    :param list args: unused placeholder required by the odeint call signature
    :return: time derivative of the state vector
    """
    #for filling up order of f_t see kaneify self.dynamics ...
    # time-dependent parameter values
    para = [ pf(t) for oo in self.param_obj for pf in oo.get_func()]
    # internal forces: evaluate the lambdified distance expressions, then
    # map them through the interpolated force laws
    r_int = [r(*x) for r in self.f_int_lamb]
    f_int = [self.f_int_func[i](r_int[i]) for i in range(len(r_int))]
    f_t = [t] + [fe(t) for fe in self.f_ext_func] + para + f_int
    inp = hstack((x, [t] + para))
    # append the model forces/torques (e.g. tire models)
    for ii in range(self.forces_models_n):
        F_T_model, model_signals = self.f_models_lamb[ii](inp)
        f_t += F_T_model
    #generate the control signals (to control somewhere else)
    for oo in self.control_signals_obj:
        v = oo.calc_signal(x)
    #checkpoint output
    # print the simulation time roughly every 0.1 s as progress feedback
    if t>self.tau_check:
        self.tau_check+=0.1
        print( t )
    arguments = hstack((x,f_t)) # States, input, and parameters
    #lu = factorized(self.M_func(*arguments))
    #dx = lu(self.F_func(*arguments)).T[0]
    # solve M(x) * dx = F(x) for the state derivative
    dx = array(np_solve(self.M_func(*arguments),self.F_func(*arguments))).T[0]
    return dx
def get_control_signal(self, no):
    """
    Return the latest value of control signal *no*.

    :param int no: index into self.control_signals_obj
    :return: the signal value, or 0. if the signal does not exist or
        cannot be evaluated yet
    """
    try:
        return self.control_signals_obj[no].get_signal_value()
    except Exception:
        # narrowed from a bare 'except:' so KeyboardInterrupt/SystemExit
        # are no longer swallowed; the 0. fallback behavior is kept
        return 0.
def right_hand_side_ode(self, t, x):
    """
    Evaluate dx/dt for the scipy.integrate.ode interface (time first,
    then state) — the counterpart of right_hand_side() for odeint.

    :param float t: current time
    :param x: current state vector (coordinates followed by speeds)
    :return: time derivative of the state vector
    """
    para = [ pf(t) for oo in self.param_obj for pf in oo.get_func()]
    # internal forces: evaluate the lambdified distance expressions, then
    # map them through the interpolated force laws.  Bug fix: the old code
    # referenced the undefined global 'f_int_lamb' (NameError with internal
    # forces present) and skipped the f_int_func mapping that
    # right_hand_side() applies.
    r_int = [r(*x) for r in self.f_int_lamb]
    f_int = [self.f_int_func[i](r_int[i]) for i in range(len(r_int))]
    f_t = [t] + [fe(t) for fe in self.f_ext_func] + para + f_int
    inp = hstack((x, [t]+para))
    # append the model forces/torques (e.g. tire models)
    for ii in range(self.forces_models_n):
        F_T_model, model_signals = self.f_models_lamb[ii](inp)
        f_t += F_T_model
    arguments = hstack((x,f_t)) # States, input, and parameters
    # solve M(x) * dx = F(x) for the state derivative
    dx = array(sc_solve(self.M_func(*arguments),self.F_func(*arguments))).T[0]
    return dx
def res_body_pos_IF(self):
    """Lambdify the cartesian positions (x, y, z) of all sorted bodies,
    expressed in the inertial frame."""
    global IF, O, g, t
    coords = []
    for body in self.body_list_sorted:
        coords.extend([
            body.x().subs(self.const_dict),
            body.y().subs(self.const_dict),
            body.z().subs(self.const_dict),
        ])
    extra_args = [t] + self.parameters
    self.pos_cartesians_lambda = lambdify(self.q_flat + extra_args, coords)
def res_body_orient(self):
    """Lambdify the body orientations: for every sorted body the nine dot
    products of its frame axes (e_x, e_y, e_z) with the IF axes, in order
    ex_x, ex_y, ex_z, ey_x, ..., ez_z."""
    coords = []
    for body in self.body_list_sorted:
        frame = body.get_frame()
        for axis in (frame.x, frame.y, frame.z):
            coords.extend([axis.dot(IF.x), axis.dot(IF.y), axis.dot(IF.z)])
    extra_args = [t] + self.parameters
    self.orient_cartesians_lambda = lambdify(self.q_flat + extra_args, coords)
def res_fixed_body_frames(self, body):
    """Return a lambdified function giving *body*'s frame position (x, y, z)
    followed by its orientation (nine axis dot products with IF)."""
    frame = body.get_frame()
    coords = [
        body.x().subs(self.const_dict),
        body.y().subs(self.const_dict),
        body.z().subs(self.const_dict),
    ]
    for axis in (frame.x, frame.y, frame.z):
        coords += [axis.dot(IF.x), axis.dot(IF.y), axis.dot(IF.z)]
    extra_args = [t] + self.parameters
    return lambdify(self.q_flat + extra_args, coords)
def res_body_marker_pos_IF(self):
    """Lambdify the connection lines: for each sorted body the position of
    its attachment marker followed by the body position itself (six values
    per body), all in the inertial frame."""
    global IF, O, g, t
    coords = []
    for body in self.body_list_sorted:
        marker = body.get_N_att()
        for expr in (marker.px(), marker.py(), marker.pz(),
                     body.x(), body.y(), body.z()):
            coords.append(expr.subs(self.const_dict))
    extra_args = [t] + self.parameters
    self.connections_cartesians_lambda = lambdify(self.q_flat + extra_args, coords)
def calc_acc(self):
    """
    Approximate the generalized accelerations by backward finite
    differences of the integrated generalized speeds.

    Fills self.acc with shape (len(self.time), self.dof); the first row
    stays zero since no difference exists for the initial instant.
    Replaces the previous per-element hstack/vstack loop (quadratically
    many array copies) with one vectorized expression.
    """
    t = self.time
    # the speeds occupy the second half of each state vector
    u = self.x_t[:, self.dof:self.dof * 2]
    self.acc = zeros((len(t), self.dof))
    if len(t) > 1:
        # (u[i] - u[i-1]) / (t[i] - t[i-1]) for all i >= 1 at once
        dt = (t[1:] - t[:-1]).reshape(-1, 1)
        self.acc[1:] = (u[1:] - u[:-1]) / dt
#TODO : new setup of rod forces
# def res_rod_forces(self):
# self.f_rod = []
# for oo in self.body_list_sorted:
# N_fixed_n = oo.get_frame()
# Pt_n = oo.Pt()
# ay = self.get_pt_acc(ii,N_fixed_n,1).subs(self.const_dict)
# f_ex_constr = 0.
# for jj in self.forces:
# if jj[0] == Pt_n:
# #print type(f_ex_constr)
# f_ex_constr += jj[1].dot(N_fixed_n.y) #here assumed that the rod is in y-direction
# self.f_rod.append(oo.get_mass()*ay-f_ex_constr)
# #print "f_rod: ",self.f_rod[ii]
# self.rod_f_lambda = lambdify(self.q_flat+self.u_flat+self.a_flat, self.f_rod)
def res_total_force(self, oo):
    """Return a lambdified function giving the position (x, y, z) of body
    *oo* followed by its inertial force components (mass * acceleration)."""
    global IF, O, g, t
    position = [oo.x(), oo.y(), oo.z()]
    inertial_force = [oo.x_ddt() * oo.mass,
                      oo.y_ddt() * oo.mass,
                      oo.z_ddt() * oo.mass]
    extra_args = [t] + self.parameters
    return lambdify(self.q_flat + self.u_flat + self.a_flat + extra_args,
                    position + inertial_force)
def res_kin_energy(self):
    """
    Lambdify the total translational kinetic energy of all bodies,
    E = sum(0.5 * m_i * |v_i|^2), as self.e_kin_lambda.

    (Removed the unused local ``N_fixed = oo.get_frame()`` left over from
    an earlier per-frame implementation.)
    """
    E = 0
    #translatory
    for oo in self.body_list_sorted:
        vel = oo.get_vel().subs(self.kindiff_dict)
        E += 0.5*oo.get_mass()*vel.magnitude()**2
    #substitude the parameter diffs
    E = E.subs(self.parameters_diff)
    f_t = [t] + self.parameters
    self.e_kin_lambda = lambdify(self.q_flat+self.u_flat+f_t, E)
def res_speed(self):
    """Lambdify the lateral (x-z plane) speed magnitude of body frame 0."""
    global IF, O, g, t
    frame0 = self.body_frames[0]
    vel = frame0.get_vel_vec_IF().subs(self.kindiff_dict)
    # drop the vertical component: keep only the IF.x and IF.z parts
    lateral = vel.dot(IF.x) * IF.x + vel.dot(IF.z) * IF.z
    magnitude = lateral.magnitude().subs(self.parameters_diff)
    extra_args = [t] + self.parameters
    self.speed_lambda = lambdify(self.q_flat + self.u_flat + extra_args, magnitude)
def res_rot_energy(self):
    """Lambdify the total rotational kinetic energy of all bodies,
    E = sum(0.5 * (Ixx*wx^2 + Iyy*wy^2 + Izz*wz^2)) in principal axes."""
    energy = 0
    for body in self.body_list_sorted:
        frame = body.get_frame()
        omega = frame.get_omega(IF).subs(self.kindiff_dict)
        # angular-velocity components along the body-fixed principal axes
        components = (omega.dot(frame.x), omega.dot(frame.y), omega.dot(frame.z))
        energy += 0.5 * sum(I_i * w_i ** 2 for I_i, w_i in zip(body.I, components))
    # replace parameter time-derivatives by their symbols
    energy = energy.subs(self.parameters_diff)
    extra_args = [t] + self.parameters
    self.e_rot_lambda = lambdify(self.q_flat + self.u_flat + extra_args, energy)
def res_pot_energy(self):
    """Lambdify the total potential energy accumulated in
    self.pot_energy_saver (0 when no terms were registered)."""
    terms = [expr.subs(self.const_dict) for expr in self.pot_energy_saver]
    energy = sum(terms) if terms else 0
    if terms:
        # replace parameter time-derivatives by their symbols
        energy = energy.subs(self.parameters_diff)
    extra_args = [t] + self.parameters
    self.e_pot_lambda = lambdify(self.q_flat + extra_args, energy)
#def res_signal(self):
def show_figures(self):
    """Placeholder hook for displaying figures; intentionally a no-op."""
    pass
def prep_lambdas(self, moving_frames_in_graphics = [], fixed_frames_in_graphics = [], forces_in_graphics = [], bodies_in_graphics = {}):
    """
    Prepare all lambdified post-processing functions (positions,
    orientations, connections, energies, speed) and collect the frames,
    forces and body shapes requested for the animation.

    :param list moving_frames_in_graphics: body/marker names whose frames are drawn
    :param list fixed_frames_in_graphics: marker names whose fixed frames are drawn
    :param list forces_in_graphics: body names whose total forces are drawn
    :param dict bodies_in_graphics: body name -> shape tag for the animation

    Note: the mutable default arguments are kept for interface
    compatibility; they are only read here, never mutated.
    """
    print( "start preparing lambdas..." )
    start = time.clock()
    self.res_body_pos_IF()
    self.res_body_orient()
    self.vis_frame_coords = []
    self.vis_fixed_frames = []
    self.vis_force_coords = []
    self.res_body_marker_pos_IF()
    self.res_kin_energy()
    self.res_pot_energy()
    self.res_rot_energy()
    self.res_speed()
    for str_m_b in moving_frames_in_graphics:
        n, N_fixed_n, is_body, oo = self._interpretation_of_str_m_b(str_m_b)
        self.vis_frame_coords.append(self.res_fixed_body_frames(oo))
    for str_m_b in fixed_frames_in_graphics:
        n, N_fixed_n, is_body, body = self._interpretation_of_str_m_b(str_m_b)
        if not is_body:
            self.vis_fixed_frames.append(N_fixed_n)
    for str_m_b in forces_in_graphics:
        n, N_fixed_n, is_body, body = self._interpretation_of_str_m_b(str_m_b)
        if is_body:
            self.vis_force_coords.append(self.res_total_force(body))
    end = time.clock()
    # .items() instead of the Python-2-only .iteritems() so this also runs
    # under Python 3 (equivalent for iteration on Python 2)
    for k,v in bodies_in_graphics.items():
        n, N_fixed_n, is_body, body = self._interpretation_of_str_m_b(k)
        self.bodies_in_graphics.update({n:v})
    print( "finished ...",end-start )
def prepare(self, path='', save=True):
    """
    Post-process the integration results: evaluate all lambdified result
    functions over the stored time series (positions, orientations,
    connections, energies, speed, model and control signals) and
    optionally save everything to an HDF5 store.

    :param str path: directory the HDF5 file is written to
    :param bool save: write the results to disk if True (and pandas is available)
    """
    #transform back to produce a state vector in IF
    n_body = self.n_body
    # zero seed rows; real rows are vstack'ed below, one per time step
    self.state = hstack(zeros((n_body+1)*3)) # 3 includes 3d cartesians + 1 time
    self.orient = hstack(zeros((n_body+1)*9)) #3 cartesians vectors e_x, e_y, e_z
    self.con = hstack(zeros((n_body+1)*6)) # 3d cartesian vector from-to (info)
    self.vis_body_frames = []
    for n in range(len(self.vis_frame_coords)):
        self.vis_body_frames.append(hstack(zeros(12))) #1 frame moving
    self.vis_forces = []
    for n in range(len(self.vis_force_coords)):
        self.vis_forces.append(hstack(zeros(6))) # 1 force on body
    self.e_kin = []
    self.e_pot = []
    self.e_tot = []
    self.e_rot = []
    self.speed = []
    self.model_signals_results = {}
    self.control_signals_results = []
    self.calc_acc()
    for ii in range(self.forces_models_n):
        self.model_signals_results.update({ii: zeros(self.models[ii].get_signal_length())})
    for ii in range(len(self.time)):
        tau = self.time[ii]
        f_t = [tau] + [ pf(tau) for oo in self.param_obj for pf in oo.get_func()]
        #controll-signals:
        # ???
        # argument vectors for the different lambdified functions
        x_act = hstack((self.x_t[ii,0:self.dof], f_t))
        x_u_act = hstack((self.x_t[ii,0:self.dof*2], f_t))
        q_flat_u_flat = hstack((self.x_t[ii,0:self.dof*2]))
        x_u_a_act = hstack((self.x_t[ii,0:self.dof*2], self.acc[ii], f_t))
        vx = self.pos_cartesians_lambda(*x_act) #transports x,y,z
        orient = self.orient_cartesians_lambda(*x_act) #transports e_x und e_y
        vc = self.connections_cartesians_lambda(*x_act)
        for n in range(len(self.vis_frame_coords)):
            vf = self.vis_frame_coords[n](*x_act)
            self.vis_body_frames[n] = vstack((self.vis_body_frames[n], vf))
        for n in range(len(self.vis_force_coords)):
            fg = self.vis_force_coords[n](*x_u_a_act)
            self.vis_forces[n] = vstack((self.vis_forces[n], fg))
        speed = self.speed_lambda(*x_u_act)
        e_kin = self.e_kin_lambda(*x_u_act)
        e_rot = self.e_rot_lambda(*x_u_act)
        e_pot = self.e_pot_lambda(*x_act)
        # fix: this loop used 'ii' as well, silently shadowing the outer
        # time index within each iteration — renamed to 'jj'
        for jj in range(self.forces_models_n):
            F_T_model, model_signals = self.f_models_lamb[jj](x_u_act)
            self.model_signals_results[jj] = vstack((self.model_signals_results[jj],model_signals))
        self.control_signals_results.append([cf.lamb(*q_flat_u_flat) for cf in self.control_signals_obj])
        self.state = vstack((self.state,vx))
        self.orient = vstack((self.orient,orient))
        self.con = vstack((self.con, vc))
        self.e_rot.append(e_rot)
        self.e_kin.append(e_kin)
        self.e_pot.append(e_pot)
        self.e_tot.append(e_kin+e_pot+e_rot)
        self.speed.append(speed)
    for ii in range(self.forces_models_n):
        # drop the zero seed row of every model-signal block
        self.model_signals_results[ii] = self.model_signals_results[ii][1:]
    if save and not no_pandas:
        # currently only saving is supported....
        if self.name == '':
            # bug fix: os.path.join(path+'data.h5') concatenated the file
            # name onto the directory without a separator; join properly
            store_filename = os.path.realpath(os.path.join(path, 'data.h5'))
        else:
            store_filename = os.path.realpath(os.path.join(path,self.name+'.h5'))
        self.store = pd.HDFStore(store_filename,complevel=2, complib='zlib')
        self.store['state'] = pd.DataFrame(self.state[:,:3],columns=['x', 'y', 'z']) # 3 includes 3d cartesians
        self.store['orient'] = pd.DataFrame(self.orient[:,:9],columns=['ex_x', 'ex_y', 'ex_z', 'ey_x', 'ey_y', 'ey_z', 'ez_x', 'ez_y', 'ez_z']) #2 cartesians vectors e_x, e_y
        self.store['con'] = pd.DataFrame(self.con) # 3d cartesian vector from-to (info)
        #here we must consider on how to store the data properly...
        #self.store['vis_body_frames'] = pd.DataFrame(self.vis_body_frames) #1 frame moving
        #self.store['vis_forces'] = pd.DataFrame(self.vis_forces) # 1 force on body
        #self.store['vis_frame_coords'] = pd.DataFrame(self.vis_frame_coords)
        #self.store['vis_force_coords'] = pd.DataFrame(self.vis_force_coords)
        self.store['e_kin'] = pd.DataFrame(self.e_kin)
        self.store['time_'] = pd.DataFrame(self.time)
        self.store['x_t'] = pd.DataFrame(self.x_t)
        self.store['acc'] = pd.DataFrame(self.acc)
        self.store['e_pot'] = pd.DataFrame(self.e_pot)
        self.store['e_tot'] = pd.DataFrame(self.e_tot)
        self.store['e_rot'] = pd.DataFrame(self.e_rot)
        self.store['speed'] = pd.DataFrame(self.speed)
        for ii in range(self.forces_models_n):
            self.store['model_signals_results_'+str(ii)] = pd.DataFrame(self.model_signals_results[ii])
    # the load function must set up a mubodyn world object sufficient for animate()...
def _plot_energies(self):
    """Draw kinetic/rotational/potential/total energy over time into the
    currently selected subplot (shared by 'standard' and 'y-pos')."""
    plt.plot(self.time, self.e_kin, self.time, self.e_rot,
             self.time, self.e_pot, self.time, self.e_tot)
    plt.xlabel('Time [sec]')
    plt.legend(['E_kin', 'E_rot', 'E_pot', 'E_full'])

def plotting(self, t_max, dt, plots = 'standard'):
    """
    Plot integration results.  (The previously duplicated energy subplot
    is now drawn by the private helper _plot_energies; dead local
    assignments like ``lines = plt.plot(...)`` were removed.)

    :param float t_max: end time of the shown interval
    :param float dt: integration step size (used to clip the sample count)
    :param str plots: one of 'standard', 'y-pos', 'tire', 'signals'
    """
    if plots == 'standard':
        # generalized coordinates over time plus the energy balance
        n = len(self.q_flat)
        n_max = int(t_max/dt)-2
        plt.subplot(2, 1, 1)
        plt.plot(self.time[0:n_max], self.x_t[0:n_max, :n])
        plt.xlabel('Time [sec]')
        plt.legend(self.dynamic[:n])
        plt.subplot(2, 1, 2)
        self._plot_energies()
        plt.show()
    elif plots == 'y-pos':
        # y position of the first body plus the energy balance
        n_max = int(t_max/dt)-2
        plt.subplot(2, 1, 1)
        plt.plot(self.time[0:n_max], self.state[0:n_max, 4])
        plt.xlabel('Time [sec]')
        plt.legend(["y-Pos."])
        plt.subplot(2, 1, 2)
        self._plot_energies()
        plt.show()
    elif plots == 'tire':
        # tire-model signals: forces, aligning torque, slip and slip angle
        plt.subplot(5, 1, 1)
        plt.plot(self.time, array(self.model_signals_results[0])[:,0], self.time, array(self.model_signals_results[0])[:,2])
        plt.legend(['Fx [N]', 'Fz [N]'])
        plt.subplot(5, 1, 2)
        plt.plot(self.time, array(self.model_signals_results[0])[:,1])
        plt.legend(['Fy [N]'])
        plt.subplot(5, 1, 3)
        plt.plot( self.time, array(self.model_signals_results[0])[:,3])
        plt.legend(['Tz [Nm]'])
        plt.subplot(5, 1, 4)
        plt.plot( self.time, array(self.model_signals_results[0])[:,4])
        plt.xlabel('Time [sec]')
        plt.legend(['Slip [%]'])
        plt.subplot(5, 1, 5)
        plt.plot( self.time, array(self.model_signals_results[0])[:,5])
        plt.xlabel('Time [sec]')
        plt.legend(['Alpha [grad]'])
        plt.show()
    elif plots == 'signals':
        # one subplot per registered control signal
        n_signals = len(self.control_signals_obj)
        for n in range(n_signals):
            plt.subplot(n_signals, 1, n+1)
            plt.plot(self.time, array(self.control_signals_results)[:,n])
            plt.legend(['Signal '+str(n)])
            plt.xlabel(self.control_signals_obj[n].name+" in "+self.control_signals_obj[n].unit)
        plt.show()
def animate(self, t_max, dt, scale = 4, time_scale = 1, t_ani = 30., labels = False, center = -1, f_scale = 0.1, f_min = 0.2, f_max = 5.):
    """
    Build and run the 3D animation of the simulated motion.

    :param float t_max: end time of the simulation (NOTE(review): not used
        in this method — kept for interface compatibility; confirm callers)
    :param float dt: time step of the stored results
    :param scale: geometric scale of the scene
    :param time_scale: playback speed factor
    :param float t_ani: animation duration in seconds
    :param bool labels: draw body labels if True
    :param int center: body number to keep centered (-1 for none)
    :param float f_scale, f_min, f_max: scaling and clipping of force arrows
    :return: whatever animation.s_animation() returns
    """
    #stationary vectors:
    a = animation(scale)
    for fr in self.vis_fixed_frames:
        a.set_stationary_frame(fr)
    # register the per-frame trajectories collected in prepare()
    for n in range(len(self.vis_frame_coords)):
        a.set_dynamic_frame(self.vis_body_frames[n])
    for n in range(len(self.vis_force_coords)):
        a.set_force(self.vis_forces[n], f_scale, f_min, f_max)
    a = a.s_animation(self.state, self.orient, self.con, self.con_type, self.bodies_in_graphics, self.speed, dt, t_ani, time_scale, scale, labels = labels, center = center)
    return a
def prepare_integrator_pp(self, x0, delta_t):
    """
    Set up the step-by-step lsoda integrator used by inte_grate_pp().

    :param x0: initial state vector
    :param float delta_t: time increment applied per inte_grate_pp() call
    """
    self.ode15s = ode(self.right_hand_side_ode)
    # NOTE(review): for scipy's 'lsoda' integrator the 'method' keyword
    # selects 'adams' or 'bdf'; passing 'lsoda' here looks suspicious — confirm
    self.ode15s.set_integrator('lsoda', method='lsoda', min_step = 1e-6, atol = 1e-6, rtol = 1e-5, with_jacobian=False)
    self.ode15s.set_initial_value(x0, 0.)
    self.delta_t = delta_t
def inte_grate_pp(self):
    """Advance the prepared step-by-step integrator by one self.delta_t
    step and return the new state vector and time."""
    solver = self.ode15s
    solver.integrate(solver.t + self.delta_t)
    return solver.y, solver.t
def inte_grate_full(self, x0, t_max, delta_t, mode = 0, tolerance = 1.0):
    """
    Integrate the equations of motion over [0, t_max] and store the
    trajectory in self.x_t (one row per entry of self.time).

    :param x0: initial state vector
    :param float t_max: end time
    :param float delta_t: output step size
    :param int mode: 0 -> scipy.integrate.odeint (default), 1 -> stepwise scipy.integrate.ode (lsoda)
    :param float tolerance: scales the odeint error tolerances and minimum step
    """
    global IF, O, g, t
    self.time = linspace(0, t_max, int(t_max/delta_t))
    print( "start integration ..." )
    # NOTE(review): time.clock() was removed in Python 3.8 — Python 2 era code
    start = time.clock()
    ###
    #some int stuff
    if mode == 1:
        ode15s = ode(self.right_hand_side_ode)
        ode15s.set_integrator('lsoda', min_step = 1e-6, atol = 1e-6, rtol = 1e-7, with_jacobian=False)
        #method = 'bdf'
        ode15s.set_initial_value(x0, 0.)
        self.x_t = x0
        while ode15s.t < t_max:
            ode15s.integrate(ode15s.t+delta_t)
            self.x_t = vstack((self.x_t,ode15s.y))
    elif mode == 0:
        # args=([0.,0.],) only feeds the unused 'args' parameter of right_hand_side
        self.x_t = odeint(self.right_hand_side, x0, self.time, args=([0.,0.],) , hmax = 1.0e-1, hmin = 1.0e-7*tolerance, atol = 1e-5*tolerance, rtol = 1e-5*tolerance, mxords = 4, mxordn = 8)
    end = time.clock()
    print( "end integration ...", end-start )
def constr_lin(self, x_op, quad = False):
    """
    Linearize every stored geometric constraint around the operating
    point *x_op* by a Taylor expansion in the generalized coordinates,
    and derive the matching velocity constraints.

    :param dict x_op: operating point {coordinate symbol: value}
    :param bool quad: also keep the (diagonal) second-order Taylor term
    :return: tuple (c_cons, u_cons) of configuration and velocity constraints

    (Removed the dead ``equ1a = symbols('equ1a')`` assignment that was
    immediately overwritten with 0.)
    """
    n_const = len(self.eq_constr)
    dofs = range(self.dof)
    c_cons = []
    u_cons = []
    for i in range(n_const):
        # express the constraint in the generalized speeds
        self.eq_constr[i] = self.eq_constr[i].subs(self.kindiff_dict)
        lam = []
        equ1a = 0  # accumulated Taylor expansion of constraint i
        for d in dofs:
            equ = self.eq_constr[i]
            # NOTE(review): the constant term equ.subs(x_op) is added once
            # per dof, i.e. self.dof times in total — confirm this is intended
            term = equ.subs(x_op)+equ.diff(self.q_flat[d]).subs(x_op)*self.linfaktor(x_op, self.q_flat[d])
            if quad:
                term += 0.5* equ.diff(self.q_flat[d]).diff(self.q_flat[d]).subs(x_op)*(self.linfaktor(x_op, self.q_flat[d]))**2
            lam.append(term)
            equ1a += lam[-1].simplify()
        c_cons.append(equ1a)
        # velocity constraint: time derivative expressed in the speeds
        u_cons.append(equ1a.diff().subs(self.kindiff_dict))
    print( "c_cons: ", c_cons )
    return c_cons, u_cons
def linearize(self, x_op, a_op, quad = False):
    """
    Function to prepare the linerarization process (kaneify_lin)

    Linearizes the stored constraints, partitions the coordinates into
    independent/dependent sets, repairs the operating point so it is
    consistent with the constraints, and finally calls kaneify_lin().

    :param dict x_op: operating point of the states
    :param dict a_op: operating point of the accelerations
    :param bool quad: keep the quadratic Taylor term of the constraints
    """
    #construct the new constraint equ from previous constraint equations:
    n_const = len(self.eq_constr)
    dofs = range(self.dof)
    c_cons, u_cons = self.constr_lin(x_op, quad = quad)
    #try to find the dependent and independent variables
    q_ind = []
    u_ind = []
    q_dep = []
    u_dep = []
    for d in dofs:
        dep = False
        for eq in range(n_const):
            # a coordinate is dependent if some constraint can be solved
            # for it (at most n_const dependent coordinates in total)
            x = sp_solve(c_cons[eq], self.q_flat[d])
            if not len(x) == 0 and len(q_dep) < n_const:
                q_dep.append(self.q_flat[d])
                u_dep.append(self.u_flat[d])
                dep = True
                break
        if not dep:
            q_ind.append(self.q_flat[d])
            u_ind.append(self.u_flat[d])
    print( "ind: ",q_ind )
    print( "dep: ",q_dep )
    #repair the operation point to be consistent with the constraints
    # and calc the backtrafo
    self.equ_out = []
    repl = []
    n = n_const
    c_copy = copy.copy(c_cons)
    for q in q_dep:
        for eq in range(n):
            x = sp_solve(c_copy[eq], q)
            if not len(x) == 0:
                # q can be expressed by the other coordinates; consume this constraint
                repl.append({q:x[0]})
                #print q, x[0]
                self.equ_out.append(x[0])
                c_copy.pop(eq)
                n = n-1
                break
        # propagate the found substitution into the remaining constraints
        for eq in range(len(c_copy)):
            c_copy[eq]=c_copy[eq].subs(repl[-1])
    repl.reverse()
    # back-substitute so every dependent coordinate is expressed in
    # independent coordinates only
    for eq in range(n_const):
        for pair in repl:
            self.equ_out[eq] = self.equ_out[eq].subs(pair) #order is relevant
    self.mydict = dict(zip(q_dep,self.equ_out))
    # back-transformation: full coordinate vector from the independent ones
    self.q_flat_lin = [term.subs(self.mydict) for term in self.q_flat]
    self.back_trafo_q_all = lambdify( q_ind, self.q_flat_lin)
    q_inp = []
    for d in range(len(q_ind)):
        q_inp.append(q_ind[d].subs(x_op))
    q_z = self.back_trafo_q_all(*q_inp)
    self.x_op_new = dict(zip(self.q_flat, q_z))
    if n_const > 0:
        self.forces = self.forces[0:-n_const] #pop the extra constraint forces
    self.kaneify_lin(q_ind, u_ind, q_dep, u_dep, c_cons, u_cons, x_op, a_op)
def calc_Jacobian(self, n):
    """
    Function to calculate the jacobian, after integration for calc-point no n.

    :param n: the list-number of the integrated state vector
    :return: the (dim x dim) forward finite-difference Jacobian df/dx at
        that point

    (Fix: the perturbation loop reused ``n`` as its loop variable,
    shadowing the parameter — renamed for clarity; behavior unchanged
    since the parameter was not read afterwards.)
    """
    t_op = self.time[n]
    x_op = self.x_t[n]
    eps = 1.0e-12
    f0 = self.right_hand_side(x_op, t_op)
    f_eps = []
    dim = range(len(f0))
    # perturb one state component at a time
    for k in dim:
        x_op[k] += eps
        f_eps.append(self.right_hand_side(x_op, t_op))
        x_op[k] -= eps  # undo the in-place perturbation
    jac = zeros(len(f0))
    for row in dim:
        line = []
        for col in dim:
            line = hstack((line,(f_eps[col][row]-f0[row])/eps))
        jac = vstack((jac, line))
    return jac[1:]  # drop the zero seed row
def calc_lin_analysis_n(self, n):
    """
    Function to calculate linear analysis (stability), after integration
    for calc-point no n: print the eigenvalues of the local Jacobian.

    :param n: the list-number of the integrated state vector
    :return: the Jacobian matrix at that point
    """
    jac = self.calc_Jacobian(n)
    eigenvalues = eig(jac)[0]
    print( "Eigenvalues: time [s] ",self.time[n] )
    for idx, val in enumerate(eigenvalues):
        print( str(idx)+"... ",val )
    return jac
def linfaktor(self, x_op, q):
    """
    Returns the linear factor of a single variable at value q.

    Builds (x - x0), anchors x0 at the coordinate q, substitutes the
    operating point, and finally renames x back to q — yielding
    q minus its operating-point value.

    :param x_op: operating-point substitution dict
    :param q: the coordinate symbol the linear factor is built for
    """
    var, anchor = symbols('x, x0')
    expr = var - anchor
    expr = expr.subs({anchor: q})   # -> x - q
    expr = expr.subs(x_op)          # -> x - q_op
    return expr.subs({var: q})      # -> q - q_op
| mit |
yonglehou/scikit-learn | sklearn/tests/test_learning_curve.py | 225 | 10791 | # Author: Alexander Fabisch <afabisch@informatik.uni-bremen.de>
#
# License: BSD 3 clause
import sys
from sklearn.externals.six.moves import cStringIO as StringIO
import numpy as np
import warnings
from sklearn.base import BaseEstimator
from sklearn.learning_curve import learning_curve, validation_curve
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_warns
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.datasets import make_classification
from sklearn.cross_validation import KFold
from sklearn.linear_model import PassiveAggressiveClassifier
class MockImprovingEstimator(BaseEstimator):
    """Dummy classifier to test the learning curve"""
    def __init__(self, n_max_train_sizes):
        self.n_max_train_sizes = n_max_train_sizes
        self.train_sizes = 0
        self.X_subset = None

    def fit(self, X_subset, y_subset=None):
        """Remember the training subset and its size."""
        self.X_subset = X_subset
        self.train_sizes = X_subset.shape[0]
        return self

    def predict(self, X):
        raise NotImplementedError

    def score(self, X=None, Y=None):
        """Training score decays from 2 to 1 while the test score grows
        from 0 to 1 as the training size approaches the maximum."""
        fraction = float(self.train_sizes) / self.n_max_train_sizes
        if self._is_training_data(X):
            return 2. - fraction
        return fraction

    def _is_training_data(self, X):
        return X is self.X_subset
class MockIncrementalImprovingEstimator(MockImprovingEstimator):
"""Dummy classifier that provides partial_fit"""
def __init__(self, n_max_train_sizes):
super(MockIncrementalImprovingEstimator,
self).__init__(n_max_train_sizes)
self.x = None
def _is_training_data(self, X):
return self.x in X
def partial_fit(self, X, y=None, **params):
self.train_sizes += X.shape[0]
self.x = X[0]
class MockEstimatorWithParameter(BaseEstimator):
"""Dummy classifier to test the validation curve"""
def __init__(self, param=0.5):
self.X_subset = None
self.param = param
def fit(self, X_subset, y_subset):
self.X_subset = X_subset
self.train_sizes = X_subset.shape[0]
return self
def predict(self, X):
raise NotImplementedError
def score(self, X=None, y=None):
return self.param if self._is_training_data(X) else 1 - self.param
def _is_training_data(self, X):
return X is self.X_subset
def test_learning_curve():
    """learning_curve returns the expected sizes/scores, with no warnings."""
    X, y = make_classification(n_samples=30, n_features=1, n_informative=1,
                               n_redundant=0, n_classes=2,
                               n_clusters_per_class=1, random_state=0)
    estimator = MockImprovingEstimator(20)
    with warnings.catch_warnings(record=True) as w:
        train_sizes, train_scores, test_scores = learning_curve(
            estimator, X, y, cv=3, train_sizes=np.linspace(0.1, 1.0, 10))
    if len(w) > 0:
        raise RuntimeError("Unexpected warning: %r" % w[0].message)
    # 10 requested training sizes x 3 CV folds.
    assert_equal(train_scores.shape, (10, 3))
    assert_equal(test_scores.shape, (10, 3))
    assert_array_equal(train_sizes, np.linspace(2, 20, 10))
    # Scores follow the mock's linear schedule (see MockImprovingEstimator).
    assert_array_almost_equal(train_scores.mean(axis=1),
                              np.linspace(1.9, 1.0, 10))
    assert_array_almost_equal(test_scores.mean(axis=1),
                              np.linspace(0.1, 1.0, 10))


def test_learning_curve_unsupervised():
    """learning_curve works when y is None (unsupervised estimators)."""
    X, _ = make_classification(n_samples=30, n_features=1, n_informative=1,
                               n_redundant=0, n_classes=2,
                               n_clusters_per_class=1, random_state=0)
    estimator = MockImprovingEstimator(20)
    train_sizes, train_scores, test_scores = learning_curve(
        estimator, X, y=None, cv=3, train_sizes=np.linspace(0.1, 1.0, 10))
    assert_array_equal(train_sizes, np.linspace(2, 20, 10))
    assert_array_almost_equal(train_scores.mean(axis=1),
                              np.linspace(1.9, 1.0, 10))
    assert_array_almost_equal(test_scores.mean(axis=1),
                              np.linspace(0.1, 1.0, 10))


def test_learning_curve_verbose():
    """verbose=1 prints a [learning_curve] progress tag to stdout."""
    X, y = make_classification(n_samples=30, n_features=1, n_informative=1,
                               n_redundant=0, n_classes=2,
                               n_clusters_per_class=1, random_state=0)
    estimator = MockImprovingEstimator(20)
    # Capture stdout; restore it in `finally` even if learning_curve raises.
    old_stdout = sys.stdout
    sys.stdout = StringIO()
    try:
        train_sizes, train_scores, test_scores = \
            learning_curve(estimator, X, y, cv=3, verbose=1)
    finally:
        out = sys.stdout.getvalue()
        sys.stdout.close()
        sys.stdout = old_stdout
    assert("[learning_curve]" in out)


def test_learning_curve_incremental_learning_not_possible():
    """exploit_incremental_learning requires partial_fit on the estimator."""
    X, y = make_classification(n_samples=2, n_features=1, n_informative=1,
                               n_redundant=0, n_classes=2,
                               n_clusters_per_class=1, random_state=0)
    # The mockup does not have partial_fit()
    estimator = MockImprovingEstimator(1)
    assert_raises(ValueError, learning_curve, estimator, X, y,
                  exploit_incremental_learning=True)


def test_learning_curve_incremental_learning():
    """Incremental (partial_fit) path yields the same score schedule."""
    X, y = make_classification(n_samples=30, n_features=1, n_informative=1,
                               n_redundant=0, n_classes=2,
                               n_clusters_per_class=1, random_state=0)
    estimator = MockIncrementalImprovingEstimator(20)
    train_sizes, train_scores, test_scores = learning_curve(
        estimator, X, y, cv=3, exploit_incremental_learning=True,
        train_sizes=np.linspace(0.1, 1.0, 10))
    assert_array_equal(train_sizes, np.linspace(2, 20, 10))
    assert_array_almost_equal(train_scores.mean(axis=1),
                              np.linspace(1.9, 1.0, 10))
    assert_array_almost_equal(test_scores.mean(axis=1),
                              np.linspace(0.1, 1.0, 10))


def test_learning_curve_incremental_learning_unsupervised():
    """Incremental path also works without targets (y=None)."""
    X, _ = make_classification(n_samples=30, n_features=1, n_informative=1,
                               n_redundant=0, n_classes=2,
                               n_clusters_per_class=1, random_state=0)
    estimator = MockIncrementalImprovingEstimator(20)
    train_sizes, train_scores, test_scores = learning_curve(
        estimator, X, y=None, cv=3, exploit_incremental_learning=True,
        train_sizes=np.linspace(0.1, 1.0, 10))
    assert_array_equal(train_sizes, np.linspace(2, 20, 10))
    assert_array_almost_equal(train_scores.mean(axis=1),
                              np.linspace(1.9, 1.0, 10))
    assert_array_almost_equal(test_scores.mean(axis=1),
                              np.linspace(0.1, 1.0, 10))
def test_learning_curve_batch_and_incremental_learning_are_equal():
    """Batch and partial_fit code paths agree on mean train/test scores."""
    X, y = make_classification(n_samples=30, n_features=1, n_informative=1,
                               n_redundant=0, n_classes=2,
                               n_clusters_per_class=1, random_state=0)
    train_sizes = np.linspace(0.2, 1.0, 5)
    # shuffle=False keeps the two paths seeing samples in the same order.
    estimator = PassiveAggressiveClassifier(n_iter=1, shuffle=False)
    train_sizes_inc, train_scores_inc, test_scores_inc = \
        learning_curve(
            estimator, X, y, train_sizes=train_sizes,
            cv=3, exploit_incremental_learning=True)
    train_sizes_batch, train_scores_batch, test_scores_batch = \
        learning_curve(
            estimator, X, y, cv=3, train_sizes=train_sizes,
            exploit_incremental_learning=False)
    assert_array_equal(train_sizes_inc, train_sizes_batch)
    assert_array_almost_equal(train_scores_inc.mean(axis=1),
                              train_scores_batch.mean(axis=1))
    assert_array_almost_equal(test_scores_inc.mean(axis=1),
                              test_scores_batch.mean(axis=1))


def test_learning_curve_n_sample_range_out_of_bounds():
    """Out-of-range train_sizes (fractional or absolute) raise ValueError."""
    X, y = make_classification(n_samples=30, n_features=1, n_informative=1,
                               n_redundant=0, n_classes=2,
                               n_clusters_per_class=1, random_state=0)
    estimator = MockImprovingEstimator(20)
    assert_raises(ValueError, learning_curve, estimator, X, y, cv=3,
                  train_sizes=[0, 1])
    assert_raises(ValueError, learning_curve, estimator, X, y, cv=3,
                  train_sizes=[0.0, 1.0])
    assert_raises(ValueError, learning_curve, estimator, X, y, cv=3,
                  train_sizes=[0.1, 1.1])
    assert_raises(ValueError, learning_curve, estimator, X, y, cv=3,
                  train_sizes=[0, 20])
    assert_raises(ValueError, learning_curve, estimator, X, y, cv=3,
                  train_sizes=[1, 21])


def test_learning_curve_remove_duplicate_sample_sizes():
    """Duplicate integer train sizes are dropped, with a RuntimeWarning."""
    X, y = make_classification(n_samples=3, n_features=1, n_informative=1,
                               n_redundant=0, n_classes=2,
                               n_clusters_per_class=1, random_state=0)
    estimator = MockImprovingEstimator(2)
    train_sizes, _, _ = assert_warns(
        RuntimeWarning, learning_curve, estimator, X, y, cv=3,
        train_sizes=np.linspace(0.33, 1.0, 3))
    assert_array_equal(train_sizes, [1, 2])


def test_learning_curve_with_boolean_indices():
    """learning_curve accepts a CV object producing boolean masks."""
    X, y = make_classification(n_samples=30, n_features=1, n_informative=1,
                               n_redundant=0, n_classes=2,
                               n_clusters_per_class=1, random_state=0)
    estimator = MockImprovingEstimator(20)
    cv = KFold(n=30, n_folds=3)
    train_sizes, train_scores, test_scores = learning_curve(
        estimator, X, y, cv=cv, train_sizes=np.linspace(0.1, 1.0, 10))
    assert_array_equal(train_sizes, np.linspace(2, 20, 10))
    assert_array_almost_equal(train_scores.mean(axis=1),
                              np.linspace(1.9, 1.0, 10))
    assert_array_almost_equal(test_scores.mean(axis=1),
                              np.linspace(0.1, 1.0, 10))


def test_validation_curve():
    """validation_curve sweeps `param` and reproduces the mock's scores."""
    X, y = make_classification(n_samples=2, n_features=1, n_informative=1,
                               n_redundant=0, n_classes=2,
                               n_clusters_per_class=1, random_state=0)
    param_range = np.linspace(0, 1, 10)
    with warnings.catch_warnings(record=True) as w:
        train_scores, test_scores = validation_curve(
            MockEstimatorWithParameter(), X, y, param_name="param",
            param_range=param_range, cv=2
        )
    if len(w) > 0:
        raise RuntimeError("Unexpected warning: %r" % w[0].message)
    # MockEstimatorWithParameter scores `param` on train, 1 - param on test.
    assert_array_almost_equal(train_scores.mean(axis=1), param_range)
    assert_array_almost_equal(test_scores.mean(axis=1), 1 - param_range)
| bsd-3-clause |
f3r/scikit-learn | sklearn/decomposition/truncated_svd.py | 30 | 7896 | """Truncated SVD for sparse matrices, aka latent semantic analysis (LSA).
"""
# Author: Lars Buitinck <L.J.Buitinck@uva.nl>
# Olivier Grisel <olivier.grisel@ensta.org>
# Michael Becker <mike@beckerfuffle.com>
# License: 3-clause BSD.
import numpy as np
import scipy.sparse as sp
try:
from scipy.sparse.linalg import svds
except ImportError:
from ..utils.arpack import svds
from ..base import BaseEstimator, TransformerMixin
from ..utils import check_array, as_float_array, check_random_state
from ..utils.extmath import randomized_svd, safe_sparse_dot, svd_flip
from ..utils.sparsefuncs import mean_variance_axis
__all__ = ["TruncatedSVD"]
class TruncatedSVD(BaseEstimator, TransformerMixin):
    """Dimensionality reduction using truncated SVD (aka LSA).

    This transformer performs linear dimensionality reduction by means of
    truncated singular value decomposition (SVD). It is very similar to PCA,
    but operates on sample vectors directly, instead of on a covariance
    matrix. This means it can work with scipy.sparse matrices efficiently.

    In particular, truncated SVD works on term count/tf-idf matrices as
    returned by the vectorizers in sklearn.feature_extraction.text. In that
    context, it is known as latent semantic analysis (LSA).

    This estimator supports two algorithm: a fast randomized SVD solver, and
    a "naive" algorithm that uses ARPACK as an eigensolver on (X * X.T) or
    (X.T * X), whichever is more efficient.

    Read more in the :ref:`User Guide <LSA>`.

    Parameters
    ----------
    n_components : int, default = 2
        Desired dimensionality of output data.
        Must be strictly less than the number of features.
        The default value is useful for visualisation. For LSA, a value of
        100 is recommended.

    algorithm : string, default = "randomized"
        SVD solver to use. Either "arpack" for the ARPACK wrapper in SciPy
        (scipy.sparse.linalg.svds), or "randomized" for the randomized
        algorithm due to Halko (2009).

    n_iter : int, optional (default 5)
        Number of iterations for randomized SVD solver. Not used by ARPACK.
        The default is larger than the default in `randomized_svd` to handle
        sparse matrices that may have large slowly decaying spectrum.

    random_state : int or RandomState, optional
        (Seed for) pseudo-random number generator. If not given, the
        numpy.random singleton is used.

    tol : float, optional
        Tolerance for ARPACK. 0 means machine precision. Ignored by
        randomized SVD solver.

    Attributes
    ----------
    components_ : array, shape (n_components, n_features)

    explained_variance_ratio_ : array, [n_components]
        Percentage of variance explained by each of the selected components.

    explained_variance_ : array, [n_components]
        The variance of the training samples transformed by a projection to
        each component.

    Examples
    --------
    >>> from sklearn.decomposition import TruncatedSVD
    >>> from sklearn.random_projection import sparse_random_matrix
    >>> X = sparse_random_matrix(100, 100, density=0.01, random_state=42)
    >>> svd = TruncatedSVD(n_components=5, random_state=42)
    >>> svd.fit(X) # doctest: +NORMALIZE_WHITESPACE
    TruncatedSVD(algorithm='randomized', n_components=5, n_iter=5,
            random_state=42, tol=0.0)
    >>> print(svd.explained_variance_ratio_) # doctest: +ELLIPSIS
    [ 0.0782... 0.0552... 0.0544... 0.0499... 0.0413...]
    >>> print(svd.explained_variance_ratio_.sum()) # doctest: +ELLIPSIS
    0.279...

    See also
    --------
    PCA
    RandomizedPCA

    References
    ----------
    Finding structure with randomness: Stochastic algorithms for constructing
    approximate matrix decompositions
    Halko, et al., 2009 (arXiv:909) http://arxiv.org/pdf/0909.4061

    Notes
    -----
    SVD suffers from a problem called "sign indeterminancy", which means the
    sign of the ``components_`` and the output from transform depend on the
    algorithm and random state. To work around this, fit instances of this
    class to data once, then keep the instance around to do transformations.
    """

    def __init__(self, n_components=2, algorithm="randomized", n_iter=5,
                 random_state=None, tol=0.):
        self.algorithm = algorithm
        self.n_components = n_components
        self.n_iter = n_iter
        self.random_state = random_state
        self.tol = tol

    def fit(self, X, y=None):
        """Fit LSI model on training data X.

        Parameters
        ----------
        X : {array-like, sparse matrix}, shape (n_samples, n_features)
            Training data.

        Returns
        -------
        self : object
            Returns the transformer object.
        """
        # Delegate to fit_transform and discard the transformed output.
        self.fit_transform(X)
        return self

    def fit_transform(self, X, y=None):
        """Fit LSI model to X and perform dimensionality reduction on X.

        Parameters
        ----------
        X : {array-like, sparse matrix}, shape (n_samples, n_features)
            Training data.

        Returns
        -------
        X_new : array, shape (n_samples, n_components)
            Reduced version of X. This will always be a dense array.
        """
        X = as_float_array(X, copy=False)
        random_state = check_random_state(self.random_state)

        # If sparse and not csr or csc, convert to csr
        if sp.issparse(X) and X.getformat() not in ["csr", "csc"]:
            X = X.tocsr()

        if self.algorithm == "arpack":
            U, Sigma, VT = svds(X, k=self.n_components, tol=self.tol)
            # svds doesn't abide by scipy.linalg.svd/randomized_svd
            # conventions, so reverse its outputs.
            Sigma = Sigma[::-1]
            # svd_flip resolves the sign ambiguity deterministically.
            U, VT = svd_flip(U[:, ::-1], VT[::-1])
        elif self.algorithm == "randomized":
            k = self.n_components
            n_features = X.shape[1]
            if k >= n_features:
                raise ValueError("n_components must be < n_features;"
                                 " got %d >= %d" % (k, n_features))
            U, Sigma, VT = randomized_svd(X, self.n_components,
                                          n_iter=self.n_iter,
                                          random_state=random_state)
        else:
            raise ValueError("unknown algorithm %r" % self.algorithm)

        self.components_ = VT

        # Calculate explained variance & explained variance ratio
        X_transformed = np.dot(U, np.diag(Sigma))
        self.explained_variance_ = exp_var = np.var(X_transformed, axis=0)
        if sp.issparse(X):
            # Compute the total variance without densifying X.
            _, full_var = mean_variance_axis(X, axis=0)
            full_var = full_var.sum()
        else:
            full_var = np.var(X, axis=0).sum()
        self.explained_variance_ratio_ = exp_var / full_var
        return X_transformed

    def transform(self, X):
        """Perform dimensionality reduction on X.

        Parameters
        ----------
        X : {array-like, sparse matrix}, shape (n_samples, n_features)
            New data.

        Returns
        -------
        X_new : array, shape (n_samples, n_components)
            Reduced version of X. This will always be a dense array.
        """
        X = check_array(X, accept_sparse='csr')
        # Project onto the learned right singular vectors (no centering).
        return safe_sparse_dot(X, self.components_.T)

    def inverse_transform(self, X):
        """Transform X back to its original space.

        Returns an array X_original whose transform would be X.

        Parameters
        ----------
        X : array-like, shape (n_samples, n_components)
            New data.

        Returns
        -------
        X_original : array, shape (n_samples, n_features)
            Note that this is always a dense array.
        """
        X = check_array(X)
        return np.dot(X, self.components_)
| bsd-3-clause |
tsaoyu/D3HRE | D3HRE/core/mission_utility.py | 1 | 7731 | import numpy as np
import pandas as pd
import nvector as nv
from math import radians, cos, sin, asin, sqrt
from datetime import timedelta
from D3HRE.core.get_hash import hash_value
from D3HRE.core.dataframe_utility import full_day_cut
def haversine(lon1, lat1, lon2, lat2):
    """
    Great-circle distance between two points on the earth.

    :param lon1: longitude of point 1 in decimal degrees
    :param lat1: latitude of point 1 in decimal degrees
    :param lon2: longitude of point 2 in decimal degrees
    :param lat2: latitude of point 2 in decimal degrees
    :return: great circle distance between the two points in km
    """
    # Work in radians throughout.
    lam1, phi1, lam2, phi2 = (radians(v) for v in (lon1, lat1, lon2, lat2))

    # Haversine formula: hav(theta) = sin^2(dphi/2) + cos(phi1)cos(phi2)sin^2(dlam/2)
    half_dphi = (phi2 - phi1) / 2
    half_dlam = (lam2 - lam1) / 2
    hav = sin(half_dphi) ** 2 + cos(phi1) * cos(phi2) * sin(half_dlam) ** 2

    earth_radius_km = 6371  # mean earth radius; use 3956 for miles
    return 2 * earth_radius_km * asin(sqrt(hav))
def distance_between_waypoints(way_points):
    """
    Haversine distance between consecutive way points on a route.

    :param way_points: array-like of (lat, lon) way points in decimal
        degrees (the convention used by position_dataframe/get_mission)
    :return: np.array of great-circle distances between consecutive way
        points in km
    """
    # BUGFIX(review): way points are (lat, lon) — see position_dataframe,
    # which reads way_points[:, 0] as latitude — but haversine() takes
    # (lon1, lat1, lon2, lat2).  The previous code passed v[0] (latitude)
    # in the longitude slot and vice versa, which produces wrong distances
    # whenever lat/lon are not interchangeable.  Arguments are now mapped
    # correctly.
    distance = np.array(
        [
            haversine(v[1], v[0], w[1], w[0])
            for v, w in zip(way_points[:-1], way_points[1:])
        ]
    )
    return distance
def journey_timestamp_generator(start_date, way_points, speed):
    """
    Build the UTC timestamps at which each way point is reached.

    Utility used to construct the Timestamp index of the mission
    DataFrame: leg distances are converted to travel hours at the given
    speed and accumulated from the start date.

    :param start_date: pandas Timestamp in UTC
    :param way_points: array-like list of way points
    :param speed: float or int speed in km/h
    :return: list of pandas Timestamps, one per way point (first entry is
        start_date itself)
    """
    hours_per_leg = distance_between_waypoints(way_points) / speed
    elapsed_hours = np.cumsum(hours_per_leg)

    stamps = [start_date]
    for hours in elapsed_hours:
        stamps.append(start_date + timedelta(hours=hours))
    return stamps
def position_dataframe(start_date, way_points, speed):
    """
    Generate position dataFrame at one hour resolution with given way points.

    Way-point coordinates are resampled to an hourly index; the gaps the
    resampling leaves between way points are filled by great-circle
    interpolation (via nvector), and a naive local-time column is derived
    from the longitude.

    :param start_date: pandas Timestamp in UTC
    :param way_points: np array of way points, column 0 = lat, column 1 = lon
    :param speed: float or array speed in km/h
    :return: pandas DataFrame with columns lat, lon, speed, local_time,
        indexed at one hour resolution
    """
    timeindex = journey_timestamp_generator(start_date, way_points, speed)
    # Hourly resample leaves NaN rows between way-point arrival times.
    latTS = pd.Series(way_points[:, 0], index=timeindex).resample('1H').mean()
    lonTS = pd.Series(way_points[:, 1], index=timeindex).resample('1H').mean()
    # Custom interpolation calculate latitude and longitude of platform at each hour
    lTs = latTS.copy()
    # Label each run of consecutive NaN/non-NaN rows with a block id so the
    # NaN runs can be interpolated one leg at a time.
    x = (
        lTs.isnull()
        .reset_index(name='null')
        .reset_index()
        .rename(columns={"level_0": "order"})
    )
    x['block'] = (x['null'].shift(1) != x['null']).astype(int).cumsum()
    block_to_fill = x[x['null']].groupby('block')['order'].apply(np.array)

    def find_start_and_end_location(block_index):
        # The known positions bracketing a NaN run are the rows just
        # before and just after it.
        start_index = block_index[0] - 1
        end_index = block_index[-1] + 1
        lat1 = latTS.iloc[start_index]
        lon1 = lonTS.iloc[start_index]
        lat2 = latTS.iloc[end_index]
        lon2 = lonTS.iloc[end_index]
        n = end_index - start_index
        lat_lon1 = lat1, lon1
        lat_lon2 = lat2, lon2
        return [lat_lon1, lat_lon2, n]

    def way_points_interp(location_block):
        # Great-circle interpolation between the two bracketing positions,
        # producing n equally spaced intermediate coordinates (includes the
        # start point, excludes the end point).
        lat_lon1 = location_block[0]
        lat_lon2 = location_block[1]
        n = location_block[2]
        wgs84 = nv.FrameE(name='WGS84')
        lat1, lon1 = lat_lon1
        lat2, lon2 = lat_lon2
        n_EB_E_t0 = wgs84.GeoPoint(lat1, lon1, degrees=True).to_nvector()
        n_EB_E_t1 = wgs84.GeoPoint(lat2, lon2, degrees=True).to_nvector()
        path = nv.GeoPath(n_EB_E_t0, n_EB_E_t1)
        interpolate_coor = [[lat1, lon1]]
        piece_fraction = 1 / n
        for n in range(n - 1):
            g_EB_E_ti = path.interpolate(piece_fraction * (n + 1)).to_geo_point()
            interpolate_coor.append(
                [g_EB_E_ti.latitude_deg[0], g_EB_E_ti.longitude_deg[0]]
            )
        return interpolate_coor

    way_interpolated = np.array([])
    for block in block_to_fill:
        way_interp = way_points_interp(find_start_and_end_location(block))
        way_interpolated = np.append(way_interpolated, way_interp)
    # Each interpolated leg omits its end point, so close the track with
    # the final known position.
    way_interpolated = np.append(way_interpolated, [latTS.iloc[-1], lonTS.iloc[-1]])
    locations = way_interpolated.reshape(-1, 2)
    mission = pd.DataFrame(data=locations, index=latTS.index, columns=['lat', 'lon'])
    if isinstance(speed, int) or isinstance(speed, float):
        speedTS = speed
    else:
        # Repeat the last speed so the series covers the final way point.
        speed = np.append(speed, speed[-1])
        speedTS = pd.Series(speed, index=timeindex).resample('1H').mean()
    mission['speed'] = speedTS
    mission.fillna(method='ffill', inplace=True)

    # Convert UTC time into local time
    def find_timezone(array, value):
        # Index of the nearest 15-degree longitude band, shifted so that
        # the result is an hour offset in [-12, 12].
        idx = (np.abs(array - value)).argmin()
        return idx - 12

    lons = np.linspace(-180, 180, 25)
    local_time = []
    for index, row in mission.iterrows():
        local_time.append(index + timedelta(hours=int(find_timezone(lons, row.lon))))
    # time difference into timedelta
    # t_diff = list(map(lambda x: timedelta(hours=x), time_diff))
    # local_time = mission.index + t_diff
    mission['local_time'] = local_time
    return mission
def get_mission(start_time, route, speed):
    """
    Calculate the position dataFrame for a route at a given start time.

    :param start_time: str or pandas Timestamp; a str must be parseable by
        pd.Timestamp, e.g. 'YYYY-MM-DD'
    :param route: numpy array shape (n, 2), way points formatted as [lat, lon]
    :param speed: int, float or length-n list, speed of platform in km/h
    :return: pandas DataFrame of hourly positions, cut to full days
    """
    # isinstance() instead of `type(...) == str`: idiomatic type check that
    # also accepts str subclasses; Timestamp inputs pass through unchanged.
    if isinstance(start_time, str):
        start_time = pd.Timestamp(start_time)
    return full_day_cut(position_dataframe(start_time, route, speed))
def nearest_point(lat_lon):
    """
    Snap a coordinate to the nearest node of a 1-by-1 degree grid.

    The grid covers latitudes -90..89 and longitudes -180..179 (integer
    degrees); ties resolve toward the smaller value.

    :param lat_lon: tuple (lat, lon)
    :return: tuple of the nearest integer grid coordinates
    """
    lat, lon = lat_lon

    def snap(value, grid):
        # argmin picks the first (smallest) grid node on a tie.
        return grid[np.abs(grid - value).argmin()]

    return (snap(lat, np.arange(-90, 90, dtype=int)),
            snap(lon, np.arange(-180, 180, dtype=int)))
class Mission:
    """A platform mission: start time, route and speed plus the derived
    hourly position DataFrame (``self.df``) and a hash identifier
    (``self.ID``)."""

    def __init__(self, start_time=None, route=None, speed=None):
        # NOTE(review): when any argument is None the instance is left
        # without df/ID until custom_set() is called; using such an
        # instance before that will raise AttributeError.
        if start_time is None or route is None or speed is None:
            print('Please use custom mission setting.')
        else:
            self.start_time = start_time
            self.route = route
            self.speed = speed
            self.df = get_mission(self.start_time, self.route, self.speed)
            self.get_ID()

    def __str__(self):
        return "This mission {ID} is start from {a} at {b} UTC.".format(
            a=self.route[0], b=self.start_time, ID=self.ID
        )

    def custom_set(self, mission_df, ID):
        # Install a pre-built mission DataFrame and identifier directly.
        self.df = mission_df
        self.ID = ID

    def get_ID(self):
        """Compute (and cache on self.ID) a hash of start time, route and
        speed that uniquely identifies this mission."""
        # Flatten to hashable tuples before hashing.
        route_tuple = tuple(self.route.flatten().tolist())
        if isinstance(self.speed, list):
            speed_tuple = tuple(self.speed)
        else:
            speed_tuple = self.speed
        ID_tuple = (self.start_time, route_tuple, speed_tuple)
        self.ID = hash_value(ID_tuple)
        return self.ID


if __name__ == '__main__':
    pass
| gpl-3.0 |
n7jti/machine_learning | adaboost/newsgroups.py | 1 | 4192 | #!/usr/bin/python2
from scipy import *
import scipy.sparse as sp
import scipy.linalg as la
#See http://scikit-learn.org/stable/modules/feature_extraction.html
import sklearn.feature_extraction as fe
import tok
import dstump as ds
import pylab as pl
import numpy as np
import operator
from datetime import datetime
def adaboost_train(x, y, T):
    """AdaBoost training loop over decision stumps (Python 2 code).

    :param x: feature matrix, shape (n_samples, n_features); columns are
        candidate stump features
    :param y: labels in {-1, +1}, shape (n_samples,)
    :param T: number of boosting rounds
    :return: (A, H, I, TE) — per-round alphas, stump decision variables,
        chosen feature indices, and cumulative training error after each
        round.
    """
    cf = x.shape[1]
    n = y.shape[0]
    # Start from the uniform sample distribution.
    weights = ones(n)/n
    H = []
    A = []
    I = []
    TE = []
    for t in range(T):
        # Total weight of positive samples, passed to the stump fitter.
        pplus = sum(weights * (y > 0))
        # Let's train on all the features and find the one that works the best
        decisionVariables = []
        score = []
        we = []
        for idx in range(cf):
            f = x[:,idx]
            # train the stump
            (dv, err) = ds.stump_fit(f, y, weights, pplus)
            we.append( err )
            decisionVariables.append(dv)
            # score the classifiers on all features for this round
            # (distance of the weighted error from chance, 0.5).
            score.append(abs(.5-err))
        print "Round: ", t, str(datetime.now())
        # choose the one feature we'll use for this round's classifier
        I.append(np.argmax(score))
        H.append(decisionVariables[I[t]])
        eps = we[I[t]]
        # calculate our alpha
        A.append(.5 * math.log((1-eps)/eps))
        # update the weights: up-weight misclassified samples, normalize.
        numerators = weights * np.exp( -A[t] * y * ds.stump_predict(x[:,I[t]], H[t]) )
        Z = numerators.sum()
        weights = numerators / Z
        # Calculate the overall training errors
        y_hat = adaboost_predict(A,H,I,x, len(A))
        TE.append((y_hat * y < 0).sum() / float(n))
    return A, H, I, TE
def adaboost_predict(A, H, I, x, t):
    """Sign of the weighted vote of the first t boosted stumps.

    :param A: per-round alphas
    :param H: per-round stump decision variables
    :param I: per-round feature indices
    :param x: feature matrix, shape (n_samples, n_features)
    :param t: number of rounds to use
    :return: predicted labels in {-1, 0, +1}, shape (n_samples,)
    """
    votes = np.zeros(x.shape[0])
    for alpha, stump, feat in zip(A[:t], H[:t], I[:t]):
        votes += alpha * ds.stump_predict(x[:, feat], stump)
    return np.sign(votes)
def adaboost_find_t(A, H, I, x, y):
    """Find the round count minimizing hold-out error on (x, y).

    :param A: per-round alphas
    :param H: per-round stump decision variables
    :param I: per-round feature indices
    :param x: hold-out feature matrix
    :param y: hold-out labels in {-1, +1}
    :return: (HE, t) — hold-out error after each round, and the 1-based
        round count with the smallest error (first minimum wins).
    """
    n = x.shape[0]
    running_vote = np.zeros(n)
    HE = []
    for alpha, stump, feat in zip(A, H, I):
        running_vote += alpha * ds.stump_predict(x[:, feat], stump)
        HE.append((np.sign(running_vote) * y < 0).sum() / float(n))
    best = np.argmin(HE)
    return HE, best + 1
def main():
    """Train/validate/test AdaBoost-over-stumps on two newsgroups
    (Python 2 script entry point).

    Pipeline: tokenize the corpora into tf-idf features, boost for m
    rounds on the first 600 mails of each group, pick the round count t
    on a validation split, then report test error.
    """
    # Read text, try removing comments, headers ... See tok.py for implementation.
    corpus = tok.fill_corpus(["alt.atheism", "comp.windows.x"])
    # corpus = tok.fill_corpus(["alt.atheism", "soc.religion.christian"])
    # Create training data: first 600 mails of each group; labels -1/+1.
    ctr = reduce(list.__add__, map(lambda x: x[:600], corpus))
    ytr = zeros(len(ctr)); ytr[:600] = -1; ytr[600:] = 1
    # Train a bag-of-words feature extractor.
    # You're free to play with the parameters of fe.text.TfidfVectorizer, but your answers
    # *should be* answered for the parameters given here. You can find out more about these
    # on the scikits-learn documentation site.
    tfidf = fe.text.TfidfVectorizer(min_df=5, ngram_range=(1, 4), use_idf=True, encoding="ascii")
    # Train the tokenizer.
    ftr = tfidf.fit_transform(ctr)
    # csc gives fast column slicing for the per-feature stump training.
    ftr = ftr.tocsc()
    # This maps features back to their text.
    feature_names = tfidf.get_feature_names()
    m = 30
    # This shouldn't take more than 20m.
    A, H, I, TE = adaboost_train(ftr, ytr, m)
    for i in range(m):
        print "T", i, "index:", I[i], "feature name:", feature_names[I[i]]
    # Plot alpha magnitudes and training error per boosting round.
    pl.subplot(2,1,1)
    pl.xlabel('steps of adaboost')
    pl.ylabel('magnitude of alpha')
    pl.plot(np.abs(A),'o')
    pl.subplot(2,1,2)
    # pl.axis([0,50,0,.5])
    pl.xlabel('steps of adaboost')
    pl.ylabel('training error')
    pl.plot(TE,'o')
    pl.show()
    # Create validation data: mails 600-800 of each group.
    cva = reduce(list.__add__, map(lambda x: x[600:800], corpus))
    yva = zeros(len(cva)); yva[:200] = -1; yva[200:] = 1
    # tfidf tokenizer is not trained here.
    fva = tfidf.transform(cva).tocsc()
    # <Validation code goes here>
    # Choose the round count t minimizing validation error; truncate the
    # ensemble to that length and persist it.
    HE, t = adaboost_find_t(A, H, I, fva, yva)
    print "t", t
    A = A[:t]
    H = H[:t]
    I = I[:t]
    S = np.vstack((A,H,I))
    np.savetxt("matrix1.out", S);
    pl.clf()
    pl.plot(HE,'o')
    pl.show()
    # Create test data
    # Some lists have less than a thousand mails. You may have to change this.
    cte = reduce(list.__add__, map(lambda x: x[800:], corpus))
    yte = zeros(len(cte)); yte[:200] = -1; yte[200:] = 1
    fte = tfidf.transform(cte).tocsc()
    # <Testing code goes here>
    y_hat = adaboost_predict(A, H, I, fte, t)
    err = (y_hat * yte < 0).sum() / float(yte.shape[0])
    print "err", err
| apache-2.0 |
mhue/scikit-learn | sklearn/pipeline.py | 162 | 21103 | """
The :mod:`sklearn.pipeline` module implements utilities to build a composite
estimator, as a chain of transforms and estimators.
"""
# Author: Edouard Duchesnay
# Gael Varoquaux
# Virgile Fritsch
# Alexandre Gramfort
# Lars Buitinck
# Licence: BSD
from collections import defaultdict
import numpy as np
from scipy import sparse
from .base import BaseEstimator, TransformerMixin
from .externals.joblib import Parallel, delayed
from .externals import six
from .utils import tosequence
from .utils.metaestimators import if_delegate_has_method
from .externals.six import iteritems
__all__ = ['Pipeline', 'FeatureUnion']
class Pipeline(BaseEstimator):
"""Pipeline of transforms with a final estimator.
Sequentially apply a list of transforms and a final estimator.
Intermediate steps of the pipeline must be 'transforms', that is, they
must implement fit and transform methods.
The final estimator only needs to implement fit.
The purpose of the pipeline is to assemble several steps that can be
cross-validated together while setting different parameters.
For this, it enables setting parameters of the various steps using their
names and the parameter name separated by a '__', as in the example below.
Read more in the :ref:`User Guide <pipeline>`.
Parameters
----------
steps : list
List of (name, transform) tuples (implementing fit/transform) that are
chained, in the order in which they are chained, with the last object
an estimator.
Attributes
----------
named_steps : dict
Read-only attribute to access any step parameter by user given name.
Keys are step names and values are steps parameters.
Examples
--------
>>> from sklearn import svm
>>> from sklearn.datasets import samples_generator
>>> from sklearn.feature_selection import SelectKBest
>>> from sklearn.feature_selection import f_regression
>>> from sklearn.pipeline import Pipeline
>>> # generate some data to play with
>>> X, y = samples_generator.make_classification(
... n_informative=5, n_redundant=0, random_state=42)
>>> # ANOVA SVM-C
>>> anova_filter = SelectKBest(f_regression, k=5)
>>> clf = svm.SVC(kernel='linear')
>>> anova_svm = Pipeline([('anova', anova_filter), ('svc', clf)])
>>> # You can set the parameters using the names issued
>>> # For instance, fit using a k of 10 in the SelectKBest
>>> # and a parameter 'C' of the svm
>>> anova_svm.set_params(anova__k=10, svc__C=.1).fit(X, y)
... # doctest: +ELLIPSIS
Pipeline(steps=[...])
>>> prediction = anova_svm.predict(X)
>>> anova_svm.score(X, y) # doctest: +ELLIPSIS
0.77...
>>> # getting the selected features chosen by anova_filter
>>> anova_svm.named_steps['anova'].get_support()
... # doctest: +NORMALIZE_WHITESPACE
array([ True, True, True, False, False, True, False, True, True, True,
False, False, True, False, True, False, False, False, False,
True], dtype=bool)
"""
    # BaseEstimator interface

    def __init__(self, steps):
        names, estimators = zip(*steps)
        # Duplicate names would make "name__param" addressing ambiguous.
        if len(dict(steps)) != len(steps):
            raise ValueError("Provided step names are not unique: %s" % (names,))

        # shallow copy of steps
        self.steps = tosequence(steps)
        transforms = estimators[:-1]
        estimator = estimators[-1]

        # Every intermediate step must be a transformer (fit + transform).
        for t in transforms:
            if (not (hasattr(t, "fit") or hasattr(t, "fit_transform")) or not
                    hasattr(t, "transform")):
                raise TypeError("All intermediate steps of the chain should "
                                "be transforms and implement fit and transform"
                                " '%s' (type %s) doesn't)" % (t, type(t)))

        # The final step only needs fit().
        if not hasattr(estimator, "fit"):
            raise TypeError("Last step of chain should implement fit "
                            "'%s' (type %s) doesn't)"
                            % (estimator, type(estimator)))

    @property
    def _estimator_type(self):
        # The pipeline's type (classifier/regressor/...) is that of its
        # final estimator.
        return self.steps[-1][1]._estimator_type

    def get_params(self, deep=True):
        """Return the pipeline parameters; with ``deep=True`` also include
        each step's own parameters keyed as ``<step>__<param>``."""
        if not deep:
            return super(Pipeline, self).get_params(deep=False)
        else:
            # named_steps returns a fresh dict, safe to extend in place.
            out = self.named_steps
            for name, step in six.iteritems(self.named_steps):
                for key, value in six.iteritems(step.get_params(deep=True)):
                    out['%s__%s' % (name, key)] = value
            out.update(super(Pipeline, self).get_params(deep=False))
            return out

    @property
    def named_steps(self):
        # Read-only name -> step mapping, rebuilt on each access.
        return dict(self.steps)

    @property
    def _final_estimator(self):
        return self.steps[-1][1]
    # Estimator interface

    def _pre_transform(self, X, y=None, **fit_params):
        # Route "step__param" keyword arguments to their steps, then run
        # fit_transform through every step but the last.  Returns the
        # transformed X plus the fit_params destined for the final step.
        fit_params_steps = dict((step, {}) for step, _ in self.steps)
        for pname, pval in six.iteritems(fit_params):
            step, param = pname.split('__', 1)
            fit_params_steps[step][param] = pval
        Xt = X
        for name, transform in self.steps[:-1]:
            if hasattr(transform, "fit_transform"):
                Xt = transform.fit_transform(Xt, y, **fit_params_steps[name])
            else:
                # Fall back to separate fit() then transform().
                Xt = transform.fit(Xt, y, **fit_params_steps[name]) \
                              .transform(Xt)
        return Xt, fit_params_steps[self.steps[-1][0]]

    def fit(self, X, y=None, **fit_params):
        """Fit all the transforms one after the other and transform the
        data, then fit the transformed data using the final estimator.

        Parameters
        ----------
        X : iterable
            Training data. Must fulfill input requirements of first step of the
            pipeline.

        y : iterable, default=None
            Training targets. Must fulfill label requirements for all steps of
            the pipeline.

        Returns
        -------
        self : Pipeline
        """
        Xt, fit_params = self._pre_transform(X, y, **fit_params)
        self.steps[-1][-1].fit(Xt, y, **fit_params)
        return self

    def fit_transform(self, X, y=None, **fit_params):
        """Fit all the transforms one after the other and transform the
        data, then use fit_transform on transformed data using the final
        estimator.

        Parameters
        ----------
        X : iterable
            Training data. Must fulfill input requirements of first step of the
            pipeline.

        y : iterable, default=None
            Training targets. Must fulfill label requirements for all steps of
            the pipeline.
        """
        Xt, fit_params = self._pre_transform(X, y, **fit_params)
        # Prefer the final step's fused fit_transform when available.
        if hasattr(self.steps[-1][-1], 'fit_transform'):
            return self.steps[-1][-1].fit_transform(Xt, y, **fit_params)
        else:
            return self.steps[-1][-1].fit(Xt, y, **fit_params).transform(Xt)
    @if_delegate_has_method(delegate='_final_estimator')
    def predict(self, X):
        """Applies transforms to the data, and the predict method of the
        final estimator. Valid only if the final estimator implements
        predict.

        Parameters
        ----------
        X : iterable
            Data to predict on. Must fulfill input requirements of first step of
            the pipeline.
        """
        # Run X through all transformer steps, then delegate to the
        # final estimator.
        Xt = X
        for name, transform in self.steps[:-1]:
            Xt = transform.transform(Xt)
        return self.steps[-1][-1].predict(Xt)

    @if_delegate_has_method(delegate='_final_estimator')
    def fit_predict(self, X, y=None, **fit_params):
        """Applies fit_predict of last step in pipeline after transforms.

        Applies fit_transforms of a pipeline to the data, followed by the
        fit_predict method of the final estimator in the pipeline. Valid
        only if the final estimator implements fit_predict.

        Parameters
        ----------
        X : iterable
            Training data. Must fulfill input requirements of first step of
            the pipeline.

        y : iterable, default=None
            Training targets. Must fulfill label requirements for all steps
            of the pipeline.
        """
        Xt, fit_params = self._pre_transform(X, y, **fit_params)
        return self.steps[-1][-1].fit_predict(Xt, y, **fit_params)

    @if_delegate_has_method(delegate='_final_estimator')
    def predict_proba(self, X):
        """Applies transforms to the data, and the predict_proba method of the
        final estimator. Valid only if the final estimator implements
        predict_proba.

        Parameters
        ----------
        X : iterable
            Data to predict on. Must fulfill input requirements of first step of
            the pipeline.
        """
        Xt = X
        for name, transform in self.steps[:-1]:
            Xt = transform.transform(Xt)
        return self.steps[-1][-1].predict_proba(Xt)

    @if_delegate_has_method(delegate='_final_estimator')
    def decision_function(self, X):
        """Applies transforms to the data, and the decision_function method of
        the final estimator. Valid only if the final estimator implements
        decision_function.

        Parameters
        ----------
        X : iterable
            Data to predict on. Must fulfill input requirements of first step of
            the pipeline.
        """
        Xt = X
        for name, transform in self.steps[:-1]:
            Xt = transform.transform(Xt)
        return self.steps[-1][-1].decision_function(Xt)

    @if_delegate_has_method(delegate='_final_estimator')
    def predict_log_proba(self, X):
        """Applies transforms to the data, and the predict_log_proba method of
        the final estimator. Valid only if the final estimator implements
        predict_log_proba.

        Parameters
        ----------
        X : iterable
            Data to predict on. Must fulfill input requirements of first step of
            the pipeline.
        """
        Xt = X
        for name, transform in self.steps[:-1]:
            Xt = transform.transform(Xt)
        return self.steps[-1][-1].predict_log_proba(Xt)

    @if_delegate_has_method(delegate='_final_estimator')
    def transform(self, X):
        """Applies transforms to the data, and the transform method of the
        final estimator. Valid only if the final estimator implements
        transform.

        Parameters
        ----------
        X : iterable
            Data to predict on. Must fulfill input requirements of first step of
            the pipeline.
        """
        # Unlike predict(), this also applies the final step's transform.
        Xt = X
        for name, transform in self.steps:
            Xt = transform.transform(Xt)
        return Xt
@if_delegate_has_method(delegate='_final_estimator')
def inverse_transform(self, X):
"""Applies inverse transform to the data.
Starts with the last step of the pipeline and applies ``inverse_transform`` in
inverse order of the pipeline steps.
Valid only if all steps of the pipeline implement inverse_transform.
Parameters
----------
X : iterable
Data to inverse transform. Must fulfill output requirements of the
last step of the pipeline.
"""
if X.ndim == 1:
X = X[None, :]
Xt = X
for name, step in self.steps[::-1]:
Xt = step.inverse_transform(Xt)
return Xt
@if_delegate_has_method(delegate='_final_estimator')
def score(self, X, y=None):
"""Applies transforms to the data, and the score method of the
final estimator. Valid only if the final estimator implements
score.
Parameters
----------
X : iterable
Data to score. Must fulfill input requirements of first step of the
pipeline.
y : iterable, default=None
Targets used for scoring. Must fulfill label requirements for all steps of
the pipeline.
"""
Xt = X
for name, transform in self.steps[:-1]:
Xt = transform.transform(Xt)
return self.steps[-1][-1].score(Xt, y)
@property
def classes_(self):
return self.steps[-1][-1].classes_
@property
def _pairwise(self):
# check if first estimator expects pairwise input
return getattr(self.steps[0][1], '_pairwise', False)
def _name_estimators(estimators):
"""Generate names for estimators."""
names = [type(estimator).__name__.lower() for estimator in estimators]
namecount = defaultdict(int)
for est, name in zip(estimators, names):
namecount[name] += 1
for k, v in list(six.iteritems(namecount)):
if v == 1:
del namecount[k]
for i in reversed(range(len(estimators))):
name = names[i]
if name in namecount:
names[i] += "-%d" % namecount[name]
namecount[name] -= 1
return list(zip(names, estimators))
def make_pipeline(*steps):
    """Build a ``Pipeline`` with auto-generated step names.

    Shorthand for the ``Pipeline`` constructor: the steps are named
    automatically from their class names, so explicit naming is neither
    required nor possible.

    Examples
    --------
    >>> from sklearn.naive_bayes import GaussianNB
    >>> from sklearn.preprocessing import StandardScaler
    >>> make_pipeline(StandardScaler(), GaussianNB())    # doctest: +NORMALIZE_WHITESPACE
    Pipeline(steps=[('standardscaler',
                     StandardScaler(copy=True, with_mean=True, with_std=True)),
                    ('gaussiannb', GaussianNB())])

    Returns
    -------
    p : Pipeline
    """
    named_steps = _name_estimators(steps)
    return Pipeline(named_steps)
def _fit_one_transformer(transformer, X, y):
return transformer.fit(X, y)
def _transform_one(transformer, name, X, transformer_weights):
if transformer_weights is not None and name in transformer_weights:
# if we have a weight for this transformer, muliply output
return transformer.transform(X) * transformer_weights[name]
return transformer.transform(X)
def _fit_transform_one(transformer, name, X, y, transformer_weights,
**fit_params):
if transformer_weights is not None and name in transformer_weights:
# if we have a weight for this transformer, muliply output
if hasattr(transformer, 'fit_transform'):
X_transformed = transformer.fit_transform(X, y, **fit_params)
return X_transformed * transformer_weights[name], transformer
else:
X_transformed = transformer.fit(X, y, **fit_params).transform(X)
return X_transformed * transformer_weights[name], transformer
if hasattr(transformer, 'fit_transform'):
X_transformed = transformer.fit_transform(X, y, **fit_params)
return X_transformed, transformer
else:
X_transformed = transformer.fit(X, y, **fit_params).transform(X)
return X_transformed, transformer
class FeatureUnion(BaseEstimator, TransformerMixin):
    """Concatenate the outputs of several transformer objects.

    Each transformer in ``transformer_list`` is applied (in parallel) to
    the same input data and the resulting feature blocks are horizontally
    stacked. This is a convenient way to combine several feature extraction
    mechanisms into a single transformer.

    Read more in the :ref:`User Guide <feature_union>`.

    Parameters
    ----------
    transformer_list: list of (string, transformer) tuples
        List of transformer objects to be applied to the data. The first
        half of each tuple is the name of the transformer.
    n_jobs: int, optional
        Number of jobs to run in parallel (default 1).
    transformer_weights: dict, optional
        Multiplicative weights for features per transformer.
        Keys are transformer names, values the weights.
    """
    def __init__(self, transformer_list, n_jobs=1, transformer_weights=None):
        self.transformer_list = transformer_list
        self.n_jobs = n_jobs
        self.transformer_weights = transformer_weights
    def get_feature_names(self):
        """Collect feature names from every transformer.

        Returns
        -------
        feature_names : list of strings
            Names of the features produced by transform, each prefixed
            with ``<transformer name>__``.
        """
        names = []
        for trans_name, trans in self.transformer_list:
            if not hasattr(trans, 'get_feature_names'):
                raise AttributeError("Transformer %s does not provide"
                                     " get_feature_names." % str(trans_name))
            for feature in trans.get_feature_names():
                names.append(trans_name + "__" + feature)
        return names
    def fit(self, X, y=None):
        """Fit every transformer on X (in parallel).

        Parameters
        ----------
        X : array-like or sparse matrix, shape (n_samples, n_features)
            Input data, used to fit transformers.
        """
        fitted = Parallel(n_jobs=self.n_jobs)(
            delayed(_fit_one_transformer)(trans, X, y)
            for _, trans in self.transformer_list)
        self._update_transformer_list(fitted)
        return self
    def fit_transform(self, X, y=None, **fit_params):
        """Fit every transformer on X, transform X, and stack the results.

        Parameters
        ----------
        X : array-like or sparse matrix, shape (n_samples, n_features)
            Input data to be transformed.
        Returns
        -------
        X_t : array-like or sparse matrix, shape (n_samples, sum_n_components)
            hstack of results of transformers. sum_n_components is the
            sum of n_components (output dimension) over transformers.
        """
        result = Parallel(n_jobs=self.n_jobs)(
            delayed(_fit_transform_one)(trans, name, X, y,
                                        self.transformer_weights, **fit_params)
            for name, trans in self.transformer_list)
        Xs, fitted_transformers = zip(*result)
        self._update_transformer_list(fitted_transformers)
        # Any sparse block forces a sparse (CSR) result.
        if not any(sparse.issparse(block) for block in Xs):
            return np.hstack(Xs)
        return sparse.hstack(Xs).tocsr()
    def transform(self, X):
        """Transform X with each transformer separately and stack the results.

        Parameters
        ----------
        X : array-like or sparse matrix, shape (n_samples, n_features)
            Input data to be transformed.
        Returns
        -------
        X_t : array-like or sparse matrix, shape (n_samples, sum_n_components)
            hstack of results of transformers. sum_n_components is the
            sum of n_components (output dimension) over transformers.
        """
        blocks = Parallel(n_jobs=self.n_jobs)(
            delayed(_transform_one)(trans, name, X, self.transformer_weights)
            for name, trans in self.transformer_list)
        # Any sparse block forces a sparse (CSR) result.
        if not any(sparse.issparse(block) for block in blocks):
            return np.hstack(blocks)
        return sparse.hstack(blocks).tocsr()
    def get_params(self, deep=True):
        # Shallow params come from BaseEstimator; deep mode additionally
        # exposes each sub-transformer and its nested '<name>__<key>' params.
        shallow = super(FeatureUnion, self).get_params(deep=False)
        if not deep:
            return shallow
        out = dict(self.transformer_list)
        for name, trans in self.transformer_list:
            for key, value in iteritems(trans.get_params(deep=True)):
                out['%s__%s' % (name, key)] = value
        out.update(shallow)
        return out
    def _update_transformer_list(self, transformers):
        # Replace the stored transformers in place, keeping the names.
        names = [name for name, _ in self.transformer_list]
        self.transformer_list[:] = list(zip(names, transformers))
# XXX it would be nice to have a keyword-only n_jobs argument to this function,
# but that's not allowed in Python 2.x.
def make_union(*transformers):
    """Build a ``FeatureUnion`` with auto-generated transformer names.

    Shorthand for the ``FeatureUnion`` constructor: the transformers are
    named automatically from their class names, so explicit naming is
    neither required nor possible. Weighting is not supported either.

    Examples
    --------
    >>> from sklearn.decomposition import PCA, TruncatedSVD
    >>> make_union(PCA(), TruncatedSVD())    # doctest: +NORMALIZE_WHITESPACE
    FeatureUnion(n_jobs=1,
           transformer_list=[('pca', PCA(copy=True, n_components=None,
                                         whiten=False)),
                             ('truncatedsvd',
                              TruncatedSVD(algorithm='randomized',
                                           n_components=2, n_iter=5,
                                           random_state=None, tol=0.0))],
           transformer_weights=None)

    Returns
    -------
    f : FeatureUnion
    """
    named_transformers = _name_estimators(transformers)
    return FeatureUnion(named_transformers)
| bsd-3-clause |
pradyu1993/scikit-learn | sklearn/svm/tests/test_bounds.py | 6 | 2069 | import nose
from nose.tools import assert_true
import numpy as np
from scipy import sparse as sp
from sklearn.svm.bounds import l1_min_c
from sklearn.svm import LinearSVC
from sklearn.linear_model.logistic import LogisticRegression
# Tiny shared fixture: four 2-d samples used by every check below.
dense_X = [[-1, 0], [0, 1], [1, 1], [1, 1]]
# The same data in CSR form, to exercise the sparse code path.
sparse_X = sp.csr_matrix(dense_X)
# Binary and multi-class target vectors for the same four samples.
Y1 = [0, 1, 1, 1]
Y2 = [2, 1, 0, 0]
def test_l1_min_c():
    """Nose generator test: yield one l1_min_c check per combination of
    loss, data representation (dense/sparse), target (binary/multiclass)
    and intercept configuration."""
    losses = ['l2', 'log']
    Xs = {'sparse': sparse_X, 'dense': dense_X}
    Ys = {'two-classes': Y1, 'multi-class': Y2}
    intercepts = {'no-intercept': {'fit_intercept': False},
                  'fit-intercept': {'fit_intercept': True,
                                    'intercept_scaling': 10}}
    for loss in losses:
        for X_label, X in Xs.items():
            for Y_label, Y in Ys.items():
                for intercept_label, intercept_params in intercepts.items():
                    # NOTE(review): this lambda closes over the loop
                    # variables with late binding; it is safe only as long
                    # as the test runner executes each yielded check before
                    # resuming the generator — confirm if porting off nose.
                    check = lambda: check_l1_min_c(X, Y, loss,
                                                   **intercept_params)
                    # Human-readable name shown by nose for each sub-test.
                    check.description = 'Test l1_min_c loss=%r %s %s %s' % \
                                        (loss, X_label, Y_label, intercept_label)
                    yield check
def check_l1_min_c(X, y, loss, fit_intercept=True, intercept_scaling=None):
    """Assert that ``l1_min_c`` returns the exact C threshold: at that C
    the l1-penalized model is entirely null, and just above it some
    coefficient (or the intercept) becomes non-zero."""
    c_min = l1_min_c(X, y, loss, fit_intercept, intercept_scaling)
    estimator = {
        'log': LogisticRegression(penalty='l1'),
        'l2': LinearSVC(loss='l2', penalty='l1', dual=False),
    }[loss]
    estimator.fit_intercept = fit_intercept
    estimator.intercept_scaling = intercept_scaling

    # At C == c_min the solution must be exactly zero everywhere.
    estimator.C = c_min
    estimator.fit(X, y)
    assert_true((np.asarray(estimator.coef_) == 0).all())
    assert_true((np.asarray(estimator.intercept_) == 0).all())

    # Slightly above the threshold, something must turn non-zero.
    estimator.C = c_min * 1.01
    estimator.fit(X, y)
    nonzero_coef = (np.asarray(estimator.coef_) != 0).any()
    nonzero_intercept = (np.asarray(estimator.intercept_) != 0).any()
    assert_true(nonzero_coef or nonzero_intercept)
@nose.tools.raises(ValueError)
def test_ill_posed_min_c():
    """A degenerate, all-zero design matrix must raise ValueError."""
    l1_min_c([[0, 0], [0, 0]], [0, 1])
@nose.tools.raises(ValueError)
def test_unsupported_loss():
    """An unrecognized loss string must raise ValueError."""
    unsupported = 'l1'
    l1_min_c(dense_X, Y1, unsupported)
| bsd-3-clause |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.