| repo_name (stringlengths 7-90) | path (stringlengths 5-191) | copies (stringlengths 1-3) | size (stringlengths 4-6) | content (stringlengths 976-581k) | license (stringclasses, 15 values) |
|---|---|---|---|---|---|
open-risk/concentration_library
|
examples/python/confidence_interval_example.py
|
1
|
1598
|
# encoding: utf-8
# (c) 2016-2020 Open Risk, all rights reserved
#
# ConcentrationMetrics is licensed under the MIT license, a copy of which is included
# in the source distribution of concentrationMetrics. This is notwithstanding any licenses of
# third-party software included in this distribution. You may not use this file except in
# compliance with the License.
#
# Unless required by applicable law or agreed to in writing, software distributed under
# the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
# either express or implied. See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import concentrationMetrics as cm
import matplotlib.pyplot as plt
dataset_path = cm.source_path + "datasets/"
# Bootstrapped confidence intervals
# Select a portfolio of exposures
# Zipf distribution
# a = 1.7 # zipf parameter
# portfolio = np.random.zipf(a, 100)
# Normal distribution
portfolio = np.random.normal(loc=10, scale=1, size=100)
# Simple calculation
myIndex = cm.Index()
value = myIndex.compute(portfolio, index='hhi')
print('New API: ', value)
print("HHI Value ", myIndex.hhi(portfolio))
# Simple calculation with argument
value = myIndex.compute(portfolio, 3, index='hk')
print('New API: ', value)
print("HK Value ", myIndex.hk(portfolio, 3))
# Confidence interval calculation
lower_bound, value, upper_bound = myIndex.compute(portfolio, index='hhi', ci=0.95, samples=10000)
print("Lower Bound: ", lower_bound)
print("Value: ", value)
print("Upper Bound: ", upper_bound)
|
mit
|
kchodorow/tensorflow
|
tensorflow/examples/learn/iris.py
|
19
|
1651
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Example of DNNClassifier for Iris plant dataset."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from sklearn import cross_validation
from sklearn import metrics
import tensorflow as tf
def main(unused_argv):
# Load dataset.
iris = tf.contrib.learn.datasets.load_dataset('iris')
x_train, x_test, y_train, y_test = cross_validation.train_test_split(
iris.data, iris.target, test_size=0.2, random_state=42)
# Build 3 layer DNN with 10, 20, 10 units respectively.
feature_columns = tf.contrib.learn.infer_real_valued_columns_from_input(
x_train)
classifier = tf.contrib.learn.DNNClassifier(
feature_columns=feature_columns, hidden_units=[10, 20, 10], n_classes=3)
# Fit and predict.
classifier.fit(x_train, y_train, steps=200)
predictions = list(classifier.predict(x_test, as_iterable=True))
score = metrics.accuracy_score(y_test, predictions)
print('Accuracy: {0:f}'.format(score))
if __name__ == '__main__':
tf.app.run()
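# Illustrative sketch (an assumption, not part of the original example): a
# rough equivalent of the model above using the tf.keras API and
# sklearn.model_selection, assuming TensorFlow 2.x and scikit-learn >= 0.20.
# It is defined here but never called.
def keras_iris_example():
    from sklearn.datasets import load_iris
    from sklearn.model_selection import train_test_split
    data = load_iris()
    x_train, x_test, y_train, y_test = train_test_split(
        data.data, data.target, test_size=0.2, random_state=42)
    model = tf.keras.Sequential([
        tf.keras.layers.Dense(10, activation='relu'),
        tf.keras.layers.Dense(20, activation='relu'),
        tf.keras.layers.Dense(10, activation='relu'),
        tf.keras.layers.Dense(3, activation='softmax'),
    ])
    model.compile(optimizer='adam',
                  loss='sparse_categorical_crossentropy',
                  metrics=['accuracy'])
    model.fit(x_train, y_train, epochs=200, verbose=0)
    _, accuracy = model.evaluate(x_test, y_test, verbose=0)
    print('Accuracy: {0:f}'.format(accuracy))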
|
apache-2.0
|
dcwangmit01/options-screener
|
app/datareader.py
|
1
|
7351
|
import os
import time
import sys
import re
import json
import pandas as pd
from selenium import webdriver
import selenium.webdriver.chrome.service as service
from selenium.webdriver.support.ui import Select
from pandas_datareader.data import Options
import requests_cache
#####################################################################
# Settings
# cache settings
seconds_to_cache = 180 # seconds
seconds_to_pause = 3 # seconds
yahoo_columns = [
'Strike', 'Expiry', 'Type', 'Symbol', 'Last', 'Bid', 'Ask', 'Chg', 'PctChg', 'Vol', 'Open_Int', 'IV', 'Root',
'IsNonstandard', 'Underlying', 'Underlying_Price', 'Quote_Time', 'Last_Trade_Date', 'JSON'
]
common_columns = [
'strike', 'expiry', 'type', 'symbol', 'lst', 'bid', 'ask', 'chg', 'vol', 'oi', 'root', 'nonstandard', 'underlying',
'underlyingprice', 'quotetime'
]
class Datareader(object):
""" This class is unused """
def __init__(self):
# Create the requests cache
self.session = requests_cache.CachedSession(cache_name='cache', backend='sqlite', expire_after=seconds_to_cache)
def yahoo_options_dataframe(self, ticker):
# fetch all data
option = Options(ticker, 'yahoo', session=self.session)
df = option.get_all_data()
# reset_index()
# copies multi-index values into columns
# sets index to single ordinal integer
df.reset_index(inplace=True)
# rename a bunch of the columns
df.rename(index=str,
inplace=True,
columns={
'Strike': 'strike',
'Expiry': 'expiry',
'Type': 'type',
'Symbol': 'symbol',
'Last': 'lst',
'Bid': 'bid',
'Ask': 'ask',
'Chg': 'chg',
'Vol': 'vol',
'Open_Int': 'oi',
'Root': 'root',
'IsNonstandard': 'nonstandard',
'Underlying': 'underlying',
'Underlying_Price': 'underlyingprice',
'Quote_Time': 'quotetime'
})
# delete unnecessary columns
df.drop('PctChg', axis=1, inplace=True)
df.drop('IV', axis=1, inplace=True)
df.drop('Last_Trade_Date', axis=1, inplace=True)
df.drop('JSON', axis=1, inplace=True)
# normalize values for type column
df['type'] = df.apply(lambda row: 'call' if row['type'] == 'calls' else 'put', axis=1)
return df
def schwab_options_dataframe(self, ticker):
schwab = SchwabBrowser.singleton()
schwab.start()
schwab.login()
url = ('https://client.schwab.com/trade/options/optionChainsJson.ashx' '?autopage=true&symbol=' + ticker)
schwab.get(url)
options_dict = json.loads(self.striphtml(schwab.page_source()))
df = pd.DataFrame(columns=common_columns)
i = 0
for root in options_dict['Roots']:
_underlying = options_dict['UnderLying']
_underlying_price = 0 # TODO
_quotetime = pd.to_datetime(options_dict['TimeStamp'])
_adjusted = True if root['IsAdjusted'] == 'Y' else False
_root = root['Root']
for expiration in root['Expirations']:
_expiry = pd.to_datetime(expiration['Date'])
for strike in expiration['Strikes']:
_strike = strike['Price']
for option in ['Call', 'Put']:
_type = option.lower()
_lst = strike[option]['Lst']
_chg = strike[option]['Chg']
_bid = strike[option]['Bid']
_ask = strike[option]['Ask']
_vol = strike[option]['Vol']
_oi = strike[option]['OI']
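# Build an OCC-style option symbol: underlying, two-digit expiry year,
# month and day, 'C' or 'P', then strike * 1000 zero-padded to 8 digits.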
_symbol = ('{0}{1:02d}{2:02d}{3:02d}{4}{5:08d}'.format(_underlying, _expiry.year - 2000,
_expiry.month, _expiry.day,
"C" if _type == "call" else "P",
int(_strike * 1000)))
df.loc[i] = [
_strike, _expiry, _type, _symbol, _lst, _bid, _ask, _chg, _vol, _oi, _root, _adjusted,
_underlying, _underlying_price, _quotetime
]
i += 1
return df
def striphtml(self, data):
p = re.compile(r'<.*?>')
return p.sub('', data)
class SchwabBrowser(object):
""" This class is unused """
_Singleton = None
@staticmethod
def singleton():
if SchwabBrowser._Singleton is None:
SchwabBrowser._Singleton = SchwabBrowser()
return SchwabBrowser._Singleton
def __init__(self):
# Find the SCHWAB_USER and SCHWAB_PASSWORD through environment variable
if 'SCHWAB_USER' not in os.environ:
print("SCHWAB_USER must be defined in environment")
sys.exit(1)
if 'SCHWAB_PASSWORD' not in os.environ:
print("SCHWAB_PASSWORD must be defined in environment")
sys.exit(1)
self.SCHWAB_USER = os.environ['SCHWAB_USER']
self.SCHWAB_PASSWORD = os.environ['SCHWAB_PASSWORD']
self.service = service.Service('chromedriver')
self.browser = None
self.is_started = False
self.is_logged_in = False
def start(self):
if self.is_started is False:
self.service.start()
capabilities = {'chrome.binary': '/Applications/Google Chrome.app/Contents/MacOS/Google Chrome'}
self.browser = (webdriver.Remote(self.service.service_url, capabilities))
self.is_started = True
return
def login(self):
self.start()
assert (self.is_started is True)
assert (self.browser is not None)
# Don't login twice
if self.is_logged_in is True:
return True
#######################################
# Connect to Schwab and login
br = self.browser
self.get('https://client.schwab.com/Login/SignOn/CustomerCenterLogin.aspx')
user = br.find_element_by_id("ctl00_WebPartManager1_CenterLogin_LoginUserControlId_txtLoginID")
user.send_keys(self.SCHWAB_USER)
pass_ = br.find_element_by_name("txtPassword")
pass_.send_keys(self.SCHWAB_PASSWORD)
select = Select(br.find_element_by_id('ctl00_WebPartManager1_CenterLogin_LoginUserControlId_drpStartPage'))
select.select_by_visible_text('Research')
submit = br.find_element_by_name("btnLogin")
submit.click() # This blocks until page loads but AJAX may continue
time.sleep(5) # Wait additional time for the research page to load
self.is_logged_in = True
return
def get(self, url):
self.browser.get(url)
time.sleep(seconds_to_pause)
return self.browser
def page_source(self):
return self.browser.page_source
def stop(self):
self.service.stop()
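# Illustrative usage sketch (an assumption, not part of the original module):
# fetching a Yahoo option chain needs network access; the Schwab path also
# needs chromedriver and SCHWAB_USER / SCHWAB_PASSWORD in the environment.
if __name__ == '__main__':
    reader = Datareader()
    chain = reader.yahoo_options_dataframe('AAPL')  # example ticker only
    print(chain.head())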
|
mit
|
nkhuyu/data-science-from-scratch
|
code/visualizing_data.py
|
58
|
5116
|
import matplotlib.pyplot as plt
from collections import Counter
def make_chart_simple_line_chart(plt):
years = [1950, 1960, 1970, 1980, 1990, 2000, 2010]
gdp = [300.2, 543.3, 1075.9, 2862.5, 5979.6, 10289.7, 14958.3]
# create a line chart, years on x-axis, gdp on y-axis
plt.plot(years, gdp, color='green', marker='o', linestyle='solid')
# add a title
plt.title("Nominal GDP")
# add a label to the y-axis
plt.ylabel("Billions of $")
plt.show()
def make_chart_simple_bar_chart(plt):
movies = ["Annie Hall", "Ben-Hur", "Casablanca", "Gandhi", "West Side Story"]
num_oscars = [5, 11, 3, 8, 10]
# bars are by default width 0.8, so we'll add 0.1 to the left coordinates
# so that each bar is centered
xs = [i + 0.1 for i, _ in enumerate(movies)]
# plot bars with left x-coordinates [xs], heights [num_oscars]
plt.bar(xs, num_oscars)
plt.ylabel("# of Academy Awards")
plt.title("My Favorite Movies")
# label x-axis with movie names at bar centers
plt.xticks([i + 0.5 for i, _ in enumerate(movies)], movies)
plt.show()
def make_chart_histogram(plt):
grades = [83,95,91,87,70,0,85,82,100,67,73,77,0]
decile = lambda grade: grade // 10 * 10
histogram = Counter(decile(grade) for grade in grades)
plt.bar([x - 4 for x in histogram.keys()], # shift each bar to the left by 4
histogram.values(), # give each bar its correct height
8) # give each bar a width of 8
plt.axis([-5, 105, 0, 5]) # x-axis from -5 to 105,
# y-axis from 0 to 5
plt.xticks([10 * i for i in range(11)]) # x-axis labels at 0, 10, ..., 100
plt.xlabel("Decile")
plt.ylabel("# of Students")
plt.title("Distribution of Exam 1 Grades")
plt.show()
def make_chart_misleading_y_axis(plt, mislead=True):
mentions = [500, 505]
years = [2013, 2014]
plt.bar([2012.6, 2013.6], mentions, 0.8)
plt.xticks(years)
plt.ylabel("# of times I heard someone say 'data science'")
# if you don't do this, matplotlib will label the x-axis 0, 1
# and then add a +2.013e3 off in the corner (bad matplotlib!)
plt.ticklabel_format(useOffset=False)
if mislead:
# misleading y-axis only shows the part above 500
plt.axis([2012.5,2014.5,499,506])
plt.title("Look at the 'Huge' Increase!")
else:
plt.axis([2012.5,2014.5,0,550])
plt.title("Not So Huge Anymore.")
plt.show()
def make_chart_several_line_charts(plt):
variance = [1,2,4,8,16,32,64,128,256]
bias_squared = [256,128,64,32,16,8,4,2,1]
total_error = [x + y for x, y in zip(variance, bias_squared)]
xs = range(len(variance))
# we can make multiple calls to plt.plot
# to show multiple series on the same chart
plt.plot(xs, variance, 'g-', label='variance') # green solid line
plt.plot(xs, bias_squared, 'r-.', label='bias^2') # red dot-dashed line
plt.plot(xs, total_error, 'b:', label='total error') # blue dotted line
# because we've assigned labels to each series
# we can get a legend for free
# loc=9 means "top center"
plt.legend(loc=9)
plt.xlabel("model complexity")
plt.title("The Bias-Variance Tradeoff")
plt.show()
def make_chart_scatter_plot(plt):
friends = [ 70, 65, 72, 63, 71, 64, 60, 64, 67]
minutes = [175, 170, 205, 120, 220, 130, 105, 145, 190]
labels = ['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i']
plt.scatter(friends, minutes)
# label each point
for label, friend_count, minute_count in zip(labels, friends, minutes):
plt.annotate(label,
xy=(friend_count, minute_count), # put the label with its point
xytext=(5, -5), # but slightly offset
textcoords='offset points')
plt.title("Daily Minutes vs. Number of Friends")
plt.xlabel("# of friends")
plt.ylabel("daily minutes spent on the site")
plt.show()
def make_chart_scatterplot_axes(plt, equal_axes=False):
test_1_grades = [ 99, 90, 85, 97, 80]
test_2_grades = [100, 85, 60, 90, 70]
plt.scatter(test_1_grades, test_2_grades)
plt.xlabel("test 1 grade")
plt.ylabel("test 2 grade")
if equal_axes:
plt.title("Axes Are Comparable")
plt.axis("equal")
else:
plt.title("Axes Aren't Comparable")
plt.show()
def make_chart_pie_chart(plt):
plt.pie([0.95, 0.05], labels=["Uses pie charts", "Knows better"])
# make sure pie is a circle and not an oval
plt.axis("equal")
plt.show()
if __name__ == "__main__":
make_chart_simple_line_chart(plt)
make_chart_simple_bar_chart(plt)
make_chart_histogram(plt)
make_chart_misleading_y_axis(plt, mislead=True)
make_chart_misleading_y_axis(plt, mislead=False)
make_chart_several_line_charts(plt)
make_chart_scatterplot_axes(plt, equal_axes=False)
make_chart_scatterplot_axes(plt, equal_axes=True)
make_chart_pie_chart(plt)
|
unlicense
|
allinpaybusiness/ACS
|
TLSW_pred/fyzpred02/fyz_pred_02.py
|
1
|
4127
|
# -*- coding: utf-8 -*-
"""
Spyder Editor
Production model: creditscore_TLSW_fyz.creditscore_randomforest
"""
import sys
import time
import pandas as pd
import numpy as np
from sklearn.externals import joblib
"""
parameters = {'idCard':'530125198606102726', 'mobileNum':'13619662783', 'education':'高中', 'maritalStatus':'22',
'company':'三营村委会', 'MON_6_var1':'5', 'RFM_6_var1':'18499.81', 'RFM_6_var2':'19',
'RFM_6_var3':'9000', 'RFM_6_var5':'973.67', 'MCC_6_var1':'5', 'LOC_6_var11':'云南省', 'RFM_6_var6':'3',
'RFM_12_var30':'6000', 'RFM_12_var40':'0', 'RFM_12_var47':'0', 'cot_score':'654', 'cna_score':'4',
'cst_score':'3', 'cnt_score':'4', 'chv_score':'3', 'dsi_score':'351', 'rsk_score':'143',
'wlp_score':'4', 'crb_score':'0.005230373331', 'summary_score':'496', 'cnp_score':'2'}
"""
def fyz_pred(parameters):
# Build X_test
X_test = pd.DataFrame(parameters, index=[0])
###Construct the test-data dataframe
if int(X_test.loc[0, 'idCard'][(len(X_test.loc[0, 'idCard'])-2):(len(X_test.loc[0, 'idCard'])-1)]) % 2 == 0:
X_test['sexId'] = '2'
else:
X_test['sexId'] = '1'
X_test['phone3'] = str(X_test.loc[0, 'mobileNum'])[0:3]
X_test['age'] = time.localtime().tm_year - int(X_test.loc[0, 'idCard'][6:10])
if X_test.loc[0, 'company'] in ['NULL', '不详', '无', '无业', '待业人员' ]:
X_test['company'] = 0
else:
X_test['company'] = 1
X_test['MCC_6_var1'] = pd.to_numeric(X_test['MCC_6_var1'], errors='coerce')
X_test['MON_6_var1'] = pd.to_numeric(X_test['MON_6_var1'], errors='coerce')
X_test['RFM_6_var1'] = pd.to_numeric(X_test['RFM_6_var1'], errors='coerce')
X_test['RFM_6_var2'] = pd.to_numeric(X_test['RFM_6_var2'], errors='coerce')
X_test['RFM_6_var3'] = pd.to_numeric(X_test['RFM_6_var3'], errors='coerce')
X_test['RFM_6_var5'] = pd.to_numeric(X_test['RFM_6_var5'], errors='coerce')
X_test['RFM_12_var30'] = pd.to_numeric(X_test['RFM_12_var30'], errors='coerce')
X_test['RFM_12_var40'] = pd.to_numeric(X_test['RFM_12_var40'], errors='coerce')
X_test['RFM_12_var47'] = pd.to_numeric(X_test['RFM_12_var47'], errors='coerce')
X_test = X_test.drop(['idCard', 'mobileNum', 'rsk_score'], axis = 1)
###Handle null values
fillna_value = joblib.load('TLSW_pred\\fyzpred02\\fillna_value_fyz_randomforest.pkl')
for col in X_test.columns:
if any(fillna_value.columns == col):
X_test[col] = X_test[col].fillna(fillna_value.loc[0, col])
###Load the data preprocessing models (binning and WoE)
binandwoe = joblib.load('TLSW_pred\\fyzpred02\\binandwoe_fyz_randomforest.pkl')
cols = binandwoe[0]
binmodel = binandwoe[1]
woemodel = binandwoe[2]
###Process the data column by column
for col in X_test.columns:
###bin
if col in cols:
ix = cols.index(col)
breakpoints = binmodel[ix]
labels = np.arange(len(breakpoints) - 1)
X_test[col] = pd.cut(X_test[col],bins=breakpoints,right=True,labels=labels,include_lowest=True)
X_test[col] = X_test[col].astype('object')
else:
X_test[col] = X_test[col].astype('object')
###woe
if any(woemodel['col'] == col):
woecol = woemodel[woemodel['col'] == col]
if any(woecol['cat'] == X_test.loc[0, col]):
X_test[col] = woecol.loc[woecol['cat'] == X_test.loc[0, col], 'woe']
else:
X_test[col] = 0
else:
X_test[col] = 0
###Select the final columns used for modeling
testcolumns = joblib.load('TLSW_pred\\fyzpred02\\testcolumns_fyz_randomforest.pkl')
X_test = X_test[testcolumns]
###Load the default-probability model
classifier = joblib.load('TLSW_pred\\fyzpred02\\classifier_fyz_randomforest.pkl')
probability = classifier.predict_proba(X_test)
###Convert the probability to a score
riskscore = str(int(300 + probability[0][0] * 700))
return riskscore
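if __name__ == '__main__':
    # Illustrative sketch (an assumption, not part of the original module):
    # the example parameters from the docstring above; running it requires
    # the pickled model files under TLSW_pred\fyzpred02.
    example_parameters = {
        'idCard': '530125198606102726', 'mobileNum': '13619662783',
        'education': '高中', 'maritalStatus': '22', 'company': '三营村委会',
        'MON_6_var1': '5', 'RFM_6_var1': '18499.81', 'RFM_6_var2': '19',
        'RFM_6_var3': '9000', 'RFM_6_var5': '973.67', 'MCC_6_var1': '5',
        'LOC_6_var11': '云南省', 'RFM_6_var6': '3', 'RFM_12_var30': '6000',
        'RFM_12_var40': '0', 'RFM_12_var47': '0', 'cot_score': '654',
        'cna_score': '4', 'cst_score': '3', 'cnt_score': '4', 'chv_score': '3',
        'dsi_score': '351', 'rsk_score': '143', 'wlp_score': '4',
        'crb_score': '0.005230373331', 'summary_score': '496', 'cnp_score': '2'}
    print(fyz_pred(example_parameters))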
|
apache-2.0
|
shusenl/scikit-learn
|
sklearn/svm/tests/test_svm.py
|
70
|
31674
|
"""
Testing for Support Vector Machine module (sklearn.svm)
TODO: remove hard coded numerical results when possible
"""
import numpy as np
import itertools
from numpy.testing import assert_array_equal, assert_array_almost_equal
from numpy.testing import assert_almost_equal
from scipy import sparse
from nose.tools import assert_raises, assert_true, assert_equal, assert_false
from sklearn.base import ChangedBehaviorWarning
from sklearn import svm, linear_model, datasets, metrics, base
from sklearn.cross_validation import train_test_split
from sklearn.datasets import make_classification, make_blobs
from sklearn.metrics import f1_score
from sklearn.metrics.pairwise import rbf_kernel
from sklearn.utils import check_random_state
from sklearn.utils import ConvergenceWarning
from sklearn.utils.validation import NotFittedError
from sklearn.utils.testing import assert_greater, assert_in, assert_less
from sklearn.utils.testing import assert_raises_regexp, assert_warns
from sklearn.utils.testing import assert_warns_message, assert_raise_message
from sklearn.utils.testing import ignore_warnings
# toy sample
X = [[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]]
Y = [1, 1, 1, 2, 2, 2]
T = [[-1, -1], [2, 2], [3, 2]]
true_result = [1, 2, 2]
# also load the iris dataset
iris = datasets.load_iris()
rng = check_random_state(42)
perm = rng.permutation(iris.target.size)
iris.data = iris.data[perm]
iris.target = iris.target[perm]
def test_libsvm_parameters():
# Test parameters on classes that make use of libsvm.
clf = svm.SVC(kernel='linear').fit(X, Y)
assert_array_equal(clf.dual_coef_, [[-0.25, .25]])
assert_array_equal(clf.support_, [1, 3])
assert_array_equal(clf.support_vectors_, (X[1], X[3]))
assert_array_equal(clf.intercept_, [0.])
assert_array_equal(clf.predict(X), Y)
def test_libsvm_iris():
# Check consistency on dataset iris.
# shuffle the dataset so that labels are not ordered
for k in ('linear', 'rbf'):
clf = svm.SVC(kernel=k).fit(iris.data, iris.target)
assert_greater(np.mean(clf.predict(iris.data) == iris.target), 0.9)
assert_array_equal(clf.classes_, np.sort(clf.classes_))
# check also the low-level API
model = svm.libsvm.fit(iris.data, iris.target.astype(np.float64))
pred = svm.libsvm.predict(iris.data, *model)
assert_greater(np.mean(pred == iris.target), .95)
model = svm.libsvm.fit(iris.data, iris.target.astype(np.float64),
kernel='linear')
pred = svm.libsvm.predict(iris.data, *model, kernel='linear')
assert_greater(np.mean(pred == iris.target), .95)
pred = svm.libsvm.cross_validation(iris.data,
iris.target.astype(np.float64), 5,
kernel='linear',
random_seed=0)
assert_greater(np.mean(pred == iris.target), .95)
# If random_seed >= 0, the libsvm rng is seeded (by calling `srand`), hence
# we should get deterministic results (assuming that there is no other
# thread calling this wrapper calling `srand` concurrently).
pred2 = svm.libsvm.cross_validation(iris.data,
iris.target.astype(np.float64), 5,
kernel='linear',
random_seed=0)
assert_array_equal(pred, pred2)
@ignore_warnings
def test_single_sample_1d():
# Test whether SVCs work on a single sample given as a 1-d array
clf = svm.SVC().fit(X, Y)
clf.predict(X[0])
clf = svm.LinearSVC(random_state=0).fit(X, Y)
clf.predict(X[0])
def test_precomputed():
# SVC with a precomputed kernel.
# We test it with a toy dataset and with iris.
clf = svm.SVC(kernel='precomputed')
# Gram matrix for train data (square matrix)
# (we use just a linear kernel)
K = np.dot(X, np.array(X).T)
clf.fit(K, Y)
# Gram matrix for test data (rectangular matrix)
KT = np.dot(T, np.array(X).T)
pred = clf.predict(KT)
assert_raises(ValueError, clf.predict, KT.T)
assert_array_equal(clf.dual_coef_, [[-0.25, .25]])
assert_array_equal(clf.support_, [1, 3])
assert_array_equal(clf.intercept_, [0])
assert_array_almost_equal(clf.support_, [1, 3])
assert_array_equal(pred, true_result)
# Gram matrix for test data but compute KT[i,j]
# for support vectors j only.
KT = np.zeros_like(KT)
for i in range(len(T)):
for j in clf.support_:
KT[i, j] = np.dot(T[i], X[j])
pred = clf.predict(KT)
assert_array_equal(pred, true_result)
# same as before, but using a callable function instead of the kernel
# matrix. kernel is just a linear kernel
kfunc = lambda x, y: np.dot(x, y.T)
clf = svm.SVC(kernel=kfunc)
clf.fit(X, Y)
pred = clf.predict(T)
assert_array_equal(clf.dual_coef_, [[-0.25, .25]])
assert_array_equal(clf.intercept_, [0])
assert_array_almost_equal(clf.support_, [1, 3])
assert_array_equal(pred, true_result)
# test a precomputed kernel with the iris dataset
# and check parameters against a linear SVC
clf = svm.SVC(kernel='precomputed')
clf2 = svm.SVC(kernel='linear')
K = np.dot(iris.data, iris.data.T)
clf.fit(K, iris.target)
clf2.fit(iris.data, iris.target)
pred = clf.predict(K)
assert_array_almost_equal(clf.support_, clf2.support_)
assert_array_almost_equal(clf.dual_coef_, clf2.dual_coef_)
assert_array_almost_equal(clf.intercept_, clf2.intercept_)
assert_almost_equal(np.mean(pred == iris.target), .99, decimal=2)
# Gram matrix for test data but compute KT[i,j]
# for support vectors j only.
K = np.zeros_like(K)
for i in range(len(iris.data)):
for j in clf.support_:
K[i, j] = np.dot(iris.data[i], iris.data[j])
pred = clf.predict(K)
assert_almost_equal(np.mean(pred == iris.target), .99, decimal=2)
clf = svm.SVC(kernel=kfunc)
clf.fit(iris.data, iris.target)
assert_almost_equal(np.mean(pred == iris.target), .99, decimal=2)
def test_svr():
# Test Support Vector Regression
diabetes = datasets.load_diabetes()
for clf in (svm.NuSVR(kernel='linear', nu=.4, C=1.0),
svm.NuSVR(kernel='linear', nu=.4, C=10.),
svm.SVR(kernel='linear', C=10.),
svm.LinearSVR(C=10.),
svm.LinearSVR(C=10.),
):
clf.fit(diabetes.data, diabetes.target)
assert_greater(clf.score(diabetes.data, diabetes.target), 0.02)
# non-regression test; previously, BaseLibSVM would check that
# len(np.unique(y)) < 2, which must only be done for SVC
svm.SVR().fit(diabetes.data, np.ones(len(diabetes.data)))
svm.LinearSVR().fit(diabetes.data, np.ones(len(diabetes.data)))
def test_linearsvr():
# check that SVR(kernel='linear') and LinearSVC() give
# comparable results
diabetes = datasets.load_diabetes()
lsvr = svm.LinearSVR(C=1e3).fit(diabetes.data, diabetes.target)
score1 = lsvr.score(diabetes.data, diabetes.target)
svr = svm.SVR(kernel='linear', C=1e3).fit(diabetes.data, diabetes.target)
score2 = svr.score(diabetes.data, diabetes.target)
assert np.linalg.norm(lsvr.coef_ - svr.coef_) / np.linalg.norm(svr.coef_) < .1
assert np.abs(score1 - score2) < 0.1
def test_svr_errors():
X = [[0.0], [1.0]]
y = [0.0, 0.5]
# Bad kernel
clf = svm.SVR(kernel=lambda x, y: np.array([[1.0]]))
clf.fit(X, y)
assert_raises(ValueError, clf.predict, X)
def test_oneclass():
# Test OneClassSVM
clf = svm.OneClassSVM()
clf.fit(X)
pred = clf.predict(T)
assert_array_almost_equal(pred, [-1, -1, -1])
assert_array_almost_equal(clf.intercept_, [-1.008], decimal=3)
assert_array_almost_equal(clf.dual_coef_,
[[0.632, 0.233, 0.633, 0.234, 0.632, 0.633]],
decimal=3)
assert_raises(ValueError, lambda: clf.coef_)
def test_oneclass_decision_function():
# Test OneClassSVM decision function
clf = svm.OneClassSVM()
rnd = check_random_state(2)
# Generate train data
X = 0.3 * rnd.randn(100, 2)
X_train = np.r_[X + 2, X - 2]
# Generate some regular novel observations
X = 0.3 * rnd.randn(20, 2)
X_test = np.r_[X + 2, X - 2]
# Generate some abnormal novel observations
X_outliers = rnd.uniform(low=-4, high=4, size=(20, 2))
# fit the model
clf = svm.OneClassSVM(nu=0.1, kernel="rbf", gamma=0.1)
clf.fit(X_train)
# predict things
y_pred_test = clf.predict(X_test)
assert_greater(np.mean(y_pred_test == 1), .9)
y_pred_outliers = clf.predict(X_outliers)
assert_greater(np.mean(y_pred_outliers == -1), .9)
dec_func_test = clf.decision_function(X_test)
assert_array_equal((dec_func_test > 0).ravel(), y_pred_test == 1)
dec_func_outliers = clf.decision_function(X_outliers)
assert_array_equal((dec_func_outliers > 0).ravel(), y_pred_outliers == 1)
def test_tweak_params():
# Make sure some tweaking of parameters works.
# We change clf.dual_coef_ at run time and expect .predict() to change
# accordingly. Notice that this is not trivial since it involves a lot
# of C/Python copying in the libsvm bindings.
# The success of this test ensures that the mapping between libsvm and
# the python classifier is complete.
clf = svm.SVC(kernel='linear', C=1.0)
clf.fit(X, Y)
assert_array_equal(clf.dual_coef_, [[-.25, .25]])
assert_array_equal(clf.predict([[-.1, -.1]]), [1])
clf._dual_coef_ = np.array([[.0, 1.]])
assert_array_equal(clf.predict([[-.1, -.1]]), [2])
def test_probability():
# Predict probabilities using SVC
# This uses cross validation, so we use a slightly bigger testing set.
for clf in (svm.SVC(probability=True, random_state=0, C=1.0),
svm.NuSVC(probability=True, random_state=0)):
clf.fit(iris.data, iris.target)
prob_predict = clf.predict_proba(iris.data)
assert_array_almost_equal(
np.sum(prob_predict, 1), np.ones(iris.data.shape[0]))
assert_true(np.mean(np.argmax(prob_predict, 1)
== clf.predict(iris.data)) > 0.9)
assert_almost_equal(clf.predict_proba(iris.data),
np.exp(clf.predict_log_proba(iris.data)), 8)
def test_decision_function():
# Test decision_function
# Sanity check, test that decision_function implemented in python
# returns the same as the one in libsvm
# multi class:
clf = svm.SVC(kernel='linear', C=0.1,
decision_function_shape='ovo').fit(iris.data, iris.target)
dec = np.dot(iris.data, clf.coef_.T) + clf.intercept_
assert_array_almost_equal(dec, clf.decision_function(iris.data))
# binary:
clf.fit(X, Y)
dec = np.dot(X, clf.coef_.T) + clf.intercept_
prediction = clf.predict(X)
assert_array_almost_equal(dec.ravel(), clf.decision_function(X))
assert_array_almost_equal(
prediction,
clf.classes_[(clf.decision_function(X) > 0).astype(np.int)])
expected = np.array([-1., -0.66, -1., 0.66, 1., 1.])
assert_array_almost_equal(clf.decision_function(X), expected, 2)
# kernel binary:
clf = svm.SVC(kernel='rbf', gamma=1, decision_function_shape='ovo')
clf.fit(X, Y)
rbfs = rbf_kernel(X, clf.support_vectors_, gamma=clf.gamma)
dec = np.dot(rbfs, clf.dual_coef_.T) + clf.intercept_
assert_array_almost_equal(dec.ravel(), clf.decision_function(X))
def test_decision_function_shape():
# check that decision_function_shape='ovr' gives
# correct shape and is consistent with predict
clf = svm.SVC(kernel='linear', C=0.1,
decision_function_shape='ovr').fit(iris.data, iris.target)
dec = clf.decision_function(iris.data)
assert_equal(dec.shape, (len(iris.data), 3))
assert_array_equal(clf.predict(iris.data), np.argmax(dec, axis=1))
# with five classes:
X, y = make_blobs(n_samples=80, centers=5, random_state=0)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
clf = svm.SVC(kernel='linear', C=0.1,
decision_function_shape='ovr').fit(X_train, y_train)
dec = clf.decision_function(X_test)
assert_equal(dec.shape, (len(X_test), 5))
assert_array_equal(clf.predict(X_test), np.argmax(dec, axis=1))
# check shape with decision_function_shape='ovo'
clf = svm.SVC(kernel='linear', C=0.1,
decision_function_shape='ovo').fit(X_train, y_train)
dec = clf.decision_function(X_train)
assert_equal(dec.shape, (len(X_train), 10))
# check deprecation warning
clf.decision_function_shape = None
msg = "change the shape of the decision function"
dec = assert_warns_message(ChangedBehaviorWarning, msg,
clf.decision_function, X_train)
assert_equal(dec.shape, (len(X_train), 10))
def test_svr_decision_function():
# Test SVR's decision_function
# Sanity check, test that decision_function implemented in python
# returns the same as the one in libsvm
X = iris.data
y = iris.target
# linear kernel
reg = svm.SVR(kernel='linear', C=0.1).fit(X, y)
dec = np.dot(X, reg.coef_.T) + reg.intercept_
assert_array_almost_equal(dec.ravel(), reg.decision_function(X).ravel())
# rbf kernel
reg = svm.SVR(kernel='rbf', gamma=1).fit(X, y)
rbfs = rbf_kernel(X, reg.support_vectors_, gamma=reg.gamma)
dec = np.dot(rbfs, reg.dual_coef_.T) + reg.intercept_
assert_array_almost_equal(dec.ravel(), reg.decision_function(X).ravel())
def test_weight():
# Test class weights
clf = svm.SVC(class_weight={1: 0.1})
# we give a small weights to class 1
clf.fit(X, Y)
# so all predicted values belong to class 2
assert_array_almost_equal(clf.predict(X), [2] * 6)
X_, y_ = make_classification(n_samples=200, n_features=10,
weights=[0.833, 0.167], random_state=2)
for clf in (linear_model.LogisticRegression(),
svm.LinearSVC(random_state=0), svm.SVC()):
clf.set_params(class_weight={0: .1, 1: 10})
clf.fit(X_[:100], y_[:100])
y_pred = clf.predict(X_[100:])
assert_true(f1_score(y_[100:], y_pred) > .3)
def test_sample_weights():
# Test weights on individual samples
# TODO: check on NuSVR, OneClass, etc.
clf = svm.SVC()
clf.fit(X, Y)
assert_array_equal(clf.predict([X[2]]), [1.])
sample_weight = [.1] * 3 + [10] * 3
clf.fit(X, Y, sample_weight=sample_weight)
assert_array_equal(clf.predict([X[2]]), [2.])
# test that rescaling all samples is the same as changing C
clf = svm.SVC()
clf.fit(X, Y)
dual_coef_no_weight = clf.dual_coef_
clf.set_params(C=100)
clf.fit(X, Y, sample_weight=np.repeat(0.01, len(X)))
assert_array_almost_equal(dual_coef_no_weight, clf.dual_coef_)
def test_auto_weight():
# Test class weights for imbalanced data
from sklearn.linear_model import LogisticRegression
# We take as dataset the two-dimensional projection of iris so
# that it is not separable and remove half of predictors from
# class 1.
# We add one to the targets as a non-regression test: class_weight="balanced"
# used to work only when the labels were a range [0..K).
from sklearn.utils import compute_class_weight
X, y = iris.data[:, :2], iris.target + 1
unbalanced = np.delete(np.arange(y.size), np.where(y > 2)[0][::2])
classes = np.unique(y[unbalanced])
class_weights = compute_class_weight('balanced', classes, y[unbalanced])
assert_true(np.argmax(class_weights) == 2)
for clf in (svm.SVC(kernel='linear'), svm.LinearSVC(random_state=0),
LogisticRegression()):
# check that score is better when class='balanced' is set.
y_pred = clf.fit(X[unbalanced], y[unbalanced]).predict(X)
clf.set_params(class_weight='balanced')
y_pred_balanced = clf.fit(X[unbalanced], y[unbalanced],).predict(X)
assert_true(metrics.f1_score(y, y_pred, average='weighted')
<= metrics.f1_score(y, y_pred_balanced,
average='weighted'))
def test_bad_input():
# Test that it gives proper exception on deficient input
# impossible value of C
assert_raises(ValueError, svm.SVC(C=-1).fit, X, Y)
# impossible value of nu
clf = svm.NuSVC(nu=0.0)
assert_raises(ValueError, clf.fit, X, Y)
Y2 = Y[:-1] # wrong dimensions for labels
assert_raises(ValueError, clf.fit, X, Y2)
# Test with arrays that are non-contiguous.
for clf in (svm.SVC(), svm.LinearSVC(random_state=0)):
Xf = np.asfortranarray(X)
assert_false(Xf.flags['C_CONTIGUOUS'])
yf = np.ascontiguousarray(np.tile(Y, (2, 1)).T)
yf = yf[:, -1]
assert_false(yf.flags['F_CONTIGUOUS'])
assert_false(yf.flags['C_CONTIGUOUS'])
clf.fit(Xf, yf)
assert_array_equal(clf.predict(T), true_result)
# error for precomputed kernels
clf = svm.SVC(kernel='precomputed')
assert_raises(ValueError, clf.fit, X, Y)
# sample_weight bad dimensions
clf = svm.SVC()
assert_raises(ValueError, clf.fit, X, Y, sample_weight=range(len(X) - 1))
# predict with sparse input when trained with dense
clf = svm.SVC().fit(X, Y)
assert_raises(ValueError, clf.predict, sparse.lil_matrix(X))
Xt = np.array(X).T
clf.fit(np.dot(X, Xt), Y)
assert_raises(ValueError, clf.predict, X)
clf = svm.SVC()
clf.fit(X, Y)
assert_raises(ValueError, clf.predict, Xt)
def test_sparse_precomputed():
clf = svm.SVC(kernel='precomputed')
sparse_gram = sparse.csr_matrix([[1, 0], [0, 1]])
try:
clf.fit(sparse_gram, [0, 1])
assert not "reached"
except TypeError as e:
assert_in("Sparse precomputed", str(e))
def test_linearsvc_parameters():
# Test possible parameter combinations in LinearSVC
# Generate list of possible parameter combinations
losses = ['hinge', 'squared_hinge', 'logistic_regression', 'foo']
penalties, duals = ['l1', 'l2', 'bar'], [True, False]
X, y = make_classification(n_samples=5, n_features=5)
for loss, penalty, dual in itertools.product(losses, penalties, duals):
clf = svm.LinearSVC(penalty=penalty, loss=loss, dual=dual)
if ((loss, penalty) == ('hinge', 'l1') or
(loss, penalty, dual) == ('hinge', 'l2', False) or
(penalty, dual) == ('l1', True) or
loss == 'foo' or penalty == 'bar'):
assert_raises_regexp(ValueError,
"Unsupported set of arguments.*penalty='%s.*"
"loss='%s.*dual=%s"
% (penalty, loss, dual),
clf.fit, X, y)
else:
clf.fit(X, y)
# Incorrect loss value - test if explicit error message is raised
assert_raises_regexp(ValueError, ".*loss='l3' is not supported.*",
svm.LinearSVC(loss="l3").fit, X, y)
# FIXME remove in 1.0
def test_linearsvx_loss_penalty_deprecations():
X, y = [[0.0], [1.0]], [0, 1]
msg = ("loss='%s' has been deprecated in favor of "
"loss='%s' as of 0.16. Backward compatibility"
" for the %s will be removed in %s")
# LinearSVC
# loss l1/L1 --> hinge
assert_warns_message(DeprecationWarning,
msg % ("l1", "hinge", "loss='l1'", "1.0"),
svm.LinearSVC(loss="l1").fit, X, y)
# loss l2/L2 --> squared_hinge
assert_warns_message(DeprecationWarning,
msg % ("L2", "squared_hinge", "loss='L2'", "1.0"),
svm.LinearSVC(loss="L2").fit, X, y)
# LinearSVR
# loss l1/L1 --> epsilon_insensitive
assert_warns_message(DeprecationWarning,
msg % ("L1", "epsilon_insensitive", "loss='L1'",
"1.0"),
svm.LinearSVR(loss="L1").fit, X, y)
# loss l2/L2 --> squared_epsilon_insensitive
assert_warns_message(DeprecationWarning,
msg % ("l2", "squared_epsilon_insensitive",
"loss='l2'", "1.0"),
svm.LinearSVR(loss="l2").fit, X, y)
# FIXME remove in 0.18
def test_linear_svx_uppercase_loss_penalty():
# Check if Upper case notation is supported by _fit_liblinear
# which is called by fit
X, y = [[0.0], [1.0]], [0, 1]
msg = ("loss='%s' has been deprecated in favor of "
"loss='%s' as of 0.16. Backward compatibility"
" for the uppercase notation will be removed in %s")
# loss SQUARED_hinge --> squared_hinge
assert_warns_message(DeprecationWarning,
msg % ("SQUARED_hinge", "squared_hinge", "0.18"),
svm.LinearSVC(loss="SQUARED_hinge").fit, X, y)
# penalty L2 --> l2
assert_warns_message(DeprecationWarning,
msg.replace("loss", "penalty")
% ("L2", "l2", "0.18"),
svm.LinearSVC(penalty="L2").fit, X, y)
# loss EPSILON_INSENSITIVE --> epsilon_insensitive
assert_warns_message(DeprecationWarning,
msg % ("EPSILON_INSENSITIVE", "epsilon_insensitive",
"0.18"),
svm.LinearSVR(loss="EPSILON_INSENSITIVE").fit, X, y)
def test_linearsvc():
# Test basic routines using LinearSVC
clf = svm.LinearSVC(random_state=0).fit(X, Y)
# by default should have intercept
assert_true(clf.fit_intercept)
assert_array_equal(clf.predict(T), true_result)
assert_array_almost_equal(clf.intercept_, [0], decimal=3)
# the same with l1 penalty
clf = svm.LinearSVC(penalty='l1', loss='squared_hinge', dual=False, random_state=0).fit(X, Y)
assert_array_equal(clf.predict(T), true_result)
# l2 penalty with dual formulation
clf = svm.LinearSVC(penalty='l2', dual=True, random_state=0).fit(X, Y)
assert_array_equal(clf.predict(T), true_result)
# l2 penalty, l1 loss
clf = svm.LinearSVC(penalty='l2', loss='hinge', dual=True, random_state=0)
clf.fit(X, Y)
assert_array_equal(clf.predict(T), true_result)
# test also decision function
dec = clf.decision_function(T)
res = (dec > 0).astype(np.int) + 1
assert_array_equal(res, true_result)
def test_linearsvc_crammer_singer():
# Test LinearSVC with crammer_singer multi-class svm
ovr_clf = svm.LinearSVC(random_state=0).fit(iris.data, iris.target)
cs_clf = svm.LinearSVC(multi_class='crammer_singer', random_state=0)
cs_clf.fit(iris.data, iris.target)
# similar prediction for ovr and crammer-singer:
assert_true((ovr_clf.predict(iris.data) ==
cs_clf.predict(iris.data)).mean() > .9)
# classifiers shouldn't be the same
assert_true((ovr_clf.coef_ != cs_clf.coef_).all())
# test decision function
assert_array_equal(cs_clf.predict(iris.data),
np.argmax(cs_clf.decision_function(iris.data), axis=1))
dec_func = np.dot(iris.data, cs_clf.coef_.T) + cs_clf.intercept_
assert_array_almost_equal(dec_func, cs_clf.decision_function(iris.data))
def test_crammer_singer_binary():
# Test Crammer-Singer formulation in the binary case
X, y = make_classification(n_classes=2, random_state=0)
for fit_intercept in (True, False):
acc = svm.LinearSVC(fit_intercept=fit_intercept,
multi_class="crammer_singer",
random_state=0).fit(X, y).score(X, y)
assert_greater(acc, 0.9)
def test_linearsvc_iris():
# Test that LinearSVC gives plausible predictions on the iris dataset
# Also, test symbolic class names (classes_).
target = iris.target_names[iris.target]
clf = svm.LinearSVC(random_state=0).fit(iris.data, target)
assert_equal(set(clf.classes_), set(iris.target_names))
assert_greater(np.mean(clf.predict(iris.data) == target), 0.8)
dec = clf.decision_function(iris.data)
pred = iris.target_names[np.argmax(dec, 1)]
assert_array_equal(pred, clf.predict(iris.data))
def test_dense_liblinear_intercept_handling(classifier=svm.LinearSVC):
# Test that dense liblinear honours intercept_scaling param
X = [[2, 1],
[3, 1],
[1, 3],
[2, 3]]
y = [0, 0, 1, 1]
clf = classifier(fit_intercept=True, penalty='l1', loss='squared_hinge',
dual=False, C=4, tol=1e-7, random_state=0)
assert_true(clf.intercept_scaling == 1, clf.intercept_scaling)
assert_true(clf.fit_intercept)
# when intercept_scaling is low the intercept value is highly "penalized"
# by regularization
clf.intercept_scaling = 1
clf.fit(X, y)
assert_almost_equal(clf.intercept_, 0, decimal=5)
# when intercept_scaling is sufficiently high, the intercept value
# is not affected by regularization
clf.intercept_scaling = 100
clf.fit(X, y)
intercept1 = clf.intercept_
assert_less(intercept1, -1)
# when intercept_scaling is sufficiently high, the intercept value
# doesn't depend on intercept_scaling value
clf.intercept_scaling = 1000
clf.fit(X, y)
intercept2 = clf.intercept_
assert_array_almost_equal(intercept1, intercept2, decimal=2)
def test_liblinear_set_coef():
# multi-class case
clf = svm.LinearSVC().fit(iris.data, iris.target)
values = clf.decision_function(iris.data)
clf.coef_ = clf.coef_.copy()
clf.intercept_ = clf.intercept_.copy()
values2 = clf.decision_function(iris.data)
assert_array_almost_equal(values, values2)
# binary-class case
X = [[2, 1],
[3, 1],
[1, 3],
[2, 3]]
y = [0, 0, 1, 1]
clf = svm.LinearSVC().fit(X, y)
values = clf.decision_function(X)
clf.coef_ = clf.coef_.copy()
clf.intercept_ = clf.intercept_.copy()
values2 = clf.decision_function(X)
assert_array_equal(values, values2)
def test_immutable_coef_property():
# Check that primal coef modification are not silently ignored
svms = [
svm.SVC(kernel='linear').fit(iris.data, iris.target),
svm.NuSVC(kernel='linear').fit(iris.data, iris.target),
svm.SVR(kernel='linear').fit(iris.data, iris.target),
svm.NuSVR(kernel='linear').fit(iris.data, iris.target),
svm.OneClassSVM(kernel='linear').fit(iris.data),
]
for clf in svms:
assert_raises(AttributeError, clf.__setattr__, 'coef_', np.arange(3))
assert_raises((RuntimeError, ValueError),
clf.coef_.__setitem__, (0, 0), 0)
def test_linearsvc_verbose():
# stdout: redirect
import os
stdout = os.dup(1) # save original stdout
os.dup2(os.pipe()[1], 1) # replace it
# actual call
clf = svm.LinearSVC(verbose=1)
clf.fit(X, Y)
# stdout: restore
os.dup2(stdout, 1) # restore original stdout
def test_svc_clone_with_callable_kernel():
# create SVM with callable linear kernel, check that results are the same
# as with built-in linear kernel
svm_callable = svm.SVC(kernel=lambda x, y: np.dot(x, y.T),
probability=True, random_state=0,
decision_function_shape='ovr')
# clone for checking clonability with lambda functions..
svm_cloned = base.clone(svm_callable)
svm_cloned.fit(iris.data, iris.target)
svm_builtin = svm.SVC(kernel='linear', probability=True, random_state=0,
decision_function_shape='ovr')
svm_builtin.fit(iris.data, iris.target)
assert_array_almost_equal(svm_cloned.dual_coef_,
svm_builtin.dual_coef_)
assert_array_almost_equal(svm_cloned.intercept_,
svm_builtin.intercept_)
assert_array_equal(svm_cloned.predict(iris.data),
svm_builtin.predict(iris.data))
assert_array_almost_equal(svm_cloned.predict_proba(iris.data),
svm_builtin.predict_proba(iris.data),
decimal=4)
assert_array_almost_equal(svm_cloned.decision_function(iris.data),
svm_builtin.decision_function(iris.data))
def test_svc_bad_kernel():
svc = svm.SVC(kernel=lambda x, y: x)
assert_raises(ValueError, svc.fit, X, Y)
def test_timeout():
a = svm.SVC(kernel=lambda x, y: np.dot(x, y.T), probability=True,
random_state=0, max_iter=1)
assert_warns(ConvergenceWarning, a.fit, X, Y)
def test_unfitted():
X = "foo!" # input validation not required when SVM not fitted
clf = svm.SVC()
assert_raises_regexp(Exception, r".*\bSVC\b.*\bnot\b.*\bfitted\b",
clf.predict, X)
clf = svm.NuSVR()
assert_raises_regexp(Exception, r".*\bNuSVR\b.*\bnot\b.*\bfitted\b",
clf.predict, X)
# ignore convergence warnings from max_iter=1
@ignore_warnings
def test_consistent_proba():
a = svm.SVC(probability=True, max_iter=1, random_state=0)
proba_1 = a.fit(X, Y).predict_proba(X)
a = svm.SVC(probability=True, max_iter=1, random_state=0)
proba_2 = a.fit(X, Y).predict_proba(X)
assert_array_almost_equal(proba_1, proba_2)
def test_linear_svc_convergence_warnings():
# Test that warnings are raised if model does not converge
lsvc = svm.LinearSVC(max_iter=2, verbose=1)
assert_warns(ConvergenceWarning, lsvc.fit, X, Y)
assert_equal(lsvc.n_iter_, 2)
def test_svr_coef_sign():
# Test that SVR(kernel="linear") has coef_ with the right sign.
# Non-regression test for #2933.
X = np.random.RandomState(21).randn(10, 3)
y = np.random.RandomState(12).randn(10)
for svr in [svm.SVR(kernel='linear'), svm.NuSVR(kernel='linear'),
svm.LinearSVR()]:
svr.fit(X, y)
assert_array_almost_equal(svr.predict(X),
np.dot(X, svr.coef_.ravel()) + svr.intercept_)
def test_linear_svc_intercept_scaling():
# Test that the right error message is thrown when intercept_scaling <= 0
for i in [-1, 0]:
lsvc = svm.LinearSVC(intercept_scaling=i)
msg = ('Intercept scaling is %r but needs to be greater than 0.'
' To disable fitting an intercept,'
' set fit_intercept=False.' % lsvc.intercept_scaling)
assert_raise_message(ValueError, msg, lsvc.fit, X, Y)
def test_lsvc_intercept_scaling_zero():
# Test that intercept_scaling is ignored when fit_intercept is False
lsvc = svm.LinearSVC(fit_intercept=False)
lsvc.fit(X, Y)
assert_equal(lsvc.intercept_, 0.)
def test_hasattr_predict_proba():
# Method must be (un)available before or after fit, switched by
# `probability` param
G = svm.SVC(probability=True)
assert_true(hasattr(G, 'predict_proba'))
G.fit(iris.data, iris.target)
assert_true(hasattr(G, 'predict_proba'))
G = svm.SVC(probability=False)
assert_false(hasattr(G, 'predict_proba'))
G.fit(iris.data, iris.target)
assert_false(hasattr(G, 'predict_proba'))
# Switching to `probability=True` after fitting should make
# predict_proba available, but calling it must not work:
G.probability = True
assert_true(hasattr(G, 'predict_proba'))
msg = "predict_proba is not available when fitted with probability=False"
assert_raise_message(NotFittedError, msg, G.predict_proba, iris.data)
|
bsd-3-clause
|
vighneshbirodkar/scikit-image
|
skimage/filters/_gabor.py
|
23
|
6926
|
import numpy as np
from scipy import ndimage as ndi
from .._shared.utils import assert_nD
__all__ = ['gabor_kernel', 'gabor']
def _sigma_prefactor(bandwidth):
b = bandwidth
# See http://www.cs.rug.nl/~imaging/simplecell.html
return 1.0 / np.pi * np.sqrt(np.log(2) / 2.0) * \
(2.0 ** b + 1) / (2.0 ** b - 1)
def gabor_kernel(frequency, theta=0, bandwidth=1, sigma_x=None, sigma_y=None,
n_stds=3, offset=0):
"""Return complex 2D Gabor filter kernel.
Gabor kernel is a Gaussian kernel modulated by a complex harmonic function.
Harmonic function consists of an imaginary sine function and a real
cosine function. Spatial frequency is inversely proportional to the
wavelength of the harmonic and to the standard deviation of a Gaussian
kernel. The bandwidth is also inversely proportional to the standard
deviation.
Parameters
----------
frequency : float
Spatial frequency of the harmonic function. Specified in pixels.
theta : float, optional
Orientation in radians. If 0, the harmonic is in the x-direction.
bandwidth : float, optional
The bandwidth captured by the filter. For fixed bandwidth, `sigma_x`
and `sigma_y` will decrease with increasing frequency. This value is
ignored if `sigma_x` and `sigma_y` are set by the user.
sigma_x, sigma_y : float, optional
Standard deviation in x- and y-directions. These directions apply to
the kernel *before* rotation. If `theta = pi/2`, then the kernel is
rotated 90 degrees so that `sigma_x` controls the *vertical* direction.
n_stds : scalar, optional
The linear size of the kernel is n_stds (3 by default) standard
deviations
offset : float, optional
Phase offset of harmonic function in radians.
Returns
-------
g : complex array
Complex filter kernel.
References
----------
.. [1] http://en.wikipedia.org/wiki/Gabor_filter
.. [2] http://mplab.ucsd.edu/tutorials/gabor.pdf
Examples
--------
>>> from skimage.filters import gabor_kernel
>>> from skimage import io
>>> from matplotlib import pyplot as plt # doctest: +SKIP
>>> gk = gabor_kernel(frequency=0.2)
>>> plt.figure() # doctest: +SKIP
>>> io.imshow(gk.real) # doctest: +SKIP
>>> io.show() # doctest: +SKIP
>>> # more ripples (equivalent to increasing the size of the
>>> # Gaussian spread)
>>> gk = gabor_kernel(frequency=0.2, bandwidth=0.1)
>>> plt.figure() # doctest: +SKIP
>>> io.imshow(gk.real) # doctest: +SKIP
>>> io.show() # doctest: +SKIP
"""
if sigma_x is None:
sigma_x = _sigma_prefactor(bandwidth) / frequency
if sigma_y is None:
sigma_y = _sigma_prefactor(bandwidth) / frequency
x0 = np.ceil(max(np.abs(n_stds * sigma_x * np.cos(theta)),
np.abs(n_stds * sigma_y * np.sin(theta)), 1))
y0 = np.ceil(max(np.abs(n_stds * sigma_y * np.cos(theta)),
np.abs(n_stds * sigma_x * np.sin(theta)), 1))
y, x = np.mgrid[-y0:y0 + 1, -x0:x0 + 1]
rotx = x * np.cos(theta) + y * np.sin(theta)
roty = -x * np.sin(theta) + y * np.cos(theta)
g = np.zeros(y.shape, dtype=np.complex)
g[:] = np.exp(-0.5 * (rotx ** 2 / sigma_x ** 2 + roty ** 2 / sigma_y ** 2))
g /= 2 * np.pi * sigma_x * sigma_y
g *= np.exp(1j * (2 * np.pi * frequency * rotx + offset))
return g
def gabor(image, frequency, theta=0, bandwidth=1, sigma_x=None,
sigma_y=None, n_stds=3, offset=0, mode='reflect', cval=0):
"""Return real and imaginary responses to Gabor filter.
The real and imaginary parts of the Gabor filter kernel are applied to the
image and the response is returned as a pair of arrays.
Gabor filter is a linear filter with a Gaussian kernel which is modulated
by a sinusoidal plane wave. Frequency and orientation representations of
the Gabor filter are similar to those of the human visual system.
Gabor filter banks are commonly used in computer vision and image
processing. They are especially suitable for edge detection and texture
classification.
Parameters
----------
image : 2-D array
Input image.
frequency : float
Spatial frequency of the harmonic function. Specified in pixels.
theta : float, optional
Orientation in radians. If 0, the harmonic is in the x-direction.
bandwidth : float, optional
The bandwidth captured by the filter. For fixed bandwidth, `sigma_x`
and `sigma_y` will decrease with increasing frequency. This value is
ignored if `sigma_x` and `sigma_y` are set by the user.
sigma_x, sigma_y : float, optional
Standard deviation in x- and y-directions. These directions apply to
the kernel *before* rotation. If `theta = pi/2`, then the kernel is
rotated 90 degrees so that `sigma_x` controls the *vertical* direction.
n_stds : scalar, optional
The linear size of the kernel is n_stds (3 by default) standard
deviations.
offset : float, optional
Phase offset of harmonic function in radians.
mode : {'constant', 'nearest', 'reflect', 'mirror', 'wrap'}, optional
Mode used to convolve image with a kernel, passed to `ndi.convolve`
cval : scalar, optional
Value to fill past edges of input if `mode` of convolution is
'constant'. The parameter is passed to `ndi.convolve`.
Returns
-------
real, imag : arrays
Filtered images using the real and imaginary parts of the Gabor filter
kernel. Images are of the same dimensions as the input one.
References
----------
.. [1] http://en.wikipedia.org/wiki/Gabor_filter
.. [2] http://mplab.ucsd.edu/tutorials/gabor.pdf
Examples
--------
>>> from skimage.filters import gabor
>>> from skimage import data, io
>>> from matplotlib import pyplot as plt # doctest: +SKIP
>>> image = data.coins()
>>> # detecting edges in a coin image
>>> filt_real, filt_imag = gabor(image, frequency=0.6)
>>> plt.figure() # doctest: +SKIP
>>> io.imshow(filt_real) # doctest: +SKIP
>>> io.show() # doctest: +SKIP
>>> # less sensitivity to finer details with the lower frequency kernel
>>> filt_real, filt_imag = gabor(image, frequency=0.1)
>>> plt.figure() # doctest: +SKIP
>>> io.imshow(filt_real) # doctest: +SKIP
>>> io.show() # doctest: +SKIP
"""
assert_nD(image, 2)
g = gabor_kernel(frequency, theta, bandwidth, sigma_x, sigma_y, n_stds,
offset)
filtered_real = ndi.convolve(image, np.real(g), mode=mode, cval=cval)
filtered_imag = ndi.convolve(image, np.imag(g), mode=mode, cval=cval)
return filtered_real, filtered_imag
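if __name__ == '__main__':
    # Illustrative filter-bank sketch (an assumption, not part of the original
    # module): Gabor responses at several orientations for one frequency,
    # reduced to a magnitude image per orientation.
    from skimage import data
    image = data.coins().astype(float)
    magnitudes = []
    for theta in (0, np.pi / 4, np.pi / 2, 3 * np.pi / 4):
        filt_real, filt_imag = gabor(image, frequency=0.2, theta=theta)
        magnitudes.append(np.sqrt(filt_real ** 2 + filt_imag ** 2))
    print([m.mean() for m in magnitudes])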
|
bsd-3-clause
|
cbertinato/pandas
|
pandas/tests/test_optional_dependency.py
|
1
|
1460
|
import sys
import types
import pytest
from pandas.compat._optional import VERSIONS, import_optional_dependency
import pandas.util.testing as tm
def test_import_optional():
match = "Missing .*notapackage.* pip .* conda .* notapackage"
with pytest.raises(ImportError, match=match):
import_optional_dependency("notapackage")
result = import_optional_dependency("notapackage", raise_on_missing=False)
assert result is None
def test_xlrd_version_fallback():
pytest.importorskip('xlrd')
import_optional_dependency("xlrd")
def test_bad_version():
name = 'fakemodule'
module = types.ModuleType(name)
module.__version__ = "0.9.0"
sys.modules[name] = module
VERSIONS[name] = '1.0.0'
match = "Pandas requires .*1.0.0.* of .fakemodule.*'0.9.0'"
with pytest.raises(ImportError, match=match):
import_optional_dependency("fakemodule")
with tm.assert_produces_warning(UserWarning):
result = import_optional_dependency("fakemodule", on_version="warn")
assert result is None
module.__version__ = "1.0.0" # exact match is OK
result = import_optional_dependency("fakemodule")
assert result is module
def test_no_version_raises():
name = 'fakemodule'
module = types.ModuleType(name)
sys.modules[name] = module
VERSIONS[name] = '1.0.0'
with pytest.raises(ImportError, match="Can't determine .* fakemodule"):
import_optional_dependency(name)
|
bsd-3-clause
|
jvbalen/catchy
|
base_features.py
|
1
|
5090
|
from __future__ import division, print_function
import os
import numpy as np
import pandas as pd
import sys
import librosa
import vamp
import utils
""" This module provides an interface to several existing audio feature time
series extractors.
Requires librosa to be installed; the Vamp plug-ins are optional.
"""
def compute_and_write(audio_dir, data_dir, features=None):
"""Compute frame-based features for all audio files in a folder.
Args:
audio_dir (str): where to find audio files
data_dir (str): where to write features
features (dict): dictionary with feature extraction functions, indexed
by feature name.
Feature extraction functions should return a 1d-array of
frame times and a 2d-array of feature frames.
The feature name will be used as the subdirectory to
which feature CSVs are written.
"""
if features is None:
features = {'mfcc': get_mfcc,
'hpcp': get_hpcp, 'melody': get_melody,
'beats': get_beats, 'onsets': get_onsets}
filenames = os.listdir(audio_dir)
for filename in filenames:
if filename.endswith('.wav') or filename.endswith('.mp3'):
print("Computing features for file {}...".format(filename))
x, sr = librosa.load(os.path.join(audio_dir, filename), mono=True)
for feature in features:
func = features[feature]
t, X = func(x, sr)
track_id = filename.split('.')[-2]
utils.write_feature([t, X], [data_dir, feature, track_id])
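# Illustrative sketch (an assumption, not part of the original module): a
# custom extractor following the (frame times, feature frames) contract
# described above, assuming librosa >= 0.7 where `feature.rms` exists.
# It could be passed to compute_and_write via features={'rms': get_rms}.
def get_rms(x, sr, hop_length=512):
    """Compute frame-wise RMS energy, returning frame times and values."""
    rms = librosa.feature.rms(y=x, hop_length=hop_length)
    t = librosa.frames_to_time(np.arange(rms.shape[1]), sr=sr,
                               hop_length=hop_length)
    return t, rms.T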
def get_mfcc(x, sr, n_mfcc=20):
"""Compute MFCC features from raw audio, using librosa.
Librosa must be installed.
Args:
x (1d-array) audio signal, mono
sr (int): sample rate
n_mfcc (int): number of coefficients to retain
Returns:
2d-array: MFCC features
"""
mfcc_all = librosa.feature.mfcc(x, sr)
n_coeff, n_frames = mfcc_all.shape
t = librosa.frames_to_time(np.arange(n_frames), sr=sr, hop_length=512)
return t, mfcc_all[:n_mfcc].T
def get_hpcp(x, sr, n_bins=12, f_min=55, f_ref=440.0, min_magn=-100):
"""Compute HPCP features from raw audio using the HPCP Vamp plugin.
Vamp, vamp python module and plug-in must be installed.
Args:
x (1d-array): audio signal, mono
sr (int): sample rate
n_bins (int): number of chroma bins
f_min (float): minimum frequency
f_ref (float): A4 tuning frequency
min_magn (float): minimum magnitude for peak detection, in dB
Returns:
1d-array: time vector
2d-array: HPCP features
"""
plugin = 'vamp-hpcp-mtg:MTG-HPCP'
params = {'LF': f_min, 'nbins': n_bins, 'reff0': f_ref,
'peakMagThreshold': min_magn}
data = vamp.collect(x, sr, plugin, parameters=params)
vamp_hop, hpcp = data['matrix']
t = float(vamp_hop) * (8 + np.arange(len(hpcp)))
return t, hpcp
def get_melody(x, sr, f_min=55, f_max=1760, min_salience=0.0, unvoiced=True):
"""Extract main melody from raw audio using the Melodia Vamp plugin.
Vamp, vamp python module and plug-in must be installed.
Args:
x (np.array): audio signal, mono
sr (int): sample rate
f_min (float): minimum frequency
f_max (float): maximum frequency
Return:
1d-array: time vector
1d-array: main melody (in cents)
"""
plugin = 'mtg-melodia:melodia'
params = {'minfqr': f_min, 'maxfqr': f_max,
'minpeaksalience': min_salience}
data = vamp.collect(x, sr, plugin, parameters=params)
vamp_hop, f0 = data['vector']
if unvoiced:
f0 = abs(f0)
f0[f0 == 0] = None
else:
f0[f0 <= 0] = None
hz2midi = lambda f: 69 + 12 * np.log2(abs(f) / 440)
melody = hz2midi(f0)
melody = melody[:, np.newaxis]
t = float(vamp_hop) * (8 + np.arange(len(melody)))
return t, melody
def get_beats(x, sr):
"""Track beats in an audio excerpt, using librosa's standard
beat tracker.
Args:
x (1d-array) audio signal, mono
sr (int): sample rate
Returns:
2d-array: beat times and beat intervals
"""
_, beat_frames = librosa.beat.beat_track(x, sr=sr)
beat_times = librosa.frames_to_time(beat_frames, sr=sr)
t = beat_times[:-1,]
beat_intervals = np.diff(beat_times)
return t, beat_intervals
def get_onsets(x, sr):
"""Compute inter-onset intervals (IOI) from audio, using librosa.
Args:
x (1d-array) audio signal, mono
sr (int): sample rate
Returns:
2d-array: onset times and IOI
"""
onset_frames = librosa.onset.onset_detect(x, sr=sr)
onset_times = librosa.frames_to_time(onset_frames, sr=sr)
t = onset_times[:-1,]
onset_intervals = np.diff(onset_times)
return t, onset_intervals
if __name__ == '__main__':
compute_and_write(sys.argv[1], sys.argv[2])
|
mit
|
hammerlab/immuno
|
immuno/mhc_random.py
|
1
|
1775
|
# Copyright (c) 2014. Mount Sinai School of Medicine
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import random
import pandas as pd
from peptide_binding_measure import IC50_FIELD_NAME, PERCENTILE_RANK_FIELD_NAME
def generate_scored_epitopes(mutated_regions, alleles = ['HLA-A*02:01']):
records = []
    # if we're not running the MHC prediction then we have to manually
    # extract 9mer substrings
for _, row in mutated_regions.iterrows():
seq = row.SourceSequence
epitope_length = 9
for i in xrange(len(seq) - epitope_length + 1):
for allele in alleles:
record = {}
record['Epitope'] = seq[i:i+epitope_length]
record['EpitopeStart'] = i
record['EpitopeEnd'] = i + epitope_length
record['SourceSequence'] = seq
record[PERCENTILE_RANK_FIELD_NAME] = random.randint(0,99)
record[IC50_FIELD_NAME] = random.random() * 10000.0
record['Allele'] = allele
for k, v in row.iteritems():
if k not in record:
record[k] = v
records.append(record)
scored_epitopes = pd.DataFrame.from_records(records)
return scored_epitopes
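# Minimal usage sketch (illustrative only; the peptide sequence is made up and
# only the SourceSequence column is required by the loop above):
#
#   mutated_regions = pd.DataFrame([{'SourceSequence': 'SIINFEKLSIINFEKL'}])
#   scored = generate_scored_epitopes(mutated_regions)
#   # one row per (9mer substring, allele), with random IC50 / percentile rank values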
|
apache-2.0
|
RayMick/scikit-learn
|
benchmarks/bench_plot_approximate_neighbors.py
|
244
|
6011
|
"""
Benchmark for approximate nearest neighbor search using
locality sensitive hashing forest.
There are two types of benchmarks.
First, the accuracy of LSHForest queries is measured for various
hyper-parameters and index sizes.
Second, the speed-up of LSHForest queries relative to brute-force exact
nearest neighbor search is measured for the same settings. In general,
the speed-up increases as the index size grows.
"""
from __future__ import division
import numpy as np
from tempfile import gettempdir
from time import time
from sklearn.neighbors import NearestNeighbors
from sklearn.neighbors.approximate import LSHForest
from sklearn.datasets import make_blobs
from sklearn.externals.joblib import Memory
m = Memory(cachedir=gettempdir())
@m.cache()
def make_data(n_samples, n_features, n_queries, random_state=0):
"""Create index and query data."""
print('Generating random blob-ish data')
X, _ = make_blobs(n_samples=n_samples + n_queries,
n_features=n_features, centers=100,
shuffle=True, random_state=random_state)
# Keep the last samples as held out query vectors: note since we used
# shuffle=True we have ensured that index and query vectors are
# samples from the same distribution (a mixture of 100 gaussians in this
# case)
return X[:n_samples], X[n_samples:]
def calc_exact_neighbors(X, queries, n_queries, n_neighbors):
"""Measures average times for exact neighbor queries."""
print ('Building NearestNeighbors for %d samples in %d dimensions' %
(X.shape[0], X.shape[1]))
nbrs = NearestNeighbors(algorithm='brute', metric='cosine').fit(X)
average_time = 0
t0 = time()
neighbors = nbrs.kneighbors(queries, n_neighbors=n_neighbors,
return_distance=False)
average_time = (time() - t0) / n_queries
return neighbors, average_time
def calc_accuracy(X, queries, n_queries, n_neighbors, exact_neighbors,
average_time_exact, **lshf_params):
"""Calculates accuracy and the speed up of LSHForest."""
print('Building LSHForest for %d samples in %d dimensions' %
(X.shape[0], X.shape[1]))
lshf = LSHForest(**lshf_params)
t0 = time()
lshf.fit(X)
lshf_build_time = time() - t0
print('Done in %0.3fs' % lshf_build_time)
accuracy = 0
t0 = time()
approx_neighbors = lshf.kneighbors(queries, n_neighbors=n_neighbors,
return_distance=False)
average_time_approx = (time() - t0) / n_queries
for i in range(len(queries)):
accuracy += np.in1d(approx_neighbors[i], exact_neighbors[i]).mean()
accuracy /= n_queries
speed_up = average_time_exact / average_time_approx
print('Average time for lshf neighbor queries: %0.3fs' %
average_time_approx)
print ('Average time for exact neighbor queries: %0.3fs' %
average_time_exact)
print ('Average Accuracy : %0.2f' % accuracy)
print ('Speed up: %0.1fx' % speed_up)
return speed_up, accuracy
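# Sketch of the accuracy metric used above (illustrative values):
#
#   approx = np.array([3, 7, 9])     # neighbors returned by LSHForest for one query
#   exact = np.array([3, 9, 11])     # ground-truth neighbors for the same query
#   np.in1d(approx, exact).mean()    # -> 2/3, the per-query overlap that is averaged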
if __name__ == '__main__':
import matplotlib.pyplot as plt
# Initialize index sizes
n_samples = [int(1e3), int(1e4), int(1e5), int(1e6)]
n_features = int(1e2)
n_queries = 100
n_neighbors = 10
X_index, X_query = make_data(np.max(n_samples), n_features, n_queries,
random_state=0)
params_list = [{'n_estimators': 3, 'n_candidates': 50},
{'n_estimators': 5, 'n_candidates': 70},
{'n_estimators': 10, 'n_candidates': 100}]
accuracies = np.zeros((len(n_samples), len(params_list)), dtype=float)
speed_ups = np.zeros((len(n_samples), len(params_list)), dtype=float)
for i, sample_size in enumerate(n_samples):
print ('==========================================================')
print ('Sample size: %i' % sample_size)
print ('------------------------')
exact_neighbors, average_time_exact = calc_exact_neighbors(
X_index[:sample_size], X_query, n_queries, n_neighbors)
for j, params in enumerate(params_list):
print ('LSHF parameters: n_estimators = %i, n_candidates = %i' %
(params['n_estimators'], params['n_candidates']))
speed_ups[i, j], accuracies[i, j] = calc_accuracy(
X_index[:sample_size], X_query, n_queries, n_neighbors,
exact_neighbors, average_time_exact, random_state=0, **params)
print ('')
print ('==========================================================')
# Set labels for LSHForest parameters
colors = ['c', 'm', 'y']
legend_rects = [plt.Rectangle((0, 0), 0.1, 0.1, fc=color)
for color in colors]
legend_labels = ['n_estimators={n_estimators}, '
'n_candidates={n_candidates}'.format(**p)
for p in params_list]
# Plot precision
plt.figure()
plt.legend(legend_rects, legend_labels,
loc='upper left')
for i in range(len(params_list)):
plt.scatter(n_samples, accuracies[:, i], c=colors[i])
plt.plot(n_samples, accuracies[:, i], c=colors[i])
plt.ylim([0, 1.3])
plt.xlim(np.min(n_samples), np.max(n_samples))
plt.semilogx()
plt.ylabel("Precision@10")
plt.xlabel("Index size")
plt.grid(which='both')
plt.title("Precision of first 10 neighbors with index size")
# Plot speed up
plt.figure()
plt.legend(legend_rects, legend_labels,
loc='upper left')
for i in range(len(params_list)):
plt.scatter(n_samples, speed_ups[:, i], c=colors[i])
plt.plot(n_samples, speed_ups[:, i], c=colors[i])
plt.ylim(0, np.max(speed_ups))
plt.xlim(np.min(n_samples), np.max(n_samples))
plt.semilogx()
plt.ylabel("Speed up")
plt.xlabel("Index size")
plt.grid(which='both')
plt.title("Relationship between Speed up and index size")
plt.show()
|
bsd-3-clause
|
talonchandler/dipsim
|
paper/figures/triple-arm.py
|
1
|
6766
|
from dipsim import multiframe, util
import numpy as np
import matplotlib.pyplot as plt
import matplotlib
import matplotlib.patches as patches
import os; import time; start = time.time(); print('Running...')
import matplotlib.gridspec as gridspec
# Main input parameters
col_labels = ['Geometry\n(NA${}_{\\textrm{upper}}$ = 0.6, NA${}_{\\textrm{lower}}$ = 1.1)', r'$\sigma_{\Omega}$ [sr]', 'Median$\{\sigma_{\Omega}\}$ [sr]', 'MAD$\{\sigma_{\Omega}\}$ [sr]']
fig_labels = ['a)', 'b)', 'c)', 'd)']
n_pts = 1000 # Points on sphere for the 2D parameter sweep
n_pts_sphere = 50000 # Points on sphere for the single-geometry panels
n_grid_pts = 25
inch_fig = 5
dpi = 300
# Setup figure and axes
fig = plt.figure(figsize=(2.2*inch_fig, 2*inch_fig))
gs0 = gridspec.GridSpec(2, 2, wspace=0.4, hspace=0.1)
gs00 = gridspec.GridSpecFromSubplotSpec(1, 2, subplot_spec=gs0[0,0], width_ratios=[1, 0.05], wspace=0.1)
gs10 = gridspec.GridSpecFromSubplotSpec(1, 2, subplot_spec=gs0[1,0], width_ratios=[1, 0.05], wspace=0.1)
gs01 = gridspec.GridSpecFromSubplotSpec(1, 2, subplot_spec=gs0[0,1], width_ratios=[1, 0.05], wspace=0.1)
gs11 = gridspec.GridSpecFromSubplotSpec(1, 2, subplot_spec=gs0[1,1], width_ratios=[1, 0.05], wspace=0.1)
ax0 = plt.subplot(gs00[0])
ax1 = plt.subplot(gs01[0])
ax2 = plt.subplot(gs10[0])
ax3 = plt.subplot(gs11[0])
cax0 = plt.subplot(gs00[1]); cax0.axis('off');
cax1 = plt.subplot(gs01[1])
cax2 = plt.subplot(gs10[1])
cax3 = plt.subplot(gs11[1])
for ax, col_label, fig_label in zip([ax0, ax1, ax2, ax3], col_labels, fig_labels):
ax.annotate(col_label, xy=(0,0), xytext=(0.5, 1.05), textcoords='axes fraction',
va='bottom', ha='center', fontsize=14, annotation_clip=False)
ax.annotate(fig_label, xy=(0,0), xytext=(0, 1.05), textcoords='axes fraction',
va='bottom', ha='center', fontsize=14, annotation_clip=False)
for ax in [ax0, ax1, ax2, ax3]:
ax.tick_params(axis='both', labelsize=14)
for cax in [cax1, cax2, cax3]:
cax.tick_params(axis='both', labelsize=14)
# Calculate a list of points to sample in region
n = 1.33
x, step = np.linspace(0, 1.33, num=n_grid_pts, retstep=True, endpoint=False)
x += step/2
y, step = np.linspace(0, 0.94, num=n_grid_pts, retstep=True, endpoint=False)
y += step/2
pts = np.array(np.meshgrid(x, y)).reshape(2, n_grid_pts**2).T
def is_feasible(pt):
return True
pts_list = [pt for pt in pts if is_feasible(pt)]
pts = np.array(pts_list).T
# Calculate med and mad for each point
def calc_stats(param):
na_upper = param[0]
na_lower = param[1]
exp = multiframe.MultiFrameMicroscope(ill_thetas=[np.pi/4, -np.pi/4, np.pi/4, -np.pi/4], det_thetas=[-np.pi/4, np.pi/4, np.pi, np.pi],
ill_nas=4*[0], det_nas=[na_upper, na_upper, na_lower, na_lower],
ill_types=4*['sheet'], det_types=4*['lens'],
colors=['(1,0,0)','(0,0,1)','(1,0,0)','(0,0,1)'], n_frames=4,
n_pts=n_pts, max_photons=500, n_samp=1.33)
exp.calc_estimation_stats()
data = exp.sa_uncert
med = np.median(data)
return med, np.median(np.abs(data - med))
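# The second value returned above is the median absolute deviation (MAD),
# median(|data - median(data)|), a robust spread measure reported alongside the
# median of the solid-angle uncertainty.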
med = []
mad = []
for i, pt in enumerate(pts.T):
print('Calculating microscope '+str(i+1)+'/'+str(pts.shape[1]))
x = calc_stats(pt)
med.append(x[0])
mad.append(x[1])
# Plot 2D regions
def plot_2d_regions(ax, cax, pts, data, special_pt=(-1,-1)):
#ax.plot(NA_ill, NA_det, 'k-', zorder=11)
# Set y ticks
from matplotlib.ticker import FuncFormatter, FixedLocator
ax.set_yticks([0, 0.2, 0.4, 0.6, 0.8, 0.94])
ax.set_yticklabels(['0', '0.2', '0.4', '0.6', '0.8', '0.94'])
ax.set_xticks([0, 0.25, 0.5, 0.75, 1.0, 1.33])
ax.set_xticklabels(['0', '0.25', '0.5', '0.75', '1.0', '1.33'])
# Annotation
def my_annotate(ax, annotation, xy, fontsize=9, rotation=0):
ax.annotate(annotation, xy=(0,0), xytext=xy, textcoords='axes fraction',
va='center', ha='center', fontsize=fontsize,
annotation_clip=False, rotation=rotation, zorder=13)
my_annotate(ax, 'NA${}_{\\textrm{lower}}$', (0.5, -0.15), fontsize=14)
my_annotate(ax, 'NA${}_{\\textrm{upper}}$', (-0.18, 0.5), fontsize=14, rotation=90)
# Calculate colors
color_map='coolwarm'
color_norm='log'
color_min=1e-5
color_max=1e-1
if color_norm == 'linear':
norm = matplotlib.colors.Normalize(vmin=color_min, vmax=color_max)
elif color_norm == 'log':
norm = matplotlib.colors.LogNorm(vmin=color_min, vmax=color_max)
elif color_norm == 'linlog':
norm = matplotlib.colors.SymLogNorm(linthresh=linthresh, vmin=-color_max, vmax=color_max)
elif color_norm == 'power':
norm = matplotlib.colors.PowerNorm(gamma=gamma, vmin=data.min(), vmax=data.max())
norm_data = norm(data).data
norm_data2 = np.expand_dims(norm_data, 1)
cmap = matplotlib.cm.get_cmap(color_map)
colors = np.apply_along_axis(cmap, 1, norm_data2)
# Plot scatter for colorbar
sc = ax.scatter(pts[0,:], pts[1,:], c=data, s=0, cmap=cmap, norm=norm,
marker='s', lw=0)
ax.plot(special_pt[0], special_pt[1], 'kx', markersize=5, zorder=15)
# Plot patches
width = n/(n_grid_pts)
height = width
for i, (pt, c) in enumerate(zip(pts_list, colors)):
ax.add_patch(patches.Rectangle((pt[0] - width/2, pt[1] - height/2), width, height, facecolor=c, edgecolor=c))
fig.colorbar(sc, cax=cax, orientation='vertical')
ax.set(xlim=[0, 1.33], ylim=[0, 0.94])
# Plot first two columns
na_upper = 0.6
na_lower = 1.1
exp = multiframe.MultiFrameMicroscope(ill_thetas=[np.pi/4, -np.pi/4, np.pi/4, -np.pi/4], det_thetas=[-np.pi/4, np.pi/4, np.pi, np.pi],
ill_nas=4*[0], det_nas=[na_upper, na_upper, na_lower, na_lower],
ill_types=4*['sheet'], det_types=4*['lens'],
colors=['(1,0,0)','(0,0,1)','(1,0,0)','(0,0,1)'], n_frames=4,
n_pts=n_pts_sphere, max_photons=500, n_samp=1.33)
exp.calc_estimation_stats()
# Make scene string
scene_string = exp.scene_string()
util.draw_scene(scene_string, my_ax=ax0, dpi=dpi)
util.plot_sphere(directions=exp.directions, data=exp.sa_uncert,
color_norm='log', linthresh=1e-4,
color_min=5e-4, color_max=2e-3,
my_ax=ax1, my_cax=cax1)
# Plot the last two columns
plot_2d_regions(ax2, cax2, pts, med, special_pt=(na_lower, na_upper))
plot_2d_regions(ax3, cax3, pts, mad, special_pt=(na_lower, na_upper))
# Label axes and save
print('Saving final figure.')
fig.savefig('../paper/triple-arm.pdf', dpi=250)
print('Total time: '+str(np.round(time.time() - start, 2)))
os.system('say "done"')
|
mit
|
alexsavio/scikit-learn
|
sklearn/gaussian_process/gaussian_process.py
|
6
|
35051
|
# -*- coding: utf-8 -*-
# Author: Vincent Dubourg <vincent.dubourg@gmail.com>
# (mostly translation, see implementation details)
# License: BSD 3 clause
from __future__ import print_function
import numpy as np
from scipy import linalg, optimize
from ..base import BaseEstimator, RegressorMixin
from ..metrics.pairwise import manhattan_distances
from ..utils import check_random_state, check_array, check_X_y
from ..utils.validation import check_is_fitted
from . import regression_models as regression
from . import correlation_models as correlation
from ..utils import deprecated
MACHINE_EPSILON = np.finfo(np.double).eps
@deprecated("l1_cross_distances was deprecated in version 0.18 "
"and will be removed in 0.20.")
def l1_cross_distances(X):
"""
Computes the nonzero componentwise L1 cross-distances between the vectors
in X.
Parameters
----------
X: array_like
An array with shape (n_samples, n_features)
Returns
-------
D: array with shape (n_samples * (n_samples - 1) / 2, n_features)
The array of componentwise L1 cross-distances.
    ij: arrays with shape (n_samples * (n_samples - 1) / 2, 2)
        The indices i and j of the vectors in X associated with the cross-
        distances in D: D[k] = np.abs(X[ij[k, 0]] - X[ij[k, 1]]).
"""
X = check_array(X)
n_samples, n_features = X.shape
n_nonzero_cross_dist = n_samples * (n_samples - 1) // 2
ij = np.zeros((n_nonzero_cross_dist, 2), dtype=np.int)
D = np.zeros((n_nonzero_cross_dist, n_features))
ll_1 = 0
for k in range(n_samples - 1):
ll_0 = ll_1
ll_1 = ll_0 + n_samples - k - 1
ij[ll_0:ll_1, 0] = k
ij[ll_0:ll_1, 1] = np.arange(k + 1, n_samples)
D[ll_0:ll_1] = np.abs(X[k] - X[(k + 1):n_samples])
return D, ij
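# Illustrative example of the output (not part of the original module):
#
#   >>> X = np.array([[0.], [1.], [3.]])
#   >>> D, ij = l1_cross_distances(X)
#   >>> D.ravel()   # |X[i] - X[j]| for all pairs i < j
#   array([ 1.,  3.,  2.])
#   >>> ij          # the corresponding (i, j) index pairs
#   array([[0, 1],
#          [0, 2],
#          [1, 2]])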
@deprecated("GaussianProcess was deprecated in version 0.18 and will be "
"removed in 0.20. Use the GaussianProcessRegressor instead.")
class GaussianProcess(BaseEstimator, RegressorMixin):
"""The legacy Gaussian Process model class.
Note that this class was deprecated in version 0.18 and will be
removed in 0.20. Use the GaussianProcessRegressor instead.
Read more in the :ref:`User Guide <gaussian_process>`.
Parameters
----------
regr : string or callable, optional
A regression function returning an array of outputs of the linear
regression functional basis. The number of observations n_samples
should be greater than the size p of this basis.
Default assumes a simple constant regression trend.
Available built-in regression models are::
'constant', 'linear', 'quadratic'
corr : string or callable, optional
A stationary autocorrelation function returning the autocorrelation
between two points x and x'.
Default assumes a squared-exponential autocorrelation model.
Built-in correlation models are::
'absolute_exponential', 'squared_exponential',
'generalized_exponential', 'cubic', 'linear'
beta0 : double array_like, optional
The regression weight vector to perform Ordinary Kriging (OK).
Default assumes Universal Kriging (UK) so that the vector beta of
regression weights is estimated using the maximum likelihood
principle.
storage_mode : string, optional
A string specifying whether the Cholesky decomposition of the
correlation matrix should be stored in the class (storage_mode =
'full') or not (storage_mode = 'light').
Default assumes storage_mode = 'full', so that the
Cholesky decomposition of the correlation matrix is stored.
This might be a useful parameter when one is not interested in the
        MSE and only plans to estimate the BLUP, for which the correlation
matrix is not required.
verbose : boolean, optional
A boolean specifying the verbose level.
Default is verbose = False.
theta0 : double array_like, optional
An array with shape (n_features, ) or (1, ).
The parameters in the autocorrelation model.
If thetaL and thetaU are also specified, theta0 is considered as
the starting point for the maximum likelihood estimation of the
best set of parameters.
Default assumes isotropic autocorrelation model with theta0 = 1e-1.
thetaL : double array_like, optional
An array with shape matching theta0's.
Lower bound on the autocorrelation parameters for maximum
likelihood estimation.
Default is None, so that it skips maximum likelihood estimation and
it uses theta0.
thetaU : double array_like, optional
An array with shape matching theta0's.
Upper bound on the autocorrelation parameters for maximum
likelihood estimation.
Default is None, so that it skips maximum likelihood estimation and
it uses theta0.
normalize : boolean, optional
Input X and observations y are centered and reduced wrt
means and standard deviations estimated from the n_samples
observations provided.
Default is normalize = True so that data is normalized to ease
maximum likelihood estimation.
nugget : double or ndarray, optional
Introduce a nugget effect to allow smooth predictions from noisy
data. If nugget is an ndarray, it must be the same length as the
number of data points used for the fit.
The nugget is added to the diagonal of the assumed training covariance;
in this way it acts as a Tikhonov regularization in the problem. In
the special case of the squared exponential correlation function, the
nugget mathematically represents the variance of the input values.
Default assumes a nugget close to machine precision for the sake of
robustness (nugget = 10. * MACHINE_EPSILON).
optimizer : string, optional
A string specifying the optimization algorithm to be used.
Default uses 'fmin_cobyla' algorithm from scipy.optimize.
Available optimizers are::
'fmin_cobyla', 'Welch'
        The 'Welch' optimizer is due to Welch et al., see reference [WBSWM1992]_.
        It consists of iterating over several one-dimensional optimizations
        instead of running one single multi-dimensional optimization.
random_start : int, optional
The number of times the Maximum Likelihood Estimation should be
performed from a random starting point.
The first MLE always uses the specified starting point (theta0),
the next starting points are picked at random according to an
exponential distribution (log-uniform on [thetaL, thetaU]).
Default does not use random starting point (random_start = 1).
random_state: integer or numpy.RandomState, optional
The generator used to shuffle the sequence of coordinates of theta in
the Welch optimizer. If an integer is given, it fixes the seed.
Defaults to the global numpy random number generator.
Attributes
----------
theta_ : array
Specified theta OR the best set of autocorrelation parameters (the \
sought maximizer of the reduced likelihood function).
reduced_likelihood_function_value_ : array
The optimal reduced likelihood function value.
Examples
--------
>>> import numpy as np
>>> from sklearn.gaussian_process import GaussianProcess
>>> X = np.array([[1., 3., 5., 6., 7., 8.]]).T
>>> y = (X * np.sin(X)).ravel()
>>> gp = GaussianProcess(theta0=0.1, thetaL=.001, thetaU=1.)
>>> gp.fit(X, y) # doctest: +ELLIPSIS
GaussianProcess(beta0=None...
...
Notes
-----
    The present implementation is based on a translation of the DACE
Matlab toolbox, see reference [NLNS2002]_.
References
----------
    .. [NLNS2002] `S.N. Lophaven, H.B. Nielsen and J. Sondergaard.
        DACE - A MATLAB Kriging Toolbox.` (2002)
http://imedea.uib-csic.es/master/cambioglobal/Modulo_V_cod101615/Lab/lab_maps/krigging/DACE-krigingsoft/dace/dace.pdf
.. [WBSWM1992] `W.J. Welch, R.J. Buck, J. Sacks, H.P. Wynn, T.J. Mitchell,
and M.D. Morris (1992). Screening, predicting, and computer
experiments. Technometrics, 34(1) 15--25.`
http://www.jstor.org/stable/1269548
"""
_regression_types = {
'constant': regression.constant,
'linear': regression.linear,
'quadratic': regression.quadratic}
_correlation_types = {
'absolute_exponential': correlation.absolute_exponential,
'squared_exponential': correlation.squared_exponential,
'generalized_exponential': correlation.generalized_exponential,
'cubic': correlation.cubic,
'linear': correlation.linear}
_optimizer_types = [
'fmin_cobyla',
'Welch']
def __init__(self, regr='constant', corr='squared_exponential', beta0=None,
storage_mode='full', verbose=False, theta0=1e-1,
thetaL=None, thetaU=None, optimizer='fmin_cobyla',
random_start=1, normalize=True,
nugget=10. * MACHINE_EPSILON, random_state=None):
self.regr = regr
self.corr = corr
self.beta0 = beta0
self.storage_mode = storage_mode
self.verbose = verbose
self.theta0 = theta0
self.thetaL = thetaL
self.thetaU = thetaU
self.normalize = normalize
self.nugget = nugget
self.optimizer = optimizer
self.random_start = random_start
self.random_state = random_state
def fit(self, X, y):
"""
The Gaussian Process model fitting method.
Parameters
----------
X : double array_like
An array with shape (n_samples, n_features) with the input at which
observations were made.
y : double array_like
An array with shape (n_samples, ) or shape (n_samples, n_targets)
with the observations of the output to be predicted.
Returns
-------
gp : self
A fitted Gaussian Process model object awaiting data to perform
predictions.
"""
# Run input checks
self._check_params()
self.random_state = check_random_state(self.random_state)
# Force data to 2D numpy.array
X, y = check_X_y(X, y, multi_output=True, y_numeric=True)
self.y_ndim_ = y.ndim
if y.ndim == 1:
y = y[:, np.newaxis]
# Check shapes of DOE & observations
n_samples, n_features = X.shape
_, n_targets = y.shape
# Run input checks
self._check_params(n_samples)
# Normalize data or don't
if self.normalize:
X_mean = np.mean(X, axis=0)
X_std = np.std(X, axis=0)
y_mean = np.mean(y, axis=0)
y_std = np.std(y, axis=0)
X_std[X_std == 0.] = 1.
y_std[y_std == 0.] = 1.
# center and scale X if necessary
X = (X - X_mean) / X_std
y = (y - y_mean) / y_std
else:
X_mean = np.zeros(1)
X_std = np.ones(1)
y_mean = np.zeros(1)
y_std = np.ones(1)
# Calculate matrix of distances D between samples
D, ij = l1_cross_distances(X)
if (np.min(np.sum(D, axis=1)) == 0.
and self.corr != correlation.pure_nugget):
raise Exception("Multiple input features cannot have the same"
" target value.")
# Regression matrix and parameters
F = self.regr(X)
n_samples_F = F.shape[0]
if F.ndim > 1:
p = F.shape[1]
else:
p = 1
if n_samples_F != n_samples:
raise Exception("Number of rows in F and X do not match. Most "
"likely something is going wrong with the "
"regression model.")
if p > n_samples_F:
raise Exception(("Ordinary least squares problem is undetermined "
"n_samples=%d must be greater than the "
"regression model size p=%d.") % (n_samples, p))
if self.beta0 is not None:
if self.beta0.shape[0] != p:
raise Exception("Shapes of beta0 and F do not match.")
# Set attributes
self.X = X
self.y = y
self.D = D
self.ij = ij
self.F = F
self.X_mean, self.X_std = X_mean, X_std
self.y_mean, self.y_std = y_mean, y_std
# Determine Gaussian Process model parameters
if self.thetaL is not None and self.thetaU is not None:
# Maximum Likelihood Estimation of the parameters
if self.verbose:
print("Performing Maximum Likelihood Estimation of the "
"autocorrelation parameters...")
self.theta_, self.reduced_likelihood_function_value_, par = \
self._arg_max_reduced_likelihood_function()
if np.isinf(self.reduced_likelihood_function_value_):
raise Exception("Bad parameter region. "
"Try increasing upper bound")
else:
# Given parameters
if self.verbose:
print("Given autocorrelation parameters. "
"Computing Gaussian Process model parameters...")
self.theta_ = self.theta0
self.reduced_likelihood_function_value_, par = \
self.reduced_likelihood_function()
if np.isinf(self.reduced_likelihood_function_value_):
raise Exception("Bad point. Try increasing theta0.")
self.beta = par['beta']
self.gamma = par['gamma']
self.sigma2 = par['sigma2']
self.C = par['C']
self.Ft = par['Ft']
self.G = par['G']
if self.storage_mode == 'light':
# Delete heavy data (it will be computed again if required)
# (it is required only when MSE is wanted in self.predict)
if self.verbose:
print("Light storage mode specified. "
"Flushing autocorrelation matrix...")
self.D = None
self.ij = None
self.F = None
self.C = None
self.Ft = None
self.G = None
return self
def predict(self, X, eval_MSE=False, batch_size=None):
"""
This function evaluates the Gaussian Process model at x.
Parameters
----------
X : array_like
An array with shape (n_eval, n_features) giving the point(s) at
which the prediction(s) should be made.
eval_MSE : boolean, optional
A boolean specifying whether the Mean Squared Error should be
evaluated or not.
            Default assumes eval_MSE = False and evaluates only the BLUP (mean
prediction).
batch_size : integer, optional
An integer giving the maximum number of points that can be
evaluated simultaneously (depending on the available memory).
Default is None so that all given points are evaluated at the same
time.
Returns
-------
        y : array_like, shape (n_eval, ) or (n_eval, n_targets)
An array with shape (n_eval, ) if the Gaussian Process was trained
on an array of shape (n_samples, ) or an array with shape
(n_eval, n_targets) if the Gaussian Process was trained on an array
of shape (n_samples, n_targets) with the Best Linear Unbiased
Prediction at x.
MSE : array_like, optional (if eval_MSE == True)
An array with shape (n_eval, ) or (n_eval, n_targets) as with y,
with the Mean Squared Error at x.
"""
check_is_fitted(self, "X")
# Check input shapes
X = check_array(X)
n_eval, _ = X.shape
n_samples, n_features = self.X.shape
n_samples_y, n_targets = self.y.shape
# Run input checks
self._check_params(n_samples)
if X.shape[1] != n_features:
raise ValueError(("The number of features in X (X.shape[1] = %d) "
"should match the number of features used "
"for fit() "
"which is %d.") % (X.shape[1], n_features))
if batch_size is None:
# No memory management
# (evaluates all given points in a single batch run)
# Normalize input
X = (X - self.X_mean) / self.X_std
# Initialize output
y = np.zeros(n_eval)
if eval_MSE:
MSE = np.zeros(n_eval)
# Get pairwise componentwise L1-distances to the input training set
dx = manhattan_distances(X, Y=self.X, sum_over_features=False)
# Get regression function and correlation
f = self.regr(X)
r = self.corr(self.theta_, dx).reshape(n_eval, n_samples)
# Scaled predictor
y_ = np.dot(f, self.beta) + np.dot(r, self.gamma)
# Predictor
y = (self.y_mean + self.y_std * y_).reshape(n_eval, n_targets)
if self.y_ndim_ == 1:
y = y.ravel()
# Mean Squared Error
if eval_MSE:
C = self.C
if C is None:
# Light storage mode (need to recompute C, F, Ft and G)
if self.verbose:
print("This GaussianProcess used 'light' storage mode "
"at instantiation. Need to recompute "
"autocorrelation matrix...")
reduced_likelihood_function_value, par = \
self.reduced_likelihood_function()
self.C = par['C']
self.Ft = par['Ft']
self.G = par['G']
rt = linalg.solve_triangular(self.C, r.T, lower=True)
if self.beta0 is None:
# Universal Kriging
u = linalg.solve_triangular(self.G.T,
np.dot(self.Ft.T, rt) - f.T,
lower=True)
else:
# Ordinary Kriging
u = np.zeros((n_targets, n_eval))
MSE = np.dot(self.sigma2.reshape(n_targets, 1),
(1. - (rt ** 2.).sum(axis=0)
+ (u ** 2.).sum(axis=0))[np.newaxis, :])
MSE = np.sqrt((MSE ** 2.).sum(axis=0) / n_targets)
# Mean Squared Error might be slightly negative depending on
# machine precision: force to zero!
MSE[MSE < 0.] = 0.
if self.y_ndim_ == 1:
MSE = MSE.ravel()
return y, MSE
else:
return y
else:
# Memory management
if type(batch_size) is not int or batch_size <= 0:
raise Exception("batch_size must be a positive integer")
if eval_MSE:
y, MSE = np.zeros(n_eval), np.zeros(n_eval)
for k in range(max(1, int(n_eval / batch_size))):
batch_from = k * batch_size
batch_to = min([(k + 1) * batch_size + 1, n_eval + 1])
y[batch_from:batch_to], MSE[batch_from:batch_to] = \
self.predict(X[batch_from:batch_to],
eval_MSE=eval_MSE, batch_size=None)
return y, MSE
else:
y = np.zeros(n_eval)
for k in range(max(1, int(n_eval / batch_size))):
batch_from = k * batch_size
batch_to = min([(k + 1) * batch_size + 1, n_eval + 1])
y[batch_from:batch_to] = \
self.predict(X[batch_from:batch_to],
eval_MSE=eval_MSE, batch_size=None)
return y
def reduced_likelihood_function(self, theta=None):
"""
This function determines the BLUP parameters and evaluates the reduced
likelihood function for the given autocorrelation parameters theta.
Maximizing this function wrt the autocorrelation parameters theta is
equivalent to maximizing the likelihood of the assumed joint Gaussian
distribution of the observations y evaluated onto the design of
experiments X.
Parameters
----------
theta : array_like, optional
An array containing the autocorrelation parameters at which the
Gaussian Process model parameters should be determined.
Default uses the built-in autocorrelation parameters
(ie ``theta = self.theta_``).
Returns
-------
reduced_likelihood_function_value : double
The value of the reduced likelihood function associated to the
given autocorrelation parameters theta.
par : dict
A dictionary containing the requested Gaussian Process model
parameters:
sigma2
Gaussian Process variance.
beta
Generalized least-squares regression weights for
Universal Kriging or given beta0 for Ordinary
Kriging.
gamma
Gaussian Process weights.
C
Cholesky decomposition of the correlation matrix [R].
Ft
Solution of the linear equation system : [R] x Ft = F
G
QR decomposition of the matrix Ft.
"""
check_is_fitted(self, "X")
if theta is None:
# Use built-in autocorrelation parameters
theta = self.theta_
# Initialize output
reduced_likelihood_function_value = - np.inf
par = {}
# Retrieve data
n_samples = self.X.shape[0]
D = self.D
ij = self.ij
F = self.F
if D is None:
# Light storage mode (need to recompute D, ij and F)
D, ij = l1_cross_distances(self.X)
if (np.min(np.sum(D, axis=1)) == 0.
and self.corr != correlation.pure_nugget):
raise Exception("Multiple X are not allowed")
F = self.regr(self.X)
# Set up R
r = self.corr(theta, D)
R = np.eye(n_samples) * (1. + self.nugget)
R[ij[:, 0], ij[:, 1]] = r
R[ij[:, 1], ij[:, 0]] = r
# Cholesky decomposition of R
try:
C = linalg.cholesky(R, lower=True)
except linalg.LinAlgError:
return reduced_likelihood_function_value, par
# Get generalized least squares solution
Ft = linalg.solve_triangular(C, F, lower=True)
try:
Q, G = linalg.qr(Ft, econ=True)
except:
#/usr/lib/python2.6/dist-packages/scipy/linalg/decomp.py:1177:
# DeprecationWarning: qr econ argument will be removed after scipy
# 0.7. The economy transform will then be available through the
# mode='economic' argument.
Q, G = linalg.qr(Ft, mode='economic')
sv = linalg.svd(G, compute_uv=False)
rcondG = sv[-1] / sv[0]
if rcondG < 1e-10:
# Check F
sv = linalg.svd(F, compute_uv=False)
condF = sv[0] / sv[-1]
if condF > 1e15:
raise Exception("F is too ill conditioned. Poor combination "
"of regression model and observations.")
else:
# Ft is too ill conditioned, get out (try different theta)
return reduced_likelihood_function_value, par
Yt = linalg.solve_triangular(C, self.y, lower=True)
if self.beta0 is None:
# Universal Kriging
beta = linalg.solve_triangular(G, np.dot(Q.T, Yt))
else:
# Ordinary Kriging
beta = np.array(self.beta0)
rho = Yt - np.dot(Ft, beta)
sigma2 = (rho ** 2.).sum(axis=0) / n_samples
        # The determinant of R is equal to the squared product of the diagonal
        # elements of its Cholesky decomposition C; detR below is actually
        # det(R) ** (1 / n_samples), the n_samples-th root of that determinant
detR = (np.diag(C) ** (2. / n_samples)).prod()
# Compute/Organize output
reduced_likelihood_function_value = - sigma2.sum() * detR
par['sigma2'] = sigma2 * self.y_std ** 2.
par['beta'] = beta
par['gamma'] = linalg.solve_triangular(C.T, rho)
par['C'] = C
par['Ft'] = Ft
par['G'] = G
return reduced_likelihood_function_value, par
def _arg_max_reduced_likelihood_function(self):
"""
This function estimates the autocorrelation parameters theta as the
maximizer of the reduced likelihood function.
(Minimization of the opposite reduced likelihood function is used for
convenience)
Parameters
----------
self : All parameters are stored in the Gaussian Process model object.
Returns
-------
optimal_theta : array_like
The best set of autocorrelation parameters (the sought maximizer of
the reduced likelihood function).
optimal_reduced_likelihood_function_value : double
The optimal reduced likelihood function value.
optimal_par : dict
The BLUP parameters associated to thetaOpt.
"""
# Initialize output
best_optimal_theta = []
best_optimal_rlf_value = []
best_optimal_par = []
if self.verbose:
print("The chosen optimizer is: " + str(self.optimizer))
if self.random_start > 1:
print(str(self.random_start) + " random starts are required.")
percent_completed = 0.
# Force optimizer to fmin_cobyla if the model is meant to be isotropic
if self.optimizer == 'Welch' and self.theta0.size == 1:
self.optimizer = 'fmin_cobyla'
if self.optimizer == 'fmin_cobyla':
def minus_reduced_likelihood_function(log10t):
return - self.reduced_likelihood_function(
theta=10. ** log10t)[0]
constraints = []
for i in range(self.theta0.size):
constraints.append(lambda log10t, i=i:
log10t[i] - np.log10(self.thetaL[0, i]))
constraints.append(lambda log10t, i=i:
np.log10(self.thetaU[0, i]) - log10t[i])
for k in range(self.random_start):
if k == 0:
# Use specified starting point as first guess
theta0 = self.theta0
else:
# Generate a random starting point log10-uniformly
# distributed between bounds
log10theta0 = (np.log10(self.thetaL)
+ self.random_state.rand(*self.theta0.shape)
* np.log10(self.thetaU / self.thetaL))
theta0 = 10. ** log10theta0
# Run Cobyla
try:
log10_optimal_theta = \
optimize.fmin_cobyla(minus_reduced_likelihood_function,
np.log10(theta0).ravel(), constraints,
iprint=0)
except ValueError as ve:
print("Optimization failed. Try increasing the ``nugget``")
raise ve
optimal_theta = 10. ** log10_optimal_theta
optimal_rlf_value, optimal_par = \
self.reduced_likelihood_function(theta=optimal_theta)
# Compare the new optimizer to the best previous one
if k > 0:
if optimal_rlf_value > best_optimal_rlf_value:
best_optimal_rlf_value = optimal_rlf_value
best_optimal_par = optimal_par
best_optimal_theta = optimal_theta
else:
best_optimal_rlf_value = optimal_rlf_value
best_optimal_par = optimal_par
best_optimal_theta = optimal_theta
if self.verbose and self.random_start > 1:
if (20 * k) / self.random_start > percent_completed:
percent_completed = (20 * k) / self.random_start
print("%s completed" % (5 * percent_completed))
optimal_rlf_value = best_optimal_rlf_value
optimal_par = best_optimal_par
optimal_theta = best_optimal_theta
elif self.optimizer == 'Welch':
# Backup of the given attributes
theta0, thetaL, thetaU = self.theta0, self.thetaL, self.thetaU
corr = self.corr
verbose = self.verbose
# This will iterate over fmin_cobyla optimizer
self.optimizer = 'fmin_cobyla'
self.verbose = False
# Initialize under isotropy assumption
if verbose:
print("Initialize under isotropy assumption...")
self.theta0 = check_array(self.theta0.min())
self.thetaL = check_array(self.thetaL.min())
self.thetaU = check_array(self.thetaU.max())
theta_iso, optimal_rlf_value_iso, par_iso = \
self._arg_max_reduced_likelihood_function()
optimal_theta = theta_iso + np.zeros(theta0.shape)
# Iterate over all dimensions of theta allowing for anisotropy
if verbose:
print("Now improving allowing for anisotropy...")
for i in self.random_state.permutation(theta0.size):
if verbose:
print("Proceeding along dimension %d..." % (i + 1))
self.theta0 = check_array(theta_iso)
self.thetaL = check_array(thetaL[0, i])
self.thetaU = check_array(thetaU[0, i])
def corr_cut(t, d):
return corr(check_array(np.hstack([optimal_theta[0][0:i],
t[0],
optimal_theta[0][(i +
1)::]])),
d)
self.corr = corr_cut
optimal_theta[0, i], optimal_rlf_value, optimal_par = \
self._arg_max_reduced_likelihood_function()
# Restore the given attributes
self.theta0, self.thetaL, self.thetaU = theta0, thetaL, thetaU
self.corr = corr
self.optimizer = 'Welch'
self.verbose = verbose
else:
raise NotImplementedError("This optimizer ('%s') is not "
"implemented yet. Please contribute!"
% self.optimizer)
return optimal_theta, optimal_rlf_value, optimal_par
def _check_params(self, n_samples=None):
# Check regression model
if not callable(self.regr):
if self.regr in self._regression_types:
self.regr = self._regression_types[self.regr]
else:
raise ValueError("regr should be one of %s or callable, "
"%s was given."
% (self._regression_types.keys(), self.regr))
# Check regression weights if given (Ordinary Kriging)
if self.beta0 is not None:
self.beta0 = np.atleast_2d(self.beta0)
if self.beta0.shape[1] != 1:
# Force to column vector
self.beta0 = self.beta0.T
# Check correlation model
if not callable(self.corr):
if self.corr in self._correlation_types:
self.corr = self._correlation_types[self.corr]
else:
raise ValueError("corr should be one of %s or callable, "
"%s was given."
% (self._correlation_types.keys(), self.corr))
# Check storage mode
if self.storage_mode != 'full' and self.storage_mode != 'light':
raise ValueError("Storage mode should either be 'full' or "
"'light', %s was given." % self.storage_mode)
# Check correlation parameters
self.theta0 = np.atleast_2d(self.theta0)
lth = self.theta0.size
if self.thetaL is not None and self.thetaU is not None:
self.thetaL = np.atleast_2d(self.thetaL)
self.thetaU = np.atleast_2d(self.thetaU)
if self.thetaL.size != lth or self.thetaU.size != lth:
raise ValueError("theta0, thetaL and thetaU must have the "
"same length.")
if np.any(self.thetaL <= 0) or np.any(self.thetaU < self.thetaL):
raise ValueError("The bounds must satisfy O < thetaL <= "
"thetaU.")
elif self.thetaL is None and self.thetaU is None:
if np.any(self.theta0 <= 0):
raise ValueError("theta0 must be strictly positive.")
elif self.thetaL is None or self.thetaU is None:
raise ValueError("thetaL and thetaU should either be both or "
"neither specified.")
# Force verbose type to bool
self.verbose = bool(self.verbose)
# Force normalize type to bool
self.normalize = bool(self.normalize)
# Check nugget value
self.nugget = np.asarray(self.nugget)
        if np.any(self.nugget < 0.):
raise ValueError("nugget must be positive or zero.")
if (n_samples is not None
and self.nugget.shape not in [(), (n_samples,)]):
raise ValueError("nugget must be either a scalar "
"or array of length n_samples.")
# Check optimizer
if self.optimizer not in self._optimizer_types:
raise ValueError("optimizer should be one of %s"
% self._optimizer_types)
# Force random_start type to int
self.random_start = int(self.random_start)
|
bsd-3-clause
|
WangWenjun559/Weiss
|
summary/sumy/sklearn/feature_selection/__init__.py
|
244
|
1088
|
"""
The :mod:`sklearn.feature_selection` module implements feature selection
algorithms. It currently includes univariate filter selection methods and the
recursive feature elimination algorithm.
"""
from .univariate_selection import chi2
from .univariate_selection import f_classif
from .univariate_selection import f_oneway
from .univariate_selection import f_regression
from .univariate_selection import SelectPercentile
from .univariate_selection import SelectKBest
from .univariate_selection import SelectFpr
from .univariate_selection import SelectFdr
from .univariate_selection import SelectFwe
from .univariate_selection import GenericUnivariateSelect
from .variance_threshold import VarianceThreshold
from .rfe import RFE
from .rfe import RFECV
__all__ = ['GenericUnivariateSelect',
'RFE',
'RFECV',
'SelectFdr',
'SelectFpr',
'SelectFwe',
'SelectKBest',
'SelectPercentile',
'VarianceThreshold',
'chi2',
'f_classif',
'f_oneway',
'f_regression']
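# Illustrative usage sketch (not part of the original module):
#
#   from sklearn.datasets import load_iris
#   from sklearn.feature_selection import SelectKBest, chi2
#   iris = load_iris()
#   X_new = SelectKBest(chi2, k=2).fit_transform(iris.data, iris.target)  # keep the 2 best features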
|
apache-2.0
|
hjweide/cifar-10-uncertainty
|
plot.py
|
1
|
2213
|
#!/usr/bin/env python
import cPickle as pickle
import cv2
import matplotlib.pyplot as plt
import numpy as np
from collections import defaultdict
from mpl_toolkits.axes_grid1 import ImageGrid
from os import listdir
from os.path import join, splitext
def make_image_grid(img_list, probs_list, name):
max_per_row = 10
if max_per_row >= len(img_list):
num_cols = max_per_row
num_rows = 1
else:
num_cols = max_per_row
num_rows = int(np.ceil(len(img_list) / float(max_per_row)))
fig = plt.figure(1)
ax1 = plt.axes(frameon=False)
ax1.get_xaxis().set_visible(False)
ax1.get_yaxis().set_visible(False)
    plt.title('images classified as %s with p > 0.9' % name)
# share_all=True ==> all grid[i] have same x and y dimensions
grid = ImageGrid(
fig, 111, nrows_ncols=(num_rows, num_cols),
axes_pad=0.3, share_all=True)
for i in range(num_rows * num_cols):
if i < len(img_list):
grid[i].imshow(img_list[i][:, :, ::-1])
grid[i].set_title(label='%.3f' % float(probs_list[i]))
grid[i].get_xaxis().set_visible(False)
grid[i].get_yaxis().set_visible(False)
grid[i].set_frame_on(False)
plt.savefig('%s.png' % name, bbox_inches='tight')
def plot_images(dname):
fnames = [fname for fname in listdir(dname)]
class_dict = defaultdict(list)
probs_dict = defaultdict(list)
for fname in fnames:
path = join(dname, fname)
img = cv2.imread(path)
probs = splitext(fname)[0].split('-')
label = np.array(probs).argmax()
probs_dict[label].append(probs[label])
class_dict[label].append(img)
# get the strings for the class labels
with open('data/cifar-10-batches-py/batches.meta', 'rb') as ifile:
meta_dict = pickle.load(ifile)
label_names = meta_dict['label_names']
# plot a grid for each class label
for k in class_dict.keys():
print('plotting grid of %d images for class %d (%s)...' % (
len(class_dict[k]), k, label_names[k]))
make_image_grid(class_dict[k], probs_dict[k], name=label_names[k])
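# Filename convention assumed by plot_images (illustrative): each image in the
# input directory is named with its per-class probabilities joined by '-', e.g.
# '0.01-0.02-0.90-0.01-0.01-0.01-0.01-0.01-0.01-0.01.png', and the predicted
# label is taken as the argmax of those values.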
if __name__ == '__main__':
plot_images('images-predicted')
|
mit
|
d-lee/airflow
|
airflow/hooks/base_hook.py
|
18
|
2571
|
# -*- coding: utf-8 -*-
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from builtins import object
import logging
import os
import random
from airflow import settings
from airflow.models import Connection
from airflow.exceptions import AirflowException
CONN_ENV_PREFIX = 'AIRFLOW_CONN_'
class BaseHook(object):
"""
    Abstract base class for hooks. Hooks are meant as an interface to
    interact with external systems. MySqlHook, HiveHook, and PigHook return
    objects that can handle the connection and interaction with specific
    instances of these systems, and expose consistent methods to interact
    with them.
"""
def __init__(self, source):
pass
@classmethod
def get_connections(cls, conn_id):
session = settings.Session()
db = (
session.query(Connection)
.filter(Connection.conn_id == conn_id)
.all()
)
if not db:
raise AirflowException(
"The conn_id `{0}` isn't defined".format(conn_id))
session.expunge_all()
session.close()
return db
@classmethod
def get_connection(cls, conn_id):
environment_uri = os.environ.get(CONN_ENV_PREFIX + conn_id.upper())
conn = None
if environment_uri:
conn = Connection(conn_id=conn_id, uri=environment_uri)
else:
conn = random.choice(cls.get_connections(conn_id))
if conn.host:
logging.info("Using connection to: " + conn.host)
return conn
@classmethod
def get_hook(cls, conn_id):
connection = cls.get_connection(conn_id)
return connection.get_hook()
def get_conn(self):
raise NotImplementedError()
def get_records(self, sql):
raise NotImplementedError()
def get_pandas_df(self, sql):
raise NotImplementedError()
def run(self, sql):
raise NotImplementedError()
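# Minimal subclass sketch (illustrative; 'my_conn_id' is a placeholder conn_id
# and SomeClient stands in for an arbitrary client library):
#
#   class MyHook(BaseHook):
#       def get_conn(self):
#           conn = self.get_connection('my_conn_id')
#           return SomeClient(host=conn.host, login=conn.login)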
|
apache-2.0
|
musically-ut/statsmodels
|
statsmodels/regression/tests/test_regression.py
|
18
|
38246
|
"""
Test functions for models.regression
"""
# TODO: Test for LM
from statsmodels.compat.python import long, lrange
import warnings
import pandas
import numpy as np
from numpy.testing import (assert_almost_equal, assert_approx_equal, assert_,
assert_raises, assert_equal, assert_allclose)
from scipy.linalg import toeplitz
from statsmodels.tools.tools import add_constant, categorical
from statsmodels.compat.numpy import np_matrix_rank
from statsmodels.regression.linear_model import OLS, WLS, GLS, yule_walker
from statsmodels.datasets import longley
from scipy.stats import t as student_t
DECIMAL_4 = 4
DECIMAL_3 = 3
DECIMAL_2 = 2
DECIMAL_1 = 1
DECIMAL_7 = 7
DECIMAL_0 = 0
class CheckRegressionResults(object):
"""
    res2 contains results from Rmodelwrap or results obtained from statistical
    packages such as R, Stata, or SAS that were written to model_results
"""
decimal_params = DECIMAL_4
def test_params(self):
assert_almost_equal(self.res1.params, self.res2.params,
self.decimal_params)
decimal_standarderrors = DECIMAL_4
def test_standarderrors(self):
assert_almost_equal(self.res1.bse,self.res2.bse,
self.decimal_standarderrors)
decimal_confidenceintervals = DECIMAL_4
def test_confidenceintervals(self):
#NOTE: stata rounds residuals (at least) to sig digits so approx_equal
conf1 = self.res1.conf_int()
conf2 = self.res2.conf_int()
for i in range(len(conf1)):
assert_approx_equal(conf1[i][0], conf2[i][0],
self.decimal_confidenceintervals)
assert_approx_equal(conf1[i][1], conf2[i][1],
self.decimal_confidenceintervals)
decimal_conf_int_subset = DECIMAL_4
def test_conf_int_subset(self):
if len(self.res1.params) > 1:
ci1 = self.res1.conf_int(cols=(1,2))
ci2 = self.res1.conf_int()[1:3]
assert_almost_equal(ci1, ci2, self.decimal_conf_int_subset)
else:
pass
decimal_scale = DECIMAL_4
def test_scale(self):
assert_almost_equal(self.res1.scale, self.res2.scale,
self.decimal_scale)
decimal_rsquared = DECIMAL_4
def test_rsquared(self):
assert_almost_equal(self.res1.rsquared, self.res2.rsquared,
self.decimal_rsquared)
decimal_rsquared_adj = DECIMAL_4
def test_rsquared_adj(self):
assert_almost_equal(self.res1.rsquared_adj, self.res2.rsquared_adj,
self.decimal_rsquared_adj)
def test_degrees(self):
assert_equal(self.res1.model.df_model, self.res2.df_model)
assert_equal(self.res1.model.df_resid, self.res2.df_resid)
decimal_ess = DECIMAL_4
def test_ess(self):
#Explained Sum of Squares
assert_almost_equal(self.res1.ess, self.res2.ess,
self.decimal_ess)
decimal_ssr = DECIMAL_4
def test_sumof_squaredresids(self):
assert_almost_equal(self.res1.ssr, self.res2.ssr, self.decimal_ssr)
decimal_mse_resid = DECIMAL_4
def test_mse_resid(self):
#Mean squared error of residuals
assert_almost_equal(self.res1.mse_model, self.res2.mse_model,
self.decimal_mse_resid)
decimal_mse_model = DECIMAL_4
def test_mse_model(self):
assert_almost_equal(self.res1.mse_resid, self.res2.mse_resid,
self.decimal_mse_model)
decimal_mse_total = DECIMAL_4
def test_mse_total(self):
assert_almost_equal(self.res1.mse_total, self.res2.mse_total,
self.decimal_mse_total, err_msg="Test class %s" % self)
decimal_fvalue = DECIMAL_4
def test_fvalue(self):
#didn't change this, not sure it should complain -inf not equal -inf
#if not (np.isinf(self.res1.fvalue) and np.isinf(self.res2.fvalue)):
assert_almost_equal(self.res1.fvalue, self.res2.fvalue,
self.decimal_fvalue)
decimal_loglike = DECIMAL_4
def test_loglike(self):
assert_almost_equal(self.res1.llf, self.res2.llf, self.decimal_loglike)
decimal_aic = DECIMAL_4
def test_aic(self):
assert_almost_equal(self.res1.aic, self.res2.aic, self.decimal_aic)
decimal_bic = DECIMAL_4
def test_bic(self):
assert_almost_equal(self.res1.bic, self.res2.bic, self.decimal_bic)
decimal_pvalues = DECIMAL_4
def test_pvalues(self):
assert_almost_equal(self.res1.pvalues, self.res2.pvalues,
self.decimal_pvalues)
decimal_wresid = DECIMAL_4
def test_wresid(self):
assert_almost_equal(self.res1.wresid, self.res2.wresid,
self.decimal_wresid)
decimal_resids = DECIMAL_4
def test_resids(self):
assert_almost_equal(self.res1.resid, self.res2.resid,
self.decimal_resids)
decimal_norm_resids = DECIMAL_4
def test_norm_resids(self):
assert_almost_equal(self.res1.resid_pearson, self.res2.resid_pearson,
self.decimal_norm_resids)
#TODO: test fittedvalues and what else?
class TestOLS(CheckRegressionResults):
@classmethod
def setupClass(cls):
from .results.results_regression import Longley
data = longley.load()
data.exog = add_constant(data.exog, prepend=False)
res1 = OLS(data.endog, data.exog).fit()
res2 = Longley()
res2.wresid = res1.wresid # workaround hack
cls.res1 = res1
cls.res2 = res2
res_qr = OLS(data.endog, data.exog).fit(method="qr")
model_qr = OLS(data.endog, data.exog)
Q, R = np.linalg.qr(data.exog)
model_qr.exog_Q, model_qr.exog_R = Q, R
model_qr.normalized_cov_params = np.linalg.inv(np.dot(R.T, R))
model_qr.rank = np_matrix_rank(R)
res_qr2 = model_qr.fit(method="qr")
cls.res_qr = res_qr
cls.res_qr_manual = res_qr2
def test_eigenvalues(self):
eigenval_perc_diff = (self.res_qr.eigenvals - self.res_qr_manual.eigenvals)
eigenval_perc_diff /= self.res_qr.eigenvals
zeros = np.zeros_like(eigenval_perc_diff)
assert_almost_equal(eigenval_perc_diff, zeros, DECIMAL_7)
# Robust error tests. Compare values computed with SAS
def test_HC0_errors(self):
#They are split up because the copied results do not have any DECIMAL_4
#places for the last place.
assert_almost_equal(self.res1.HC0_se[:-1],
self.res2.HC0_se[:-1], DECIMAL_4)
assert_approx_equal(np.round(self.res1.HC0_se[-1]), self.res2.HC0_se[-1])
def test_HC1_errors(self):
assert_almost_equal(self.res1.HC1_se[:-1],
self.res2.HC1_se[:-1], DECIMAL_4)
assert_approx_equal(self.res1.HC1_se[-1], self.res2.HC1_se[-1])
def test_HC2_errors(self):
assert_almost_equal(self.res1.HC2_se[:-1],
self.res2.HC2_se[:-1], DECIMAL_4)
assert_approx_equal(self.res1.HC2_se[-1], self.res2.HC2_se[-1])
def test_HC3_errors(self):
assert_almost_equal(self.res1.HC3_se[:-1],
self.res2.HC3_se[:-1], DECIMAL_4)
assert_approx_equal(self.res1.HC3_se[-1], self.res2.HC3_se[-1])
def test_qr_params(self):
assert_almost_equal(self.res1.params,
self.res_qr.params, 6)
def test_qr_normalized_cov_params(self):
#todo: need assert_close
assert_almost_equal(np.ones_like(self.res1.normalized_cov_params),
self.res1.normalized_cov_params /
self.res_qr.normalized_cov_params, 5)
def test_missing(self):
data = longley.load()
data.exog = add_constant(data.exog, prepend=False)
data.endog[[3, 7, 14]] = np.nan
mod = OLS(data.endog, data.exog, missing='drop')
assert_equal(mod.endog.shape[0], 13)
assert_equal(mod.exog.shape[0], 13)
def test_rsquared_adj_overfit(self):
        # Test that if df_resid = 0, rsquared_adj is nan.
# This is a regression test for user issue:
# https://github.com/statsmodels/statsmodels/issues/868
with warnings.catch_warnings(record=True):
x = np.random.randn(5)
y = np.random.randn(5, 6)
results = OLS(x, y).fit()
rsquared_adj = results.rsquared_adj
assert_equal(rsquared_adj, np.nan)
def test_qr_alternatives(self):
assert_allclose(self.res_qr.params, self.res_qr_manual.params,
rtol=5e-12)
def test_norm_resid(self):
resid = self.res1.wresid
norm_resid = resid / np.sqrt(np.sum(resid**2.0) / self.res1.df_resid)
model_norm_resid = self.res1.resid_pearson
assert_almost_equal(model_norm_resid, norm_resid, DECIMAL_7)
def test_norm_resid_zero_variance(self):
with warnings.catch_warnings(record=True):
y = self.res1.model.endog
res = OLS(y,y).fit()
assert_allclose(res.scale, 0, atol=1e-20)
assert_allclose(res.wresid, res.resid_pearson, atol=5e-11)
class TestRTO(CheckRegressionResults):
@classmethod
def setupClass(cls):
from .results.results_regression import LongleyRTO
data = longley.load()
res1 = OLS(data.endog, data.exog).fit()
res2 = LongleyRTO()
res2.wresid = res1.wresid # workaround hack
cls.res1 = res1
cls.res2 = res2
res_qr = OLS(data.endog, data.exog).fit(method="qr")
cls.res_qr = res_qr
class TestFtest(object):
"""
Tests f_test vs. RegressionResults
"""
@classmethod
def setupClass(cls):
data = longley.load()
data.exog = add_constant(data.exog, prepend=False)
cls.res1 = OLS(data.endog, data.exog).fit()
R = np.identity(7)[:-1,:]
cls.Ftest = cls.res1.f_test(R)
def test_F(self):
assert_almost_equal(self.Ftest.fvalue, self.res1.fvalue, DECIMAL_4)
def test_p(self):
assert_almost_equal(self.Ftest.pvalue, self.res1.f_pvalue, DECIMAL_4)
def test_Df_denom(self):
assert_equal(self.Ftest.df_denom, self.res1.model.df_resid)
def test_Df_num(self):
assert_equal(self.Ftest.df_num, 6)
class TestFTest2(object):
"""
A joint test that the coefficient on
GNP = the coefficient on UNEMP and that the coefficient on
POP = the coefficient on YEAR for the Longley dataset.
Ftest1 is from statsmodels. Results are from Rpy using R's car library.
"""
@classmethod
def setupClass(cls):
data = longley.load()
data.exog = add_constant(data.exog, prepend=False)
res1 = OLS(data.endog, data.exog).fit()
R2 = [[0,1,-1,0,0,0,0],[0, 0, 0, 0, 1, -1, 0]]
cls.Ftest1 = res1.f_test(R2)
hyp = 'x2 = x3, x5 = x6'
cls.NewFtest1 = res1.f_test(hyp)
def test_new_ftest(self):
assert_equal(self.NewFtest1.fvalue, self.Ftest1.fvalue)
def test_fvalue(self):
assert_almost_equal(self.Ftest1.fvalue, 9.7404618732968196, DECIMAL_4)
def test_pvalue(self):
assert_almost_equal(self.Ftest1.pvalue, 0.0056052885317493459,
DECIMAL_4)
def test_df_denom(self):
assert_equal(self.Ftest1.df_denom, 9)
def test_df_num(self):
assert_equal(self.Ftest1.df_num, 2)
class TestFtestQ(object):
"""
A joint hypothesis test that Rb = q. Coefficient tests are essentially
made up. Test values taken from Stata.
"""
@classmethod
def setupClass(cls):
data = longley.load()
data.exog = add_constant(data.exog, prepend=False)
res1 = OLS(data.endog, data.exog).fit()
R = np.array([[0,1,1,0,0,0,0],
[0,1,0,1,0,0,0],
[0,1,0,0,0,0,0],
[0,0,0,0,1,0,0],
[0,0,0,0,0,1,0]])
q = np.array([0,0,0,1,0])
cls.Ftest1 = res1.f_test((R,q))
def test_fvalue(self):
assert_almost_equal(self.Ftest1.fvalue, 70.115557, 5)
def test_pvalue(self):
assert_almost_equal(self.Ftest1.pvalue, 6.229e-07, 10)
def test_df_denom(self):
assert_equal(self.Ftest1.df_denom, 9)
def test_df_num(self):
assert_equal(self.Ftest1.df_num, 5)
class TestTtest(object):
"""
    Test individual t-tests, i.e., are the coefficients significantly
    different from zero.
"""
@classmethod
def setupClass(cls):
data = longley.load()
data.exog = add_constant(data.exog, prepend=False)
cls.res1 = OLS(data.endog, data.exog).fit()
R = np.identity(7)
cls.Ttest = cls.res1.t_test(R)
hyp = 'x1 = 0, x2 = 0, x3 = 0, x4 = 0, x5 = 0, x6 = 0, const = 0'
cls.NewTTest = cls.res1.t_test(hyp)
def test_new_tvalue(self):
assert_equal(self.NewTTest.tvalue, self.Ttest.tvalue)
def test_tvalue(self):
assert_almost_equal(self.Ttest.tvalue, self.res1.tvalues, DECIMAL_4)
def test_sd(self):
assert_almost_equal(self.Ttest.sd, self.res1.bse, DECIMAL_4)
def test_pvalue(self):
assert_almost_equal(self.Ttest.pvalue, student_t.sf(
np.abs(self.res1.tvalues), self.res1.model.df_resid)*2,
DECIMAL_4)
def test_df_denom(self):
assert_equal(self.Ttest.df_denom, self.res1.model.df_resid)
def test_effect(self):
assert_almost_equal(self.Ttest.effect, self.res1.params)
class TestTtest2(object):
"""
Tests the hypothesis that the coefficients on POP and YEAR
are equal.
Results from RPy using 'car' package.
"""
@classmethod
def setupClass(cls):
R = np.zeros(7)
R[4:6] = [1,-1]
data = longley.load()
data.exog = add_constant(data.exog, prepend=False)
res1 = OLS(data.endog, data.exog).fit()
cls.Ttest1 = res1.t_test(R)
def test_tvalue(self):
assert_almost_equal(self.Ttest1.tvalue, -4.0167754636397284,
DECIMAL_4)
def test_sd(self):
assert_almost_equal(self.Ttest1.sd, 455.39079425195314, DECIMAL_4)
def test_pvalue(self):
assert_almost_equal(self.Ttest1.pvalue, 2*0.0015163772380932246,
DECIMAL_4)
def test_df_denom(self):
assert_equal(self.Ttest1.df_denom, 9)
def test_effect(self):
assert_almost_equal(self.Ttest1.effect, -1829.2025687186533, DECIMAL_4)
class TestGLS(object):
"""
These test results were obtained by replication with R.
"""
@classmethod
def setupClass(cls):
from .results.results_regression import LongleyGls
data = longley.load()
exog = add_constant(np.column_stack((data.exog[:,1],
data.exog[:,4])), prepend=False)
tmp_results = OLS(data.endog, exog).fit()
rho = np.corrcoef(tmp_results.resid[1:],
tmp_results.resid[:-1])[0][1] # by assumption
order = toeplitz(np.arange(16))
sigma = rho**order
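        # sigma[i, j] = rho ** |i - j|: the AR(1) error covariance implied by
        # the lag-1 autocorrelation of the OLS residuals estimated above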
GLS_results = GLS(data.endog, exog, sigma=sigma).fit()
cls.res1 = GLS_results
cls.res2 = LongleyGls()
# attach for test_missing
cls.sigma = sigma
cls.exog = exog
cls.endog = data.endog
def test_aic(self):
assert_approx_equal(self.res1.aic+2, self.res2.aic, 3)
def test_bic(self):
assert_approx_equal(self.res1.bic, self.res2.bic, 2)
def test_loglike(self):
assert_almost_equal(self.res1.llf, self.res2.llf, DECIMAL_0)
def test_params(self):
assert_almost_equal(self.res1.params, self.res2.params, DECIMAL_1)
def test_resid(self):
assert_almost_equal(self.res1.resid, self.res2.resid, DECIMAL_4)
def test_scale(self):
assert_almost_equal(self.res1.scale, self.res2.scale, DECIMAL_4)
def test_tvalues(self):
assert_almost_equal(self.res1.tvalues, self.res2.tvalues, DECIMAL_4)
def test_standarderrors(self):
assert_almost_equal(self.res1.bse, self.res2.bse, DECIMAL_4)
def test_fittedvalues(self):
assert_almost_equal(self.res1.fittedvalues, self.res2.fittedvalues,
DECIMAL_4)
def test_pvalues(self):
assert_almost_equal(self.res1.pvalues, self.res2.pvalues, DECIMAL_4)
def test_missing(self):
endog = self.endog.copy() # copy or changes endog for other methods
endog[[4,7,14]] = np.nan
mod = GLS(endog, self.exog, sigma=self.sigma, missing='drop')
assert_equal(mod.endog.shape[0], 13)
assert_equal(mod.exog.shape[0], 13)
assert_equal(mod.sigma.shape, (13,13))
class TestGLS_alt_sigma(CheckRegressionResults):
"""
Test that GLS with no argument is equivalent to OLS.
"""
@classmethod
def setupClass(cls):
data = longley.load()
data.exog = add_constant(data.exog, prepend=False)
ols_res = OLS(data.endog, data.exog).fit()
gls_res = GLS(data.endog, data.exog).fit()
gls_res_scalar = GLS(data.endog, data.exog, sigma=1)
cls.endog = data.endog
cls.exog = data.exog
cls.res1 = gls_res
cls.res2 = ols_res
cls.res3 = gls_res_scalar
# self.res2.conf_int = self.res2.conf_int()
def test_wrong_size_sigma_1d(self):
n = len(self.endog)
assert_raises(ValueError, GLS, self.endog, self.exog, sigma=np.ones(n-1))
def test_wrong_size_sigma_2d(self):
n = len(self.endog)
assert_raises(ValueError, GLS, self.endog, self.exog, sigma=np.ones((n-1,n-1)))
# def check_confidenceintervals(self, conf1, conf2):
# assert_almost_equal(conf1, conf2, DECIMAL_4)
class TestLM(object):
@classmethod
def setupClass(cls):
# TODO: Test HAC method
X = np.random.randn(100,3)
b = np.ones((3,1))
e = np.random.randn(100,1)
y = np.dot(X,b) + e
# Cases?
# Homoskedastic
# HC0
cls.res1_full = OLS(y,X).fit()
cls.res1_restricted = OLS(y,X[:,0]).fit()
cls.res2_full = cls.res1_full.get_robustcov_results('HC0')
cls.res2_restricted = cls.res1_restricted.get_robustcov_results('HC0')
cls.X = X
cls.Y = y
def test_LM_homoskedastic(self):
resid = self.res1_restricted.wresid
n = resid.shape[0]
X = self.X
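        # homoskedastic score (LM) test: LM = n * s' S^{-1} s, where s is the
        # mean score X'u/n and S its estimated covariance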
S = np.dot(resid,resid) / n * np.dot(X.T,X) / n
Sinv = np.linalg.inv(S)
s = np.mean(X * resid[:,None], 0)
LMstat = n * np.dot(np.dot(s,Sinv),s.T)
LMstat_OLS = self.res1_full.compare_lm_test(self.res1_restricted)
LMstat2 = LMstat_OLS[0]
assert_almost_equal(LMstat, LMstat2, DECIMAL_7)
def test_LM_heteroskedastic_nodemean(self):
resid = self.res1_restricted.wresid
n = resid.shape[0]
X = self.X
scores = X * resid[:,None]
S = np.dot(scores.T,scores) / n
Sinv = np.linalg.inv(S)
s = np.mean(scores, 0)
LMstat = n * np.dot(np.dot(s,Sinv),s.T)
LMstat_OLS = self.res2_full.compare_lm_test(self.res2_restricted, demean=False)
LMstat2 = LMstat_OLS[0]
assert_almost_equal(LMstat, LMstat2, DECIMAL_7)
def test_LM_heteroskedastic_demean(self):
resid = self.res1_restricted.wresid
n = resid.shape[0]
X = self.X
scores = X * resid[:,None]
scores_demean = scores - scores.mean(0)
S = np.dot(scores_demean.T,scores_demean) / n
Sinv = np.linalg.inv(S)
s = np.mean(scores, 0)
LMstat = n * np.dot(np.dot(s,Sinv),s.T)
LMstat_OLS = self.res2_full.compare_lm_test(self.res2_restricted)
LMstat2 = LMstat_OLS[0]
assert_almost_equal(LMstat, LMstat2, DECIMAL_7)
def test_LM_heteroskedastic_LRversion(self):
resid = self.res1_restricted.wresid
resid_full = self.res1_full.wresid
n = resid.shape[0]
X = self.X
scores = X * resid[:,None]
s = np.mean(scores, 0)
scores = X * resid_full[:,None]
S = np.dot(scores.T,scores) / n
Sinv = np.linalg.inv(S)
LMstat = n * np.dot(np.dot(s,Sinv),s.T)
LMstat_OLS = self.res2_full.compare_lm_test(self.res2_restricted, use_lr = True)
LMstat2 = LMstat_OLS[0]
assert_almost_equal(LMstat, LMstat2, DECIMAL_7)
def test_LM_nonnested(self):
assert_raises(ValueError, self.res2_restricted.compare_lm_test, self.res2_full)
class TestOLS_GLS_WLS_equivalence(object):
@classmethod
def setupClass(cls):
data = longley.load()
data.exog = add_constant(data.exog, prepend=False)
y = data.endog
X = data.exog
n = y.shape[0]
w = np.ones(n)
cls.results = []
cls.results.append(OLS(y, X).fit())
cls.results.append(WLS(y, X, w).fit())
cls.results.append(GLS(y, X, 100*w).fit())
cls.results.append(GLS(y, X, np.diag(0.1*w)).fit())
def test_ll(self):
llf = np.array([r.llf for r in self.results])
llf_1 = np.ones_like(llf) * self.results[0].llf
assert_almost_equal(llf, llf_1, DECIMAL_7)
ic = np.array([r.aic for r in self.results])
ic_1 = np.ones_like(ic) * self.results[0].aic
assert_almost_equal(ic, ic_1, DECIMAL_7)
ic = np.array([r.bic for r in self.results])
ic_1 = np.ones_like(ic) * self.results[0].bic
assert_almost_equal(ic, ic_1, DECIMAL_7)
def test_params(self):
params = np.array([r.params for r in self.results])
params_1 = np.array([self.results[0].params] * len(self.results))
assert_allclose(params, params_1)
def test_ss(self):
bse = np.array([r.bse for r in self.results])
bse_1 = np.array([self.results[0].bse] * len(self.results))
assert_allclose(bse, bse_1)
def test_rsquared(self):
rsquared = np.array([r.rsquared for r in self.results])
rsquared_1 = np.array([self.results[0].rsquared] * len(self.results))
assert_almost_equal(rsquared, rsquared_1, DECIMAL_7)
class TestGLS_WLS_equivalence(TestOLS_GLS_WLS_equivalence):
# reuse test methods
@classmethod
def setupClass(cls):
data = longley.load()
data.exog = add_constant(data.exog, prepend=False)
y = data.endog
X = data.exog
n = y.shape[0]
np.random.seed(5)
w = np.random.uniform(0.5, 1, n)
w_inv = 1. / w
cls.results = []
cls.results.append(WLS(y, X, w).fit())
cls.results.append(WLS(y, X, 0.01 * w).fit())
cls.results.append(GLS(y, X, 100 * w_inv).fit())
cls.results.append(GLS(y, X, np.diag(0.1 * w_inv)).fit())
def test_rsquared(self):
# TODO: WLS rsquared is ok, GLS might have wrong centered_tss
# We only check that WLS and GLS rsquared is invariant to scaling
# WLS and GLS have different rsquared
assert_almost_equal(self.results[1].rsquared, self.results[0].rsquared,
DECIMAL_7)
assert_almost_equal(self.results[3].rsquared, self.results[2].rsquared,
DECIMAL_7)
class TestNonFit(object):
@classmethod
def setupClass(cls):
data = longley.load()
data.exog = add_constant(data.exog, prepend=False)
cls.endog = data.endog
cls.exog = data.exog
cls.ols_model = OLS(data.endog, data.exog)
def test_df_resid(self):
df_resid = self.endog.shape[0] - self.exog.shape[1]
        assert_equal(self.ols_model.df_resid, df_resid)
class TestWLS_CornerCases(object):
@classmethod
def setupClass(cls):
cls.exog = np.ones((1,))
cls.endog = np.ones((1,))
weights = 1
cls.wls_res = WLS(cls.endog, cls.exog, weights=weights).fit()
def test_wrong_size_weights(self):
weights = np.ones((10,10))
assert_raises(ValueError, WLS, self.endog, self.exog, weights=weights)
class TestWLSExogWeights(CheckRegressionResults):
#Test WLS with Greene's credit card data
#reg avgexp age income incomesq ownrent [aw=1/incomesq]
def __init__(self):
from .results.results_regression import CCardWLS
from statsmodels.datasets.ccard import load
dta = load()
dta.exog = add_constant(dta.exog, prepend=False)
nobs = 72.
weights = 1/dta.exog[:,2]
# for comparison with stata analytic weights
scaled_weights = ((weights * nobs)/weights.sum())
self.res1 = WLS(dta.endog, dta.exog, weights=scaled_weights).fit()
self.res2 = CCardWLS()
self.res2.wresid = scaled_weights ** .5 * self.res2.resid
# correction because we use different definition for loglike/llf
corr_ic = 2 * (self.res1.llf - self.res2.llf)
self.res2.aic -= corr_ic
self.res2.bic -= corr_ic
self.res2.llf += 0.5 * np.sum(np.log(self.res1.model.weights))
def test_wls_example():
    # example from the WLS docstring; a bug was once noted there and should
    # now be fixed
Y = [1,3,4,5,2,3,4]
X = lrange(1,8)
X = add_constant(X, prepend=False)
wls_model = WLS(Y,X, weights=lrange(1,8)).fit()
#taken from R lm.summary
assert_almost_equal(wls_model.fvalue, 0.127337843215, 6)
assert_almost_equal(wls_model.scale, 2.44608530786**2, 6)
def test_wls_tss():
y = np.array([22, 22, 22, 23, 23, 23])
X = [[1, 0], [1, 0], [1, 1], [0, 1], [0, 1], [0, 1]]
ols_mod = OLS(y, add_constant(X, prepend=False)).fit()
yw = np.array([22, 22, 23.])
Xw = [[1,0],[1,1],[0,1]]
w = np.array([2, 1, 3.])
wls_mod = WLS(yw, add_constant(Xw, prepend=False), weights=w).fit()
assert_equal(ols_mod.centered_tss, wls_mod.centered_tss)
class TestWLSScalarVsArray(CheckRegressionResults):
@classmethod
def setupClass(cls):
from statsmodels.datasets.longley import load
dta = load()
dta.exog = add_constant(dta.exog, prepend=True)
wls_scalar = WLS(dta.endog, dta.exog, weights=1./3).fit()
weights = [1/3.] * len(dta.endog)
wls_array = WLS(dta.endog, dta.exog, weights=weights).fit()
cls.res1 = wls_scalar
cls.res2 = wls_array
#class TestWLS_GLS(CheckRegressionResults):
# @classmethod
# def setupClass(cls):
# from statsmodels.datasets.ccard import load
# data = load()
# cls.res1 = WLS(data.endog, data.exog, weights = 1/data.exog[:,2]).fit()
# cls.res2 = GLS(data.endog, data.exog, sigma = data.exog[:,2]).fit()
#
# def check_confidenceintervals(self, conf1, conf2):
# assert_almost_equal(conf1, conf2(), DECIMAL_4)
def test_wls_missing():
from statsmodels.datasets.ccard import load
data = load()
endog = data.endog
endog[[10, 25]] = np.nan
mod = WLS(data.endog, data.exog, weights = 1/data.exog[:,2], missing='drop')
assert_equal(mod.endog.shape[0], 70)
assert_equal(mod.exog.shape[0], 70)
assert_equal(mod.weights.shape[0], 70)
class TestWLS_OLS(CheckRegressionResults):
@classmethod
def setupClass(cls):
data = longley.load()
data.exog = add_constant(data.exog, prepend=False)
cls.res1 = OLS(data.endog, data.exog).fit()
cls.res2 = WLS(data.endog, data.exog).fit()
def check_confidenceintervals(self, conf1, conf2):
assert_almost_equal(conf1, conf2(), DECIMAL_4)
class TestGLS_OLS(CheckRegressionResults):
@classmethod
def setupClass(cls):
data = longley.load()
data.exog = add_constant(data.exog, prepend=False)
cls.res1 = GLS(data.endog, data.exog).fit()
cls.res2 = OLS(data.endog, data.exog).fit()
def check_confidenceintervals(self, conf1, conf2):
assert_almost_equal(conf1, conf2(), DECIMAL_4)
#TODO: test AR
# why the two-stage in AR?
#class test_ar(object):
# from statsmodels.datasets.sunspots import load
# data = load()
# model = AR(data.endog, rho=4).fit()
# R_res = RModel(data.endog, aic="FALSE", order_max=4)
# def test_params(self):
# assert_almost_equal(self.model.rho,
# pass
# def test_order(self):
# In R this can be defined or chosen by minimizing the AIC if aic=True
# pass
class TestYuleWalker(object):
@classmethod
def setupClass(cls):
from statsmodels.datasets.sunspots import load
data = load()
cls.rho, cls.sigma = yule_walker(data.endog, order=4,
method="mle")
cls.R_params = [1.2831003105694765, -0.45240924374091945,
-0.20770298557575195, 0.047943648089542337]
def test_params(self):
assert_almost_equal(self.rho, self.R_params, DECIMAL_4)
class TestDataDimensions(CheckRegressionResults):
@classmethod
def setupClass(cls):
np.random.seed(54321)
cls.endog_n_ = np.random.uniform(0,20,size=30)
cls.endog_n_one = cls.endog_n_[:,None]
cls.exog_n_ = np.random.uniform(0,20,size=30)
cls.exog_n_one = cls.exog_n_[:,None]
cls.degen_exog = cls.exog_n_one[:-1]
cls.mod1 = OLS(cls.endog_n_one, cls.exog_n_one)
cls.mod1.df_model += 1
cls.res1 = cls.mod1.fit()
        # Note that these are created for every subclass, which probably adds
        # a little extra overhead.
cls.mod2 = OLS(cls.endog_n_one, cls.exog_n_one)
cls.mod2.df_model += 1
cls.res2 = cls.mod2.fit()
def check_confidenceintervals(self, conf1, conf2):
assert_almost_equal(conf1, conf2(), DECIMAL_4)
class TestGLS_large_data(TestDataDimensions):
@classmethod
def setupClass(cls):
nobs = 1000
y = np.random.randn(nobs,1)
X = np.random.randn(nobs,20)
sigma = np.ones_like(y)
cls.gls_res = GLS(y, X, sigma=sigma).fit()
cls.gls_res_scalar = GLS(y, X, sigma=1).fit()
cls.gls_res_none= GLS(y, X).fit()
cls.ols_res = OLS(y, X).fit()
def test_large_equal_params(self):
assert_almost_equal(self.ols_res.params, self.gls_res.params, DECIMAL_7)
def test_large_equal_loglike(self):
assert_almost_equal(self.ols_res.llf, self.gls_res.llf, DECIMAL_7)
def test_large_equal_params_none(self):
assert_almost_equal(self.gls_res.params, self.gls_res_none.params,
DECIMAL_7)
class TestNxNx(TestDataDimensions):
@classmethod
def setupClass(cls):
super(TestNxNx, cls).setupClass()
cls.mod2 = OLS(cls.endog_n_, cls.exog_n_)
cls.mod2.df_model += 1
cls.res2 = cls.mod2.fit()
class TestNxOneNx(TestDataDimensions):
@classmethod
def setupClass(cls):
super(TestNxOneNx, cls).setupClass()
cls.mod2 = OLS(cls.endog_n_one, cls.exog_n_)
cls.mod2.df_model += 1
cls.res2 = cls.mod2.fit()
class TestNxNxOne(TestDataDimensions):
@classmethod
def setupClass(cls):
super(TestNxNxOne, cls).setupClass()
cls.mod2 = OLS(cls.endog_n_, cls.exog_n_one)
cls.mod2.df_model += 1
cls.res2 = cls.mod2.fit()
def test_bad_size():
np.random.seed(54321)
data = np.random.uniform(0,20,31)
assert_raises(ValueError, OLS, data, data[1:])
def test_const_indicator():
np.random.seed(12345)
X = np.random.randint(0, 3, size=30)
X = categorical(X, drop=True)
y = np.dot(X, [1., 2., 3.]) + np.random.normal(size=30)
modc = OLS(y, add_constant(X[:,1:], prepend=True)).fit()
mod = OLS(y, X, hasconst=True).fit()
assert_almost_equal(modc.rsquared, mod.rsquared, 12)
def test_706():
# make sure one regressor pandas Series gets passed to DataFrame
# for conf_int.
y = pandas.Series(np.random.randn(10))
x = pandas.Series(np.ones(10))
res = OLS(y,x).fit()
conf_int = res.conf_int()
np.testing.assert_equal(conf_int.shape, (1, 2))
np.testing.assert_(isinstance(conf_int, pandas.DataFrame))
def test_summary():
# test 734
import re
dta = longley.load_pandas()
X = dta.exog
X["constant"] = 1
y = dta.endog
with warnings.catch_warnings(record=True):
res = OLS(y, X).fit()
table = res.summary().as_latex()
# replace the date and time
table = re.sub("(?<=\n\\\\textbf\{Date:\} &).+?&",
" Sun, 07 Apr 2013 &", table)
table = re.sub("(?<=\n\\\\textbf\{Time:\} &).+?&",
" 13:46:07 &", table)
expected = """\\begin{center}
\\begin{tabular}{lclc}
\\toprule
\\textbf{Dep. Variable:} & TOTEMP & \\textbf{ R-squared: } & 0.995 \\\\
\\textbf{Model:} & OLS & \\textbf{ Adj. R-squared: } & 0.992 \\\\
\\textbf{Method:} & Least Squares & \\textbf{ F-statistic: } & 330.3 \\\\
\\textbf{Date:} & Sun, 07 Apr 2013 & \\textbf{ Prob (F-statistic):} & 4.98e-10 \\\\
\\textbf{Time:} & 13:46:07 & \\textbf{ Log-Likelihood: } & -109.62 \\\\
\\textbf{No. Observations:} & 16 & \\textbf{ AIC: } & 233.2 \\\\
\\textbf{Df Residuals:} & 9 & \\textbf{ BIC: } & 238.6 \\\\
\\textbf{Df Model:} & 6 & \\textbf{ } & \\\\
\\bottomrule
\\end{tabular}
\\begin{tabular}{lcccccc}
& \\textbf{coef} & \\textbf{std err} & \\textbf{t} & \\textbf{P$>$$|$t$|$} & \\textbf{[0.025} & \\textbf{0.975]} \\\\
\\midrule
\\textbf{GNPDEFL} & 15.0619 & 84.915 & 0.177 & 0.863 & -177.029 & 207.153 \\\\
\\textbf{GNP} & -0.0358 & 0.033 & -1.070 & 0.313 & -0.112 & 0.040 \\\\
\\textbf{UNEMP} & -2.0202 & 0.488 & -4.136 & 0.003 & -3.125 & -0.915 \\\\
\\textbf{ARMED} & -1.0332 & 0.214 & -4.822 & 0.001 & -1.518 & -0.549 \\\\
\\textbf{POP} & -0.0511 & 0.226 & -0.226 & 0.826 & -0.563 & 0.460 \\\\
\\textbf{YEAR} & 1829.1515 & 455.478 & 4.016 & 0.003 & 798.788 & 2859.515 \\\\
\\textbf{constant} & -3.482e+06 & 8.9e+05 & -3.911 & 0.004 & -5.5e+06 & -1.47e+06 \\\\
\\bottomrule
\\end{tabular}
\\begin{tabular}{lclc}
\\textbf{Omnibus:} & 0.749 & \\textbf{ Durbin-Watson: } & 2.559 \\\\
\\textbf{Prob(Omnibus):} & 0.688 & \\textbf{ Jarque-Bera (JB): } & 0.684 \\\\
\\textbf{Skew:} & 0.420 & \\textbf{ Prob(JB): } & 0.710 \\\\
\\textbf{Kurtosis:} & 2.434 & \\textbf{ Cond. No. } & 4.86e+09 \\\\
\\bottomrule
\\end{tabular}
%\\caption{OLS Regression Results}
\\end{center}"""
assert_equal(table, expected)
class TestRegularizedFit(object):
# Make sure there are no issues when there are no selected
# variables.
def test_empty_model(self):
np.random.seed(742)
n = 100
endog = np.random.normal(size=n)
exog = np.random.normal(size=(n, 3))
model = OLS(endog, exog)
result = model.fit_regularized(alpha=1000)
assert_equal(result.params, 0.)
assert_equal(result.bse, 0.)
def test_regularized(self):
import os
from . import glmnet_r_results
cur_dir = os.path.dirname(os.path.abspath(__file__))
data = np.loadtxt(os.path.join(cur_dir, "results", "lasso_data.csv"),
delimiter=",")
tests = [x for x in dir(glmnet_r_results) if x.startswith("rslt_")]
for test in tests:
vec = getattr(glmnet_r_results, test)
n = vec[0]
p = vec[1]
L1_wt = float(vec[2])
lam = float(vec[3])
params = vec[4:].astype(np.float64)
endog = data[0:int(n), 0]
exog = data[0:int(n), 1:(int(p)+1)]
endog = endog - endog.mean()
endog /= endog.std(ddof=1)
exog = exog - exog.mean(0)
exog /= exog.std(0, ddof=1)
mod = OLS(endog, exog)
rslt = mod.fit_regularized(L1_wt=L1_wt, alpha=lam)
assert_almost_equal(rslt.params, params, decimal=3)
# Smoke test for summary
smry = rslt.summary()
def test_formula_missing_cat():
# gh-805
import statsmodels.api as sm
from statsmodels.formula.api import ols
from patsy import PatsyError
dta = sm.datasets.grunfeld.load_pandas().data
dta.ix[0, 'firm'] = np.nan
mod = ols(formula='value ~ invest + capital + firm + year',
data=dta.dropna())
res = mod.fit()
mod2 = ols(formula='value ~ invest + capital + firm + year',
data=dta)
res2 = mod2.fit()
assert_almost_equal(res.params.values, res2.params.values)
assert_raises(PatsyError, ols, 'value ~ invest + capital + firm + year',
data=dta, missing='raise')
def test_missing_formula_predict():
# see 2171
nsample = 30
data = pandas.DataFrame({'x': np.linspace(0, 10, nsample)})
null = pandas.DataFrame({'x': np.array([np.nan])})
data = pandas.concat([data, null])
beta = np.array([1, 0.1])
e = np.random.normal(size=nsample+1)
data['y'] = beta[0] + beta[1] * data['x'] + e
model = OLS.from_formula('y ~ x', data=data)
fit = model.fit()
pred = fit.predict(exog=data[:-1])
def test_fvalue_implicit_constant():
nobs = 100
np.random.seed(2)
x = np.random.randn(nobs, 1)
x = ((x > 0) == [True, False]).astype(int)
y = x.sum(1) + np.random.randn(nobs)
w = 1 + 0.25 * np.random.rand(nobs)
from statsmodels.regression.linear_model import OLS, WLS
res = OLS(y, x).fit(cov_type='HC1')
assert_(np.isnan(res.fvalue))
assert_(np.isnan(res.f_pvalue))
res.summary()
res = WLS(y, x).fit(cov_type='HC1')
assert_(np.isnan(res.fvalue))
assert_(np.isnan(res.f_pvalue))
res.summary()
if __name__=="__main__":
import nose
# run_module_suite()
nose.runmodule(argv=[__file__,'-vvs','-x','--pdb', '--pdb-failure'],
exit=False)
# nose.runmodule(argv=[__file__,'-vvs','-x'], exit=False) #, '--pdb'
|
bsd-3-clause
|
blab/antibody-response-pulse
|
bcell-array/code/Virus_Memory_Naive_Antibody_model.py
|
1
|
14663
|
# -*- coding: utf-8 -*-
# <nbformat>3.0</nbformat>
# <markdowncell>
# # Antibody Response Pulse
# https://github.com/blab/antibody-response-pulse
#
# ### B-cells evolution --- cross-reactive antibody response after influenza virus infection or vaccination
# ### Adaptive immune response for repeated infection
# <codecell>
'''
author: Alvason Zhenhua Li
date: 04/09/2015
'''
%matplotlib inline
import numpy as np
import matplotlib.pyplot as plt
AlvaFontSize = 23
AlvaFigSize = (14, 6)
numberingFig = 0
numberingFig = numberingFig + 1;
plt.figure(numberingFig, figsize=(12,6))
plt.axis('off')
plt.title(r'$ Virus-IgM-IgG-Antibody \ response \ equations $'
, fontsize = AlvaFontSize)
plt.text(0, 7.0/9,r'$ \frac{\partial V_n(t)}{\partial t} = \
+\rho V_n(t)(1 - \frac{V_n(t)}{V_{max}}) - (\phi_{mv} + \phi_{gv}) A_{n}(t)V_{n}(t) $'
, fontsize = 1.2*AlvaFontSize)
plt.text(0, 3.0/9,r'$ \frac{\partial M_n(t)}{\partial t} = \
+\mu_M + \alpha_M V_n(t)M_{n}(t) - \mu_G G_{n}(t) $'
, fontsize = 1.2*AlvaFontSize)
plt.text(0, 5.0/9,r'$ \frac{\partial G_n(t)}{\partial t} = \
+\mu_g + (\alpha_{gn} + \alpha_{bm}) V_{n}(t)N_{n}(t)B_{n}(t) - \mu_b B_n(t) $'
, fontsize = 1.2*AlvaFontSize)
plt.text(0, 1.0/9,r'$ \frac{\partial A_n(t)}{\partial t} = \
+\mu_a B_{n}(t) - (\phi_{ma} + \phi_{ga})A_{n}(t)V_{n}(t) - (\mu_{ma} + \mu_{ga})A_{n}(t) $'
, fontsize = 1.2*AlvaFontSize)
plt.show()
# <codecell>
'''
author: Alvason Zhenhua Li
date: 04/09/2015
'''
%matplotlib inline
import numpy as np
import matplotlib.pyplot as plt
AlvaFontSize = 23
AlvaFigSize = (14, 6)
numberingFig = 0
numberingFig = numberingFig + 1;
plt.figure(numberingFig, figsize=(12,6))
plt.axis('off')
plt.title(r'$ Virus-Memory-Naive-Antibody \ response \ equations $'
, fontsize = AlvaFontSize)
plt.text(0, 5.0/6,r'$ \frac{\partial V_n(t)}{\partial t} = \
+\rho V_n(t)(1 - \frac{V_n(t)}{V_{max}}) - (\phi_{mv} + \phi_{gv}) A_{n}(t)V_{n}(t) $'
, fontsize = 1.2*AlvaFontSize)
plt.text(0, 4.0/6,r'$ \frac{\partial B_n(t)}{\partial t} = \
+\mu_b + (\alpha_{bn} + \alpha_{bm}) V_{n}(t)C_{n}(t)B_{n}(t) - \mu_b B_n(t) $'
, fontsize = 1.2*AlvaFontSize)
plt.text(0, 3.0/6,r'$ \frac{\partial C_n(t)}{\partial t} = \
+\mu_c + \alpha_c V_n(t)C_{n}(t) - \mu_c C_{n}(t) $'
, fontsize = 1.2*AlvaFontSize)
plt.text(0, 2.0/6,r'$ \frac{\partial A_n(t)}{\partial t} = \
+\mu_a B_{n}(t) - (\phi_{ma} + \phi_{ga})A_{n}(t)V_{n}(t) - (\mu_{ma} + \mu_{ga})A_{n}(t) $'
, fontsize = 1.2*AlvaFontSize)
plt.show()
# define the partial differential equations
def dAdt_array(ABCVxt = [], *args):
# naming
A = ABCVxt[0]
B = ABCVxt[1]
C = ABCVxt[2]
V = ABCVxt[3]
x_totalPoint = ABCVxt.shape[1]
    # one dA/dt per x grid point
dA_dt_array = np.zeros(x_totalPoint)
    # each dA/dt has the same equation form
for xn in range(x_totalPoint):
dA_dt_array[xn] = +inRateA*B[xn] - (outRateAmV + outRateAgV)*A[xn]*V[xn] - (outRateAm + outRateAg)*A[xn]
return(dA_dt_array)
def dBdt_array(ABCVxt = [], *args):
# naming
A = ABCVxt[0]
B = ABCVxt[1]
C = ABCVxt[2]
V = ABCVxt[3]
x_totalPoint = ABCVxt.shape[1]
    # one dB/dt per x grid point
dB_dt_array = np.zeros(x_totalPoint)
    # each dB/dt has the same equation form
for xn in range(x_totalPoint):
dB_dt_array[xn] = +inOutRateB + (actRateB_naive + actRateB_memory)*V[xn]*C[xn]*B[xn] - inOutRateB*B[xn]
return(dB_dt_array)
def dCdt_array(ABCVxt = [], *args):
# naming
A = ABCVxt[0]
B = ABCVxt[1]
C = ABCVxt[2]
V = ABCVxt[3]
x_totalPoint = ABCVxt.shape[1]
    # one dC/dt per x grid point
dC_dt_array = np.zeros(x_totalPoint)
    # each dC/dt has the same equation form
for xn in range(x_totalPoint):
dC_dt_array[xn] = +inOutRateC + actRateC*V[xn]*C[xn] - inOutRateC*C[xn]
return(dC_dt_array)
def dVdt_array(ABCVxt = [], *args):
# naming
A = ABCVxt[0]
B = ABCVxt[1]
C = ABCVxt[2]
V = ABCVxt[3]
x_totalPoint = ABCVxt.shape[1]
    # one dV/dt per x grid point
dV_dt_array = np.zeros(x_totalPoint)
    # each dV/dt has the same equation form
for xn in range(x_totalPoint):
dV_dt_array[xn] = +inRateV*V[xn]*(1 - V[xn]/totalV) - (outRateVg + outRateVm)*A[xn]*V[xn]
return(dV_dt_array)
# define RK4 for an array (3, n) of coupled differential equations
def AlvaRungeKutta4ArrayXT(pde_array, startingOut_Value, minX_In, maxX_In, totalGPoint_X, minT_In, maxT_In, totalGPoint_T):
global actRateB_memory
# primary size of pde equations
outWay = pde_array.shape[0]
# initialize the whole memory-space for output and input
    inWay = 1; # one layer is enough for storing "x" and "t" (only two lists of variables)
# define the first part of array as output memory-space
gridOutIn_array = np.zeros([outWay + inWay, totalGPoint_X, totalGPoint_T])
# loading starting output values
for i in range(outWay):
gridOutIn_array[i, :, :] = startingOut_Value[i, :, :]
# griding input X value
gridingInput_X = np.linspace(minX_In, maxX_In, num = totalGPoint_X, retstep = True)
# loading input values to (define the final array as input memory-space)
gridOutIn_array[-inWay, :, 0] = gridingInput_X[0]
# step-size (increment of input X)
dx = gridingInput_X[1]
# griding input T value
gridingInput_T = np.linspace(minT_In, maxT_In, num = totalGPoint_T, retstep = True)
# loading input values to (define the final array as input memory-space)
gridOutIn_array[-inWay, 0, :] = gridingInput_T[0]
# step-size (increment of input T)
dt = gridingInput_T[1]
# starting
# initialize the memory-space for local try-step
dydt1_array = np.zeros([outWay, totalGPoint_X])
dydt2_array = np.zeros([outWay, totalGPoint_X])
dydt3_array = np.zeros([outWay, totalGPoint_X])
dydt4_array = np.zeros([outWay, totalGPoint_X])
# initialize the memory-space for keeping current value
currentOut_Value = np.zeros([outWay, totalGPoint_X])
for tn in range(totalGPoint_T - 1):
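        # model-specific events inside the time loop: memory B-cell activation
        # switches on after 1/6 of the simulated span, the virus load is
        # clamped to zero once it falls below 1, and new infections are seeded
        # at 1/4, 2/4 and 3/4 of the span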
actRateB_memory = 0
if tn > totalGPoint_T*(1.0/6):
actRateB_memory = float(0.01)*24
# setting virus1 = 0 if virus1 < 1
if gridOutIn_array[3, 0, tn] < 1.0:
gridOutIn_array[3, 0, tn] = 0.0
## 2nd infection
if tn == int(totalGPoint_T*1.0/4):
gridOutIn_array[3, 0, tn] = 1.0 # virus infection
### 3rd infection
if tn == int(totalGPoint_T*2.0/4):
gridOutIn_array[3, 0, tn] = 1.0 # virus infection
        ### 4th infection
if tn == int(totalGPoint_T*3.0/4):
gridOutIn_array[3, 0, tn] = 1.0 # virus infection
# keep initial value at the moment of tn
currentOut_Value[:, :] = np.copy(gridOutIn_array[:-inWay, :, tn])
currentIn_T_Value = np.copy(gridOutIn_array[-inWay, 0, tn])
# first try-step
for i in range(outWay):
for xn in range(totalGPoint_X):
dydt1_array[i, xn] = pde_array[i](gridOutIn_array[:, :, tn])[xn] # computing ratio
gridOutIn_array[:-inWay, :, tn] = currentOut_Value[:, :] + dydt1_array[:, :]*dt/2 # update output
gridOutIn_array[-inWay, 0, tn] = currentIn_T_Value + dt/2 # update input
        # second try-step (evaluated at the half step)
for i in range(outWay):
for xn in range(totalGPoint_X):
dydt2_array[i, xn] = pde_array[i](gridOutIn_array[:, :, tn])[xn] # computing ratio
gridOutIn_array[:-inWay, :, tn] = currentOut_Value[:, :] + dydt2_array[:, :]*dt/2 # update output
gridOutIn_array[-inWay, 0, tn] = currentIn_T_Value + dt/2 # update input
        # third try-step (also at the half step; the state is then advanced a full step)
for i in range(outWay):
for xn in range(totalGPoint_X):
dydt3_array[i, xn] = pde_array[i](gridOutIn_array[:, :, tn])[xn] # computing ratio
gridOutIn_array[:-inWay, :, tn] = currentOut_Value[:, :] + dydt3_array[:, :]*dt # update output
gridOutIn_array[-inWay, 0, tn] = currentIn_T_Value + dt # update input
# fourth try-step
for i in range(outWay):
for xn in range(totalGPoint_X):
dydt4_array[i, xn] = pde_array[i](gridOutIn_array[:, :, tn])[xn] # computing ratio
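        # classic RK4 combination: y_{n+1} = y_n + dt*(k1 + 2*k2 + 2*k3 + k4)/6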
# solid step (update the next output) by accumulate all the try-steps with proper adjustment
gridOutIn_array[:-inWay, :, tn + 1] = currentOut_Value[:, :] + dt*(dydt1_array[:, :]/6
+ dydt2_array[:, :]/3
+ dydt3_array[:, :]/3
+ dydt4_array[:, :]/6)
# restore to initial value
gridOutIn_array[:-inWay, :, tn] = np.copy(currentOut_Value[:, :])
gridOutIn_array[-inWay, 0, tn] = np.copy(currentIn_T_Value)
# end of loop
return (gridOutIn_array[:-inWay, :])
# <codecell>
# setting parameter
timeUnit = 'day'
if timeUnit == 'hour':
hour = float(1); day = float(24);
elif timeUnit == 'day':
day = float(1); hour = float(1)/24;
inRateA = float(0.3)/hour # growth rate of antibody from B-cell (secretion)
outRateAm = float(0.0014)/hour # out rate of Antibody IgM
outRateAg = float(0.048)/hour # out rate of Antibody IgG
outRateAmV = float(4.2*10**(-5))/hour # antibody IgM clearance rate by virus
outRateAgV = float(1.67*10**(-4))/hour # antibody IgG clearance rate by virus
inOutRateB = float(0.007)/hour # birth rate of B-cell
actRateB_naive = float(6.0*10**(-7))/hour # activation rate of naive B-cell
#actRateB_memory = 0*float(0.0012)/hour # activation rate of memory B-cell
inOutRateC = float(0.017)/hour # birth rate of CD4 T-cell
actRateC = float(7.0*10**(-6))/hour # activation rate of CD4 T-cell
totalV = float(5000) # total virion/micro-liter
inRateV = float(0.16)/hour # intrinsic growth rate/hour
outRateVm = float(1.67*10**(-4))/hour # virion clearance rate by IgM
outRateVg = float(6.68*10**(-4))/hour # virion clearance rate by IgG
# time boundary and griding condition
minT = float(0); maxT = float(80*day);
totalGPoint_T = int(2*10**4 + 1);
gridT = np.linspace(minT, maxT, totalGPoint_T);
spacingT = np.linspace(minT, maxT, num = totalGPoint_T, retstep = True)
gridT = spacingT[0]
dt = spacingT[1]
# space boundary and griding condition
minX = float(0); maxX = float(1);
totalGPoint_X = int(1 + 1);
gridX = np.linspace(minX, maxX, totalGPoint_X);
gridingX = np.linspace(minX, maxX, num = totalGPoint_X, retstep = True)
gridX = gridingX[0]
dx = gridingX[1]
gridA_array = np.zeros([totalGPoint_X, totalGPoint_T])
gridB_array = np.zeros([totalGPoint_X, totalGPoint_T])
gridC_array = np.zeros([totalGPoint_X, totalGPoint_T])
gridV_array = np.zeros([totalGPoint_X, totalGPoint_T])
# initial output condition
gridA_array[:, 0] = float(0)
gridB_array[:, 0] = float(0)
gridC_array[0, 0] = float(0)
gridV_array[0, 0] = float(totalV)/10**3
# Runge Kutta numerical solution
pde_array = np.array([dAdt_array, dBdt_array, dCdt_array, dVdt_array])
startingOut_Value = np.array([gridA_array, gridB_array, gridC_array, gridV_array])
gridOut_array = AlvaRungeKutta4ArrayXT(pde_array, startingOut_Value, minX, maxX, totalGPoint_X, minT, maxT, totalGPoint_T)
# plotting
gridA = gridOut_array[0]
gridB = gridOut_array[1]
gridC = gridOut_array[2]
gridV = gridOut_array[3]
numberingFig = numberingFig + 1;
for i in range(1):
plt.figure(numberingFig, figsize = AlvaFigSize)
plt.plot(gridT, gridA[i], color = 'green', label = r'$ A_{%i}(t) $'%(i))
plt.plot(gridT, gridB[i], color = 'blue', label = r'$ B_{%i}(t) $'%(i))
plt.plot(gridT, gridC[i], color = 'gray', label = r'$ C_{%i}(t) $'%(i))
plt.plot(gridT, gridV[i], color = 'red', label = r'$ V_{%i}(t) $'%(i))
plt.grid(True)
plt.title(r'$ Antibody-Bcell-Tcell-Virus \ (immune \ response \ for \ primary-infection) $', fontsize = AlvaFontSize)
plt.xlabel(r'$time \ (%s)$'%(timeUnit), fontsize = AlvaFontSize);
plt.ylabel(r'$ Cells/ \mu L $', fontsize = AlvaFontSize);
plt.text(maxT, totalV*6.0/6, r'$ \Omega = %f $'%(totalV), fontsize = AlvaFontSize)
plt.text(maxT, totalV*5.0/6, r'$ \phi = %f $'%(inRateV), fontsize = AlvaFontSize)
plt.text(maxT, totalV*4.0/6, r'$ \xi = %f $'%(inRateA), fontsize = AlvaFontSize)
plt.text(maxT, totalV*3.0/6, r'$ \mu_b = %f $'%(inOutRateB), fontsize = AlvaFontSize)
plt.legend(loc = (1,0))
# plt.yscale('log')
plt.show()
# <codecell>
# plotting
gridA = gridOut_array[0]
gridB = gridOut_array[1]
gridC = gridOut_array[2]
gridV = gridOut_array[3]
numberingFig = numberingFig + 1;
for i in range(1):
plt.figure(numberingFig, figsize = AlvaFigSize)
plt.plot(gridT, gridA[i], color = 'green', label = r'$ A_{%i}(t) $'%(i))
plt.plot(gridT, gridB[i], color = 'blue', label = r'$ B_{%i}(t) $'%(i))
plt.plot(gridT, gridC[i], color = 'gray', label = r'$ C_{%i}(t) $'%(i))
plt.plot(gridT, gridV[i], color = 'red', label = r'$ V_{%i}(t) $'%(i))
plt.grid(True)
plt.title(r'$ Antibody-Bcell-Tcell-Virus \ (immune \ response \ for \ repeated-infection) $', fontsize = AlvaFontSize)
plt.xlabel(r'$time \ (%s)$'%(timeUnit), fontsize = AlvaFontSize);
plt.ylabel(r'$ Cells/ \mu L $', fontsize = AlvaFontSize);
plt.text(maxT, totalV*6.0/6, r'$ \Omega = %f $'%(totalV), fontsize = AlvaFontSize)
plt.text(maxT, totalV*5.0/6, r'$ \phi = %f $'%(inRateV), fontsize = AlvaFontSize)
plt.text(maxT, totalV*4.0/6, r'$ \xi = %f $'%(inRateA), fontsize = AlvaFontSize)
plt.text(maxT, totalV*3.0/6, r'$ \mu_b = %f $'%(inOutRateB), fontsize = AlvaFontSize)
plt.legend(loc = (1,0))
plt.ylim([2**0, 2**14])
plt.yscale('log', basey = 2)
plt.show()
numberingFig = numberingFig + 1;
for i in range(1):
plt.figure(numberingFig, figsize = AlvaFigSize)
plt.plot(gridT, gridA[i], color = 'green', label = r'$ A_{%i}(t) $'%(i))
plt.plot(gridT, gridB[i], color = 'blue', label = r'$ B_{%i}(t) $'%(i))
plt.plot(gridT, gridC[i], color = 'gray', label = r'$ C_{%i}(t) $'%(i))
plt.plot(gridT, gridV[i], color = 'red', label = r'$ V_{%i}(t) $'%(i))
plt.grid(True)
plt.title(r'$ Antibody-Virus \ (immune \ response \ for \ repeated-infection) $', fontsize = AlvaFontSize)
plt.xlabel(r'$time \ (%s)$'%(timeUnit), fontsize = AlvaFontSize);
plt.ylabel(r'$ Cells/ \mu L $', fontsize = AlvaFontSize);
plt.legend(loc = (1,0))
plt.ylim([-1000, 10000])
plt.show()
# <codecell>
|
gpl-2.0
|
thientu/scikit-learn
|
examples/calibration/plot_compare_calibration.py
|
241
|
5008
|
"""
========================================
Comparison of Calibration of Classifiers
========================================
Well calibrated classifiers are probabilistic classifiers for which the output
of the predict_proba method can be directly interpreted as a confidence level.
For instance a well calibrated (binary) classifier should classify the samples
such that among the samples to which it gave a predict_proba value close to
0.8, approx. 80% actually belong to the positive class.
LogisticRegression returns well calibrated predictions as it directly
optimizes log-loss. In contrast, the other methods return biased probabilities,
with different biases per method:
* GaussianNaiveBayes tends to push probabilities to 0 or 1 (note the counts in
the histograms). This is mainly because it makes the assumption that features
are conditionally independent given the class, which is not the case in this
dataset which contains 2 redundant features.
* RandomForestClassifier shows the opposite behavior: the histograms show
peaks at approx. 0.2 and 0.9 probability, while probabilities close to 0 or 1
are very rare. An explanation for this is given by Niculescu-Mizil and Caruana
[1]: "Methods such as bagging and random forests that average predictions from
a base set of models can have difficulty making predictions near 0 and 1
because variance in the underlying base models will bias predictions that
should be near zero or one away from these values. Because predictions are
restricted to the interval [0,1], errors caused by variance tend to be one-
sided near zero and one. For example, if a model should predict p = 0 for a
case, the only way bagging can achieve this is if all bagged trees predict
zero. If we add noise to the trees that bagging is averaging over, this noise
will cause some trees to predict values larger than 0 for this case, thus
moving the average prediction of the bagged ensemble away from 0. We observe
this effect most strongly with random forests because the base-level trees
trained with random forests have relatively high variance due to feature
subseting." As a result, the calibration curve shows a characteristic sigmoid
shape, indicating that the classifier could trust its "intuition" more and
return probabilties closer to 0 or 1 typically.
* Support Vector Classification (SVC) shows an even more sigmoid curve as
the RandomForestClassifier, which is typical for maximum-margin methods
(compare Niculescu-Mizil and Caruana [1]), which focus on hard samples
that are close to the decision boundary (the support vectors).
.. topic:: References:
.. [1] Predicting Good Probabilities with Supervised Learning,
A. Niculescu-Mizil & R. Caruana, ICML 2005
"""
print(__doc__)
# Author: Jan Hendrik Metzen <jhm@informatik.uni-bremen.de>
# License: BSD Style.
import numpy as np
np.random.seed(0)
import matplotlib.pyplot as plt
from sklearn import datasets
from sklearn.naive_bayes import GaussianNB
from sklearn.linear_model import LogisticRegression
from sklearn.ensemble import RandomForestClassifier
from sklearn.svm import LinearSVC
from sklearn.calibration import calibration_curve
X, y = datasets.make_classification(n_samples=100000, n_features=20,
n_informative=2, n_redundant=2)
train_samples = 100 # Samples used for training the models
X_train = X[:train_samples]
X_test = X[train_samples:]
y_train = y[:train_samples]
y_test = y[train_samples:]
# Create classifiers
lr = LogisticRegression()
gnb = GaussianNB()
svc = LinearSVC(C=1.0)
rfc = RandomForestClassifier(n_estimators=100)
###############################################################################
# Plot calibration plots
plt.figure(figsize=(10, 10))
ax1 = plt.subplot2grid((3, 1), (0, 0), rowspan=2)
ax2 = plt.subplot2grid((3, 1), (2, 0))
ax1.plot([0, 1], [0, 1], "k:", label="Perfectly calibrated")
for clf, name in [(lr, 'Logistic'),
(gnb, 'Naive Bayes'),
(svc, 'Support Vector Classification'),
(rfc, 'Random Forest')]:
clf.fit(X_train, y_train)
if hasattr(clf, "predict_proba"):
prob_pos = clf.predict_proba(X_test)[:, 1]
else: # use decision function
prob_pos = clf.decision_function(X_test)
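        # decision_function output is not a probability; min-max scale it to
        # [0, 1] so it can be binned on the same axis as predict_proba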
prob_pos = \
(prob_pos - prob_pos.min()) / (prob_pos.max() - prob_pos.min())
fraction_of_positives, mean_predicted_value = \
calibration_curve(y_test, prob_pos, n_bins=10)
ax1.plot(mean_predicted_value, fraction_of_positives, "s-",
label="%s" % (name, ))
ax2.hist(prob_pos, range=(0, 1), bins=10, label=name,
histtype="step", lw=2)
ax1.set_ylabel("Fraction of positives")
ax1.set_ylim([-0.05, 1.05])
ax1.legend(loc="lower right")
ax1.set_title('Calibration plots (reliability curve)')
ax2.set_xlabel("Mean predicted value")
ax2.set_ylabel("Count")
ax2.legend(loc="upper center", ncol=2)
plt.tight_layout()
plt.show()
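###############################################################################
# Follow-up sketch (not part of the original example): the sigmoid-shaped
# reliability curves above can usually be corrected with Platt scaling.
# A minimal, hedged illustration using CalibratedClassifierCV on the same
# train/test split (assumes a scikit-learn version that provides this class):
from sklearn.calibration import CalibratedClassifierCV

calibrated_svc = CalibratedClassifierCV(LinearSVC(C=1.0), method='sigmoid', cv=3)
calibrated_svc.fit(X_train, y_train)
prob_pos_cal = calibrated_svc.predict_proba(X_test)[:, 1]
frac_pos_cal, mean_pred_cal = calibration_curve(y_test, prob_pos_cal, n_bins=10)
print("Calibrated SVC reliability points (mean predicted value, fraction of positives):")
print(np.c_[mean_pred_cal, frac_pos_cal])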
|
bsd-3-clause
|
dschien/PyExcelModelingHelper
|
tests/test_growth_coefficients.py
|
1
|
7189
|
import unittest
from datetime import datetime
import numpy as np
import pandas as pd
from excel_helper import ParameterRepository, ExcelParameterLoader, Parameter
class MyTestCase(unittest.TestCase):
def test_negative_growth(self):
"""
If start and end are one month apart, we expect an array of one row of ones of sample size for the ref month
and one row with CAGR applied
:return:
"""
# samples = 3
# alpha = -0.1 # 100 percent p.a.
# ref_date = date(2009, 1, 1)
# start_date = date(2009, 1, 1)
# end_date = date(2010, 1, 1)
# a = growth_coefficients(start_date, end_date, ref_date, alpha, samples)
# print(a)
# assert np.all(a[0] == np.ones((samples, 1)))
# assert np.all(a[-1] == np.ones((samples, 1)) * 1 + alpha)
ref_date = datetime(2014, 1, 1)
ref_value = 9e-6
p = Parameter('test', module_name='numpy.random', distribution_name='normal',
param_a=ref_value, param_b=9e-8, cagr=-0.2, ref_date=ref_date)
date_values = [(datetime(2012, 1, 1), 1.30E-05), (datetime(2015, 1, 1), 7.20E-06),
(datetime(2016, 1, 1), 5.76E-06), (datetime(2017, 1, 1), 4.61E-06)]
settings = {
'use_time_series': True,
'times': pd.date_range(date_values[0][0], '2017-01-01', freq='MS'),
'sample_size': 1,
'sample_mean_value': True
}
a = p(settings)
a.mean(level='time').to_csv('check_cagr.csv')
print(a.mean(level='time'))
assert (a > 0).all()
for date_value in date_values:
assert np.all(np.abs(a.loc[date_value[0]] - date_value[1]) < 0.00001)
assert np.all(a.loc[ref_date] == ref_value)
# january = res.loc[[datetime(2009, 1, 1)]]
# assert np.all(np.equal(january, np.ones(january.shape)))
#
# april = res.loc[[datetime(2009, 4, 1)]]
# diff = april - np.ones(april.shape) * pow(1.1, 3. / 12)
#
# assert np.all(np.less(diff, np.ones(april.shape) * 0.00001))
#
# assert np.abs(res.loc[[datetime(2009, 4, 1)]][0] - pow(1.1, 3. / 12)) < 0.00001
def test_ref_date_in_middle(self):
"""
        Smoke test: cagr=1 with ref_date 2016-01-01 over a monthly 2016 time
        series; prints the sampled mean per time step.
:return:
"""
ref_date = datetime(2016, 1, 1)
ref_value = 1.
p = Parameter('test', module_name='numpy.random', distribution_name='normal',
param_a=ref_value, param_b=ref_value / 10, cagr=1, ref_date=ref_date)
settings = {
'use_time_series': True,
'times': pd.date_range(datetime(2016, 1, 1), '2016-12-31', freq='MS'),
'sample_size': 2,
'sample_mean_value': True
}
a = p(settings)
a.mean(level='time') # .to_csv('check_cagr.csv')
print(a.mean(level='time'))
def test_ref_date_in_month_two(self):
"""
        Smoke test: cagr=0 with ref_date 2016-02-01 (the second month of the
        monthly 2016 series); prints the sampled mean per time step.
:return:
"""
ref_date = datetime(2016, 2, 1)
ref_value = 1.
p = Parameter('test', module_name='numpy.random', distribution_name='normal',
param_a=ref_value, param_b=ref_value / 10, cagr=0, ref_date=ref_date)
settings = {
'use_time_series': True,
'times': pd.date_range(datetime(2016, 1, 1), '2016-12-31', freq='MS'),
'sample_size': 2,
'sample_mean_value': True
}
a = p(settings)
a.mean(level='time') # .to_csv('check_cagr.csv')
print(a.mean(level='time'))
def test_ref_date_at_start(self):
"""
        Smoke test: cagr=0 with the reference date at the start of the monthly
        2016 series; prints the sampled mean per time step.
:return:
"""
ref_date = datetime(2016, 1, 1)
ref_value = 1.
p = Parameter('test', module_name='numpy.random', distribution_name='normal',
param_a=ref_value, param_b=ref_value / 10, cagr=0, ref_date=ref_date)
settings = {
'use_time_series': True,
'times': pd.date_range(datetime(2016, 1, 1), '2016-12-31', freq='MS'),
'sample_size': 2,
'sample_mean_value': True
}
a = p(settings)
a.mean(level='time') # .to_csv('check_cagr.csv')
print(a.mean(level='time'))
def test_ref_date_at_end(self):
"""
        Smoke test: cagr=0 with the reference date at the end of the monthly
        2016 series; prints the sampled mean per time step.
:return:
"""
ref_date = datetime(2016, 12, 31)
ref_value = 1.
p = Parameter('test', module_name='numpy.random', distribution_name='normal',
param_a=ref_value, param_b=ref_value / 10, cagr=0, ref_date=ref_date)
settings = {
'use_time_series': True,
'times': pd.date_range(datetime(2016, 1, 1), '2016-12-31', freq='MS'),
'sample_size': 2,
'sample_mean_value': True
}
a = p(settings)
a.mean(level='time') # .to_csv('check_cagr.csv')
print(a.mean(level='time'))
def test_ref_date_before(self):
"""
        Smoke test: cagr=0.1 with a reference date before the start of the
        monthly 2016 series; prints the sampled mean per time step.
:return:
"""
ref_date = datetime(2015, 6, 30)
ref_value = 1.
p = Parameter('test', module_name='numpy.random', distribution_name='normal',
param_a=ref_value, param_b=ref_value / 10, cagr=0.1, ref_date=ref_date)
settings = {
'use_time_series': True,
'times': pd.date_range(datetime(2016, 1, 1), '2016-12-31', freq='MS'),
'sample_size': 2,
'sample_mean_value': True
}
a = p(settings)
a.mean(level='time') # .to_csv('check_cagr.csv')
print(a.mean(level='time'))
def test_ref_date_after(self):
"""
        Smoke test: cagr=0.1 with a reference date after the end of the
        monthly 2016 series; prints the sampled mean per time step.
:return:
"""
ref_date = datetime(2017, 6, 30)
ref_value = 1.
p = Parameter('test', module_name='numpy.random', distribution_name='normal',
param_a=ref_value, param_b=ref_value / 10, cagr=.1, ref_date=ref_date)
settings = {
'use_time_series': True,
'times': pd.date_range(datetime(2016, 1, 1), '2016-12-31', freq='MS'),
'sample_size': 2,
'sample_mean_value': True
}
a = p(settings)
a.mean(level='time') # .to_csv('check_cagr.csv')
print(a.mean(level='time'))
if __name__ == '__main__':
unittest.main()
|
mit
|
YinongLong/scikit-learn
|
sklearn/feature_selection/rfe.py
|
10
|
16481
|
# Authors: Alexandre Gramfort <alexandre.gramfort@inria.fr>
# Vincent Michel <vincent.michel@inria.fr>
# Gilles Louppe <g.louppe@gmail.com>
#
# License: BSD 3 clause
"""Recursive feature elimination for feature ranking"""
import numpy as np
from ..utils import check_X_y, safe_sqr
from ..utils.metaestimators import if_delegate_has_method
from ..base import BaseEstimator
from ..base import MetaEstimatorMixin
from ..base import clone
from ..base import is_classifier
from ..externals.joblib import Parallel, delayed
from ..model_selection import check_cv
from ..model_selection._validation import _safe_split, _score
from ..metrics.scorer import check_scoring
from .base import SelectorMixin
def _rfe_single_fit(rfe, estimator, X, y, train, test, scorer):
"""
Return the score for a fit across one fold.
"""
X_train, y_train = _safe_split(estimator, X, y, train)
X_test, y_test = _safe_split(estimator, X, y, test, train)
return rfe._fit(
X_train, y_train, lambda estimator, features:
_score(estimator, X_test[:, features], y_test, scorer)).scores_
class RFE(BaseEstimator, MetaEstimatorMixin, SelectorMixin):
"""Feature ranking with recursive feature elimination.
Given an external estimator that assigns weights to features (e.g., the
coefficients of a linear model), the goal of recursive feature elimination
(RFE) is to select features by recursively considering smaller and smaller
sets of features. First, the estimator is trained on the initial set of
features and weights are assigned to each one of them. Then, features whose
absolute weights are the smallest are pruned from the current set features.
That procedure is recursively repeated on the pruned set until the desired
number of features to select is eventually reached.
Read more in the :ref:`User Guide <rfe>`.
Parameters
----------
estimator : object
A supervised learning estimator with a `fit` method that updates a
`coef_` attribute that holds the fitted parameters. Important features
must correspond to high absolute values in the `coef_` array.
For instance, this is the case for most supervised learning
algorithms such as Support Vector Classifiers and Generalized
Linear Models from the `svm` and `linear_model` modules.
n_features_to_select : int or None (default=None)
The number of features to select. If `None`, half of the features
are selected.
step : int or float, optional (default=1)
If greater than or equal to 1, then `step` corresponds to the (integer)
number of features to remove at each iteration.
If within (0.0, 1.0), then `step` corresponds to the percentage
(rounded down) of features to remove at each iteration.
verbose : int, default=0
Controls verbosity of output.
Attributes
----------
n_features_ : int
The number of selected features.
support_ : array of shape [n_features]
The mask of selected features.
ranking_ : array of shape [n_features]
The feature ranking, such that ``ranking_[i]`` corresponds to the
ranking position of the i-th feature. Selected (i.e., estimated
best) features are assigned rank 1.
estimator_ : object
The external estimator fit on the reduced dataset.
Examples
--------
    The following example shows how to retrieve the 5 truly informative
features in the Friedman #1 dataset.
>>> from sklearn.datasets import make_friedman1
>>> from sklearn.feature_selection import RFE
>>> from sklearn.svm import SVR
>>> X, y = make_friedman1(n_samples=50, n_features=10, random_state=0)
>>> estimator = SVR(kernel="linear")
>>> selector = RFE(estimator, 5, step=1)
>>> selector = selector.fit(X, y)
>>> selector.support_ # doctest: +NORMALIZE_WHITESPACE
array([ True, True, True, True, True,
False, False, False, False, False], dtype=bool)
>>> selector.ranking_
array([1, 1, 1, 1, 1, 6, 4, 3, 2, 5])
References
----------
.. [1] Guyon, I., Weston, J., Barnhill, S., & Vapnik, V., "Gene selection
for cancer classification using support vector machines",
Mach. Learn., 46(1-3), 389--422, 2002.
"""
def __init__(self, estimator, n_features_to_select=None, step=1,
verbose=0):
self.estimator = estimator
self.n_features_to_select = n_features_to_select
self.step = step
self.verbose = verbose
@property
def _estimator_type(self):
return self.estimator._estimator_type
def fit(self, X, y):
"""Fit the RFE model and then the underlying estimator on the selected
features.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
The training input samples.
y : array-like, shape = [n_samples]
The target values.
"""
return self._fit(X, y)
def _fit(self, X, y, step_score=None):
X, y = check_X_y(X, y, "csc")
# Initialization
n_features = X.shape[1]
if self.n_features_to_select is None:
n_features_to_select = n_features // 2
else:
n_features_to_select = self.n_features_to_select
if 0.0 < self.step < 1.0:
step = int(max(1, self.step * n_features))
else:
step = int(self.step)
if step <= 0:
raise ValueError("Step must be >0")
support_ = np.ones(n_features, dtype=np.bool)
ranking_ = np.ones(n_features, dtype=np.int)
if step_score:
self.scores_ = []
# Elimination
while np.sum(support_) > n_features_to_select:
# Remaining features
features = np.arange(n_features)[support_]
# Rank the remaining features
estimator = clone(self.estimator)
if self.verbose > 0:
print("Fitting estimator with %d features." % np.sum(support_))
estimator.fit(X[:, features], y)
# Get coefs
if hasattr(estimator, 'coef_'):
coefs = estimator.coef_
elif hasattr(estimator, 'feature_importances_'):
coefs = estimator.feature_importances_
else:
raise RuntimeError('The classifier does not expose '
'"coef_" or "feature_importances_" '
'attributes')
# Get ranks
if coefs.ndim > 1:
ranks = np.argsort(safe_sqr(coefs).sum(axis=0))
else:
ranks = np.argsort(safe_sqr(coefs))
            # for the sparse case, ranks is a matrix
ranks = np.ravel(ranks)
            # Eliminate the worst features
threshold = min(step, np.sum(support_) - n_features_to_select)
# Compute step score on the previous selection iteration
# because 'estimator' must use features
# that have not been eliminated yet
if step_score:
self.scores_.append(step_score(estimator, features))
support_[features[ranks][:threshold]] = False
ranking_[np.logical_not(support_)] += 1
# Set final attributes
features = np.arange(n_features)[support_]
self.estimator_ = clone(self.estimator)
self.estimator_.fit(X[:, features], y)
# Compute step score when only n_features_to_select features left
if step_score:
self.scores_.append(step_score(self.estimator_, features))
self.n_features_ = support_.sum()
self.support_ = support_
self.ranking_ = ranking_
return self
@if_delegate_has_method(delegate='estimator')
def predict(self, X):
"""Reduce X to the selected features and then predict using the
underlying estimator.
Parameters
----------
X : array of shape [n_samples, n_features]
The input samples.
Returns
-------
y : array of shape [n_samples]
The predicted target values.
"""
return self.estimator_.predict(self.transform(X))
@if_delegate_has_method(delegate='estimator')
def score(self, X, y):
"""Reduce X to the selected features and then return the score of the
underlying estimator.
Parameters
----------
X : array of shape [n_samples, n_features]
The input samples.
y : array of shape [n_samples]
The target values.
"""
return self.estimator_.score(self.transform(X), y)
def _get_support_mask(self):
return self.support_
@if_delegate_has_method(delegate='estimator')
def decision_function(self, X):
return self.estimator_.decision_function(self.transform(X))
@if_delegate_has_method(delegate='estimator')
def predict_proba(self, X):
return self.estimator_.predict_proba(self.transform(X))
@if_delegate_has_method(delegate='estimator')
def predict_log_proba(self, X):
return self.estimator_.predict_log_proba(self.transform(X))
class RFECV(RFE, MetaEstimatorMixin):
"""Feature ranking with recursive feature elimination and cross-validated
selection of the best number of features.
Read more in the :ref:`User Guide <rfe>`.
Parameters
----------
estimator : object
A supervised learning estimator with a `fit` method that updates a
`coef_` attribute that holds the fitted parameters. Important features
must correspond to high absolute values in the `coef_` array.
For instance, this is the case for most supervised learning
algorithms such as Support Vector Classifiers and Generalized
Linear Models from the `svm` and `linear_model` modules.
step : int or float, optional (default=1)
If greater than or equal to 1, then `step` corresponds to the (integer)
number of features to remove at each iteration.
If within (0.0, 1.0), then `step` corresponds to the percentage
(rounded down) of features to remove at each iteration.
cv : int, cross-validation generator or an iterable, optional
Determines the cross-validation splitting strategy.
Possible inputs for cv are:
- None, to use the default 3-fold cross-validation,
- integer, to specify the number of folds.
- An object to be used as a cross-validation generator.
- An iterable yielding train/test splits.
For integer/None inputs, if ``y`` is binary or multiclass,
:class:`sklearn.model_selection.StratifiedKFold` is used. If the
estimator is a classifier or if ``y`` is neither binary nor multiclass,
:class:`sklearn.model_selection.KFold` is used.
Refer :ref:`User Guide <cross_validation>` for the various
cross-validation strategies that can be used here.
scoring : string, callable or None, optional, default: None
A string (see model evaluation documentation) or
a scorer callable object / function with signature
``scorer(estimator, X, y)``.
verbose : int, default=0
Controls verbosity of output.
n_jobs : int, default 1
Number of cores to run in parallel while fitting across folds.
Defaults to 1 core. If `n_jobs=-1`, then number of jobs is set
to number of cores.
Attributes
----------
n_features_ : int
The number of selected features with cross-validation.
support_ : array of shape [n_features]
The mask of selected features.
ranking_ : array of shape [n_features]
The feature ranking, such that `ranking_[i]`
corresponds to the ranking
position of the i-th feature.
Selected (i.e., estimated best)
features are assigned rank 1.
grid_scores_ : array of shape [n_subsets_of_features]
The cross-validation scores such that
``grid_scores_[i]`` corresponds to
the CV score of the i-th subset of features.
estimator_ : object
The external estimator fit on the reduced dataset.
Notes
-----
The size of ``grid_scores_`` is equal to ceil((n_features - 1) / step) + 1,
where step is the number of features removed at each iteration.
Examples
--------
    The following example shows how to retrieve the 5 informative features
    (not known a priori) in the Friedman #1 dataset.
>>> from sklearn.datasets import make_friedman1
>>> from sklearn.feature_selection import RFECV
>>> from sklearn.svm import SVR
>>> X, y = make_friedman1(n_samples=50, n_features=10, random_state=0)
>>> estimator = SVR(kernel="linear")
>>> selector = RFECV(estimator, step=1, cv=5)
>>> selector = selector.fit(X, y)
>>> selector.support_ # doctest: +NORMALIZE_WHITESPACE
array([ True, True, True, True, True,
False, False, False, False, False], dtype=bool)
>>> selector.ranking_
array([1, 1, 1, 1, 1, 6, 4, 3, 2, 5])
References
----------
.. [1] Guyon, I., Weston, J., Barnhill, S., & Vapnik, V., "Gene selection
for cancer classification using support vector machines",
Mach. Learn., 46(1-3), 389--422, 2002.
"""
def __init__(self, estimator, step=1, cv=None, scoring=None, verbose=0,
n_jobs=1):
self.estimator = estimator
self.step = step
self.cv = cv
self.scoring = scoring
self.verbose = verbose
self.n_jobs = n_jobs
def fit(self, X, y):
"""Fit the RFE model and automatically tune the number of selected
features.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Training vector, where `n_samples` is the number of samples and
`n_features` is the total number of features.
y : array-like, shape = [n_samples]
Target values (integers for classification, real numbers for
regression).
"""
X, y = check_X_y(X, y, "csr")
# Initialization
cv = check_cv(self.cv, y, is_classifier(self.estimator))
scorer = check_scoring(self.estimator, scoring=self.scoring)
n_features = X.shape[1]
n_features_to_select = 1
rfe = RFE(estimator=self.estimator,
n_features_to_select=n_features_to_select,
step=self.step, verbose=self.verbose - 1)
# Determine the number of subsets of features by fitting across
# the train folds and choosing the "features_to_select" parameter
# that gives the least averaged error across all folds.
# Note that joblib raises a non-picklable error for bound methods
# even if n_jobs is set to 1 with the default multiprocessing
# backend.
# This branching is done so that to
# make sure that user code that sets n_jobs to 1
# and provides bound methods as scorers is not broken with the
# addition of n_jobs parameter in version 0.18.
if self.n_jobs == 1:
parallel, func = list, _rfe_single_fit
else:
parallel, func, = Parallel(n_jobs=self.n_jobs), delayed(_rfe_single_fit)
scores = parallel(
func(rfe, self.estimator, X, y, train, test, scorer)
for train, test in cv.split(X, y))
scores = np.sum(scores, axis=0)
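        # scores[i] sums the fold scores of the model after i elimination
        # steps (keeping roughly n_features - i*step features), so the argmax
        # below selects the best-scoring feature-subset size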
n_features_to_select = max(
n_features - (np.argmax(scores) * self.step),
n_features_to_select)
# Re-execute an elimination with best_k over the whole set
rfe = RFE(estimator=self.estimator,
n_features_to_select=n_features_to_select, step=self.step)
rfe.fit(X, y)
# Set final attributes
self.support_ = rfe.support_
self.n_features_ = rfe.n_features_
self.ranking_ = rfe.ranking_
self.estimator_ = clone(self.estimator)
self.estimator_.fit(self.transform(X), y)
# Fixing a normalization error, n is equal to get_n_splits(X, y) - 1
# here, the scores are normalized by get_n_splits(X, y)
self.grid_scores_ = scores[::-1] / cv.get_n_splits(X, y)
return self
|
bsd-3-clause
|
CKehl/pylearn2
|
pylearn2/sandbox/cuda_convnet/bench.py
|
44
|
3589
|
__authors__ = "Ian Goodfellow"
__copyright__ = "Copyright 2010-2012, Universite de Montreal"
__credits__ = ["Ian Goodfellow"]
__license__ = "3-clause BSD"
__maintainer__ = "LISA Lab"
__email__ = "pylearn-dev@googlegroups"
from pylearn2.testing.skip import skip_if_no_gpu
skip_if_no_gpu()
import numpy as np
from theano.compat.six.moves import xrange
from theano import shared
from pylearn2.sandbox.cuda_convnet.filter_acts import FilterActs
from theano.tensor.nnet.conv import conv2d
from theano import function
import time
import matplotlib.pyplot as plt
def make_funcs(batch_size, rows, cols, channels, filter_rows,
num_filters):
rng = np.random.RandomState([2012,10,9])
filter_cols = filter_rows
base_image_value = rng.uniform(-1., 1., (channels, rows, cols,
batch_size)).astype('float32')
base_filters_value = rng.uniform(-1., 1., (channels, filter_rows,
filter_cols, num_filters)).astype('float32')
images = shared(base_image_value)
filters = shared(base_filters_value, name='filters')
# bench.py should always be run in gpu mode so we should not need a gpu_from_host here
output = FilterActs()(images, filters)
output_shared = shared( output.eval() )
cuda_convnet = function([], updates = { output_shared : output } )
cuda_convnet.name = 'cuda_convnet'
images_bc01v = base_image_value.transpose(3,0,1,2)
filters_bc01v = base_filters_value.transpose(3,0,1,2)
filters_bc01v = filters_bc01v[:,:,::-1,::-1]
images_bc01 = shared(images_bc01v)
filters_bc01 = shared(filters_bc01v)
output_conv2d = conv2d(images_bc01, filters_bc01,
border_mode='valid', image_shape = images_bc01v.shape,
filter_shape = filters_bc01v.shape)
output_conv2d_shared = shared(output_conv2d.eval())
baseline = function([], updates = { output_conv2d_shared : output_conv2d } )
baseline.name = 'baseline'
return cuda_convnet, baseline
def bench(f):
for i in xrange(3):
f()
trials = 10
t1 = time.time()
for i in xrange(trials):
f()
t2 = time.time()
return (t2-t1)/float(trials)
def get_speedup( *args, **kwargs):
cuda_convnet, baseline = make_funcs(*args, **kwargs)
return bench(baseline) / bench(cuda_convnet)
def get_time_per_10k_ex( *args, **kwargs):
cuda_convnet, baseline = make_funcs(*args, **kwargs)
batch_size = kwargs['batch_size']
return 10000 * bench(cuda_convnet) / float(batch_size)
def make_batch_size_plot(yfunc, yname, batch_sizes, rows, cols, channels, filter_rows, num_filters):
speedups = []
for batch_size in batch_sizes:
speedup = yfunc(batch_size = batch_size,
rows = rows,
cols = cols,
channels = channels,
filter_rows = filter_rows,
num_filters = num_filters)
speedups.append(speedup)
plt.plot(batch_sizes, speedups)
plt.title("cuda-convnet benchmark")
plt.xlabel("Batch size")
plt.ylabel(yname)
plt.show()
make_batch_size_plot(get_speedup, "Speedup factor", batch_sizes = [1,2,5,25,32,50,63,64,65,96,100,127,128,129,159,160,161,191,192,193,200,255,256,257],
rows = 32,
cols = 32,
channels = 64,
filter_rows = 7,
num_filters = 64)
"""
make_batch_size_plot(get_time_per_10k_ex, "Time per 10k examples", batch_sizes = [1,2,5,25,32,50,63,64,65,96,100,127,128,129,159,160,161,191,192,193,200,255,256,257],
rows = 32,
cols = 32,
channels = 3,
filter_rows = 5,
num_filters = 64)
"""
|
bsd-3-clause
|
Laurawly/tvm-1
|
tutorials/frontend/from_tflite.py
|
1
|
6325
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
Compile TFLite Models
=====================
**Author**: `Zhao Wu <https://github.com/FrozenGene>`_
This article is an introductory tutorial on deploying TFLite models with Relay.
To get started, the TFLite package needs to be installed as a prerequisite.
.. code-block:: bash
# install tflite
pip install tflite==2.1.0 --user
or you could generate the TFLite package yourself. The steps are as follows:
.. code-block:: bash
# Get the flatc compiler.
# Please refer to https://github.com/google/flatbuffers for details
# and make sure it is properly installed.
flatc --version
# Get the TFLite schema.
wget https://raw.githubusercontent.com/tensorflow/tensorflow/r1.13/tensorflow/lite/schema/schema.fbs
# Generate TFLite package.
flatc --python schema.fbs
# Add current folder (which contains generated tflite module) to PYTHONPATH.
export PYTHONPATH=${PYTHONPATH:+$PYTHONPATH:}$(pwd)
Now please check that the TFLite package is installed successfully: ``python -c "import tflite"``
Below you can find an example of how to compile a TFLite model using TVM.
"""
######################################################################
# Utils for downloading and extracting model archives
# ----------------------------------------------------
import os
def extract(path):
import tarfile
if path.endswith("tgz") or path.endswith("gz"):
dir_path = os.path.dirname(path)
tar = tarfile.open(path)
tar.extractall(path=dir_path)
tar.close()
else:
raise RuntimeError("Could not decompress the file: " + path)
######################################################################
# Load pretrained TFLite model
# ----------------------------
# Load mobilenet V1 TFLite model provided by Google
from tvm.contrib.download import download_testdata
model_url = "http://download.tensorflow.org/models/mobilenet_v1_2018_08_02/mobilenet_v1_1.0_224.tgz"
# Download model tar file and extract it to get mobilenet_v1_1.0_224.tflite
model_path = download_testdata(model_url, "mobilenet_v1_1.0_224.tgz", module=["tf", "official"])
model_dir = os.path.dirname(model_path)
extract(model_path)
# Now we can open mobilenet_v1_1.0_224.tflite
tflite_model_file = os.path.join(model_dir, "mobilenet_v1_1.0_224.tflite")
with open(tflite_model_file, "rb") as f:
    tflite_model_buf = f.read()
# Get TFLite model from buffer
try:
import tflite
tflite_model = tflite.Model.GetRootAsModel(tflite_model_buf, 0)
except AttributeError:
import tflite.Model
tflite_model = tflite.Model.Model.GetRootAsModel(tflite_model_buf, 0)
######################################################################
# Load a test image
# -----------------
# A single cat dominates the examples!
from PIL import Image
from matplotlib import pyplot as plt
import numpy as np
image_url = "https://github.com/dmlc/mxnet.js/blob/main/data/cat.png?raw=true"
image_path = download_testdata(image_url, "cat.png", module="data")
resized_image = Image.open(image_path).resize((224, 224))
plt.imshow(resized_image)
plt.show()
image_data = np.asarray(resized_image).astype("float32")
# Add a dimension to the image so that we have NHWC format layout
image_data = np.expand_dims(image_data, axis=0)
# Preprocess image as described here:
# https://github.com/tensorflow/models/blob/edb6ed22a801665946c63d650ab9a0b23d98e1b1/research/slim/preprocessing/inception_preprocessing.py#L243
image_data[:, :, :, 0] = 2.0 / 255.0 * image_data[:, :, :, 0] - 1
image_data[:, :, :, 1] = 2.0 / 255.0 * image_data[:, :, :, 1] - 1
image_data[:, :, :, 2] = 2.0 / 255.0 * image_data[:, :, :, 2] - 1
print("input", image_data.shape)
######################################################################
# Compile the model with relay
# ----------------------------
# TFLite input tensor name, shape and type
input_tensor = "input"
input_shape = (1, 224, 224, 3)
input_dtype = "float32"
# Parse TFLite model and convert it to a Relay module
from tvm import relay, transform
mod, params = relay.frontend.from_tflite(
tflite_model, shape_dict={input_tensor: input_shape}, dtype_dict={input_tensor: input_dtype}
)
# Build the module for an x86 CPU target
target = "llvm"
with transform.PassContext(opt_level=3):
lib = relay.build(mod, target, params=params)
######################################################################
# Execute on TVM
# --------------
import tvm
from tvm import te
from tvm.contrib import graph_runtime as runtime
# Create a runtime executor module
module = runtime.GraphModule(lib["default"](tvm.cpu()))
# Feed input data
module.set_input(input_tensor, tvm.nd.array(image_data))
# Run
module.run()
# Get output
tvm_output = module.get_output(0).asnumpy()
######################################################################
# Display results
# ---------------
# Load label file
label_file_url = "".join(
[
"https://raw.githubusercontent.com/",
"tensorflow/tensorflow/master/tensorflow/lite/java/demo/",
"app/src/main/assets/",
"labels_mobilenet_quant_v1_224.txt",
]
)
label_file = "labels_mobilenet_quant_v1_224.txt"
label_path = download_testdata(label_file_url, label_file, module="data")
# List of 1001 classes
with open(label_path) as f:
labels = f.readlines()
# Convert result to 1D data
predictions = np.squeeze(tvm_output)
# Get top 1 prediction
prediction = np.argmax(predictions)
# Convert id to class name and show the result
print("The image prediction result is: id " + str(prediction) + " name: " + labels[prediction])
|
apache-2.0
|
pianomania/scikit-learn
|
sklearn/mixture/tests/test_bayesian_mixture.py
|
84
|
17929
|
# Author: Wei Xue <xuewei4d@gmail.com>
# Thierry Guillemot <thierry.guillemot.work@gmail.com>
# License: BSD 3 clause
import numpy as np
from scipy.special import gammaln
from sklearn.utils.testing import assert_raise_message
from sklearn.utils.testing import assert_almost_equal
from sklearn.mixture.bayesian_mixture import _log_dirichlet_norm
from sklearn.mixture.bayesian_mixture import _log_wishart_norm
from sklearn.mixture import BayesianGaussianMixture
from sklearn.mixture.tests.test_gaussian_mixture import RandomData
from sklearn.exceptions import ConvergenceWarning
from sklearn.utils.testing import assert_greater_equal, ignore_warnings
COVARIANCE_TYPE = ['full', 'tied', 'diag', 'spherical']
PRIOR_TYPE = ['dirichlet_process', 'dirichlet_distribution']
def test_log_dirichlet_norm():
rng = np.random.RandomState(0)
weight_concentration = rng.rand(2)
expected_norm = (gammaln(np.sum(weight_concentration)) -
np.sum(gammaln(weight_concentration)))
    predicted_norm = _log_dirichlet_norm(weight_concentration)
    assert_almost_equal(expected_norm, predicted_norm)
def test_log_wishart_norm():
rng = np.random.RandomState(0)
n_components, n_features = 5, 2
degrees_of_freedom = np.abs(rng.rand(n_components)) + 1.
log_det_precisions_chol = n_features * np.log(range(2, 2 + n_components))
expected_norm = np.empty(5)
for k, (degrees_of_freedom_k, log_det_k) in enumerate(
zip(degrees_of_freedom, log_det_precisions_chol)):
expected_norm[k] = -(
degrees_of_freedom_k * (log_det_k + .5 * n_features * np.log(2.)) +
np.sum(gammaln(.5 * (degrees_of_freedom_k -
np.arange(0, n_features)[:, np.newaxis])), 0))
    predicted_norm = _log_wishart_norm(degrees_of_freedom,
                                       log_det_precisions_chol, n_features)
    assert_almost_equal(expected_norm, predicted_norm)
def test_bayesian_mixture_covariance_type():
rng = np.random.RandomState(0)
n_samples, n_features = 10, 2
X = rng.rand(n_samples, n_features)
covariance_type = 'bad_covariance_type'
bgmm = BayesianGaussianMixture(covariance_type=covariance_type,
random_state=rng)
assert_raise_message(ValueError,
"Invalid value for 'covariance_type': %s "
"'covariance_type' should be in "
"['spherical', 'tied', 'diag', 'full']"
% covariance_type, bgmm.fit, X)
def test_bayesian_mixture_weight_concentration_prior_type():
rng = np.random.RandomState(0)
n_samples, n_features = 10, 2
X = rng.rand(n_samples, n_features)
bad_prior_type = 'bad_prior_type'
bgmm = BayesianGaussianMixture(
weight_concentration_prior_type=bad_prior_type, random_state=rng)
assert_raise_message(ValueError,
"Invalid value for 'weight_concentration_prior_type':"
" %s 'weight_concentration_prior_type' should be in "
"['dirichlet_process', 'dirichlet_distribution']"
% bad_prior_type, bgmm.fit, X)
def test_bayesian_mixture_weights_prior_initialisation():
rng = np.random.RandomState(0)
n_samples, n_components, n_features = 10, 5, 2
X = rng.rand(n_samples, n_features)
# Check raise message for a bad value of weight_concentration_prior
bad_weight_concentration_prior_ = 0.
bgmm = BayesianGaussianMixture(
weight_concentration_prior=bad_weight_concentration_prior_,
random_state=0)
assert_raise_message(ValueError,
"The parameter 'weight_concentration_prior' "
"should be greater than 0., but got %.3f."
% bad_weight_concentration_prior_,
bgmm.fit, X)
# Check correct init for a given value of weight_concentration_prior
weight_concentration_prior = rng.rand()
bgmm = BayesianGaussianMixture(
weight_concentration_prior=weight_concentration_prior,
random_state=rng).fit(X)
assert_almost_equal(weight_concentration_prior,
bgmm.weight_concentration_prior_)
# Check correct init for the default value of weight_concentration_prior
bgmm = BayesianGaussianMixture(n_components=n_components,
random_state=rng).fit(X)
assert_almost_equal(1. / n_components, bgmm.weight_concentration_prior_)
def test_bayesian_mixture_means_prior_initialisation():
rng = np.random.RandomState(0)
n_samples, n_components, n_features = 10, 3, 2
X = rng.rand(n_samples, n_features)
# Check raise message for a bad value of mean_precision_prior
bad_mean_precision_prior_ = 0.
bgmm = BayesianGaussianMixture(
mean_precision_prior=bad_mean_precision_prior_,
random_state=rng)
assert_raise_message(ValueError,
"The parameter 'mean_precision_prior' should be "
"greater than 0., but got %.3f."
% bad_mean_precision_prior_,
bgmm.fit, X)
# Check correct init for a given value of mean_precision_prior
mean_precision_prior = rng.rand()
bgmm = BayesianGaussianMixture(
mean_precision_prior=mean_precision_prior,
random_state=rng).fit(X)
assert_almost_equal(mean_precision_prior, bgmm.mean_precision_prior_)
# Check correct init for the default value of mean_precision_prior
bgmm = BayesianGaussianMixture(random_state=rng).fit(X)
assert_almost_equal(1., bgmm.mean_precision_prior_)
# Check raise message for a bad shape of mean_prior
mean_prior = rng.rand(n_features + 1)
bgmm = BayesianGaussianMixture(n_components=n_components,
mean_prior=mean_prior,
random_state=rng)
assert_raise_message(ValueError,
"The parameter 'means' should have the shape of ",
bgmm.fit, X)
# Check correct init for a given value of mean_prior
mean_prior = rng.rand(n_features)
bgmm = BayesianGaussianMixture(n_components=n_components,
mean_prior=mean_prior,
random_state=rng).fit(X)
assert_almost_equal(mean_prior, bgmm.mean_prior_)
    # Check correct init for the default value of mean_prior
bgmm = BayesianGaussianMixture(n_components=n_components,
random_state=rng).fit(X)
assert_almost_equal(X.mean(axis=0), bgmm.mean_prior_)
def test_bayesian_mixture_precisions_prior_initialisation():
rng = np.random.RandomState(0)
n_samples, n_features = 10, 2
X = rng.rand(n_samples, n_features)
# Check raise message for a bad value of degrees_of_freedom_prior
bad_degrees_of_freedom_prior_ = n_features - 1.
bgmm = BayesianGaussianMixture(
degrees_of_freedom_prior=bad_degrees_of_freedom_prior_,
random_state=rng)
assert_raise_message(ValueError,
"The parameter 'degrees_of_freedom_prior' should be "
"greater than %d, but got %.3f."
% (n_features - 1, bad_degrees_of_freedom_prior_),
bgmm.fit, X)
# Check correct init for a given value of degrees_of_freedom_prior
degrees_of_freedom_prior = rng.rand() + n_features - 1.
bgmm = BayesianGaussianMixture(
degrees_of_freedom_prior=degrees_of_freedom_prior,
random_state=rng).fit(X)
assert_almost_equal(degrees_of_freedom_prior,
bgmm.degrees_of_freedom_prior_)
# Check correct init for the default value of degrees_of_freedom_prior
degrees_of_freedom_prior_default = n_features
bgmm = BayesianGaussianMixture(
degrees_of_freedom_prior=degrees_of_freedom_prior_default,
random_state=rng).fit(X)
assert_almost_equal(degrees_of_freedom_prior_default,
bgmm.degrees_of_freedom_prior_)
# Check correct init for a given value of covariance_prior
covariance_prior = {
'full': np.cov(X.T, bias=1) + 10,
'tied': np.cov(X.T, bias=1) + 5,
'diag': np.diag(np.atleast_2d(np.cov(X.T, bias=1))) + 3,
'spherical': rng.rand()}
bgmm = BayesianGaussianMixture(random_state=rng)
for cov_type in ['full', 'tied', 'diag', 'spherical']:
bgmm.covariance_type = cov_type
bgmm.covariance_prior = covariance_prior[cov_type]
bgmm.fit(X)
assert_almost_equal(covariance_prior[cov_type],
bgmm.covariance_prior_)
# Check raise message for a bad spherical value of covariance_prior
bad_covariance_prior_ = -1.
bgmm = BayesianGaussianMixture(covariance_type='spherical',
covariance_prior=bad_covariance_prior_,
random_state=rng)
assert_raise_message(ValueError,
"The parameter 'spherical covariance_prior' "
"should be greater than 0., but got %.3f."
% bad_covariance_prior_,
bgmm.fit, X)
# Check correct init for the default value of covariance_prior
covariance_prior_default = {
'full': np.atleast_2d(np.cov(X.T)),
'tied': np.atleast_2d(np.cov(X.T)),
'diag': np.var(X, axis=0, ddof=1),
'spherical': np.var(X, axis=0, ddof=1).mean()}
bgmm = BayesianGaussianMixture(random_state=0)
for cov_type in ['full', 'tied', 'diag', 'spherical']:
bgmm.covariance_type = cov_type
bgmm.fit(X)
assert_almost_equal(covariance_prior_default[cov_type],
bgmm.covariance_prior_)
def test_bayesian_mixture_check_is_fitted():
rng = np.random.RandomState(0)
n_samples, n_features = 10, 2
# Check raise message
bgmm = BayesianGaussianMixture(random_state=rng)
X = rng.rand(n_samples, n_features)
assert_raise_message(ValueError,
'This BayesianGaussianMixture instance is not '
'fitted yet.', bgmm.score, X)
def test_bayesian_mixture_weights():
rng = np.random.RandomState(0)
n_samples, n_features = 10, 2
X = rng.rand(n_samples, n_features)
# Case Dirichlet distribution for the weight concentration prior type
bgmm = BayesianGaussianMixture(
weight_concentration_prior_type="dirichlet_distribution",
n_components=3, random_state=rng).fit(X)
expected_weights = (bgmm.weight_concentration_ /
np.sum(bgmm.weight_concentration_))
assert_almost_equal(expected_weights, bgmm.weights_)
assert_almost_equal(np.sum(bgmm.weights_), 1.0)
# Case Dirichlet process for the weight concentration prior type
dpgmm = BayesianGaussianMixture(
weight_concentration_prior_type="dirichlet_process",
n_components=3, random_state=rng).fit(X)
weight_dirichlet_sum = (dpgmm.weight_concentration_[0] +
dpgmm.weight_concentration_[1])
tmp = dpgmm.weight_concentration_[1] / weight_dirichlet_sum
expected_weights = (dpgmm.weight_concentration_[0] / weight_dirichlet_sum *
np.hstack((1, np.cumprod(tmp[:-1]))))
expected_weights /= np.sum(expected_weights)
assert_almost_equal(expected_weights, dpgmm.weights_)
assert_almost_equal(np.sum(dpgmm.weights_), 1.0)
@ignore_warnings(category=ConvergenceWarning)
def test_monotonic_likelihood():
    # Check that each step of variational inference without regularization
    # monotonically improves the lower bound on the training set.
rng = np.random.RandomState(0)
rand_data = RandomData(rng, scale=20)
n_components = rand_data.n_components
for prior_type in PRIOR_TYPE:
for covar_type in COVARIANCE_TYPE:
X = rand_data.X[covar_type]
bgmm = BayesianGaussianMixture(
weight_concentration_prior_type=prior_type,
n_components=2 * n_components, covariance_type=covar_type,
warm_start=True, max_iter=1, random_state=rng, tol=1e-4)
current_lower_bound = -np.infty
# Do one training iteration at a time so we can make sure that the
# training log likelihood increases after each iteration.
for _ in range(600):
prev_lower_bound = current_lower_bound
current_lower_bound = bgmm.fit(X).lower_bound_
assert_greater_equal(current_lower_bound, prev_lower_bound)
if bgmm.converged_:
break
assert(bgmm.converged_)
def test_compare_covar_type():
# We can compare the 'full' precision with the other cov_type if we apply
# 1 iter of the M-step (done during _initialize_parameters).
rng = np.random.RandomState(0)
rand_data = RandomData(rng, scale=7)
X = rand_data.X['full']
n_components = rand_data.n_components
for prior_type in PRIOR_TYPE:
# Computation of the full_covariance
bgmm = BayesianGaussianMixture(
weight_concentration_prior_type=prior_type,
n_components=2 * n_components, covariance_type='full',
max_iter=1, random_state=0, tol=1e-7)
bgmm._check_initial_parameters(X)
bgmm._initialize_parameters(X, np.random.RandomState(0))
full_covariances = (
bgmm.covariances_ *
bgmm.degrees_of_freedom_[:, np.newaxis, np.newaxis])
# Check tied_covariance = mean(full_covariances, 0)
bgmm = BayesianGaussianMixture(
weight_concentration_prior_type=prior_type,
n_components=2 * n_components, covariance_type='tied',
max_iter=1, random_state=0, tol=1e-7)
bgmm._check_initial_parameters(X)
bgmm._initialize_parameters(X, np.random.RandomState(0))
tied_covariance = bgmm.covariances_ * bgmm.degrees_of_freedom_
assert_almost_equal(tied_covariance, np.mean(full_covariances, 0))
# Check diag_covariance = diag(full_covariances)
bgmm = BayesianGaussianMixture(
weight_concentration_prior_type=prior_type,
n_components=2 * n_components, covariance_type='diag',
max_iter=1, random_state=0, tol=1e-7)
bgmm._check_initial_parameters(X)
bgmm._initialize_parameters(X, np.random.RandomState(0))
diag_covariances = (bgmm.covariances_ *
bgmm.degrees_of_freedom_[:, np.newaxis])
assert_almost_equal(diag_covariances,
np.array([np.diag(cov)
for cov in full_covariances]))
# Check spherical_covariance = np.mean(diag_covariances, 0)
bgmm = BayesianGaussianMixture(
weight_concentration_prior_type=prior_type,
n_components=2 * n_components, covariance_type='spherical',
max_iter=1, random_state=0, tol=1e-7)
bgmm._check_initial_parameters(X)
bgmm._initialize_parameters(X, np.random.RandomState(0))
spherical_covariances = bgmm.covariances_ * bgmm.degrees_of_freedom_
assert_almost_equal(
spherical_covariances, np.mean(diag_covariances, 1))
@ignore_warnings(category=ConvergenceWarning)
def test_check_covariance_precision():
# We check that the dot product of the covariance and the precision
# matrices is identity.
rng = np.random.RandomState(0)
rand_data = RandomData(rng, scale=7)
n_components, n_features = 2 * rand_data.n_components, 2
# Computation of the full_covariance
bgmm = BayesianGaussianMixture(n_components=n_components,
max_iter=100, random_state=rng, tol=1e-3,
reg_covar=0)
for covar_type in COVARIANCE_TYPE:
bgmm.covariance_type = covar_type
bgmm.fit(rand_data.X[covar_type])
if covar_type == 'full':
for covar, precision in zip(bgmm.covariances_, bgmm.precisions_):
assert_almost_equal(np.dot(covar, precision),
np.eye(n_features))
elif covar_type == 'tied':
assert_almost_equal(np.dot(bgmm.covariances_, bgmm.precisions_),
np.eye(n_features))
elif covar_type == 'diag':
assert_almost_equal(bgmm.covariances_ * bgmm.precisions_,
np.ones((n_components, n_features)))
else:
assert_almost_equal(bgmm.covariances_ * bgmm.precisions_,
np.ones(n_components))
@ignore_warnings(category=ConvergenceWarning)
def test_invariant_translation():
    # Check that adding a constant to the data shifts the fitted means by the
    # same constant while leaving the weights and covariances unchanged.
rng = np.random.RandomState(0)
rand_data = RandomData(rng, scale=100)
n_components = 2 * rand_data.n_components
for prior_type in PRIOR_TYPE:
for covar_type in COVARIANCE_TYPE:
X = rand_data.X[covar_type]
bgmm1 = BayesianGaussianMixture(
weight_concentration_prior_type=prior_type,
n_components=n_components, max_iter=100, random_state=0,
tol=1e-3, reg_covar=0).fit(X)
bgmm2 = BayesianGaussianMixture(
weight_concentration_prior_type=prior_type,
n_components=n_components, max_iter=100, random_state=0,
tol=1e-3, reg_covar=0).fit(X + 100)
assert_almost_equal(bgmm1.means_, bgmm2.means_ - 100)
assert_almost_equal(bgmm1.weights_, bgmm2.weights_)
assert_almost_equal(bgmm1.covariances_, bgmm2.covariances_)
|
bsd-3-clause
|
jnez71/kalmaNN
|
demos/2d_classify.py
|
1
|
1766
|
#!/usr/bin/env python
"""
Training and using a KNN for classification of 2D data.
Comparison of training methods, EKF vs SGD.
"""
from __future__ import division
import numpy as np
import matplotlib.pyplot as plt
import kalmann
# Get some noisy training data classifications, spirals!
n = 100
stdev = 0.2
U = np.zeros((n*3, 2))
Y = np.zeros((n*3, 1), dtype='uint8')
for j in range(3):
ix = range(n*j, n*(j+1))
r = np.linspace(0, 1, n)
t = np.linspace(j*4, (j+1)*4, n) + np.random.normal(0, stdev, n)
U[ix] = np.c_[r*np.sin(t), r*np.cos(t)]
Y[ix] = j
Y[-20:-18] = 0
# Create two identical KNN's that will be trained differently
knn_ekf = kalmann.KNN(nu=2, ny=1, nl=10, neuron='logistic')
knn_sgd = kalmann.KNN(nu=2, ny=1, nl=10, neuron='logistic')
# Train
nepochs_ekf = 100
nepochs_sgd = 200
knn_ekf.train(nepochs=nepochs_ekf, U=U, Y=Y, method='ekf', P=0.2, Q=0, R=stdev**2, pulse_T=2)
knn_sgd.train(nepochs=nepochs_sgd, U=U, Y=Y, method='sgd', step=0.1, pulse_T=2)
# Use the KNNs as classifiers
F_ekf = knn_ekf.classify(U, high=2, low=0)
F_sgd = knn_sgd.classify(U, high=2, low=0)
print("EKF Classification Accuracy: {}%".format(int(100*np.sum(F_ekf==Y)/len(Y))))
print("SGD Classification Accuracy: {}%\n".format(int(100*np.sum(F_sgd==Y)/len(Y))))
# Evaluation
fig = plt.figure()
ax = fig.add_subplot(1, 3, 1)
ax.set_title("True Classifications", fontsize=22)
ax.scatter(U[:, 0], U[:, 1], c=Y[:,0])
plt.axis('equal')
ax = fig.add_subplot(1, 3, 2)
ax.set_title("EKF: {} epochs".format(nepochs_ekf), fontsize=22)
ax.scatter(U[:, 0], U[:, 1], c=F_ekf[:,0])
plt.axis('equal')
ax = fig.add_subplot(1, 3, 3)
ax.set_title("SGD: {} epochs".format(nepochs_sgd), fontsize=22)
ax.scatter(U[:, 0], U[:, 1], c=F_sgd[:,0])
plt.axis('equal')
plt.show()
|
mit
|
rhiever/sklearn-benchmarks
|
model_code/random_search/LogisticRegression.py
|
1
|
1120
|
import sys
import pandas as pd
import numpy as np
from sklearn.preprocessing import RobustScaler
from sklearn.linear_model import LogisticRegression
from evaluate_model import evaluate_model
dataset = sys.argv[1]
num_param_combinations = int(sys.argv[2])
random_seed = int(sys.argv[3])
np.random.seed(random_seed)
pipeline_components = [RobustScaler, LogisticRegression]
pipeline_parameters = {}
C_values = np.random.uniform(low=1e-10, high=10., size=num_param_combinations)
penalty_values = np.random.choice(['l1', 'l2'], size=num_param_combinations)
fit_intercept_values = np.random.choice([True, False], size=num_param_combinations)
dual_values = np.random.choice([True, False], size=num_param_combinations)
all_param_combinations = zip(C_values, penalty_values, fit_intercept_values, dual_values)
pipeline_parameters[LogisticRegression] = \
[{'C': C, 'penalty': penalty, 'fit_intercept': fit_intercept, 'dual': False if penalty != 'l2' else dual, 'random_state': 324089}
for (C, penalty, fit_intercept, dual) in all_param_combinations]
evaluate_model(dataset, pipeline_components, pipeline_parameters)
|
mit
|
ajc158/HoneyBee-Angular-Velocity-Detection
|
AVDU_model/Paper_detector_behaviour/analyse_FigX.py
|
1
|
2109
|
#!/usr/bin/python
import xml.etree.ElementTree as ET
import sml_log_parser
from subprocess import call, Popen
import os.path
import os
import time, stat, random, math
import ctypes
import struct
import csv
import numpy
from os import listdir
import matplotlib.pyplot as plt
print 'Script to generate Figure for the paper, showing centring accuracy'
SML_2_B_dir = "/Users/alex/Documents/SpineML_2_BRAHMS/"
wall_freqs = [10,
12.5892541179,
15.8489319246,
19.9526231497,
25.1188643151,
31.6227766017,
39.8107170553,
50.1187233627,
63.095734448,
79.4328234724,
100,
125.8925411794,
158.4893192461,
199.5262314969,
251.188643151,
316.2277660168,
398.1071705535,
501.1872336273,
630.9573444802,
794.3282347243,
1000]
# the different configs
print "Running ",
print len(wall_freqs),
print " bees..."
print 'Beginning bees'
runnum = 0
baseport = 50091
running_procs = []
max_procs = 4
xml_file = "beeworldConfigCorridor.xml"
vals = []
for i in xrange(0,7):
for wall_freq in wall_freqs:
runnum = runnum + 1
print 'Getting bee ',
print runnum
log_dir = "{0}/outtemp/bee{1}/log/".format(SML_2_B_dir, runnum)
log = sml_log_parser.sml_log(log_dir, 'av_out_logrep.xml')
data = log.getdataforindex(0)
#print len(data)
a = numpy.array(data)
vals.append(a[-1])
print "Vals"
for val in vals:
print val
# plot
p1 = plt.plot(wall_freqs, vals[0:21],'r')
plt.hold
p2 = plt.plot(wall_freqs, vals[21:42],'g')
p3 = plt.plot(wall_freqs, vals[42:63],'b')
p4 = plt.plot(wall_freqs, vals[63:84],'y')
plt.legend(['11Hz','19Hz','38Hz','76Hz'])
#plt.ylim([-3,3])
#plt.xlim([0,5])
plt.xscale('log')
plt.title("Spatial frequency invariance")
plt.xlabel("Angular velocity (deg/s)")
plt.ylabel("Detector response")
plt.figure()
p2 = plt.plot(wall_freqs, vals[84:105],'g')
plt.hold
p3 = plt.plot(wall_freqs, vals[105:126],'b')
p4 = plt.plot(wall_freqs, vals[126:147],'y')
p1 = plt.plot(wall_freqs, vals[21:42],'r')
plt.legend(['2:3','2:4','2:6','2:8'])
plt.xscale('log')
plt.title("Contrast invariance")
plt.xlabel("Angular velocity (deg/s)")
plt.ylabel("Detector response")
plt.show()
|
gpl-3.0
|
techbureau/zaifbot
|
zaifbot/indicators/bollinger_bands.py
|
1
|
1328
|
import pandas as pd
from talib import MA_Type
from .indicator import Indicator
class BBANDS(Indicator):
_NAME = 'bbands'
def __init__(self, currency_pair='btc_jpy', period='1d', length=25, matype=MA_Type.EMA):
super().__init__(currency_pair, period)
self._length = self._bounded_length(length)
self._matype = matype
def request_data(self, count=100, lowbd=2, upbd=2, to_epoch_time=None):
candlesticks_df = self._get_candlesticks_df(count, to_epoch_time)
bbands = self._exec_talib_func(candlesticks_df,
timeperiod=self._length,
nbdevup=upbd,
nbdevdn=lowbd,
matype=self._matype)
formatted_bbands = self._formatting(candlesticks_df, bbands)
return formatted_bbands
def _required_candlesticks_count(self, count):
return self._length + self._bounded_count(count) - 1
@staticmethod
def _formatting(candlesticks_df, bbands):
bbands_with_time = pd.concat([candlesticks_df['time'], bbands[['lowerband', 'upperband']]], axis=1)
bbands_with_time.dropna(inplace=True)
dict_bands = bbands_with_time.astype(object).to_dict(orient='records')
return dict_bands
|
mit
|
lordkman/burnman
|
burnman/tools.py
|
3
|
28641
|
# This file is part of BurnMan - a thermoelastic and thermodynamic toolkit for the Earth and Planetary Sciences
# Copyright (C) 2012 - 2017 by the BurnMan team, released under the GNU
# GPL v2 or later.
from __future__ import absolute_import
from __future__ import print_function
import operator
import bisect
import os
import pkgutil
import numpy as np
from scipy.optimize import fsolve, curve_fit
from scipy.ndimage.filters import gaussian_filter
from scipy.interpolate import interp2d
from collections import Counter
import itertools
from . import constants
def copy_documentation(copy_from):
"""
Decorator @copy_documentation(another_function) will copy the documentation found in a different
function (for example from a base class). The docstring applied to some function a() will be ::
(copied from BaseClass.some_function):
<documentation from BaseClass.some_function>
<optionally the documentation found in a()>
"""
def mydecorator(func):
def wrapper(*args):
return func(*args)
old = ""
if func.__doc__:
old = "\n" + func.__doc__
copied_from = ""
if hasattr(copy_from, "__name__"):
copied_from = "(copied from " + copy_from.__name__ + "):\n"
wrapper.__doc__ = copied_from + copy_from.__doc__ + old
wrapper.__name__ = func.__name__
return wrapper
return mydecorator
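# Illustrative use of the decorator above (class and method names are hypothetical):
# class Derived(Base):
#     @copy_documentation(Base.some_function)
#     def some_function(self):
#         ...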
def flatten(l): return flatten(l[0]) + (flatten(l[1:]) if len(l) > 1 else []) if type(l) is list or type(l) is np.ndarray else [l]
def round_to_n(x, xerr, n):
return round(x, -int(np.floor(np.log10(np.abs(xerr)))) + (n - 1))
def unit_normalize(a, order=2, axis=-1):
"""
    Normalizes the numpy array a using the vector norm of the given order,
    computed along the given axis (defaults to the L2 norm along the last axis).
"""
l2 = np.atleast_1d(np.apply_along_axis(np.linalg.norm, axis, a, order))
l2[l2==0] = 1
return a / np.expand_dims(l2, axis)[0][0]
def pretty_print_values(popt, pcov, params):
"""
Takes a numpy array of parameters, the corresponding covariance matrix
and a set of parameter names and prints the parameters and
    principal 1-s.d. uncertainties (np.sqrt(pcov[i][i]))
in a nice text based format.
"""
for i, p in enumerate(params):
p_rnd = round_to_n(popt[i], np.sqrt(pcov[i][i]), 1)
c_rnd = round_to_n(np.sqrt(pcov[i][i]), np.sqrt(pcov[i][i]), 1)
if p_rnd != 0.:
p_expnt = np.floor(np.log10(np.abs(p_rnd)))
else:
p_expnt = 0.
scale = np.power(10., p_expnt)
nd = p_expnt - np.floor(np.log10(np.abs(c_rnd)))
print ('{0:s}: ({1:{4}{5}f} +/- {2:{4}{5}f}) x {3:.0e}'.format(p, p_rnd/scale, c_rnd/scale, scale, 0, (nd)/10.))
def pretty_print_table(table, use_tabs=False):
"""
Takes a 2d table and prints it in a nice text based format. If
use_tabs=True then only \t is used as a separator. This is useful for
importing the data into other apps (Excel, ...). The default is to pad
the columns with spaces to make them look neat. The first column is
left aligned, while the remainder is right aligned.
"""
if use_tabs:
for r in table:
print("\t".join(r).replace("_", "\_"))
return
def col_width(table, colidx):
return max([len(str(row[colidx])) for row in table])
# create a format string with the first column left aligned, the others right
# example: {:<27}{:>11}{:>6}{:>8}
frmt = "".join(
[('{:<' if i == 0 else '{:>') + str(1 + col_width(table, i)) + '}' for i in range(len(table[0]))])
for r in table:
print(frmt.format(*r))
def pretty_plot():
"""
Makes pretty plots. Overwrites the matplotlib default settings to allow
for better fonts. Slows down plotting
"""
import matplotlib.pyplot as plt
plt.rc('text', usetex=True)
plt.rcParams['text.latex.preamble'] = '\\usepackage{relsize}'
    plt.rc('font', family='sans-serif')
def sort_table(table, col=0):
"""
Sort the table according to the column number
"""
return sorted(table, key=operator.itemgetter(col))
def float_eq(a, b):
"""
Test if two floats are almost equal to each other
"""
return abs(a - b) < 1e-10 * max(1e-5, abs(a), abs(b))
def linear_interpol(x, x1, x2, y1, y2):
"""
Linearly interpolate to point x, between
the points (x1,y1), (x2,y2)
"""
assert(x1 <= x)
assert(x2 >= x)
assert(x1 <= x2)
alpha = (x - x1) / (x2 - x1)
return (1. - alpha) * y1 + alpha * y2
def read_table(filename):
datastream = pkgutil.get_data('burnman', 'data/' + filename)
datalines = [line.strip()
for line in datastream.decode('ascii').split('\n') if line.strip()]
table = []
for line in datalines:
if (line[0] != '#'):
numbers = np.fromstring(line, sep=' ')
table.append(numbers)
return np.array(table)
def array_from_file(filename):
"""
Generic function to read a file containing floats and commented lines
into a 2D numpy array.
Commented lines are prefixed by the characters # or %.
"""
    with open(filename, 'r') as f:
        datastream = f.read()
    data = []
    datalines = [line.strip().split()
                 for line in datastream.split('\n') if line.strip()]
    for line in datalines:
        if line[0] != "#" and line[0] != "%":
            data.append([float(x) for x in line])
    data = np.array(list(zip(*data)))
    return data
def cut_table(table, min_value, max_value):
tablen = []
for i in range(min_value, max_value, 1):
tablen.append(table[i, :])
return tablen
def lookup_and_interpolate(table_x, table_y, x_value):
idx = bisect.bisect_left(table_x, x_value) - 1
if (idx < 0):
return table_y[0]
elif (idx < len(table_x) - 1):
return linear_interpol(x_value, table_x[idx], table_x[idx + 1],
table_y[idx], table_y[idx + 1])
else:
return table_y[idx]
def molar_volume_from_unit_cell_volume(unit_cell_v, z):
"""
Converts a unit cell volume from Angstroms^3 per unitcell,
to m^3/mol.
Parameters
----------
unit_cell_v : float
Unit cell volumes [A^3/unit cell]
z : float
Number of formula units per unit cell
Returns
-------
V : float
Volume [m^3/mol]
"""
V = unit_cell_v * constants.Avogadro / 1.e30 / z
return V
def equilibrium_pressure(minerals, stoichiometry, temperature, pressure_initial_guess=1.e5):
"""
Given a list of minerals, their reaction stoichiometries
and a temperature of interest, compute the
equilibrium pressure of the reaction.
Parameters
----------
minerals : list of minerals
List of minerals involved in the reaction.
stoichiometry : list of floats
Reaction stoichiometry for the minerals provided.
Reactants and products should have the opposite signs [mol]
temperature : float
Temperature of interest [K]
pressure_initial_guess : optional float
Initial pressure guess [Pa]
Returns
-------
pressure : float
The equilibrium pressure of the reaction [Pa]
"""
def eqm(P, T):
gibbs = 0.
for i, mineral in enumerate(minerals):
mineral.set_state(P[0], T)
gibbs = gibbs + mineral.gibbs * stoichiometry[i]
return gibbs
pressure = fsolve(eqm, [pressure_initial_guess], args=(temperature))[0]
return pressure
def equilibrium_temperature(minerals, stoichiometry, pressure, temperature_initial_guess=1000.):
"""
Given a list of minerals, their reaction stoichiometries
and a pressure of interest, compute the
equilibrium temperature of the reaction.
Parameters
----------
minerals : list of minerals
List of minerals involved in the reaction.
stoichiometry : list of floats
Reaction stoichiometry for the minerals provided.
Reactants and products should have the opposite signs [mol]
pressure : float
Pressure of interest [Pa]
temperature_initial_guess : optional float
Initial temperature guess [K]
Returns
-------
temperature : float
The equilibrium temperature of the reaction [K]
"""
def eqm(T, P):
gibbs = 0.
for i, mineral in enumerate(minerals):
mineral.set_state(P, T[0])
gibbs = gibbs + mineral.gibbs * stoichiometry[i]
return gibbs
temperature = fsolve(eqm, [temperature_initial_guess], args=(pressure))[0]
return temperature
def invariant_point(minerals_r1, stoichiometry_r1,
minerals_r2, stoichiometry_r2,
pressure_temperature_initial_guess=[1.e9, 1000.]):
"""
Given a list of minerals, their reaction stoichiometries
and a pressure of interest, compute the
equilibrium temperature of the reaction.
Parameters
----------
minerals : list of minerals
List of minerals involved in the reaction.
stoichiometry : list of floats
Reaction stoichiometry for the minerals provided.
Reactants and products should have the opposite signs [mol]
pressure : float
Pressure of interest [Pa]
temperature_initial_guess : optional float
Initial temperature guess [K]
Returns
-------
temperature : float
The equilibrium temperature of the reaction [K]
"""
def eqm(PT):
P, T = PT
gibbs_r1 = 0.
for i, mineral in enumerate(minerals_r1):
mineral.set_state(P, T)
gibbs_r1 = gibbs_r1 + mineral.gibbs * stoichiometry_r1[i]
gibbs_r2 = 0.
for i, mineral in enumerate(minerals_r2):
mineral.set_state(P, T)
gibbs_r2 = gibbs_r2 + mineral.gibbs * stoichiometry_r2[i]
return [gibbs_r1, gibbs_r2]
pressure, temperature = fsolve(eqm, pressure_temperature_initial_guess)
return pressure, temperature
def hugoniot(mineral, P_ref, T_ref, pressures, reference_mineral=None):
"""
Calculates the temperatures (and volumes) along a Hugoniot
as a function of pressure according to the Hugoniot equation
U2-U1 = 0.5*(p2 - p1)(V1 - V2) where U and V are the
internal energies and volumes (mass or molar) and U = F + TS
Parameters
----------
mineral : mineral
Mineral for which the Hugoniot is to be calculated.
P_ref : float
Reference pressure [Pa]
T_ref : float
Reference temperature [K]
pressures : numpy array of floats
Set of pressures [Pa] for which the Hugoniot temperature
and volume should be calculated
reference_mineral : mineral
Mineral which is stable at the reference conditions
Provides an alternative U_0 and V_0 when the reference
mineral transforms to the mineral of interest at some
(unspecified) pressure.
Returns
-------
temperatures : numpy array of floats
The Hugoniot temperatures at pressure
volumes : numpy array of floats
The Hugoniot volumes at pressure
"""
def Ediff(T, mineral, P, P_ref, U_ref, V_ref):
mineral.set_state(P, T[0])
U = mineral.helmholtz + T[0] * mineral.S
V = mineral.V
return (U - U_ref) - 0.5 * (P - P_ref) * (V_ref - V)
if reference_mineral is None:
reference_mineral = mineral
reference_mineral.set_state(P_ref, T_ref)
U_ref = reference_mineral.helmholtz + T_ref * reference_mineral.S
V_ref = reference_mineral.V
temperatures = np.empty_like(pressures)
volumes = np.empty_like(pressures)
for i, P in enumerate(pressures):
temperatures[i] = fsolve(
Ediff, [T_ref], args=(mineral, P, P_ref, U_ref, V_ref))[0]
volumes[i] = mineral.V
return temperatures, volumes
def convert_fractions(composite, phase_fractions, input_type, output_type):
"""
Takes a composite with a set of user defined molar, volume
or mass fractions (which do not have to be the fractions
currently associated with the composite) and
converts the fractions to molar, mass or volume.
Conversions to and from mass require a molar mass to be
defined for all phases. Conversions to and from volume
require set_state to have been called for the composite.
Parameters
----------
composite : Composite
Composite for which fractions are to be defined.
phase_fractions : list of floats
List of input phase fractions (of type input_type)
input_type : string
Input fraction type: 'molar', 'mass' or 'volume'
output_type : string
Output fraction type: 'molar', 'mass' or 'volume'
Returns
-------
output_fractions : list of floats
List of output phase fractions (of type output_type)
"""
if input_type == 'volume' or output_type == 'volume':
        if composite.temperature is None:
raise Exception(
composite.to_string() + ".set_state(P, T) has not been called, so volume fractions are currently undefined. Exiting.")
if input_type == 'molar':
molar_fractions = phase_fractions
if input_type == 'volume':
total_moles = sum(
volume_fraction / phase.molar_volume for volume_fraction,
phase in zip(phase_fractions, composite.phases))
molar_fractions = [volume_fraction / (phase.molar_volume * total_moles)
for volume_fraction, phase in zip(phase_fractions, composite.phases)]
if input_type == 'mass':
total_moles = sum(mass_fraction / phase.molar_mass for mass_fraction,
phase in zip(phase_fractions, composite.phases))
molar_fractions = [mass_fraction / (phase.molar_mass * total_moles)
for mass_fraction, phase in zip(phase_fractions, composite.phases)]
if output_type == 'volume':
total_volume = sum(
molar_fraction * phase.molar_volume for molar_fraction,
phase in zip(molar_fractions, composite.phases))
output_fractions = [molar_fraction * phase.molar_volume /
total_volume for molar_fraction, phase in zip(molar_fractions, composite.phases)]
elif output_type == 'mass':
total_mass = sum(molar_fraction * phase.molar_mass for molar_fraction,
phase in zip(molar_fractions, composite.phases))
output_fractions = [molar_fraction * phase.molar_mass /
total_mass for molar_fraction, phase in zip(molar_fractions, composite.phases)]
elif output_type == 'molar':
output_fractions = molar_fractions
return output_fractions
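# Illustrative usage (``assemblage`` is a hypothetical burnman Composite on which
# set_state(P, T) has been called if volume fractions are involved):
# mass_fractions = convert_fractions(assemblage, [0.5, 0.5], 'molar', 'mass')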
def bracket(fn, x0, dx, args=(), ratio=1.618, maxiter=100):
"""
Given a function and a starting guess, find two
inputs for the function that bracket a root.
Parameters
----------
fn : function
The function to bracket
x0 : float
The starting guess
dx : float
Small step for starting the search
args : parameter list
Additional arguments to give to fn
ratio :
The step size increases by this ratio
every step in the search. Defaults to
the golden ratio.
maxiter : int
The maximum number of steps before giving up.
Returns
-------
xa, xb, fa, fb: floats
xa and xb are the inputs which bracket a root of fn.
fa and fb are the values of the function at those points.
If the bracket function takes more than maxiter steps,
it raises a ValueError.
"""
niter = 0
dx = np.abs(dx)
assert(ratio > 1.0)
# Get the starting positions
f0 = fn(x0, *args)
x_left = x0 - dx
x_right = x0 + dx
f_left = fn(x_left, *args)
f_right = fn(x_right, *args)
# Overshot zero, try making dx smaller
if (f0 - f_left) * (f_right - f0) < 0.:
while (f0 - f_left) * (f_right - f0) < 0. and dx > np.finfo('float').eps and niter < maxiter:
dx /= ratio
x_left = x0 - dx
x_right = x0 + dx
f_left = fn(x_left, *args)
f_right = fn(x_right, *args)
niter += 1
if niter == maxiter: # Couldn't find something with same slope in both directions
raise ValueError('Cannot find zero.')
niter = 0
slope = f_right - f0
if slope > 0. and f0 > 0.: # Walk left
dx = -dx
x1 = x_left
f1 = f_left
elif slope > 0. and f0 < 0.: # Walk right
x1 = x_right
f1 = f_right
elif slope < 0. and f0 > 0: # Walk right
x1 = x_right
f1 = f_right
else: # Walk left
dx = -dx
x1 = x_left
f1 = f_left
# Do the walking
while f0 * f1 > 0. and niter < maxiter:
dx *= ratio
xnew = x1 + dx
fnew = fn(xnew, *args)
x0 = x1
f0 = f1
x1 = xnew
f1 = fnew
niter += 1
if f0 * f1 > 0.:
raise ValueError('Cannot find zero.')
else:
return x0, x1, f0, f1
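# Illustrative usage of bracket() (hypothetical call; needs only numpy, imported above):
# xa, xb, fa, fb = bracket(lambda x: x * x - 2., 1., 0.1)
# [xa, xb] then brackets the root near sqrt(2) and can be passed to a root finder.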
def check_eos_consistency(m, P=1.e9, T=300., tol=1.e-4, verbose=False):
"""
Compute numerical derivatives of the gibbs free energy of a mineral
under given conditions, and check these values against those provided
analytically by the equation of state
Parameters
----------
m : mineral
The mineral for which the equation of state
is to be checked for consistency
P : float
The pressure at which to check consistency
T : float
The temperature at which to check consistency
tol : float
The fractional tolerance for each of the checks
verbose : boolean
Decide whether to print information about each
check
Returns
-------
consistency: boolean
If all checks pass, returns True
"""
dT = 1.
dP = 1000.
m.set_state(P, T)
G0 = m.gibbs
S0 = m.S
V0 = m.V
expr = ['G = F + PV', 'G = H - TS', 'G = E - TS + PV']
eq = [[m.gibbs, (m.helmholtz + P*m.V)],
[m.gibbs, (m.H - T*m.S)],
[m.gibbs, (m.internal_energy - T*m.S + P*m.V)]]
m.set_state(P, T + dT)
G1 = m.gibbs
S1 = m.S
V1 = m.V
m.set_state(P + dP, T)
G2 = m.gibbs
V2 = m.V
# T derivatives
m.set_state(P, T + 0.5*dT)
expr.extend(['S = -dG/dT', 'alpha = 1/V dV/dT', 'C_p = T dS/dT'])
eq.extend([[m.S, -(G1 - G0)/dT],
[m.alpha, (V1 - V0)/dT/m.V],
[m.heat_capacity_p, (T + 0.5*dT)*(S1 - S0)/dT]])
# P derivatives
m.set_state(P + 0.5*dP, T)
expr.extend(['V = dG/dP', 'K_T = -V dP/dV'])
eq.extend([[m.V, (G2 - G0)/dP],
[m.K_T, -0.5*(V2 + V0)*dP/(V2 - V0)]])
expr.extend(['C_v = Cp - alpha^2*K_T*V*T', 'K_S = K_T*Cp/Cv', 'gr = alpha*K_T*V/Cv'])
eq.extend([[m.heat_capacity_v, m.heat_capacity_p - m.alpha*m.alpha*m.K_T*m.V*T],
[m.K_S, m.K_T*m.heat_capacity_p/m.heat_capacity_v],
[m.gr, m.alpha*m.K_T*m.V/m.heat_capacity_v]])
    expr.extend(['Vphi = np.sqrt(K_S/rho)', 'Vp = np.sqrt((K_S + 4G/3)/rho)', 'Vs = np.sqrt(G/rho)'])
eq.extend([[m.bulk_sound_velocity, np.sqrt(m.K_S/m.rho)],
[m.p_wave_velocity, np.sqrt((m.K_S + 4.*m.G/3.)/m.rho)],
[m.shear_wave_velocity, np.sqrt(m.G/m.rho)]])
consistencies = [np.abs(e[0] - e[1]) < np.abs(tol*e[1]) + np.finfo('float').eps for e in eq]
consistency = np.all(consistencies)
if verbose == True:
print('Checking EoS consistency for {0:s}'.format(m.to_string()))
print('Expressions within tolerance of {0:2f}'.format(tol))
for i, c in enumerate(consistencies):
print('{0:10s} : {1:5s}'.format(expr[i], str(c)))
if consistency == True:
print('All EoS consistency constraints satisfied for {0:s}'.format(m.to_string()))
else:
print('Not satisfied all EoS consistency constraints for {0:s}'.format(m.to_string()))
return consistency
def _pad_ndarray_inverse_mirror(array, padding):
"""
Pads an ndarray according to an inverse mirror
scheme. For example, for a 1D array
[2, 4, 6, 7, 8] padded by 3 cells, we have:
padding | original array | padding
-3 -2 0 | 2 4 6 7 8 | 9 10 12
Parameters
----------
array : numpy ndarray
The array to be padded
padding : tuple
The number of elements with which to pad the
array in each dimension.
Returns
-------
padded_array: numpy ndarray
The padded array
"""
padded_shape = [n + 2*padding[i] for i, n in enumerate(array.shape)]
padded_array = np.zeros(padded_shape)
slices = tuple([ slice(padding[i], padding[i] + l) for i, l in enumerate(array.shape)])
padded_array[slices] = array
padded_array_indices = list(itertools.product(*[range(n + 2*padding[i]) for i, n in enumerate(array.shape)]))
inserted_indices = list(itertools.product(*[range(padding[i], padding[i] + l) for i, l in enumerate(array.shape)]))
padded_array_indices.extend(inserted_indices)
counter = Counter(padded_array_indices)
keys = list(counter.keys())
padded_indices = [keys[i] for i, value in enumerate(counter.values()) if value == 1]
edge_indices = tuple([tuple([np.min([np.max([axis_idx, padding[dimension]]), padded_array.shape[dimension] - padding[dimension] - 1])
for dimension, axis_idx in enumerate(idx)]) for idx in padded_indices])
mirror_indices = tuple([tuple([2*edge_indices[i][j] - padded_indices[i][j] for j in range(len(array.shape))]) for i in range(len(padded_indices))])
for i, idx in enumerate(padded_indices):
padded_array[idx] = 2.*padded_array[edge_indices[i]] - padded_array[mirror_indices[i]]
return padded_array
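# Illustrative check of the padding scheme documented above (taken from the
# docstring example; not executed at import time):
# _pad_ndarray_inverse_mirror(np.array([2., 4., 6., 7., 8.]), (3,))
# returns [-3., -2., 0., 2., 4., 6., 7., 8., 9., 10., 12.]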
def smooth_array(array, grid_spacing,
gaussian_rms_widths, truncate=4.0,
mode='inverse_mirror'):
"""
Creates a smoothed array by convolving it with a gaussian filter.
Grid resolutions and gaussian RMS widths are required for each of
the axes of the numpy array. The smoothing is truncated at a
user-defined number of standard deviations. The edges of the array
can be padded in a number of different ways given by the
'mode' parameter.
Parameters
----------
array : numpy ndarray
The array to smooth
grid_spacing : numpy array of floats
The spacing of points along each axis
gaussian_rms_widths : numpy array of floats
The Gaussian RMS widths/standard deviations for the
Gaussian convolution.
truncate : float (default=4.)
The number of standard deviations at which to truncate
the smoothing.
mode : {'reflect', 'constant', 'nearest', 'mirror', 'wrap', 'inverse_mirror'}
The mode parameter determines how the array borders are handled
either by scipy.ndimage.filters.gaussian_filter.
Default is 'inverse_mirror', which uses
burnman.tools._pad_ndarray_inverse_mirror().
Returns
-------
smoothed_array: numpy ndarray
The smoothed array
"""
# gaussian_filter works with standard deviations normalised to
# the grid spacing.
sigma = tuple(np.array(gaussian_rms_widths)/np.array(grid_spacing))
if mode == 'inverse_mirror':
padding = tuple([int(np.ceil(truncate*s)) for s in sigma])
padded_array = _pad_ndarray_inverse_mirror(array, padding)
smoothed_padded_array = gaussian_filter(padded_array,
sigma=sigma)
slices = tuple([ slice(padding[i], padding[i] + l) for i, l in enumerate(array.shape)])
smoothed_array = smoothed_padded_array[slices]
else:
smoothed_array = gaussian_filter(array, sigma=sigma, mode=mode)
return smoothed_array
def interp_smoothed_array_and_derivatives(array,
x_values, y_values,
x_stdev=0., y_stdev=0.,
truncate=4.,
mode='inverse_mirror',
indexing='xy'):
"""
Creates a smoothed array on a regular 2D grid. Smoothing
is achieved using burnman.tools.smooth_array().
Outputs scipy.interpolate.interp2d() interpolators
which can be used to query the array, or its derivatives in the
x- and y- directions.
Parameters
----------
array : 2D numpy array
The array to smooth. Each element array[i][j]
corresponds to the position x_values[i], y_values[j]
x_values : 1D numpy array
The gridded x values over which to create the smoothed grid
y_values : 1D numpy array
The gridded y_values over which to create the smoothed grid
x_stdev : float
The standard deviation for the Gaussian filter along the x axis
y_stdev : float
        The standard deviation for the Gaussian filter along the y axis
truncate : float (optional)
The number of standard deviations at which to truncate
the smoothing (default = 4.).
mode : {'reflect', 'constant', 'nearest', 'mirror', 'wrap', 'inverse_mirror'}
The mode parameter determines how the array borders are handled
either by scipy.ndimage.filters.gaussian_filter.
Default is 'inverse_mirror', which uses
burnman.tools._pad_ndarray_inverse_mirror().
indexing : {'xy', 'ij'}, optional
Cartesian ('xy', default) or matrix ('ij') indexing of output.
See numpy.meshgrid for more details.
Returns
-------
interps: tuple of three interp2d functors
interpolation functions for the smoothed property and
the first derivatives with respect to x and y.
"""
dx = x_values[1] - x_values[0]
dy = y_values[1] - y_values[0]
if indexing == 'xy':
smoothed_array = smooth_array(array = array,
grid_spacing = np.array([dy, dx]),
gaussian_rms_widths = np.array([y_stdev, x_stdev]),
truncate=truncate,
mode=mode)
elif indexing == 'ij':
smoothed_array = smooth_array(array = array,
grid_spacing = np.array([dx, dy]),
gaussian_rms_widths = np.array([x_stdev, y_stdev]),
truncate=truncate,
mode=mode).T
else:
raise Exception('Indexing scheme not recognised. Should be ij or xy.')
dSAdydy, dSAdxdx = np.gradient(smoothed_array)
interps = (interp2d(x_values, y_values, smoothed_array, kind='linear'),
interp2d(x_values, y_values, dSAdxdx/dx, kind='linear'),
interp2d(x_values, y_values, dSAdydy/dy, kind='linear'))
return interps
def attribute_function(m, attributes, powers=[]):
"""
Function which returns a function which can be used to
evaluate material properties at a point. This function
allows the user to define the property returned
as a string. The function can itself be passed to another
function
(such as nonlinear_fitting.confidence_prediction_bands()).
Properties can either be simple attributes (e.g. K_T) or
a product of attributes, each raised to some power.
Parameters
----------
m : Material
The material instance evaluated by the output function.
attributes : list of strings
The list of material attributes / properties to
be evaluated in the product
powers : list of floats
The powers to which each attribute should be raised
during evaluation
Returns
-------
f : function(x)
Function which returns the value of product(a_i**p_i)
as a function of condition (x = [P, T, V])
"""
if type(attributes) is str:
attributes = [attributes]
if powers == []:
powers = [1. for a in attributes]
def f(x):
P, T, V = x
m.set_state(P, T)
value = 1.
for a, p in zip(*[attributes, powers]):
value *= np.power(getattr(m, a), p)
return value
return f
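# Illustrative usage (``rock`` is a hypothetical burnman Material instance):
# vp_proxy = attribute_function(rock, ['K_S', 'rho'], powers=[1., -1.])
# vp_proxy([1.e9, 300., None])  # evaluates K_S / rho at P = 1 GPa, T = 300 K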
|
gpl-2.0
|
openEduConnect/eduextractor
|
eduextractor/sis/illuminate/illuminate_exporter.py
|
1
|
1702
|
import pandas as pd
from ...config import _load_secrets
import sqlalchemy
import os
from tqdm import tqdm
class IlluminateSQLInterface:
"""A class representing a SQL interface to
Illuminate
"""
def __init__(self, secrets=None):
if secrets is None:
secrets = _load_secrets()
try:
SECRETS = secrets['illuminate']
self.username = SECRETS['username']
self.password = SECRETS['password']
self.host = SECRETS['host']
self.dbname = SECRETS['dbname']
self.port = str(SECRETS['port'])
except KeyError:
print("Please check the configuration of your config file")
engine = sqlalchemy.create_engine('postgres://' + self.username +
':' + self.password +
'@' + self.host + ':' +
self.port + '/' +
self.dbname)
self.conn = engine.connect()
    def query_to_df(self, query):
        """Executes a SQL query and returns the result as a pandas DataFrame."""
        df = pd.read_sql(query, self.conn)
        return df
    def _list_queries(self, file_dir='./sql'):
        return os.listdir(file_dir)
    def download_files(self):
        files = self._list_queries()
        for file_name in tqdm(files):
            with open('./sql/' + file_name, 'r') as filebuf:
                data = filebuf.read()
            df = self.query_to_df(data)
            file_name = file_name.replace('.sql', '.csv')
            df.to_csv('/tmp/' + file_name)
if __name__ == '__main__':
    IlluminateSQLInterface().download_files()
|
mit
|
LeeKamentsky/CellProfiler
|
cellprofiler/modules/calculateimageoverlap.py
|
1
|
50378
|
import cellprofiler.icons
from cellprofiler.gui.help import PROTIP_RECOMEND_ICON
__doc__ = '''
<b>Calculate Image Overlap </b> calculates how much overlap occurs between the white portions of two black and white images
<hr>
This module calculates overlap by determining a set of statistics that measure the closeness of an image or object
to its true value. One image/object is considered the "ground truth" (possibly the result of hand-segmentation) and the other
is the "test" image/object; the images are determined to overlap most completely when the test image matches the ground
truth perfectly. If using images, the module requires binary (black and white) input, where the foreground is white and
the background is black. If you segment your images in CellProfiler using <b>IdentifyPrimaryObjects</b>,
you can create such an image using <b>ConvertObjectsToImage</b> by selecting <i>Binary</i> as the color type.
If your images have been segmented using other image processing software, or you have hand-segmented them in software
such as Photoshop, you may need to use one or more of the following to prepare the images for this module:
<ul>
<li> <b>ImageMath</b>: If the objects are black and the background is white, you must invert the intensity using this module.</li>
<li> <b>ApplyThreshold</b>: If the image is grayscale, you must make it binary using this module, or alternately use an <b>Identify</b> module followed by <b>ConvertObjectsToImage</b> as described above. </li>
<li> <b>ColorToGray</b>: If the image is in color, you must first convert it to grayscale using this module, and then use <b>ApplyThreshold</b> to generate a binary image. </li>
</ul>
In the test image, any foreground (white) pixels that overlap with the foreground of the ground
truth will be considered "true positives", since they are correctly labeled as foreground. Background (black)
pixels that overlap with the background of the ground truth image are considered "true negatives",
since they are correctly labeled as background. A foreground pixel in the test image that overlaps with the background in the ground truth image will
be considered a "false positive" (since it should have been labeled as part of the background),
while a background pixel in the test image that overlaps with foreground in the ground truth will be considered a "false negative"
(since it was labeled as part of the background, but should have been labeled as foreground).
<h4>Available measurements</h4>
<ul>
<li><b>For images and objects:</b>
<ul>
<li><i>True positive rate:</i> Total number of true positive pixels / total number of actual positive pixels.</li>
<li><i>False positive rate:</i> Total number of false positive pixels / total number of actual negative pixels </li>
<li><i>True negative rate:</i> Total number of true negative pixels / total number of actual negative pixels.</li>
<li><i>False negative rate:</i> Total number of false negative pixels / total number of actual positive pixels </li>
<li><i>Precision:</i> Number of true positive pixels / (number of true positive pixels + number of false positive pixels) </li>
<li><i>Recall:</i> Number of true positive pixels/ (number of true positive pixels + number of false negative pixels) </li>
<li><i>F-factor:</i> 2 × (precision × recall)/(precision + recall). Also known as F<sub>1</sub> score, F-score or F-measure.</li>
<li><i>Earth mover's distance:</i> The minimum distance required to move each
foreground pixel in the test image to some corresponding foreground pixel in the
reference image.</li>
</ul>
</li>
<li><b>For objects:</b>
<ul>
<li><i>Rand index:</i> A measure of the similarity between two data clusterings. Perfectly random clustering returns the minimum
score of 0, perfect clustering returns the maximum score of 1.</li>
<li><i>Adjusted Rand index:</i> A variation of the Rand index which takes into account the fact that random chance will cause some
objects to occupy the same clusters, so the Rand Index will never actually be zero. Can return a value between -1 and +1.</li>
</ul>
</li>
</ul>
<h4>References</h4>
<ul>
<li>Collins LM, Dent CW (1998) "Omega: A general formulation of the Rand Index of cluster
recovery suitable for non-disjoint solutions", <i>Multivariate Behavioral
Research</i>, 23, 231-242 <a href="http://dx.doi.org/10.1207/s15327906mbr2302_6">(link)</a></li>
<li>Pele O, Werman M (2009) "Fast and Robust Earth Mover's Distances",
<i>2009 IEEE 12th International Conference on Computer Vision</i></li>
</ul>
'''
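# ---------------------------------------------------------------------------
# Illustrative sketch only (not part of this module's API): a minimal numpy
# rendering of the pixel-wise statistics described in the documentation above,
# assuming two boolean masks of the same shape. The module itself computes
# these, plus the Rand index and earth mover's distance, in measure_image().
def _overlap_statistics_sketch(ground_truth, test):
    import numpy as np
    tp = np.sum(test & ground_truth)
    fp = np.sum(test & ~ground_truth)
    fn = np.sum(~test & ground_truth)
    # Follow the module's conventions for empty denominators.
    precision = float(tp) / (tp + fp) if (tp + fp) else 1.0
    recall = float(tp) / (tp + fn) if (tp + fn) else 1.0
    f_factor = (2.0 * precision * recall / (precision + recall)
                if (precision + recall) else 0.0)
    return precision, recall, f_factor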
# CellProfiler is distributed under the GNU General Public License.
# See the accompanying file LICENSE for details.
#
# Copyright (c) 2003-2009 Massachusetts Institute of Technology
# Copyright (c) 2009-2015 Broad Institute
#
# Please see the AUTHORS file for credits.
#
# Website: http://www.cellprofiler.org
import numpy as np
from contrib.english import ordinal
from scipy.ndimage import label, distance_transform_edt
from scipy.sparse import coo_matrix
import cellprofiler.cpimage as cpi
import cellprofiler.cpmodule as cpm
import cellprofiler.objects as cpo
import cellprofiler.measurements as cpmeas
import cellprofiler.settings as cps
import cellprofiler.cpmath.cpmorphology as morph
from cellprofiler.cpmath.index import Indexes
from cellprofiler.cpmath.fastemd import emd_hat_int32
from cellprofiler.cpmath.propagate import propagate
from cellprofiler.cpmath.filter import poisson_equation
C_IMAGE_OVERLAP = "Overlap"
FTR_F_FACTOR = "Ffactor"
FTR_PRECISION = "Precision"
FTR_RECALL = "Recall"
FTR_TRUE_POS_RATE = "TruePosRate"
FTR_FALSE_POS_RATE = "FalsePosRate"
FTR_FALSE_NEG_RATE = "FalseNegRate"
FTR_TRUE_NEG_RATE = "TrueNegRate"
FTR_RAND_INDEX = "RandIndex"
FTR_ADJUSTED_RAND_INDEX = "AdjustedRandIndex"
FTR_EARTH_MOVERS_DISTANCE = "EarthMoversDistance"
FTR_ALL = [FTR_F_FACTOR, FTR_PRECISION, FTR_RECALL,
FTR_TRUE_POS_RATE, FTR_TRUE_NEG_RATE,
FTR_FALSE_POS_RATE, FTR_FALSE_NEG_RATE,
FTR_RAND_INDEX, FTR_ADJUSTED_RAND_INDEX]
O_OBJ = "Segmented objects"
O_IMG = "Foreground/background segmentation"
O_ALL = [O_OBJ, O_IMG]
L_LOAD = "Loaded from a previous run"
L_CP = "From this CP pipeline"
DM_KMEANS = "K Means"
DM_SKEL = "Skeleton"
class CalculateImageOverlap(cpm.CPModule):
category = "Measurement"
variable_revision_number = 4
module_name = "CalculateImageOverlap"
def create_settings(self):
self.obj_or_img = cps.Choice(
"Compare segmented objects, or foreground/background?", O_ALL)
self.ground_truth = cps.ImageNameSubscriber(
"Select the image to be used as the ground truth basis for calculating the amount of overlap",
cps.NONE, doc = """
<i>(Used only when comparing foreground/background)</i> <br>
This binary (black and white) image is known as the "ground truth" image. It can be the product of segmentation performed by hand, or
the result of another segmentation algorithm whose results you would like to compare.""")
self.test_img = cps.ImageNameSubscriber(
"Select the image to be used to test for overlap",
cps.NONE, doc = """
<i>(Used only when comparing foreground/background)</i> <br>
This binary (black and white) image is what you will compare with the ground truth image. It is known as the "test image".""")
self.object_name_GT = cps.ObjectNameSubscriber(
"Select the objects to be used as the ground truth basis for calculating the amount of overlap",
cps.NONE, doc ="""
<i>(Used only when comparing segmented objects)</i> <br>
            Choose which set of objects will be used as the "ground truth" objects. It can be the product of segmentation performed by hand, or
the result of another segmentation algorithm whose results you would like to compare. See the <b>Load</b> modules for more details
on loading objects.""")
self.object_name_ID = cps.ObjectNameSubscriber(
"Select the objects to be tested for overlap against the ground truth",
cps.NONE, doc ="""
<i>(Used only when comparing segmented objects)</i> <br>
This set of objects is what you will compare with the ground truth objects. It is known as the "test object." """)
self.wants_emd = cps.Binary(
"Calculate earth mover's distance?", False,
doc="""The earth mover's distance computes the shortest distance
that would have to be travelled to move each foreground pixel in the
test image to some foreground pixel in the reference image.
"Earth mover's" refers to an analogy: the pixels are "earth" that
has to be moved by some machine at the smallest possible cost.
<br>
It would take too much memory and processing time to compute the
exact earth mover's distance, so <b>CalculateImageOverlap</b>
chooses representative foreground pixels in each image and
assigns each foreground pixel to its closest representative. The
earth mover's distance is then computed for moving the foreground
pixels associated with each representative in the test image to
those in the reference image.
""")
self.max_points = cps.Integer(
"Maximum # of points", value=250,
minval = 100,
doc = """
<i>(Used only when computing the earth mover's distance)</i> <br>
This is the number of representative points that will be taken
from the foreground of the test image and from the foreground of
the reference image using the point selection method (see below).
""")
self.decimation_method = cps.Choice(
"Point selection method",
choices = [DM_KMEANS, DM_SKEL],
doc = """
<i>(Used only when computing the earth mover's distance)</i> <br>
The point selection setting determines how the
representative points are chosen.
<ul>
<li><i>%(DM_KMEANS)s:</i> Select to pick representative points using a
K-Means clustering technique. The foregrounds of both images are combined
and representatives are picked that minimize the distance to the nearest
representative. The same representatives are then used for the test and
reference images.</li>
<li><i>%(DM_SKEL)s:</i> Select to skeletonize the image and pick
            points equidistant along the skeleton. </li>
</ul>
<dl>
<dd><img src="memory:%(PROTIP_RECOMEND_ICON)s">
<i>%(DM_KMEANS)s</i> is a
choice that's generally applicable to all images. <i>%(DM_SKEL)s</i>
is best suited to long, skinny objects such as worms or neurites.</dd>
</dl>
""" % globals())
self.max_distance = cps.Integer(
"Maximum distance", value=250, minval=1,
doc = """
<i>(Used only when computing the earth mover's distance)</i> <br>
This setting sets an upper bound to the distance penalty
assessed during the movement calculation. As an example, the score
for moving 10 pixels from one location to a location that is
100 pixels away is 10*100, but if the maximum distance were set
to 50, the score would be 10*50 instead.
<br>
The maximum distance should be set to the largest reasonable
distance that pixels could be expected to move from one image
to the next.
""")
self.penalize_missing = cps.Binary(
"Penalize missing pixels", value=False,
doc = """
<i>(Used only when computing the earth mover's distance)</i> <br>
If one image has more foreground pixels than the other, the
earth mover's distance is not well-defined because there is
no destination for the extra source pixels or vice-versa.
It's reasonable to assess a penalty for the discrepancy when
comparing the accuracy of a segmentation because the discrepancy
represents an error. It's also reasonable to assess no penalty
if the goal is to compute the cost of movement, for example between
two frames in a time-lapse movie, because the discrepancy is
likely caused by noise or artifacts in segmentation.
Set this setting to "Yes" to assess a penalty equal to the
maximum distance times the absolute difference in number of
foreground pixels in the two images. Set this setting to "No"
to assess no penalty.
""")
def settings(self):
result = [self.obj_or_img, self.ground_truth, self.test_img,
self.object_name_GT, self.object_name_ID,
self.wants_emd, self.max_points, self.decimation_method,
self.max_distance, self.penalize_missing]
return result
def visible_settings(self):
result = [self.obj_or_img]
if self.obj_or_img == O_IMG:
result += [self.ground_truth, self.test_img]
elif self.obj_or_img == O_OBJ:
result += [self.object_name_GT, self.object_name_ID]
result += [self.wants_emd]
if self.wants_emd:
result += [self.max_points, self.decimation_method,
self.max_distance, self.penalize_missing]
return result
def run(self,workspace):
if self.obj_or_img == O_IMG:
self.measure_image(workspace)
elif self.obj_or_img == O_OBJ:
self.measure_objects(workspace)
def measure_image(self, workspace):
'''Add the image overlap measurements'''
image_set = workspace.image_set
ground_truth_image = image_set.get_image(self.ground_truth.value,
must_be_binary = True)
test_image = image_set.get_image(self.test_img.value,
must_be_binary = True)
ground_truth_pixels = ground_truth_image.pixel_data
ground_truth_pixels = test_image.crop_image_similarly(ground_truth_pixels)
mask = ground_truth_image.mask
mask = test_image.crop_image_similarly(mask)
if test_image.has_mask:
mask = mask & test_image.mask
test_pixels = test_image.pixel_data
false_positives = test_pixels & ~ ground_truth_pixels
false_positives[~ mask] = False
false_negatives = (~ test_pixels) & ground_truth_pixels
false_negatives[~ mask] = False
true_positives = test_pixels & ground_truth_pixels
true_positives[ ~ mask] = False
true_negatives = (~ test_pixels) & (~ ground_truth_pixels)
true_negatives[~ mask] = False
false_positive_count = np.sum(false_positives)
true_positive_count = np.sum(true_positives)
false_negative_count = np.sum(false_negatives)
true_negative_count = np.sum(true_negatives)
labeled_pixel_count = true_positive_count + false_positive_count
true_count = true_positive_count + false_negative_count
##################################
#
# Calculate the F-Factor
#
# 2 * precision * recall
# -----------------------
# precision + recall
#
# precision = true positives / labeled
# recall = true positives / true count
#
###################################
if labeled_pixel_count == 0:
precision = 1.0
else:
precision = float(true_positive_count) / float(labeled_pixel_count)
if true_count == 0:
recall = 1.0
else:
recall = float(true_positive_count) / float(true_count)
if (precision + recall) == 0:
f_factor = 0.0 # From http://en.wikipedia.org/wiki/F1_score
else:
f_factor = 2.0 * precision * recall / (precision + recall)
negative_count = false_positive_count + true_negative_count
if negative_count == 0:
false_positive_rate = 0.0
true_negative_rate = 1.0
else:
false_positive_rate = (float(false_positive_count) /
float(negative_count))
true_negative_rate = (float(true_negative_count) /
float(negative_count))
if true_count == 0:
false_negative_rate = 0.0
true_positive_rate = 1.0
else:
false_negative_rate = (float(false_negative_count) /
float(true_count))
true_positive_rate = (float(true_positive_count) /
float(true_count))
ground_truth_labels, ground_truth_count = label(
ground_truth_pixels & mask, np.ones((3, 3), bool))
test_labels, test_count = label(
test_pixels & mask, np.ones((3, 3), bool))
rand_index, adjusted_rand_index = self.compute_rand_index(
test_labels, ground_truth_labels, mask)
m = workspace.measurements
m.add_image_measurement(self.measurement_name(FTR_F_FACTOR), f_factor)
m.add_image_measurement(self.measurement_name(FTR_PRECISION),
precision)
m.add_image_measurement(self.measurement_name(FTR_RECALL), recall)
m.add_image_measurement(self.measurement_name(FTR_TRUE_POS_RATE),
true_positive_rate)
m.add_image_measurement(self.measurement_name(FTR_FALSE_POS_RATE),
false_positive_rate)
m.add_image_measurement(self.measurement_name(FTR_TRUE_NEG_RATE),
true_negative_rate)
m.add_image_measurement(self.measurement_name(FTR_FALSE_NEG_RATE),
false_negative_rate)
m.add_image_measurement(self.measurement_name(FTR_RAND_INDEX),
rand_index)
m.add_image_measurement(self.measurement_name(FTR_ADJUSTED_RAND_INDEX),
adjusted_rand_index)
if self.wants_emd:
test_objects = cpo.Objects()
test_objects.segmented = test_labels
ground_truth_objects = cpo.Objects()
ground_truth_objects.segmented = ground_truth_labels
emd = self.compute_emd(test_objects, ground_truth_objects)
m.add_image_measurement(
self.measurement_name(FTR_EARTH_MOVERS_DISTANCE), emd)
if self.show_window:
workspace.display_data.true_positives = true_positives
workspace.display_data.true_negatives = true_negatives
workspace.display_data.false_positives = false_positives
workspace.display_data.false_negatives = false_negatives
workspace.display_data.rand_index = rand_index
workspace.display_data.adjusted_rand_index = adjusted_rand_index
workspace.display_data.statistics = [
(FTR_F_FACTOR, f_factor),
(FTR_PRECISION, precision),
(FTR_RECALL, recall),
(FTR_FALSE_POS_RATE, false_positive_rate),
(FTR_FALSE_NEG_RATE, false_negative_rate),
(FTR_RAND_INDEX, rand_index),
(FTR_ADJUSTED_RAND_INDEX, adjusted_rand_index)
]
if self.wants_emd:
workspace.display_data.statistics.append(
(FTR_EARTH_MOVERS_DISTANCE, emd))
def measure_objects(self, workspace):
image_set = workspace.image_set
object_name_GT = self.object_name_GT.value
objects_GT = workspace.get_objects(object_name_GT)
iGT,jGT,lGT = objects_GT.ijv.transpose()
object_name_ID = self.object_name_ID.value
objects_ID = workspace.get_objects(object_name_ID)
iID, jID, lID = objects_ID.ijv.transpose()
ID_obj = 0 if len(lID) == 0 else max(lID)
GT_obj = 0 if len(lGT) == 0 else max(lGT)
xGT, yGT = objects_GT.shape
xID, yID = objects_ID.shape
GT_pixels = np.zeros((xGT, yGT))
ID_pixels = np.zeros((xID, yID))
total_pixels = xGT*yGT
GT_pixels[iGT, jGT] = 1
ID_pixels[iID, jID] = 1
GT_tot_area = len(iGT)
if len(iGT) == 0 and len(iID) == 0:
intersect_matrix = np.zeros((0, 0), int)
else:
#
# Build a matrix with rows of i, j, label and a GT/ID flag
#
all_ijv = np.column_stack(
(np.hstack((iGT, iID)),
np.hstack((jGT, jID)),
np.hstack((lGT, lID)),
np.hstack((np.zeros(len(iGT)), np.ones(len(iID))))))
#
# Order it so that runs of the same i, j are consecutive
#
order = np.lexsort((all_ijv[:, -1], all_ijv[:, 0], all_ijv[:, 1]))
all_ijv = all_ijv[order, :]
# Mark the first at each i, j != previous i, j
first = np.where(np.hstack(
([True],
~ np.all(all_ijv[:-1, :2] == all_ijv[1:, :2], 1),
[True])))[0]
# Count # at each i, j
count = first[1:] - first[:-1]
# First indexer - mapping from i,j to index in all_ijv
all_ijv_map = Indexes([count])
# Bincount to get the # of ID pixels per i,j
id_count = np.bincount(all_ijv_map.rev_idx,
all_ijv[:, -1]).astype(int)
gt_count = count - id_count
# Now we can create an indexer that has NxM elements per i,j
# where N is the number of GT pixels at that i,j and M is
# the number of ID pixels. We can then use the indexer to pull
# out the label values for each to populate a sparse array.
#
cross_map = Indexes([id_count, gt_count])
off_gt = all_ijv_map.fwd_idx[cross_map.rev_idx] + cross_map.idx[0]
off_id = all_ijv_map.fwd_idx[cross_map.rev_idx] + cross_map.idx[1]+\
id_count[cross_map.rev_idx]
intersect_matrix = coo_matrix(
(np.ones(len(off_gt)),
(all_ijv[off_id, 2], all_ijv[off_gt, 2])),
shape = (ID_obj+1, GT_obj+1)).toarray()[1:, 1:]
gt_areas = objects_GT.areas
id_areas = objects_ID.areas
FN_area = gt_areas[np.newaxis, :] - intersect_matrix
all_intersecting_area = np.sum(intersect_matrix)
dom_ID = []
for i in range(0, ID_obj):
indices_jj = np.nonzero(lID==i)
indices_jj = indices_jj[0]
id_i = iID[indices_jj]
id_j = jID[indices_jj]
ID_pixels[id_i, id_j] = 1
for i in intersect_matrix: # loop through the GT objects first
if len(i) == 0 or max(i) == 0:
id = -1 # we missed the object; arbitrarily assign -1 index
else:
id = np.where(i == max(i))[0][0] # what is the ID of the max pixels?
dom_ID += [id] # for ea GT object, which is the dominating ID?
dom_ID = np.array(dom_ID)
for i in range(0, len(intersect_matrix.T)):
if len(np.where(dom_ID == i)[0]) > 1:
final_id = np.where(intersect_matrix.T[i] == max(intersect_matrix.T[i]))
final_id = final_id[0][0]
all_id = np.where(dom_ID == i)[0]
nonfinal = [x for x in all_id if x != final_id]
for n in nonfinal: # these others cannot be candidates for the corr ID now
intersect_matrix.T[i][n] = 0
else :
continue
TP = 0
FN = 0
FP = 0
for i in range(0,len(dom_ID)):
d = dom_ID[i]
if d == -1:
tp = 0
fn = id_areas[i]
fp = 0
else:
fp = np.sum(intersect_matrix[i][0:d])+np.sum(intersect_matrix[i][(d+1)::])
tp = intersect_matrix[i][d]
fn = FN_area[i][d]
TP += tp
FN += fn
FP += fp
TN = max(0, total_pixels - TP - FN - FP)
def nan_divide(numerator, denominator):
if denominator == 0:
return np.nan
return float(numerator) / float(denominator)
accuracy = nan_divide(TP, all_intersecting_area)
recall = nan_divide(TP, GT_tot_area)
precision = nan_divide(TP, (TP+FP))
F_factor = nan_divide(2*(precision*recall), (precision+recall))
true_positive_rate = nan_divide(TP, (FN+TP))
false_positive_rate = nan_divide(FP, (FP+TN))
false_negative_rate = nan_divide(FN, (FN+TP))
true_negative_rate = nan_divide(TN , (FP+TN))
shape = np.maximum(np.maximum(
np.array(objects_GT.shape), np.array(objects_ID.shape)),
np.ones(2, int))
rand_index, adjusted_rand_index = self.compute_rand_index_ijv(
objects_GT.ijv, objects_ID.ijv, shape)
m = workspace.measurements
m.add_image_measurement(self.measurement_name(FTR_F_FACTOR), F_factor)
m.add_image_measurement(self.measurement_name(FTR_PRECISION),
precision)
m.add_image_measurement(self.measurement_name(FTR_RECALL), recall)
m.add_image_measurement(self.measurement_name(FTR_TRUE_POS_RATE),
true_positive_rate)
m.add_image_measurement(self.measurement_name(FTR_FALSE_POS_RATE),
false_positive_rate)
m.add_image_measurement(self.measurement_name(FTR_TRUE_NEG_RATE),
true_negative_rate)
m.add_image_measurement(self.measurement_name(FTR_FALSE_NEG_RATE),
false_negative_rate)
m.add_image_measurement(self.measurement_name(FTR_RAND_INDEX),
rand_index)
m.add_image_measurement(self.measurement_name(FTR_ADJUSTED_RAND_INDEX),
adjusted_rand_index)
def subscripts(condition1, condition2):
x1,y1 = np.where(GT_pixels == condition1)
x2,y2 = np.where(ID_pixels == condition2)
mask = set(zip(x1,y1)) & set(zip(x2,y2))
return list(mask)
TP_mask = subscripts(1,1)
FN_mask = subscripts(1,0)
FP_mask = subscripts(0,1)
TN_mask = subscripts(0,0)
TP_pixels = np.zeros((xGT,yGT))
FN_pixels = np.zeros((xGT,yGT))
FP_pixels = np.zeros((xGT,yGT))
TN_pixels = np.zeros((xGT,yGT))
def maskimg(mask,img):
for ea in mask:
img[ea] = 1
return img
TP_pixels = maskimg(TP_mask, TP_pixels)
FN_pixels = maskimg(FN_mask, FN_pixels)
FP_pixels = maskimg(FP_mask, FP_pixels)
TN_pixels = maskimg(TN_mask, TN_pixels)
if self.wants_emd:
emd = self.compute_emd(objects_ID, objects_GT)
m.add_image_measurement(
self.measurement_name(FTR_EARTH_MOVERS_DISTANCE), emd)
if self.show_window:
workspace.display_data.true_positives = TP_pixels
workspace.display_data.true_negatives = TN_pixels
workspace.display_data.false_positives = FP_pixels
workspace.display_data.false_negatives = FN_pixels
workspace.display_data.statistics = [
(FTR_F_FACTOR, F_factor),
(FTR_PRECISION, precision),
(FTR_RECALL, recall),
(FTR_FALSE_POS_RATE, false_positive_rate),
(FTR_FALSE_NEG_RATE, false_negative_rate),
(FTR_RAND_INDEX, rand_index),
(FTR_ADJUSTED_RAND_INDEX, adjusted_rand_index)
]
if self.wants_emd:
workspace.display_data.statistics.append(
(FTR_EARTH_MOVERS_DISTANCE, emd))
    def compute_rand_index(self, test_labels, ground_truth_labels, mask):
        """Calculate the Rand Index
http://en.wikipedia.org/wiki/Rand_index
Given a set of N elements and two partitions of that set, X and Y
A = the number of pairs of elements in S that are in the same set in
X and in the same set in Y
B = the number of pairs of elements in S that are in different sets
in X and different sets in Y
C = the number of pairs of elements in S that are in the same set in
X and different sets in Y
D = the number of pairs of elements in S that are in different sets
in X and the same set in Y
The rand index is: A + B
-----
A+B+C+D
The adjusted rand index is the rand index adjusted for chance
so as not to penalize situations with many segmentations.
Jorge M. Santos, Mark Embrechts, "On the Use of the Adjusted Rand
Index as a Metric for Evaluating Supervised Classification",
Lecture Notes in Computer Science,
Springer, Vol. 5769, pp. 175-184, 2009. Eqn # 6
        ExpectedIndex = the expected value of the index under random chance
        ExpectedIndex = sum(N_i choose 2) * sum(N_j choose 2)
        MaxIndex = the maximum achievable index = 1/2 (sum(N_i choose 2) + sum(N_j choose 2)) * total
A * total - ExpectedIndex
-------------------------
MaxIndex - ExpectedIndex
returns a tuple of the Rand Index and the adjusted Rand Index
"""
ground_truth_labels = ground_truth_labels[mask].astype(np.uint64)
test_labels = test_labels[mask].astype(np.uint64)
if len(test_labels) > 0:
#
# Create a sparse matrix of the pixel labels in each of the sets
#
# The matrix, N(i,j) gives the counts of all of the pixels that were
# labeled with label I in the ground truth and label J in the
# test set.
#
N_ij = coo_matrix((np.ones(len(test_labels)),
(ground_truth_labels, test_labels))).toarray()
def choose2(x):
'''Compute # of pairs of x things = x * (x-1) / 2'''
return x * (x - 1) / 2
#
# Each cell in the matrix is a count of a grouping of pixels whose
# pixel pairs are in the same set in both groups. The number of
# pixel pairs is n * (n - 1), so A = sum(matrix * (matrix - 1))
#
A = np.sum(choose2(N_ij))
#
# B is the sum of pixels that were classified differently by both
# sets. But the easier calculation is to find A, C and D and get
# B by subtracting A, C and D from the N * (N - 1), the total
# number of pairs.
#
# For C, we take the number of pixels classified as "i" and for each
# "j", subtract N(i,j) from N(i) to get the number of pixels in
# N(i,j) that are in some other set = (N(i) - N(i,j)) * N(i,j)
#
# We do the similar calculation for D
#
N_i = np.sum(N_ij, 1)
N_j = np.sum(N_ij, 0)
C = np.sum((N_i[:, np.newaxis] - N_ij) * N_ij) / 2
D = np.sum((N_j[np.newaxis, :] - N_ij) * N_ij) / 2
total = choose2(len(test_labels))
# an astute observer would say, why bother computing A and B
# when all we need is A+B and C, D and the total can be used to do
# that. The calculations aren't too expensive, though, so I do them.
B = total - A - C - D
rand_index = (A + B) / total
#
# Compute adjusted Rand Index
#
expected_index = np.sum(choose2(N_i)) * np.sum(choose2(N_j))
max_index = (np.sum(choose2(N_i)) + np.sum(choose2(N_j))) * total / 2
adjusted_rand_index = \
(A * total - expected_index) / (max_index - expected_index)
else:
rand_index = adjusted_rand_index = np.nan
return rand_index, adjusted_rand_index
def compute_rand_index_ijv(self, gt_ijv, test_ijv, shape):
'''Compute the Rand Index for an IJV matrix
This is in part based on the Omega Index:
Collins, "Omega: A General Formulation of the Rand Index of Cluster
Recovery Suitable for Non-disjoint Solutions", Multivariate Behavioral
Research, 1988, 23, 231-242
The basic idea of the paper is that a pair should be judged to
agree only if the number of clusters in which they appear together
is the same.
'''
#
# The idea here is to assign a label to every pixel position based
# on the set of labels given to that position by both the ground
# truth and the test set. We then assess each pair of labels
# as agreeing or disagreeing as to the number of matches.
#
# First, add the backgrounds to the IJV with a label of zero
#
gt_bkgd = np.ones(shape, bool)
gt_bkgd[gt_ijv[:, 0], gt_ijv[:, 1]] = False
test_bkgd = np.ones(shape, bool)
test_bkgd[test_ijv[:, 0], test_ijv[:, 1]] = False
gt_ijv = np.vstack([
gt_ijv,
np.column_stack([np.argwhere(gt_bkgd),
np.zeros(np.sum(gt_bkgd), gt_bkgd.dtype)])])
test_ijv = np.vstack([
test_ijv,
np.column_stack([np.argwhere(test_bkgd),
np.zeros(np.sum(test_bkgd), test_bkgd.dtype)])])
#
# Create a unified structure for the pixels where a fourth column
# tells you whether the pixels came from the ground-truth or test
#
u = np.vstack([
np.column_stack([gt_ijv, np.zeros(gt_ijv.shape[0], gt_ijv.dtype)]),
np.column_stack([test_ijv, np.ones(test_ijv.shape[0], test_ijv.dtype)])])
#
# Sort by coordinates, then by identity
#
order = np.lexsort([u[:, 2], u[:, 3], u[:, 0], u[:, 1]])
u = u[order, :]
# Get rid of any duplicate labelings (same point labeled twice with
# same label.
#
first = np.hstack([[True], np.any(u[:-1, :] != u[1:, :], 1)])
u = u[first, :]
#
# Create a 1-d indexer to point at each unique coordinate.
#
first_coord_idxs = np.hstack([
[0],
np.argwhere((u[:-1, 0] != u[1:, 0]) |
(u[:-1, 1] != u[1:, 1])).flatten() + 1,
[u.shape[0]]])
first_coord_counts = first_coord_idxs[1:] - first_coord_idxs[:-1]
indexes = Indexes([first_coord_counts])
#
# Count the number of labels at each point for both gt and test
#
count_test = np.bincount(indexes.rev_idx, u[:, 3]).astype(np.int64)
count_gt = first_coord_counts - count_test
#
# For each # of labels, pull out the coordinates that have
# that many labels. Count the number of similarly labeled coordinates
# and record the count and labels for that group.
#
labels = []
for i in range(1, np.max(count_test)+1):
for j in range(1, np.max(count_gt)+1):
match = ((count_test[indexes.rev_idx] == i) &
(count_gt[indexes.rev_idx] == j))
if not np.any(match):
continue
#
# Arrange into an array where the rows are coordinates
# and the columns are the labels for that coordinate
#
lm = u[match, 2].reshape(np.sum(match) / (i+j), i+j)
#
# Sort by label.
#
order = np.lexsort(lm.transpose())
lm = lm[order, :]
#
# Find indices of unique and # of each
#
lm_first = np.hstack([
[0],
np.argwhere(np.any(lm[:-1, :] != lm[1:, :], 1)).flatten()+1,
[lm.shape[0]]])
lm_count = lm_first[1:] - lm_first[:-1]
for idx, count in zip(lm_first[:-1], lm_count):
labels.append((count,
lm[idx, :j],
lm[idx, j:]))
#
# We now have our sets partitioned. Do each against each to get
# the number of true positive and negative pairs.
#
max_t_labels = reduce(max, [len(t) for c, t, g in labels], 0)
max_g_labels = reduce(max, [len(g) for c, t, g in labels], 0)
#
# tbl is the contingency table from Table 4 of the Collins paper
# It's a table of the number of pairs which fall into M sets
# in the ground truth case and N in the test case.
#
tbl = np.zeros(((max_t_labels + 1), (max_g_labels + 1)))
for i, (c1, tobject_numbers1, gobject_numbers1) in enumerate(labels):
for j, (c2, tobject_numbers2, gobject_numbers2) in \
enumerate(labels[i:]):
nhits_test = np.sum(
tobject_numbers1[:, np.newaxis] ==
tobject_numbers2[np.newaxis, :])
nhits_gt = np.sum(
gobject_numbers1[:, np.newaxis] ==
gobject_numbers2[np.newaxis, :])
if j == 0:
N = c1 * (c1 - 1) / 2
else:
N = c1 * c2
tbl[nhits_test, nhits_gt] += N
N = np.sum(tbl)
#
# Equation 13 from the paper
#
min_JK = min(max_t_labels, max_g_labels)+1
rand_index = np.sum(tbl[:min_JK, :min_JK] * np.identity(min_JK)) / N
#
# Equation 15 from the paper, the expected index
#
e_omega = np.sum(np.sum(tbl[:min_JK,:min_JK], 0) *
np.sum(tbl[:min_JK,:min_JK], 1)) / N **2
#
# Equation 16 is the adjusted index
#
adjusted_rand_index = (rand_index - e_omega) / (1 - e_omega)
return rand_index, adjusted_rand_index
def compute_emd(self, src_objects, dest_objects):
        '''Compute the earth mover's distance between two sets of objects
src_objects - move pixels from these objects
dest_objects - move pixels to these objects
returns the earth mover's distance
'''
#
# if either foreground set is empty, the emd is the penalty.
#
for angels, demons in ((src_objects, dest_objects),
(dest_objects, src_objects)):
if angels.count == 0:
if self.penalize_missing:
return np.sum(demons.areas) * self.max_distance.value
else:
return 0
if self.decimation_method == DM_KMEANS:
isrc, jsrc = self.get_kmeans_points(src_objects, dest_objects)
idest, jdest = isrc, jsrc
else:
isrc, jsrc = self.get_skeleton_points(src_objects)
idest, jdest = self.get_skeleton_points(dest_objects)
src_weights, dest_weights = [
self.get_weights(i, j, self.get_labels_mask(objects))
for i, j, objects in ((isrc, jsrc, src_objects),
(idest, jdest, dest_objects))]
ioff, joff = [src[:, np.newaxis] - dest[np.newaxis, :]
for src, dest in ((isrc, idest), (jsrc, jdest))]
c = np.sqrt(ioff*ioff + joff*joff).astype(np.int32)
c[c>self.max_distance.value] = self.max_distance.value
extra_mass_penalty = \
self.max_distance.value if self.penalize_missing else 0
return emd_hat_int32(
src_weights.astype(np.int32),
dest_weights.astype(np.int32),
c,
extra_mass_penalty = extra_mass_penalty)
def get_labels_mask(self, obj):
labels_mask = np.zeros(obj.shape, bool)
for labels, indexes in obj.get_labels():
labels_mask = labels_mask | labels > 0
return labels_mask
def get_skeleton_points(self, obj):
'''Get points by skeletonizing the objects and decimating'''
ii = []
jj = []
total_skel = np.zeros(obj.shape, bool)
for labels, indexes in obj.get_labels():
colors = morph.color_labels(labels)
for color in range(1, np.max(colors) + 1):
labels_mask = colors == color
skel = morph.skeletonize(
labels_mask,
ordering = distance_transform_edt(labels_mask) *
poisson_equation(labels_mask))
total_skel = total_skel | skel
n_pts = np.sum(total_skel)
if n_pts == 0:
return np.zeros(0, np.int32), np.zeros(0, np.int32)
i, j = np.where(total_skel)
if n_pts > self.max_points.value:
#
# Decimate the skeleton by finding the branchpoints in the
# skeleton and propagating from those.
#
markers = np.zeros(total_skel.shape, np.int32)
branchpoints = \
morph.branchpoints(total_skel) | morph.endpoints(total_skel)
markers[branchpoints] = np.arange(np.sum(branchpoints))+1
#
# We compute the propagation distance to that point, then impose
            # a slightly arbitrary order to get an unambiguous ordering
# which should number the pixels in a skeleton branch monotonically
#
ts_labels, distances = propagate(np.zeros(markers.shape),
markers, total_skel, 1)
order = np.lexsort((j, i, distances[i, j], ts_labels[i, j]))
#
# Get a linear space of self.max_points elements with bounds at
# 0 and len(order)-1 and use that to select the points.
#
order = order[
np.linspace(0, len(order)-1, self.max_points.value).astype(int)]
return i[order], j[order]
return i, j
def get_kmeans_points(self, src_obj, dest_obj):
'''Get representative points in the objects using K means
src_obj - get some of the foreground points from the source objects
dest_obj - get the rest of the foreground points from the destination
objects
returns a vector of i coordinates of representatives and a vector
of j coordinates
'''
from sklearn.cluster import KMeans
ijv = np.vstack((src_obj.ijv, dest_obj.ijv))
if len(ijv) <= self.max_points.value:
return ijv[:, 0], ijv[:, 1]
random_state = np.random.RandomState()
random_state.seed(ijv.astype(int).flatten())
kmeans = KMeans(n_clusters = self.max_points.value, tol=2,
random_state=random_state)
kmeans.fit(ijv[:, :2])
return kmeans.cluster_centers_[:, 0].astype(np.uint32),\
kmeans.cluster_centers_[:, 1].astype(np.uint32)
def get_weights(self, i, j, labels_mask):
'''Return the weights to assign each i,j point
Assign each pixel in the labels mask to the nearest i,j and return
the number of pixels assigned to each i,j
'''
#
# Create a mapping of chosen points to their index in the i,j array
#
total_skel = np.zeros(labels_mask.shape, int)
total_skel[i, j] = np.arange(1, len(i)+1)
#
# Compute the distance from each chosen point to all others in image,
# return the nearest point.
#
ii, jj = distance_transform_edt(
total_skel==0,
return_indices=True,
return_distances=False)
#
# Filter out all unmasked points
#
ii, jj = [x[labels_mask] for x in ii,jj]
if len(ii) == 0:
return np.zeros(0, np.int32)
#
# Use total_skel to look up the indices of the chosen points and
# bincount the indices.
#
result = np.zeros(len(i), np.int32)
bc = np.bincount(total_skel[ii, jj])[1:]
result[:len(bc)] = bc
return result
def display(self, workspace, figure):
'''Display the image confusion matrix & statistics'''
figure.set_subplots((3, 2))
for x, y, image, label in (
(0, 0, workspace.display_data.true_positives, "True positives"),
(0, 1, workspace.display_data.false_positives, "False positives"),
(1, 0, workspace.display_data.false_negatives, "False negatives"),
(1, 1, workspace.display_data.true_negatives, "True negatives")):
figure.subplot_imshow_bw(x, y, image, title=label,
sharexy = figure.subplot(0,0))
figure.subplot_table(2, 0,
workspace.display_data.statistics,
col_labels = ("Measurement", "Value"),
n_rows = 2)
def measurement_name(self, feature):
if self.obj_or_img == O_IMG:
name = '_'.join((C_IMAGE_OVERLAP, feature, self.test_img.value))
if self.obj_or_img == O_OBJ:
name = '_'.join((C_IMAGE_OVERLAP, feature,
self.object_name_GT.value,
self.object_name_ID.value))
return name
def get_categories(self, pipeline, object_name):
'''Return the measurement categories for an object'''
if object_name == cpmeas.IMAGE:
return [ C_IMAGE_OVERLAP ]
return []
def get_measurements(self, pipeline, object_name, category):
'''Return the measurements made for a category'''
if object_name == cpmeas.IMAGE and category == C_IMAGE_OVERLAP:
return self.all_features()
return []
def get_measurement_images(self, pipeline, object_name, category,
measurement):
'''Return the images that were used when making the measurement'''
if measurement in self.get_measurements(pipeline, object_name, category)\
and self.obj_or_img == O_IMG:
return [self.test_img.value]
return []
def get_measurement_scales(
self, pipeline, object_name, category, measurement, image_name):
'''Return a "scale" that captures the measurement parameters
pipeline - the module's pipeline
object_name - should be "Images"
category - measurement category
measurement - measurement feature name
image_name - ignored
The "scale" in this case is the combination of ground-truth objects and
test objects.
'''
if (object_name == cpmeas.IMAGE and category == C_IMAGE_OVERLAP and
measurement in FTR_ALL and self.obj_or_img == O_OBJ):
return ["_".join((self.object_name_GT.value,
self.object_name_ID.value))]
return []
def all_features(self):
'''Return a list of all the features measured by this module'''
all_features = list(FTR_ALL)
if self.wants_emd:
all_features.append(FTR_EARTH_MOVERS_DISTANCE)
return all_features
def get_measurement_columns(self, pipeline):
'''Return database column information for each measurement'''
return [ (cpmeas.IMAGE,
self.measurement_name(feature),
cpmeas.COLTYPE_FLOAT)
for feature in self.all_features()]
def upgrade_settings(self, setting_values, variable_revision_number,
module_name, from_matlab):
if from_matlab:
# Variable revision # wasn't in Matlab file
# All settings were identical to CP 2.0 v 1
from_matlab = False
variable_revision_number = 1
if variable_revision_number == 1:
#no object choice before rev 2
old_setting_values = setting_values
setting_values = [
O_IMG, old_setting_values[0], old_setting_values[1],
"None", "None", "None", "None"]
variable_revision_number = 2
if variable_revision_number == 2:
#
# Removed images associated with objects from the settings
#
setting_values = setting_values[:4] + setting_values[5:6]
variable_revision_number = 3
if variable_revision_number == 3:
#
# Added earth mover's distance
#
setting_values = setting_values + [
cps.NO, # wants_emd
250, # max points
DM_KMEANS, # decimation method
250, # max distance
cps.NO # penalize missing
]
variable_revision_number = 4
return setting_values, variable_revision_number, from_matlab
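# ---------------------------------------------------------------------------
# Illustrative sketch only (not part of this module): compute_rand_index()
# above works from a contingency table of pixel label co-occurrences. This
# standalone helper mirrors that computation for two 1-d vectors of
# non-negative integer labels (at least two elements), using only numpy/scipy.
def _rand_index_sketch(labels_a, labels_b):
    import numpy as np
    from scipy.sparse import coo_matrix
    n_ij = coo_matrix((np.ones(len(labels_a)),
                       (labels_a, labels_b))).toarray()
    def choose2(x):
        return x * (x - 1) / 2.0
    a = np.sum(choose2(n_ij))           # pairs grouped together in both
    row = np.sum(choose2(n_ij.sum(1)))  # pairs grouped together in the first
    col = np.sum(choose2(n_ij.sum(0)))  # pairs grouped together in the second
    total = choose2(len(labels_a))
    b = total - row - col + a           # pairs kept apart in both
    rand_index = (a + b) / total
    expected = row * col / total
    adjusted = (a - expected) / ((row + col) / 2.0 - expected)
    return rand_index, adjusted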
|
gpl-2.0
|
abhishekkrthakur/scikit-learn
|
sklearn/decomposition/tests/test_sparse_pca.py
|
31
|
6002
|
# Author: Vlad Niculae
# License: BSD 3 clause
import sys
import numpy as np
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import SkipTest
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_false
from sklearn.utils.testing import if_not_mac_os
from sklearn.decomposition import SparsePCA, MiniBatchSparsePCA
from sklearn.utils import check_random_state
def generate_toy_data(n_components, n_samples, image_size, random_state=None):
n_features = image_size[0] * image_size[1]
rng = check_random_state(random_state)
U = rng.randn(n_samples, n_components)
V = rng.randn(n_components, n_features)
centers = [(3, 3), (6, 7), (8, 1)]
sz = [1, 2, 1]
for k in range(n_components):
img = np.zeros(image_size)
xmin, xmax = centers[k][0] - sz[k], centers[k][0] + sz[k]
ymin, ymax = centers[k][1] - sz[k], centers[k][1] + sz[k]
img[xmin:xmax][:, ymin:ymax] = 1.0
V[k, :] = img.ravel()
# Y is defined by : Y = UV + noise
Y = np.dot(U, V)
Y += 0.1 * rng.randn(Y.shape[0], Y.shape[1]) # Add noise
return Y, U, V
# SparsePCA can be a bit slow. To avoid having test times go up, we
# test different aspects of the code in the same test
def test_correct_shapes():
rng = np.random.RandomState(0)
X = rng.randn(12, 10)
spca = SparsePCA(n_components=8, random_state=rng)
U = spca.fit_transform(X)
assert_equal(spca.components_.shape, (8, 10))
assert_equal(U.shape, (12, 8))
# test overcomplete decomposition
spca = SparsePCA(n_components=13, random_state=rng)
U = spca.fit_transform(X)
assert_equal(spca.components_.shape, (13, 10))
assert_equal(U.shape, (12, 13))
def test_fit_transform():
alpha = 1
rng = np.random.RandomState(0)
Y, _, _ = generate_toy_data(3, 10, (8, 8), random_state=rng) # wide array
spca_lars = SparsePCA(n_components=3, method='lars', alpha=alpha,
random_state=0)
spca_lars.fit(Y)
# Test that CD gives similar results
spca_lasso = SparsePCA(n_components=3, method='cd', random_state=0,
alpha=alpha)
spca_lasso.fit(Y)
assert_array_almost_equal(spca_lasso.components_, spca_lars.components_)
@if_not_mac_os()
def test_fit_transform_parallel():
alpha = 1
rng = np.random.RandomState(0)
Y, _, _ = generate_toy_data(3, 10, (8, 8), random_state=rng) # wide array
spca_lars = SparsePCA(n_components=3, method='lars', alpha=alpha,
random_state=0)
spca_lars.fit(Y)
U1 = spca_lars.transform(Y)
# Test multiple CPUs
spca = SparsePCA(n_components=3, n_jobs=2, method='lars', alpha=alpha,
random_state=0).fit(Y)
U2 = spca.transform(Y)
assert_true(not np.all(spca_lars.components_ == 0))
assert_array_almost_equal(U1, U2)
def test_transform_nan():
"""
    Test that SparsePCA won't return NaN when a feature is zero in all
samples.
"""
rng = np.random.RandomState(0)
Y, _, _ = generate_toy_data(3, 10, (8, 8), random_state=rng) # wide array
Y[:, 0] = 0
estimator = SparsePCA(n_components=8)
assert_false(np.any(np.isnan(estimator.fit_transform(Y))))
def test_fit_transform_tall():
rng = np.random.RandomState(0)
Y, _, _ = generate_toy_data(3, 65, (8, 8), random_state=rng) # tall array
spca_lars = SparsePCA(n_components=3, method='lars',
random_state=rng)
U1 = spca_lars.fit_transform(Y)
spca_lasso = SparsePCA(n_components=3, method='cd', random_state=rng)
U2 = spca_lasso.fit(Y).transform(Y)
assert_array_almost_equal(U1, U2)
def test_initialization():
rng = np.random.RandomState(0)
U_init = rng.randn(5, 3)
V_init = rng.randn(3, 4)
model = SparsePCA(n_components=3, U_init=U_init, V_init=V_init, max_iter=0,
random_state=rng)
model.fit(rng.randn(5, 4))
assert_array_equal(model.components_, V_init)
def test_mini_batch_correct_shapes():
rng = np.random.RandomState(0)
X = rng.randn(12, 10)
pca = MiniBatchSparsePCA(n_components=8, random_state=rng)
U = pca.fit_transform(X)
assert_equal(pca.components_.shape, (8, 10))
assert_equal(U.shape, (12, 8))
# test overcomplete decomposition
pca = MiniBatchSparsePCA(n_components=13, random_state=rng)
U = pca.fit_transform(X)
assert_equal(pca.components_.shape, (13, 10))
assert_equal(U.shape, (12, 13))
def test_mini_batch_fit_transform():
raise SkipTest("skipping mini_batch_fit_transform.")
alpha = 1
rng = np.random.RandomState(0)
Y, _, _ = generate_toy_data(3, 10, (8, 8), random_state=rng) # wide array
spca_lars = MiniBatchSparsePCA(n_components=3, random_state=0,
alpha=alpha).fit(Y)
U1 = spca_lars.transform(Y)
# Test multiple CPUs
if sys.platform == 'win32': # fake parallelism for win32
import sklearn.externals.joblib.parallel as joblib_par
_mp = joblib_par.multiprocessing
joblib_par.multiprocessing = None
try:
U2 = MiniBatchSparsePCA(n_components=3, n_jobs=2, alpha=alpha,
random_state=0).fit(Y).transform(Y)
finally:
joblib_par.multiprocessing = _mp
else: # we can efficiently use parallelism
U2 = MiniBatchSparsePCA(n_components=3, n_jobs=2, alpha=alpha,
random_state=0).fit(Y).transform(Y)
assert_true(not np.all(spca_lars.components_ == 0))
assert_array_almost_equal(U1, U2)
# Test that CD gives similar results
spca_lasso = MiniBatchSparsePCA(n_components=3, method='cd', alpha=alpha,
random_state=0).fit(Y)
assert_array_almost_equal(spca_lasso.components_, spca_lars.components_)
|
bsd-3-clause
|
davidgbe/scikit-learn
|
examples/datasets/plot_random_multilabel_dataset.py
|
278
|
3402
|
"""
==============================================
Plot randomly generated multilabel dataset
==============================================
This illustrates the `datasets.make_multilabel_classification` dataset
generator. Each sample consists of counts of two features (up to 50 in
total), which are differently distributed in each of two classes.
Points are labeled as follows, where Y means the class is present:
===== ===== ===== ======
1 2 3 Color
===== ===== ===== ======
Y N N Red
N Y N Blue
N N Y Yellow
Y Y N Purple
Y N Y Orange
N      Y      Y    Green
Y Y Y Brown
===== ===== ===== ======
A star marks the expected sample for each class; its size reflects the
probability of selecting that class label.
The left and right examples highlight the ``n_labels`` parameter:
more of the samples in the right plot have 2 or 3 labels.
Note that this two-dimensional example is very degenerate:
generally the number of features would be much greater than the
"document length", while here we have much larger documents than vocabulary.
Similarly, with ``n_classes > n_features``, it is much less likely that a
feature distinguishes a particular class.
"""
from __future__ import print_function
import numpy as np
import matplotlib.pyplot as plt
from sklearn.datasets import make_multilabel_classification as make_ml_clf
print(__doc__)
COLORS = np.array(['!',
'#FF3333', # red
'#0198E1', # blue
'#BF5FFF', # purple
'#FCD116', # yellow
'#FF7216', # orange
'#4DBD33', # green
'#87421F' # brown
])
# Use same random seed for multiple calls to make_multilabel_classification to
# ensure same distributions
RANDOM_SEED = np.random.randint(2 ** 10)
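# A small hypothetical helper (not used by the example itself) that makes the
# label-combination -> colour mapping from the table in the module docstring
# explicit: a binary label row is read as bits 1, 2 and 4, and the resulting
# sum indexes into COLORS (e.g. [1, 1, 0] -> 3 -> purple).
def _label_row_to_color(label_row):
    return COLORS[int(np.dot(label_row, [1, 2, 4]))]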
def plot_2d(ax, n_labels=1, n_classes=3, length=50):
X, Y, p_c, p_w_c = make_ml_clf(n_samples=150, n_features=2,
n_classes=n_classes, n_labels=n_labels,
length=length, allow_unlabeled=False,
return_distributions=True,
random_state=RANDOM_SEED)
ax.scatter(X[:, 0], X[:, 1], color=COLORS.take((Y * [1, 2, 4]
).sum(axis=1)),
marker='.')
ax.scatter(p_w_c[0] * length, p_w_c[1] * length,
marker='*', linewidth=.5, edgecolor='black',
s=20 + 1500 * p_c ** 2,
color=COLORS.take([1, 2, 4]))
ax.set_xlabel('Feature 0 count')
return p_c, p_w_c
_, (ax1, ax2) = plt.subplots(1, 2, sharex='row', sharey='row', figsize=(8, 4))
plt.subplots_adjust(bottom=.15)
p_c, p_w_c = plot_2d(ax1, n_labels=1)
ax1.set_title('n_labels=1, length=50')
ax1.set_ylabel('Feature 1 count')
plot_2d(ax2, n_labels=3)
ax2.set_title('n_labels=3, length=50')
ax2.set_xlim(left=0, auto=True)
ax2.set_ylim(bottom=0, auto=True)
plt.show()
print('The data was generated from (random_state=%d):' % RANDOM_SEED)
print('Class', 'P(C)', 'P(w0|C)', 'P(w1|C)', sep='\t')
for k, p, p_w in zip(['red', 'blue', 'yellow'], p_c, p_w_c.T):
print('%s\t%0.2f\t%0.2f\t%0.2f' % (k, p, p_w[0], p_w[1]))
|
bsd-3-clause
|
MohammedWasim/scikit-learn
|
sklearn/manifold/isomap.py
|
229
|
7169
|
"""Isomap for manifold learning"""
# Author: Jake Vanderplas -- <vanderplas@astro.washington.edu>
# License: BSD 3 clause (C) 2011
import numpy as np
from ..base import BaseEstimator, TransformerMixin
from ..neighbors import NearestNeighbors, kneighbors_graph
from ..utils import check_array
from ..utils.graph import graph_shortest_path
from ..decomposition import KernelPCA
from ..preprocessing import KernelCenterer
class Isomap(BaseEstimator, TransformerMixin):
"""Isomap Embedding
Non-linear dimensionality reduction through Isometric Mapping
Read more in the :ref:`User Guide <isomap>`.
Parameters
----------
n_neighbors : integer
number of neighbors to consider for each point.
n_components : integer
number of coordinates for the manifold
eigen_solver : ['auto'|'arpack'|'dense']
'auto' : Attempt to choose the most efficient solver
for the given problem.
'arpack' : Use Arnoldi decomposition to find the eigenvalues
and eigenvectors.
'dense' : Use a direct solver (i.e. LAPACK)
for the eigenvalue decomposition.
tol : float
Convergence tolerance passed to arpack or lobpcg.
not used if eigen_solver == 'dense'.
max_iter : integer
Maximum number of iterations for the arpack solver.
not used if eigen_solver == 'dense'.
path_method : string ['auto'|'FW'|'D']
Method to use in finding shortest path.
'auto' : attempt to choose the best algorithm automatically.
'FW' : Floyd-Warshall algorithm.
'D' : Dijkstra's algorithm.
neighbors_algorithm : string ['auto'|'brute'|'kd_tree'|'ball_tree']
Algorithm to use for nearest neighbors search,
passed to neighbors.NearestNeighbors instance.
Attributes
----------
embedding_ : array-like, shape (n_samples, n_components)
Stores the embedding vectors.
kernel_pca_ : object
`KernelPCA` object used to implement the embedding.
training_data_ : array-like, shape (n_samples, n_features)
Stores the training data.
nbrs_ : sklearn.neighbors.NearestNeighbors instance
Stores nearest neighbors instance, including BallTree or KDtree
if applicable.
dist_matrix_ : array-like, shape (n_samples, n_samples)
Stores the geodesic distance matrix of training data.
References
----------
.. [1] Tenenbaum, J.B.; De Silva, V.; & Langford, J.C. A global geometric
framework for nonlinear dimensionality reduction. Science 290 (5500)
"""
def __init__(self, n_neighbors=5, n_components=2, eigen_solver='auto',
tol=0, max_iter=None, path_method='auto',
neighbors_algorithm='auto'):
self.n_neighbors = n_neighbors
self.n_components = n_components
self.eigen_solver = eigen_solver
self.tol = tol
self.max_iter = max_iter
self.path_method = path_method
self.neighbors_algorithm = neighbors_algorithm
self.nbrs_ = NearestNeighbors(n_neighbors=n_neighbors,
algorithm=neighbors_algorithm)
def _fit_transform(self, X):
X = check_array(X)
self.nbrs_.fit(X)
self.training_data_ = self.nbrs_._fit_X
self.kernel_pca_ = KernelPCA(n_components=self.n_components,
kernel="precomputed",
eigen_solver=self.eigen_solver,
tol=self.tol, max_iter=self.max_iter)
kng = kneighbors_graph(self.nbrs_, self.n_neighbors,
mode='distance')
self.dist_matrix_ = graph_shortest_path(kng,
method=self.path_method,
directed=False)
G = self.dist_matrix_ ** 2
G *= -0.5
self.embedding_ = self.kernel_pca_.fit_transform(G)
def reconstruction_error(self):
"""Compute the reconstruction error for the embedding.
Returns
-------
reconstruction_error : float
Notes
-------
The cost function of an isomap embedding is
``E = frobenius_norm[K(D) - K(D_fit)] / n_samples``
Where D is the matrix of distances for the input data X,
D_fit is the matrix of distances for the output embedding X_fit,
and K is the isomap kernel:
``K(D) = -0.5 * (I - 1/n_samples) * D^2 * (I - 1/n_samples)``
"""
G = -0.5 * self.dist_matrix_ ** 2
G_center = KernelCenterer().fit_transform(G)
evals = self.kernel_pca_.lambdas_
return np.sqrt(np.sum(G_center ** 2) - np.sum(evals ** 2)) / G.shape[0]
def fit(self, X, y=None):
"""Compute the embedding vectors for data X
Parameters
----------
X : {array-like, sparse matrix, BallTree, KDTree, NearestNeighbors}
Sample data, shape = (n_samples, n_features), in the form of a
numpy array, precomputed tree, or NearestNeighbors
object.
Returns
-------
self : returns an instance of self.
"""
self._fit_transform(X)
return self
def fit_transform(self, X, y=None):
"""Fit the model from data in X and transform X.
Parameters
----------
X: {array-like, sparse matrix, BallTree, KDTree}
            Training vector, where n_samples is the number of samples
and n_features is the number of features.
Returns
-------
X_new: array-like, shape (n_samples, n_components)
"""
self._fit_transform(X)
return self.embedding_
def transform(self, X):
"""Transform X.
This is implemented by linking the points X into the graph of geodesic
distances of the training data. First the `n_neighbors` nearest
neighbors of X are found in the training data, and from these the
shortest geodesic distances from each point in X to each point in
the training data are computed in order to construct the kernel.
The embedding of X is the projection of this kernel onto the
embedding vectors of the training set.
Parameters
----------
X: array-like, shape (n_samples, n_features)
Returns
-------
X_new: array-like, shape (n_samples, n_components)
"""
X = check_array(X)
distances, indices = self.nbrs_.kneighbors(X, return_distance=True)
#Create the graph of shortest distances from X to self.training_data_
# via the nearest neighbors of X.
#This can be done as a single array operation, but it potentially
# takes a lot of memory. To avoid that, use a loop:
G_X = np.zeros((X.shape[0], self.training_data_.shape[0]))
for i in range(X.shape[0]):
G_X[i] = np.min((self.dist_matrix_[indices[i]]
+ distances[i][:, None]), 0)
G_X **= 2
G_X *= -0.5
return self.kernel_pca_.transform(G_X)
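# ---------------------------------------------------------------------------
# Minimal usage sketch (not part of the library): embed a toy S-curve and
# report the reconstruction error defined above. Guarded so that importing
# this module is unaffected; the dataset and parameters are arbitrary choices.
if __name__ == "__main__":
    from sklearn.datasets import make_s_curve
    X, _ = make_s_curve(n_samples=300, random_state=0)
    isomap = Isomap(n_neighbors=10, n_components=2)
    X_embedded = isomap.fit_transform(X)
    print("embedding shape: %s" % (X_embedded.shape,))
    print("reconstruction error: %.4f" % isomap.reconstruction_error())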
|
bsd-3-clause
|
IndraVikas/scikit-learn
|
examples/applications/plot_model_complexity_influence.py
|
323
|
6372
|
"""
==========================
Model Complexity Influence
==========================
Demonstrate how model complexity influences both prediction accuracy and
computational performance.
The dataset is the Boston Housing dataset (resp. 20 Newsgroups) for
regression (resp. classification).
For each class of models we make the model complexity vary through the choice
of relevant model parameters and measure the influence on both computational
performance (latency) and predictive power (MSE or Hamming Loss).
"""
print(__doc__)
# Author: Eustache Diemert <eustache@diemert.fr>
# License: BSD 3 clause
import time
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.axes_grid1.parasite_axes import host_subplot
from mpl_toolkits.axisartist.axislines import Axes
from scipy.sparse.csr import csr_matrix
from sklearn import datasets
from sklearn.utils import shuffle
from sklearn.metrics import mean_squared_error
from sklearn.svm.classes import NuSVR
from sklearn.ensemble.gradient_boosting import GradientBoostingRegressor
from sklearn.linear_model.stochastic_gradient import SGDClassifier
from sklearn.metrics import hamming_loss
###############################################################################
# Routines
# initialize random generator
np.random.seed(0)
def generate_data(case, sparse=False):
"""Generate regression/classification data."""
bunch = None
if case == 'regression':
bunch = datasets.load_boston()
elif case == 'classification':
bunch = datasets.fetch_20newsgroups_vectorized(subset='all')
X, y = shuffle(bunch.data, bunch.target)
offset = int(X.shape[0] * 0.8)
X_train, y_train = X[:offset], y[:offset]
X_test, y_test = X[offset:], y[offset:]
if sparse:
X_train = csr_matrix(X_train)
X_test = csr_matrix(X_test)
else:
X_train = np.array(X_train)
X_test = np.array(X_test)
y_test = np.array(y_test)
y_train = np.array(y_train)
data = {'X_train': X_train, 'X_test': X_test, 'y_train': y_train,
'y_test': y_test}
return data
def benchmark_influence(conf):
"""
Benchmark influence of :changing_param: on both MSE and latency.
"""
prediction_times = []
prediction_powers = []
complexities = []
for param_value in conf['changing_param_values']:
conf['tuned_params'][conf['changing_param']] = param_value
estimator = conf['estimator'](**conf['tuned_params'])
print("Benchmarking %s" % estimator)
estimator.fit(conf['data']['X_train'], conf['data']['y_train'])
conf['postfit_hook'](estimator)
complexity = conf['complexity_computer'](estimator)
complexities.append(complexity)
start_time = time.time()
for _ in range(conf['n_samples']):
y_pred = estimator.predict(conf['data']['X_test'])
elapsed_time = (time.time() - start_time) / float(conf['n_samples'])
prediction_times.append(elapsed_time)
pred_score = conf['prediction_performance_computer'](
conf['data']['y_test'], y_pred)
prediction_powers.append(pred_score)
print("Complexity: %d | %s: %.4f | Pred. Time: %fs\n" % (
complexity, conf['prediction_performance_label'], pred_score,
elapsed_time))
return prediction_powers, prediction_times, complexities
def plot_influence(conf, mse_values, prediction_times, complexities):
"""
Plot influence of model complexity on both accuracy and latency.
"""
plt.figure(figsize=(12, 6))
host = host_subplot(111, axes_class=Axes)
plt.subplots_adjust(right=0.75)
par1 = host.twinx()
host.set_xlabel('Model Complexity (%s)' % conf['complexity_label'])
y1_label = conf['prediction_performance_label']
y2_label = "Time (s)"
host.set_ylabel(y1_label)
par1.set_ylabel(y2_label)
p1, = host.plot(complexities, mse_values, 'b-', label="prediction error")
p2, = par1.plot(complexities, prediction_times, 'r-',
label="latency")
host.legend(loc='upper right')
host.axis["left"].label.set_color(p1.get_color())
par1.axis["right"].label.set_color(p2.get_color())
plt.title('Influence of Model Complexity - %s' % conf['estimator'].__name__)
plt.show()
def _count_nonzero_coefficients(estimator):
a = estimator.coef_.toarray()
return np.count_nonzero(a)
###############################################################################
# main code
regression_data = generate_data('regression')
classification_data = generate_data('classification', sparse=True)
configurations = [
{'estimator': SGDClassifier,
'tuned_params': {'penalty': 'elasticnet', 'alpha': 0.001, 'loss':
'modified_huber', 'fit_intercept': True},
'changing_param': 'l1_ratio',
'changing_param_values': [0.25, 0.5, 0.75, 0.9],
'complexity_label': 'non_zero coefficients',
'complexity_computer': _count_nonzero_coefficients,
'prediction_performance_computer': hamming_loss,
'prediction_performance_label': 'Hamming Loss (Misclassification Ratio)',
'postfit_hook': lambda x: x.sparsify(),
'data': classification_data,
'n_samples': 30},
{'estimator': NuSVR,
'tuned_params': {'C': 1e3, 'gamma': 2 ** -15},
'changing_param': 'nu',
'changing_param_values': [0.1, 0.25, 0.5, 0.75, 0.9],
'complexity_label': 'n_support_vectors',
'complexity_computer': lambda x: len(x.support_vectors_),
'data': regression_data,
'postfit_hook': lambda x: x,
'prediction_performance_computer': mean_squared_error,
'prediction_performance_label': 'MSE',
'n_samples': 30},
{'estimator': GradientBoostingRegressor,
'tuned_params': {'loss': 'ls'},
'changing_param': 'n_estimators',
'changing_param_values': [10, 50, 100, 200, 500],
'complexity_label': 'n_trees',
'complexity_computer': lambda x: x.n_estimators,
'data': regression_data,
'postfit_hook': lambda x: x,
'prediction_performance_computer': mean_squared_error,
'prediction_performance_label': 'MSE',
'n_samples': 30},
]
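# A hypothetical extra configuration (not part of the original example), shown
# only to illustrate the structure expected by benchmark_influence(). It is
# deliberately not appended to `configurations`, so it never runs; the
# parameter values below are arbitrary.
_example_extra_configuration = {
    'estimator': NuSVR,
    'tuned_params': {'nu': 0.5, 'gamma': 2 ** -15},
    'changing_param': 'C',
    'changing_param_values': [1e1, 1e2, 1e3],
    'complexity_label': 'n_support_vectors',
    'complexity_computer': lambda x: len(x.support_vectors_),
    'prediction_performance_computer': mean_squared_error,
    'prediction_performance_label': 'MSE',
    'postfit_hook': lambda x: x,
    'data': regression_data,
    'n_samples': 30,
}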
for conf in configurations:
prediction_performances, prediction_times, complexities = \
benchmark_influence(conf)
plot_influence(conf, prediction_performances, prediction_times,
complexities)
|
bsd-3-clause
|
ndingwall/scikit-learn
|
benchmarks/bench_hist_gradient_boosting_higgsboson.py
|
12
|
4210
|
from urllib.request import urlretrieve
import os
from gzip import GzipFile
from time import time
import argparse
import numpy as np
import pandas as pd
from joblib import Memory
from sklearn.model_selection import train_test_split
from sklearn.metrics import accuracy_score, roc_auc_score
# To use this experimental feature, we need to explicitly ask for it:
from sklearn.experimental import enable_hist_gradient_boosting # noqa
from sklearn.ensemble import HistGradientBoostingClassifier
from sklearn.ensemble._hist_gradient_boosting.utils import (
get_equivalent_estimator)
parser = argparse.ArgumentParser()
parser.add_argument('--n-leaf-nodes', type=int, default=31)
parser.add_argument('--n-trees', type=int, default=10)
parser.add_argument('--lightgbm', action="store_true", default=False)
parser.add_argument('--xgboost', action="store_true", default=False)
parser.add_argument('--catboost', action="store_true", default=False)
parser.add_argument('--learning-rate', type=float, default=1.)
parser.add_argument('--subsample', type=int, default=None)
parser.add_argument('--max-bins', type=int, default=255)
parser.add_argument('--no-predict', action="store_true", default=False)
parser.add_argument('--cache-loc', type=str, default='/tmp')
args = parser.parse_args()
HERE = os.path.dirname(__file__)
URL = ("https://archive.ics.uci.edu/ml/machine-learning-databases/00280/"
"HIGGS.csv.gz")
m = Memory(location=args.cache_loc, mmap_mode='r')
n_leaf_nodes = args.n_leaf_nodes
n_trees = args.n_trees
subsample = args.subsample
lr = args.learning_rate
max_bins = args.max_bins
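# Example invocation (illustrative only; the flags are those defined by the
# argparse setup above, the values are arbitrary):
#   python bench_hist_gradient_boosting_higgsboson.py --n-trees 100 \
#       --learning-rate 0.1 --lightgbm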
@m.cache
def load_data():
filename = os.path.join(HERE, URL.rsplit('/', 1)[-1])
if not os.path.exists(filename):
print(f"Downloading {URL} to {filename} (2.6 GB)...")
urlretrieve(URL, filename)
print("done.")
print(f"Parsing {filename}...")
tic = time()
with GzipFile(filename) as f:
df = pd.read_csv(f, header=None, dtype=np.float32)
toc = time()
print(f"Loaded {df.values.nbytes / 1e9:0.3f} GB in {toc - tic:0.3f}s")
return df
def fit(est, data_train, target_train, libname):
print(f"Fitting a {libname} model...")
tic = time()
est.fit(data_train, target_train)
toc = time()
print(f"fitted in {toc - tic:.3f}s")
def predict(est, data_test, target_test):
if args.no_predict:
return
tic = time()
predicted_test = est.predict(data_test)
predicted_proba_test = est.predict_proba(data_test)
toc = time()
roc_auc = roc_auc_score(target_test, predicted_proba_test[:, 1])
acc = accuracy_score(target_test, predicted_test)
print(f"predicted in {toc - tic:.3f}s, "
f"ROC AUC: {roc_auc:.4f}, ACC: {acc :.4f}")
df = load_data()
target = df.values[:, 0]
data = np.ascontiguousarray(df.values[:, 1:])
data_train, data_test, target_train, target_test = train_test_split(
data, target, test_size=.2, random_state=0)
if subsample is not None:
data_train, target_train = data_train[:subsample], target_train[:subsample]
n_samples, n_features = data_train.shape
print(f"Training set with {n_samples} records with {n_features} features.")
est = HistGradientBoostingClassifier(loss='binary_crossentropy',
learning_rate=lr,
max_iter=n_trees,
max_bins=max_bins,
max_leaf_nodes=n_leaf_nodes,
early_stopping=False,
random_state=0,
verbose=1)
fit(est, data_train, target_train, 'sklearn')
predict(est, data_test, target_test)
if args.lightgbm:
est = get_equivalent_estimator(est, lib='lightgbm')
fit(est, data_train, target_train, 'lightgbm')
predict(est, data_test, target_test)
if args.xgboost:
est = get_equivalent_estimator(est, lib='xgboost')
fit(est, data_train, target_train, 'xgboost')
predict(est, data_test, target_test)
if args.catboost:
est = get_equivalent_estimator(est, lib='catboost')
fit(est, data_train, target_train, 'catboost')
predict(est, data_test, target_test)
|
bsd-3-clause
|
huffpostdata/python-pollster
|
pollster/api_client.py
|
1
|
25298
|
# coding: utf-8
"""
Copyright 2016 SmartBear Software
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
ref: https://github.com/swagger-api/swagger-codegen
"""
from __future__ import absolute_import
from . import models
from .rest import RESTClientObject
from .rest import ApiException
import io
import os
import re
import json
import mimetypes
import sys
import tempfile
import threading
from datetime import datetime
from datetime import date
import pandas
# python 2 and python 3 compatibility library
from six import PY3, integer_types, iteritems, text_type, StringIO
from six.moves.urllib.parse import quote
from .configuration import Configuration
class ApiClient(object):
"""
Generic API client for Swagger client library builds.
Swagger generic API client. This client handles the client-
server communication, and is invariant across implementations. Specifics of
the methods and models for each application are generated from the Swagger
templates.
NOTE: This class is auto generated by the swagger code generator program.
Ref: https://github.com/swagger-api/swagger-codegen
Do not edit the class manually.
:param host: The base path for the server to call.
:param header_name: a header to pass when making calls to the API.
:param header_value: a header value to pass when making calls to the API.
"""
def __init__(self, host=None, header_name=None, header_value=None, cookie=None):
"""
Constructor of the class.
"""
self.rest_client = RESTClientObject()
self.default_headers = {}
if header_name is not None:
self.default_headers[header_name] = header_value
if host is None:
self.host = Configuration().host
else:
self.host = host
self.cookie = cookie
# Set default User-Agent.
self.user_agent = 'Swagger-Codegen/2.0.0/python'
@property
def user_agent(self):
"""
Gets user agent.
"""
return self.default_headers['User-Agent']
@user_agent.setter
def user_agent(self, value):
"""
Sets user agent.
"""
self.default_headers['User-Agent'] = value
def set_default_header(self, header_name, header_value):
self.default_headers[header_name] = header_value
def __call_api(self, resource_path, method,
path_params=None, query_params=None, header_params=None,
body=None, post_params=None, files=None,
response_type=None, auth_settings=None, callback=None,
pandas_read_table_kwargs=None,
_return_http_data_only=None, collection_formats=None, _preload_content=True,
_request_timeout=None):
# header parameters
header_params = header_params or {}
header_params.update(self.default_headers)
if self.cookie:
header_params['Cookie'] = self.cookie
if header_params:
header_params = self.sanitize_for_serialization(header_params)
header_params = dict(self.parameters_to_tuples(header_params,
collection_formats))
# path parameters
if path_params:
path_params = self.sanitize_for_serialization(path_params)
path_params = self.parameters_to_tuples(path_params,
collection_formats)
for k, v in path_params:
resource_path = resource_path.replace(
'{%s}' % k, quote(str(v)))
# query parameters
if query_params:
query_params = self.sanitize_for_serialization(query_params)
query_params = self.parameters_to_tuples(query_params,
collection_formats)
# post parameters
if post_params or files:
post_params = self.prepare_post_parameters(post_params, files)
post_params = self.sanitize_for_serialization(post_params)
post_params = self.parameters_to_tuples(post_params,
collection_formats)
# auth setting
self.update_params_for_auth(header_params, query_params, auth_settings)
# body
if body:
body = self.sanitize_for_serialization(body)
# request url
url = self.host + resource_path
# perform request and return response
response_data = self.request(method, url,
query_params=query_params,
headers=header_params,
post_params=post_params, body=body,
_preload_content=_preload_content,
_request_timeout=_request_timeout)
self.last_response = response_data
return_data = response_data
if _preload_content:
# deserialize response data
if header_params['Accept'] == 'text/tab-separated-values':
kwargs = pandas_read_table_kwargs
if kwargs is None: kwargs = {}
return_data = pandas.read_table(StringIO(response_data.data), **kwargs)
elif response_type:
return_data = self.deserialize(response_data, response_type)
else:
return_data = None
if callback:
callback(return_data) if _return_http_data_only else callback((return_data, response_data.status, response_data.getheaders()))
elif _return_http_data_only:
return (return_data)
else:
return (return_data, response_data.status, response_data.getheaders())
def sanitize_for_serialization(self, obj):
"""
Builds a JSON POST object.
If obj is None, return None.
If obj is str, int, long, float, bool, return directly.
If obj is datetime.datetime, datetime.date
convert to string in iso8601 format.
If obj is list, sanitize each element in the list.
If obj is dict, return the dict.
If obj is swagger model, return the properties dict.
:param obj: The data to serialize.
:return: The serialized form of data.
"""
types = (str, float, bool, bytes) + tuple(integer_types) + (text_type,)
if isinstance(obj, type(None)):
return None
elif isinstance(obj, types):
return obj
elif isinstance(obj, list):
return [self.sanitize_for_serialization(sub_obj)
for sub_obj in obj]
elif isinstance(obj, tuple):
return tuple(self.sanitize_for_serialization(sub_obj)
for sub_obj in obj)
elif isinstance(obj, (datetime, date)):
return obj.isoformat()
else:
if isinstance(obj, dict):
obj_dict = obj
else:
# Convert model obj to dict, excluding the
# `swagger_types` and `attribute_map` metadata and
# keeping only attributes whose value is not None.
# Convert attribute name to json key in
# model definition for request.
obj_dict = {obj.attribute_map[attr]: getattr(obj, attr)
for attr, _ in iteritems(obj.swagger_types)
if getattr(obj, attr) is not None}
return {key: self.sanitize_for_serialization(val)
for key, val in iteritems(obj_dict)}
def deserialize(self, response, response_type):
"""
Deserializes response into an object.
:param response: RESTResponse object to be deserialized.
:param response_type: class literal for
deserialized object, or string of class name.
:return: deserialized object.
"""
# handle file downloading
# save response body into a tmp file and return the instance
if "file" == response_type:
return self.__deserialize_file(response)
# fetch data from response object
try:
data = json.loads(response.data)
except ValueError:
data = response.data
return self.__deserialize(data, response_type)
def __deserialize(self, data, klass):
"""
Deserializes dict, list, str into an object.
:param data: dict, list or str.
:param klass: class literal, or string of class name.
:return: object.
"""
if data is None:
return None
if type(klass) == str:
if klass.startswith('list['):
sub_kls = re.match(r'list\[(.*)\]', klass).group(1)
return [self.__deserialize(sub_data, sub_kls)
for sub_data in data]
if klass.startswith('dict('):
sub_kls = re.match(r'dict\(([^,]*), (.*)\)', klass).group(2)
return {k: self.__deserialize(v, sub_kls)
for k, v in iteritems(data)}
# convert str to class
# for native types
if klass in ['int', 'float', 'bool',
"date", 'datetime', "object"]:
klass = eval(klass)
elif klass == 'str':
klass = text_type
elif klass == 'long':
klass = int if PY3 else long
# for model types
else:
klass = eval('models.' + klass)
if klass in integer_types or klass in (float, text_type, bool):
return self.__deserialize_primitive(data, klass)
elif klass == object:
return self.__deserialize_object(data)
elif klass == date:
return self.__deserialize_date(data)
elif klass == datetime:
return self.__deserialize_datatime(data)
else:
return self.__deserialize_model(data, klass)
def call_api(self, resource_path, method,
path_params=None, query_params=None, header_params=None,
body=None, post_params=None, files=None,
response_type=None, auth_settings=None, callback=None,
pandas_read_table_kwargs=None,
_return_http_data_only=None, collection_formats=None, _preload_content=True,
_request_timeout=None):
"""
Makes the HTTP request (synchronous) and return the deserialized data.
To make an async request, define a function for callback.
:param resource_path: Path to method endpoint.
:param method: Method to call.
:param path_params: Path parameters in the url.
:param query_params: Query parameters in the url.
:param header_params: Header parameters to be
placed in the request header.
:param body: Request body.
:param post_params dict: Request post form parameters,
for `application/x-www-form-urlencoded`, `multipart/form-data`.
:param auth_settings list: Auth Settings names for the request.
:param response: Response data type.
:param files dict: key -> filename, value -> filepath,
for `multipart/form-data`.
:param callback function: Callback function for asynchronous request.
If provide this parameter,
the request will be called asynchronously.
:param pandas_read_table_kwargs: if header_params['Accept'] == 'text/tab-separated-values',
the kwargs to pass to pandas.read_table() when parsing the response.
:param _return_http_data_only: if True, return only the response data, without the HTTP status code and headers
:param collection_formats: dict of collection formats for path, query,
header, and post parameters.
:param _preload_content: if False, the urllib3.HTTPResponse object will be returned without
reading/decoding response data. Default is True.
:param _request_timeout: timeout setting for this request. If one number provided, it will be total request
timeout. It can also be a pair (tuple) of (connection, read) timeouts.
:return:
If provide parameter callback,
the request will be called asynchronously.
The method will return the request thread.
If parameter callback is None,
then the method will return the response directly.
"""
if callback is None:
return self.__call_api(resource_path, method,
path_params, query_params, header_params,
body, post_params, files,
response_type, auth_settings, callback,
pandas_read_table_kwargs,
_return_http_data_only, collection_formats, _preload_content, _request_timeout)
else:
thread = threading.Thread(target=self.__call_api,
args=(resource_path, method,
path_params, query_params,
header_params, body,
post_params, files,
response_type, auth_settings,
callback, pandas_read_table_kwargs, _return_http_data_only,
collection_formats, _preload_content, _request_timeout))
thread.start()
return thread
def request(self, method, url, query_params=None, headers=None,
post_params=None, body=None, _preload_content=True, _request_timeout=None):
"""
Makes the HTTP request using RESTClient.
"""
if method == "GET":
return self.rest_client.GET(url,
query_params=query_params,
_preload_content=_preload_content,
_request_timeout=_request_timeout,
headers=headers)
elif method == "HEAD":
return self.rest_client.HEAD(url,
query_params=query_params,
_preload_content=_preload_content,
_request_timeout=_request_timeout,
headers=headers)
elif method == "OPTIONS":
return self.rest_client.OPTIONS(url,
query_params=query_params,
headers=headers,
post_params=post_params,
_preload_content=_preload_content,
_request_timeout=_request_timeout,
body=body)
elif method == "POST":
return self.rest_client.POST(url,
query_params=query_params,
headers=headers,
post_params=post_params,
_preload_content=_preload_content,
_request_timeout=_request_timeout,
body=body)
elif method == "PUT":
return self.rest_client.PUT(url,
query_params=query_params,
headers=headers,
post_params=post_params,
_preload_content=_preload_content,
_request_timeout=_request_timeout,
body=body)
elif method == "PATCH":
return self.rest_client.PATCH(url,
query_params=query_params,
headers=headers,
post_params=post_params,
_preload_content=_preload_content,
_request_timeout=_request_timeout,
body=body)
elif method == "DELETE":
return self.rest_client.DELETE(url,
query_params=query_params,
headers=headers,
_preload_content=_preload_content,
_request_timeout=_request_timeout,
body=body)
else:
raise ValueError(
"http method must be `GET`, `HEAD`, `OPTIONS`,"
" `POST`, `PATCH`, `PUT` or `DELETE`."
)
def parameters_to_tuples(self, params, collection_formats):
"""
Get parameters as list of tuples, formatting collections.
:param params: Parameters as dict or list of two-tuples
:param dict collection_formats: Parameter collection formats
:return: Parameters as list of tuples, collections formatted
"""
new_params = []
if collection_formats is None:
collection_formats = {}
for k, v in iteritems(params) if isinstance(params, dict) else params:
if k in collection_formats:
collection_format = collection_formats[k]
if collection_format == 'multi':
new_params.extend((k, value) for value in v)
else:
if collection_format == 'ssv':
delimiter = ' '
elif collection_format == 'tsv':
delimiter = '\t'
elif collection_format == 'pipes':
delimiter = '|'
else: # csv is the default
delimiter = ','
new_params.append(
(k, delimiter.join(str(value) for value in v)))
else:
new_params.append((k, v))
return new_params
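# Sketch of the collection handling above (made-up values, dict ordering aside):
# with collection_formats = {'tag': 'csv', 'id': 'multi'},
#   parameters_to_tuples({'tag': ['a', 'b'], 'id': [1, 2]}, collection_formats)
# yields [('tag', 'a,b'), ('id', 1), ('id', 2)].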
def prepare_post_parameters(self, post_params=None, files=None):
"""
Builds form parameters.
:param post_params: Normal form parameters.
:param files: File parameters.
:return: Form parameters with files.
"""
params = []
if post_params:
params = post_params
if files:
for k, v in iteritems(files):
if not v:
continue
file_names = v if type(v) is list else [v]
for n in file_names:
with open(n, 'rb') as f:
filename = os.path.basename(f.name)
filedata = f.read()
mimetype = mimetypes.\
guess_type(filename)[0] or 'application/octet-stream'
params.append(tuple([k, tuple([filename, filedata, mimetype])]))
return params
def select_header_accept(self, accepts):
"""
Returns `Accept` based on an array of accepts provided.
:param accepts: List of headers.
:return: Accept (e.g. application/json).
"""
if not accepts:
return
accepts = list(map(lambda x: x.lower(), accepts))
if 'application/json' in accepts:
return 'application/json'
else:
return ', '.join(accepts)
def select_header_content_type(self, content_types):
"""
Returns `Content-Type` based on an array of content_types provided.
:param content_types: List of content-types.
:return: Content-Type (e.g. application/json).
"""
if not content_types:
return 'application/json'
content_types = list(map(lambda x: x.lower(), content_types))
if 'application/json' in content_types or '*/*' in content_types:
return 'application/json'
else:
return content_types[0]
def update_params_for_auth(self, headers, querys, auth_settings):
"""
Updates header and query params based on authentication setting.
:param headers: Header parameters dict to be updated.
:param querys: Query parameters tuple list to be updated.
:param auth_settings: Authentication setting identifiers list.
"""
config = Configuration()
if not auth_settings:
return
for auth in auth_settings:
auth_setting = config.auth_settings().get(auth)
if auth_setting:
if not auth_setting['value']:
continue
elif auth_setting['in'] == 'header':
headers[auth_setting['key']] = auth_setting['value']
elif auth_setting['in'] == 'query':
querys.append((auth_setting['key'], auth_setting['value']))
else:
raise ValueError(
'Authentication token must be in `query` or `header`'
)
def __deserialize_file(self, response):
"""
Saves response body into a file in a temporary folder,
using the filename from the `Content-Disposition` header if provided.
:param response: RESTResponse.
:return: file path.
"""
config = Configuration()
fd, path = tempfile.mkstemp(dir=config.temp_folder_path)
os.close(fd)
os.remove(path)
content_disposition = response.getheader("Content-Disposition")
if content_disposition:
filename = re.\
search(r'filename=[\'"]?([^\'"\s]+)[\'"]?', content_disposition).\
group(1)
path = os.path.join(os.path.dirname(path), filename)
with open(path, "w") as f:
f.write(response.data)
return path
def __deserialize_primitive(self, data, klass):
"""
Deserializes string to primitive type.
:param data: str.
:param klass: class literal.
:return: int, long, float, str, bool. unicode instead of str in python2.
"""
try:
value = klass(data)
except UnicodeEncodeError:
value = text_type(data)  # six.text_type keeps this branch Python 3 compatible
except TypeError:
value = data
return value
def __deserialize_object(self, value):
"""
Return the original value unchanged.
:return: object.
"""
return value
def __deserialize_date(self, string):
"""
Deserializes string to date.
:param string: str.
:return: date.
"""
try:
from dateutil.parser import parse
return parse(string).date()
except ImportError:
return string
except ValueError:
raise ApiException(
status=0,
reason="Failed to parse `{0}` into a date object"
.format(string)
)
def __deserialize_datatime(self, string):
"""
Deserializes string to datetime.
The string should be in iso8601 datetime format.
:param string: str.
:return: datetime.
"""
try:
from dateutil.parser import parse
return parse(string)
except ImportError:
return string
except ValueError:
raise ApiException(
status=0,
reason="Failed to parse `{0}` into a datetime object".
format(string)
)
def __deserialize_model(self, data, klass):
"""
Deserializes list or dict to model.
:param data: dict, list.
:param klass: class literal.
:return: model object.
"""
instance = klass()
if not instance.swagger_types:
return data
for attr, attr_type in iteritems(instance.swagger_types):
if data is not None \
and instance.attribute_map[attr] in data\
and isinstance(data, (list, dict)):
value = data[instance.attribute_map[attr]]
setattr(instance, '_' + attr, self.__deserialize(value, attr_type))
return instance
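# Minimal usage sketch (hypothetical resource path and headers, shown only to
# illustrate the synchronous call flow; not part of the generated client):
#   client = ApiClient()  # host falls back to Configuration().host
#   polls = client.call_api('/polls', 'GET',
#                           header_params={'Accept': 'application/json'},
#                           response_type='object',
#                           auth_settings=[],
#                           _return_http_data_only=True)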
|
bsd-2-clause
|
sanketloke/scikit-learn
|
examples/cluster/plot_segmentation_toy.py
|
91
|
3522
|
"""
===========================================
Spectral clustering for image segmentation
===========================================
In this example, an image with connected circles is generated and
spectral clustering is used to separate the circles.
In these settings, the :ref:`spectral_clustering` approach solves the problem
known as 'normalized graph cuts': the image is seen as a graph of
connected voxels, and the spectral clustering algorithm amounts to
choosing graph cuts defining regions while minimizing the ratio of the
gradient along the cut, and the volume of the region.
As the algorithm tries to balance the volume (i.e. balance the region
sizes), if we take circles with different sizes, the segmentation fails.
In addition, as there is no useful information in the intensity of the image,
or its gradient, we choose to perform the spectral clustering on a graph
that is only weakly informed by the gradient. This is close to performing
a Voronoi partition of the graph.
In addition, we use the mask of the objects to restrict the graph to the
outline of the objects. In this example, we are interested in
separating the objects one from the other, and not from the background.
"""
print(__doc__)
# Authors: Emmanuelle Gouillart <emmanuelle.gouillart@normalesup.org>
# Gael Varoquaux <gael.varoquaux@normalesup.org>
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn.feature_extraction import image
from sklearn.cluster import spectral_clustering
###############################################################################
l = 100
x, y = np.indices((l, l))
center1 = (28, 24)
center2 = (40, 50)
center3 = (67, 58)
center4 = (24, 70)
radius1, radius2, radius3, radius4 = 16, 14, 15, 14
circle1 = (x - center1[0]) ** 2 + (y - center1[1]) ** 2 < radius1 ** 2
circle2 = (x - center2[0]) ** 2 + (y - center2[1]) ** 2 < radius2 ** 2
circle3 = (x - center3[0]) ** 2 + (y - center3[1]) ** 2 < radius3 ** 2
circle4 = (x - center4[0]) ** 2 + (y - center4[1]) ** 2 < radius4 ** 2
###############################################################################
# 4 circles
img = circle1 + circle2 + circle3 + circle4
# We use a mask that limits to the foreground: the problem that we are
# interested in here is not separating the objects from the background,
# but separating them one from the other.
mask = img.astype(bool)
img = img.astype(float)
img += 1 + 0.2 * np.random.randn(*img.shape)
# Convert the image into a graph with the value of the gradient on the
# edges.
graph = image.img_to_graph(img, mask=mask)
# Take a decreasing function of the gradient: we take it weakly
# dependent on the gradient, so that the segmentation is close to a Voronoi
graph.data = np.exp(-graph.data / graph.data.std())
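# (If a segmentation more strongly driven by the gradient were wanted, the
# exponent could be scaled, e.g. np.exp(-beta * graph.data / graph.data.std())
# with beta > 1; `beta` is an illustrative knob, not used in this example.)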
# Force the solver to be arpack, since amg is numerically
# unstable on this example
labels = spectral_clustering(graph, n_clusters=4, eigen_solver='arpack')
label_im = -np.ones(mask.shape)
label_im[mask] = labels
plt.matshow(img)
plt.matshow(label_im)
###############################################################################
# 2 circles
img = circle1 + circle2
mask = img.astype(bool)
img = img.astype(float)
img += 1 + 0.2 * np.random.randn(*img.shape)
graph = image.img_to_graph(img, mask=mask)
graph.data = np.exp(-graph.data / graph.data.std())
labels = spectral_clustering(graph, n_clusters=2, eigen_solver='arpack')
label_im = -np.ones(mask.shape)
label_im[mask] = labels
plt.matshow(img)
plt.matshow(label_im)
plt.show()
|
bsd-3-clause
|
jorik041/scikit-learn
|
sklearn/neural_network/tests/test_rbm.py
|
142
|
6276
|
import sys
import re
import numpy as np
from scipy.sparse import csc_matrix, csr_matrix, lil_matrix
from sklearn.utils.testing import (assert_almost_equal, assert_array_equal,
assert_true)
from sklearn.datasets import load_digits
from sklearn.externals.six.moves import cStringIO as StringIO
from sklearn.neural_network import BernoulliRBM
from sklearn.utils.validation import assert_all_finite
np.seterr(all='warn')
Xdigits = load_digits().data
Xdigits -= Xdigits.min()
Xdigits /= Xdigits.max()
def test_fit():
X = Xdigits.copy()
rbm = BernoulliRBM(n_components=64, learning_rate=0.1,
batch_size=10, n_iter=7, random_state=9)
rbm.fit(X)
assert_almost_equal(rbm.score_samples(X).mean(), -21., decimal=0)
# in-place tricks shouldn't have modified X
assert_array_equal(X, Xdigits)
def test_partial_fit():
X = Xdigits.copy()
rbm = BernoulliRBM(n_components=64, learning_rate=0.1,
batch_size=20, random_state=9)
n_samples = X.shape[0]
n_batches = int(np.ceil(float(n_samples) / rbm.batch_size))
batch_slices = np.array_split(X, n_batches)
for i in range(7):
for batch in batch_slices:
rbm.partial_fit(batch)
assert_almost_equal(rbm.score_samples(X).mean(), -21., decimal=0)
assert_array_equal(X, Xdigits)
def test_transform():
X = Xdigits[:100]
rbm1 = BernoulliRBM(n_components=16, batch_size=5,
n_iter=5, random_state=42)
rbm1.fit(X)
Xt1 = rbm1.transform(X)
Xt2 = rbm1._mean_hiddens(X)
assert_array_equal(Xt1, Xt2)
def test_small_sparse():
# BernoulliRBM should work on small sparse matrices.
X = csr_matrix(Xdigits[:4])
BernoulliRBM().fit(X) # no exception
def test_small_sparse_partial_fit():
for sparse in [csc_matrix, csr_matrix]:
X_sparse = sparse(Xdigits[:100])
X = Xdigits[:100].copy()
rbm1 = BernoulliRBM(n_components=64, learning_rate=0.1,
batch_size=10, random_state=9)
rbm2 = BernoulliRBM(n_components=64, learning_rate=0.1,
batch_size=10, random_state=9)
rbm1.partial_fit(X_sparse)
rbm2.partial_fit(X)
assert_almost_equal(rbm1.score_samples(X).mean(),
rbm2.score_samples(X).mean(),
decimal=0)
def test_sample_hiddens():
rng = np.random.RandomState(0)
X = Xdigits[:100]
rbm1 = BernoulliRBM(n_components=2, batch_size=5,
n_iter=5, random_state=42)
rbm1.fit(X)
h = rbm1._mean_hiddens(X[0])
hs = np.mean([rbm1._sample_hiddens(X[0], rng) for i in range(100)], 0)
assert_almost_equal(h, hs, decimal=1)
def test_fit_gibbs():
# Gibbs on the RBM hidden layer should be able to recreate [[0], [1]]
# from the same input
rng = np.random.RandomState(42)
X = np.array([[0.], [1.]])
rbm1 = BernoulliRBM(n_components=2, batch_size=2,
n_iter=42, random_state=rng)
# that many iterations are needed
rbm1.fit(X)
assert_almost_equal(rbm1.components_,
np.array([[0.02649814], [0.02009084]]), decimal=4)
assert_almost_equal(rbm1.gibbs(X), X)
return rbm1
def test_fit_gibbs_sparse():
# Gibbs on the RBM hidden layer should be able to recreate [[0], [1]] from
# the same input even when the input is sparse, and test against non-sparse
rbm1 = test_fit_gibbs()
rng = np.random.RandomState(42)
from scipy.sparse import csc_matrix
X = csc_matrix([[0.], [1.]])
rbm2 = BernoulliRBM(n_components=2, batch_size=2,
n_iter=42, random_state=rng)
rbm2.fit(X)
assert_almost_equal(rbm2.components_,
np.array([[0.02649814], [0.02009084]]), decimal=4)
assert_almost_equal(rbm2.gibbs(X), X.toarray())
assert_almost_equal(rbm1.components_, rbm2.components_)
def test_gibbs_smoke():
# Check if we don't get NaNs sampling the full digits dataset.
# Also check that sampling again will yield different results.
X = Xdigits
rbm1 = BernoulliRBM(n_components=42, batch_size=40,
n_iter=20, random_state=42)
rbm1.fit(X)
X_sampled = rbm1.gibbs(X)
assert_all_finite(X_sampled)
X_sampled2 = rbm1.gibbs(X)
assert_true(np.all((X_sampled != X_sampled2).max(axis=1)))
def test_score_samples():
# Test score_samples (pseudo-likelihood) method.
# Assert that pseudo-likelihood is computed without clipping.
# See Fabian's blog, http://bit.ly/1iYefRk
rng = np.random.RandomState(42)
X = np.vstack([np.zeros(1000), np.ones(1000)])
rbm1 = BernoulliRBM(n_components=10, batch_size=2,
n_iter=10, random_state=rng)
rbm1.fit(X)
assert_true((rbm1.score_samples(X) < -300).all())
# Sparse vs. dense should not affect the output. Also test sparse input
# validation.
rbm1.random_state = 42
d_score = rbm1.score_samples(X)
rbm1.random_state = 42
s_score = rbm1.score_samples(lil_matrix(X))
assert_almost_equal(d_score, s_score)
# Test numerical stability (#2785): would previously generate infinities
# and crash with an exception.
with np.errstate(under='ignore'):
rbm1.score_samples(np.arange(1000) * 100)
def test_rbm_verbose():
rbm = BernoulliRBM(n_iter=2, verbose=10)
old_stdout = sys.stdout
sys.stdout = StringIO()
try:
rbm.fit(Xdigits)
finally:
sys.stdout = old_stdout
def test_sparse_and_verbose():
# Make sure RBM works with sparse input when verbose=True
old_stdout = sys.stdout
sys.stdout = StringIO()
from scipy.sparse import csc_matrix
X = csc_matrix([[0.], [1.]])
rbm = BernoulliRBM(n_components=2, batch_size=2, n_iter=1,
random_state=42, verbose=True)
try:
rbm.fit(X)
s = sys.stdout.getvalue()
# make sure output is sound
assert_true(re.match(r"\[BernoulliRBM\] Iteration 1,"
r" pseudo-likelihood = -?(\d)+(\.\d+)?,"
r" time = (\d|\.)+s",
s))
finally:
sys.stdout = old_stdout
|
bsd-3-clause
|
iABC2XYZ/abc
|
Epics/testSpeed.py
|
1
|
1081
|
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Created on Fri Oct 27 16:19:01 2017
@author: p
"""
import matplotlib.pyplot as plt
import numpy as np
plt.close('all')
cHV=np.round(np.random.random((14))*300-150)/10.
cHV_BK=cHV
plt.figure('cHV')
plt.plot(cHV,'-r')
flagHV=np.ones((14))
Amp=3.5
nFresh=30
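# Random-walk style update of the 14 channel values: each step adds a random
# increment of up to Amp in the current walk direction (flagHV), the direction
# is periodically re-randomised, and values are reflected at the +/-15 limits.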
for iTotal in xrange(200):
if iTotal==0:
cHV=np.round(np.random.random((14))*300-150)/10.
flagHV=np.ones((14))*np.sign(np.random.random((14))-0.5)
else:
if iTotal % np.round(nFresh/4)==nFresh/8:
flagHV=np.ones((14))*np.sign(np.random.random((14))-0.5)
cHV+=(np.random.random((14)))*Amp*flagHV
flagHV[cHV>15]=-1
flagHV[cHV<-15]=1
cHV[cHV>15]=30-cHV[cHV>15]
cHV[cHV<-15]=-30-cHV[cHV<-15]
flagHV[cHV>15]=-1
flagHV[cHV<-15]=1
cHV[cHV>15]=30-cHV[cHV>15]
cHV[cHV<-15]=-30-cHV[cHV<-15]
plt.figure('cHV_update')
plt.clf()
#plt.hold
plt.plot(cHV_BK,'-r')
plt.plot(cHV,'-b')
plt.pause(0.1)
plt.plot(cHV_BK,'-r')
|
gpl-3.0
|
saketkc/statsmodels
|
statsmodels/tsa/ar_model.py
|
20
|
34034
|
from __future__ import division
from statsmodels.compat.python import iteritems, range, string_types, lmap
import numpy as np
from numpy import dot, identity
from numpy.linalg import inv, slogdet
from scipy.stats import norm
from statsmodels.regression.linear_model import OLS
from statsmodels.tsa.tsatools import (lagmat, add_trend,
_ar_transparams, _ar_invtransparams)
import statsmodels.tsa.base.tsa_model as tsbase
import statsmodels.base.model as base
from statsmodels.tools.decorators import (resettable_cache,
cache_readonly, cache_writable)
from statsmodels.tools.numdiff import approx_fprime, approx_hess
from statsmodels.tsa.kalmanf.kalmanfilter import KalmanFilter
import statsmodels.base.wrapper as wrap
from statsmodels.tsa.vector_ar import util
from statsmodels.tsa.base.datetools import _index_date
__all__ = ['AR']
def sumofsq(x, axis=0):
"""Helper function to calculate sum of squares along first axis"""
return np.sum(x**2, axis=0)
def _check_ar_start(start, k_ar, method, dynamic):
if (method == 'cmle' or dynamic) and start < k_ar:
raise ValueError("Start must be >= k_ar for conditional MLE "
"or dynamic forecast. Got %d" % start)
def _validate(start, k_ar, dates, method):
"""
Checks the date and then returns an integer
"""
from datetime import datetime
if isinstance(start, (string_types, datetime)):
start_date = start
start = _index_date(start, dates)
if 'mle' not in method and start < k_ar:
raise ValueError("Start must be >= k_ar for conditional MLE or "
"dynamic forecast. Got %s" % start_date)
return start
def _ar_predict_out_of_sample(y, params, p, k_trend, steps, start=0):
mu = params[:k_trend] or 0 # only have to worry about constant
arparams = params[k_trend:][::-1] # reverse for dot
# dynamic endogenous variable
endog = np.zeros(p + steps) # this is one too big but doesn't matter
if start:
endog[:p] = y[start-p:start]
else:
endog[:p] = y[-p:]
forecast = np.zeros(steps)
for i in range(steps):
fcast = mu + np.dot(arparams, endog[i:i+p])
forecast[i] = fcast
endog[i + p] = fcast
return forecast
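# Worked sketch of the recursion above, assuming an AR(2) with a constant:
# with params = [c, a1, a2] (so k_trend = 1, p = 2), each forecast is
#   y_hat[t] = c + a1 * y[t-1] + a2 * y[t-2]
# and forecasts are fed back in as lagged values for the following steps.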
class AR(tsbase.TimeSeriesModel):
__doc__ = tsbase._tsa_doc % {"model" : "Autoregressive AR(p) model",
"params" : """endog : array-like
1-d endogenous response variable. The dependent variable.""",
"extra_params" : base._missing_param_doc,
"extra_sections" : ""}
def __init__(self, endog, dates=None, freq=None, missing='none'):
super(AR, self).__init__(endog, None, dates, freq, missing=missing)
endog = self.endog # original might not have been an ndarray
if endog.ndim == 1:
endog = endog[:, None]
self.endog = endog # to get shapes right
elif endog.ndim > 1 and endog.shape[1] != 1:
raise ValueError("Only the univariate case is implemented")
def initialize(self):
pass
def _transparams(self, params):
"""
Transforms params to induce stationarity/invertibility.
Reference
---------
Jones(1980)
"""
p = self.k_ar
k = self.k_trend
newparams = params.copy()
newparams[k:k+p] = _ar_transparams(params[k:k+p].copy())
return newparams
def _invtransparams(self, start_params):
"""
Inverse of the Jones reparameterization
"""
p = self.k_ar
k = self.k_trend
newparams = start_params.copy()
newparams[k:k+p] = _ar_invtransparams(start_params[k:k+p].copy())
return newparams
def _presample_fit(self, params, start, p, end, y, predictedvalues):
"""
Return the pre-sample predicted values using the Kalman Filter
Notes
-----
See predict method for how to use start and p.
"""
k = self.k_trend
# build system matrices
T_mat = KalmanFilter.T(params, p, k, p)
R_mat = KalmanFilter.R(params, p, k, 0, p)
# Initial State mean and variance
alpha = np.zeros((p, 1))
Q_0 = dot(inv(identity(p**2)-np.kron(T_mat, T_mat)),
dot(R_mat, R_mat.T).ravel('F'))
Q_0 = Q_0.reshape(p, p, order='F') # TODO: order might need to be p+k
P = Q_0
Z_mat = KalmanFilter.Z(p)
for i in range(end): # iterate p-1 times to fit presample
v_mat = y[i] - dot(Z_mat, alpha)
F_mat = dot(dot(Z_mat, P), Z_mat.T)
Finv = 1./F_mat # inv. always scalar
K = dot(dot(dot(T_mat, P), Z_mat.T), Finv)
# update state
alpha = dot(T_mat, alpha) + dot(K, v_mat)
L = T_mat - dot(K, Z_mat)
P = dot(dot(T_mat, P), L.T) + dot(R_mat, R_mat.T)
#P[0,0] += 1 # for MA part, R_mat.R_mat.T above
if i >= start - 1: # only record if we ask for it
predictedvalues[i + 1 - start] = dot(Z_mat, alpha)
def _get_predict_start(self, start, dynamic):
method = getattr(self, 'method', 'mle')
k_ar = getattr(self, 'k_ar', 0)
if start is None:
if method == 'mle' and not dynamic:
start = 0
else: # can't do presample fit for cmle or dynamic
start = k_ar
elif isinstance(start, int):
start = super(AR, self)._get_predict_start(start)
else: # should be a date
start = _validate(start, k_ar, self.data.dates, method)
start = super(AR, self)._get_predict_start(start)
_check_ar_start(start, k_ar, method, dynamic)
self._set_predict_start_date(start)
return start
def predict(self, params, start=None, end=None, dynamic=False):
"""
Returns in-sample and out-of-sample prediction.
Parameters
----------
params : array
The fitted model parameters.
start : int, str, or datetime
Zero-indexed observation number at which to start forecasting, i.e.,
the first forecast is start. Can also be a date string to
parse or a datetime type.
end : int, str, or datetime
Zero-indexed observation number at which to end forecasting, i.e.,
the last forecast is end. Can also be a date string to
parse or a datetime type.
dynamic : bool
The `dynamic` keyword affects in-sample prediction. If dynamic
is False, then the in-sample lagged values are used for
prediction. If `dynamic` is True, then in-sample forecasts are
used in place of lagged dependent variables. The first forecasted
value is `start`.
Returns
-------
predicted values : array
Notes
-----
The linear Gaussian Kalman filter is used to return pre-sample fitted
values. The exact initial Kalman Filter is used. See Durbin and Koopman
in the references for more information.
"""
# will return an index of a date
start = self._get_predict_start(start, dynamic)
end, out_of_sample = self._get_predict_end(end)
if start - end > 1:
raise ValueError("end is before start")
k_ar = self.k_ar
k_trend = self.k_trend
method = self.method
endog = self.endog.squeeze()
if dynamic:
out_of_sample += end - start + 1
return _ar_predict_out_of_sample(endog, params, k_ar,
k_trend, out_of_sample, start)
predictedvalues = np.zeros(end + 1 - start)
# fit pre-sample
if method == 'mle': # use Kalman Filter to get initial values
if k_trend:
mu = params[0]/(1-np.sum(params[k_trend:]))
# modifies predictedvalues in place
if start < k_ar:
self._presample_fit(params, start, k_ar, min(k_ar-1, end),
endog[:k_ar] - mu, predictedvalues)
predictedvalues[:k_ar-start] += mu
if end < k_ar:
return predictedvalues
# just do the whole thing and truncate
fittedvalues = dot(self.X, params)
pv_start = max(k_ar - start, 0)
fv_start = max(start - k_ar, 0)
fv_end = min(len(fittedvalues), end-k_ar+1)
predictedvalues[pv_start:] = fittedvalues[fv_start:fv_end]
if out_of_sample:
forecastvalues = _ar_predict_out_of_sample(endog, params,
k_ar, k_trend,
out_of_sample)
predictedvalues = np.r_[predictedvalues, forecastvalues]
return predictedvalues
def _presample_varcov(self, params):
"""
Returns the inverse of the presample variance-covariance.
Notes
-----
See Hamilton p. 125
"""
k = self.k_trend
p = self.k_ar
p1 = p+1
# get inv(Vp) Hamilton 5.3.7
params0 = np.r_[-1, params[k:]]
Vpinv = np.zeros((p, p), dtype=params.dtype)
for i in range(1, p1):
# np.correlate's removed `old_behavior` keyword is no longer needed here
Vpinv[i-1, i-1:] = np.correlate(params0, params0[:i])[:-1]
Vpinv[i-1, i-1:] -= np.correlate(params0[-i:], params0)[:-1]
Vpinv = Vpinv + Vpinv.T - np.diag(Vpinv.diagonal())
return Vpinv
def _loglike_css(self, params):
"""
Loglikelihood of AR(p) process using conditional sum of squares
"""
nobs = self.nobs
Y = self.Y
X = self.X
ssr = sumofsq(Y.squeeze() - np.dot(X, params))
sigma2 = ssr/nobs
return (-nobs/2 * (np.log(2 * np.pi) + np.log(sigma2)) -
ssr/(2 * sigma2))
def _loglike_mle(self, params):
"""
Loglikelihood of AR(p) process using exact maximum likelihood
"""
nobs = self.nobs
X = self.X
endog = self.endog
k_ar = self.k_ar
k_trend = self.k_trend
# reparameterize according to Jones (1980) like in ARMA/Kalman Filter
if self.transparams:
params = self._transparams(params)
# get mean and variance for pre-sample lags
yp = endog[:k_ar].copy()
if k_trend:
c = [params[0]] * k_ar
else:
c = [0]
mup = np.asarray(c / (1 - np.sum(params[k_trend:])))
diffp = yp - mup[:, None]
# get inv(Vp) Hamilton 5.3.7
Vpinv = self._presample_varcov(params)
diffpVpinv = np.dot(np.dot(diffp.T, Vpinv), diffp).item()
ssr = sumofsq(endog[k_ar:].squeeze() - np.dot(X, params))
# concentrating the likelihood means that sigma2 is given by
sigma2 = 1./nobs * (diffpVpinv + ssr)
self.sigma2 = sigma2
logdet = slogdet(Vpinv)[1] # TODO: add check for singularity
loglike = -1/2. * (nobs * (np.log(2 * np.pi) + np.log(sigma2)) -
logdet + diffpVpinv / sigma2 + ssr / sigma2)
return loglike
def loglike(self, params):
"""
The loglikelihood of an AR(p) process
Parameters
----------
params : array
The fitted parameters of the AR model
Returns
-------
llf : float
The loglikelihood evaluated at `params`
Notes
-----
Contains constant term. If the model is fit by OLS then this returns
the conditional maximum likelihood.
.. math:: \\frac{\\left(n-p\\right)}{2}\\left(\\log\\left(2\\pi\\right)+\\log\\left(\\sigma^{2}\\right)\\right)-\\frac{1}{\\sigma^{2}}\\sum_{i}\\epsilon_{i}^{2}
If it is fit by MLE then the (exact) unconditional maximum likelihood
is returned.
.. math:: -\\frac{n}{2}log\\left(2\\pi\\right)-\\frac{n}{2}\\log\\left(\\sigma^{2}\\right)+\\frac{1}{2}\\left|V_{p}^{-1}\\right|-\\frac{1}{2\\sigma^{2}}\\left(y_{p}-\\mu_{p}\\right)^{\\prime}V_{p}^{-1}\\left(y_{p}-\\mu_{p}\\right)-\\frac{1}{2\\sigma^{2}}\\sum_{t=p+1}^{n}\\epsilon_{i}^{2}
where
:math:`\\mu_{p}` is a (`p` x 1) vector with each element equal to the
mean of the AR process and :math:`\\sigma^{2}V_{p}` is the (`p` x `p`)
variance-covariance matrix of the first `p` observations.
"""
#TODO: Math is on Hamilton ~pp 124-5
if self.method == "cmle":
return self._loglike_css(params)
else:
return self._loglike_mle(params)
def score(self, params):
"""
Return the gradient of the loglikelihood at params.
Parameters
----------
params : array-like
The parameter values at which to evaluate the score function.
Notes
-----
Returns numerical gradient.
"""
loglike = self.loglike
return approx_fprime(params, loglike, epsilon=1e-8)
def information(self, params):
"""
Not Implemented Yet
"""
return
def hessian(self, params):
"""
Returns numerical hessian for now.
"""
loglike = self.loglike
return approx_hess(params, loglike)
def _stackX(self, k_ar, trend):
"""
Private method to build the RHS matrix for estimation.
Columns are trend terms then lags.
"""
endog = self.endog
X = lagmat(endog, maxlag=k_ar, trim='both')
k_trend = util.get_trendorder(trend)
if k_trend:
X = add_trend(X, prepend=True, trend=trend)
self.k_trend = k_trend
return X
def select_order(self, maxlag, ic, trend='c', method='mle'):
"""
Select the lag order according to the information criterion.
Parameters
----------
maxlag : int
The highest lag length tried. See `AR.fit`.
ic : str {'aic','bic','hqic','t-stat'}
Criterion used for selecting the optimal lag length.
See `AR.fit`.
trend : str {'c','nc'}
Whether to include a constant or not. 'c' - include constant.
'nc' - no constant.
Returns
-------
bestlag : int
Best lag according to IC.
"""
endog = self.endog
# make Y and X with same nobs to compare ICs
Y = endog[maxlag:]
self.Y = Y # attach to get correct fit stats
X = self._stackX(maxlag, trend) # sets k_trend
self.X = X
k = self.k_trend # k_trend set in _stackX
k = max(1, k) # handle if startlag is 0
results = {}
if ic != 't-stat':
for lag in range(k, maxlag+1):
# have to reinstantiate the model to keep comparable models
endog_tmp = endog[maxlag-lag:]
fit = AR(endog_tmp).fit(maxlag=lag, method=method,
full_output=0, trend=trend,
maxiter=100, disp=0)
results[lag] = eval('fit.'+ic)
bestic, bestlag = min((res, k) for k, res in iteritems(results))
else: # choose by last t-stat.
stop = 1.6448536269514722 # for t-stat, norm.ppf(.95)
for lag in range(maxlag, k - 1, -1):
# have to reinstantiate the model to keep comparable models
endog_tmp = endog[maxlag - lag:]
fit = AR(endog_tmp).fit(maxlag=lag, method=method,
full_output=0, trend=trend,
maxiter=35, disp=-1)
if np.abs(fit.tvalues[-1]) >= stop:
bestlag = lag
break
return bestlag
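# Usage sketch (assumed series `y`): AR(y).fit(maxlag=13, ic='aic', method='mle')
# routes through select_order, which refits the model for each candidate lag on
# a comparable sample and keeps the lag length with the lowest AIC.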
def fit(self, maxlag=None, method='cmle', ic=None, trend='c',
transparams=True, start_params=None, solver='lbfgs', maxiter=35,
full_output=1, disp=1, callback=None, **kwargs):
"""
Fit the unconditional maximum likelihood of an AR(p) process.
Parameters
----------
maxlag : int
If `ic` is None, then maxlag is the lag length used in fit. If
`ic` is specified then maxlag is the highest lag order used to
select the correct lag order. If maxlag is None, the default is
round(12*(nobs/100.)**(1/4.))
method : str {'cmle', 'mle'}, optional
cmle - Conditional maximum likelihood using OLS
mle - Unconditional (exact) maximum likelihood. See `solver`
and the Notes.
ic : str {'aic','bic','hqic','t-stat'}
Criterion used for selecting the optimal lag length.
aic - Akaike Information Criterion
bic - Bayes Information Criterion
t-stat - Based on last lag
hqic - Hannan-Quinn Information Criterion
If any of the information criteria are selected, the lag length
which results in the lowest value is selected. If t-stat, the
model starts with maxlag and drops a lag until the highest lag
has a t-stat that is significant at the 95 % level.
trend : str {'c','nc'}
Whether to include a constant or not. 'c' - include constant.
'nc' - no constant.
The below can be specified if method is 'mle'
transparams : bool, optional
Whether or not to transform the parameters to ensure stationarity.
Uses the transformation suggested in Jones (1980).
start_params : array-like, optional
A first guess on the parameters. Default is cmle estimates.
solver : str or None, optional
Solver to be used if method is 'mle'. The default is 'lbfgs'
(limited memory Broyden-Fletcher-Goldfarb-Shanno). Other choices
are 'bfgs', 'newton' (Newton-Raphson), 'nm' (Nelder-Mead),
'cg' (conjugate gradient), 'ncg' (Newton conjugate gradient),
and 'powell'.
maxiter : int, optional
The maximum number of function evaluations. Default is 35.
tol : float
The convergence tolerance. Default is 1e-08.
full_output : bool, optional
If True, all output from solver will be available in
the Results object's mle_retvals attribute. Output is dependent
on the solver. See Notes for more information.
disp : bool, optional
If True, convergence information is output.
callback : function, optional
Called after each iteration as callback(xk) where xk is the current
parameter vector.
kwargs
See Notes for keyword arguments that can be passed to fit.
References
----------
Jones, R.H. 1980 "Maximum likelihood fitting of ARMA models to time
series with missing observations." `Technometrics`. 22.3.
389-95.
See also
--------
statsmodels.base.model.LikelihoodModel.fit
"""
method = method.lower()
if method not in ['cmle', 'yw', 'mle']:
raise ValueError("Method %s not recognized" % method)
self.method = method
self.trend = trend
self.transparams = transparams
nobs = len(self.endog) # overwritten if method is 'cmle'
endog = self.endog
if maxlag is None:
maxlag = int(round(12*(nobs/100.)**(1/4.)))
k_ar = maxlag # stays this if ic is None
# select lag length
if ic is not None:
ic = ic.lower()
if ic not in ['aic', 'bic', 'hqic', 't-stat']:
raise ValueError("ic option %s not understood" % ic)
k_ar = self.select_order(k_ar, ic, trend, method)
self.k_ar = k_ar # change to what was chosen by ic
# redo estimation for best lag
# make LHS
Y = endog[k_ar:, :]
# make lagged RHS
X = self._stackX(k_ar, trend) # sets self.k_trend
k_trend = self.k_trend
self.exog_names = util.make_lag_names(self.endog_names, k_ar, k_trend)
self.Y = Y
self.X = X
if method == "cmle": # do OLS
arfit = OLS(Y, X).fit()
params = arfit.params
self.nobs = nobs - k_ar
self.sigma2 = arfit.ssr/arfit.nobs # needed for predict fcasterr
elif method == "mle":
solver = solver.lower()
self.nobs = nobs
if start_params is None:
start_params = OLS(Y, X).fit().params
else:
if len(start_params) != k_trend + k_ar:
raise ValueError("Length of start params is %d. There"
" are %d parameters." %
(len(start_params), k_trend + k_ar))
start_params = self._invtransparams(start_params)
if solver == 'lbfgs':
kwargs.setdefault('pgtol', 1e-8)
kwargs.setdefault('factr', 1e2)
kwargs.setdefault('m', 12)
kwargs.setdefault('approx_grad', True)
mlefit = super(AR, self).fit(start_params=start_params,
method=solver, maxiter=maxiter,
full_output=full_output, disp=disp,
callback=callback, **kwargs)
params = mlefit.params
if self.transparams:
params = self._transparams(params)
self.transparams = False # turn off now for other results
# don't use yw, because we can't estimate the constant
#elif method == "yw":
# params, omega = yule_walker(endog, order=maxlag,
# method="mle", demean=False)
# how to handle inference after Yule-Walker?
# self.params = params #TODO: don't attach here
# self.omega = omega
pinv_exog = np.linalg.pinv(X)
normalized_cov_params = np.dot(pinv_exog, pinv_exog.T)
arfit = ARResults(self, params, normalized_cov_params)
if method == 'mle' and full_output:
arfit.mle_retvals = mlefit.mle_retvals
arfit.mle_settings = mlefit.mle_settings
return ARResultsWrapper(arfit)
class ARResults(tsbase.TimeSeriesModelResults):
"""
Class to hold results from fitting an AR model.
Parameters
----------
model : AR Model instance
Reference to the model that is fit.
params : array
The fitted parameters from the AR Model.
normalized_cov_params : array
inv(dot(X.T,X)) where X is the lagged values.
scale : float, optional
An estimate of the scale of the model.
Returns
-------
**Attributes**
aic : float
Akaike Information Criterion using Lutkepohl's definition.
:math:`log(sigma) + 2*(1 + k_ar + k_trend)/nobs`
bic : float
Bayes Information Criterion
:math:`\\log(\\sigma) + (1 + k_ar + k_trend)*\\log(nobs)/nobs`
bse : array
The standard errors of the estimated parameters. If `method` is 'cmle',
then the standard errors that are returned are the OLS standard errors
of the coefficients. If the `method` is 'mle' then they are computed
using the numerical Hessian.
fittedvalues : array
The in-sample predicted values of the fitted AR model. The `k_ar`
initial values are computed via the Kalman Filter if the model is
fit by `mle`.
fpe : float
Final prediction error using Lutkepohl's definition
((n_totobs+k_trend)/(n_totobs-k_ar-k_trend))*sigma
hqic : float
Hannan-Quinn Information Criterion.
k_ar : float
Lag length. Sometimes used as `p` in the docs.
k_trend : float
The number of trend terms included. 'nc'=0, 'c'=1.
llf : float
The loglikelihood of the model evaluated at `params`. See `AR.loglike`
model : AR model instance
A reference to the fitted AR model.
nobs : float
The number of available observations `nobs` - `k_ar`
n_totobs : float
The number of total observations in `endog`. Sometimes `n` in the docs.
params : array
The fitted parameters of the model.
pvalues : array
The p values associated with the standard errors.
resid : array
The residuals of the model. If the model is fit by 'mle' then the
pre-sample residuals are calculated using fittedvalues from the Kalman
Filter.
roots : array
The roots of the AR process are the solution to
(1 - arparams[0]*z - arparams[1]*z**2 -...- arparams[p-1]*z**k_ar) = 0
Stability requires that the roots in modulus lie outside the unit
circle.
scale : float
Same as sigma2
sigma2 : float
The variance of the innovations (residuals).
trendorder : int
The polynomial order of the trend. 'nc' = None, 'c' or 't' = 0,
'ct' = 1, etc.
tvalues : array
The t-values associated with `params`.
"""
_cache = {} # for scale setter
def __init__(self, model, params, normalized_cov_params=None, scale=1.):
super(ARResults, self).__init__(model, params, normalized_cov_params,
scale)
self._cache = resettable_cache()
self.nobs = model.nobs
n_totobs = len(model.endog)
self.n_totobs = n_totobs
self.X = model.X # copy?
self.Y = model.Y
k_ar = model.k_ar
self.k_ar = k_ar
k_trend = model.k_trend
self.k_trend = k_trend
trendorder = None
if k_trend > 0:
trendorder = k_trend - 1
self.trendorder = trendorder
#TODO: cmle vs mle?
self.df_model = k_ar + k_trend
self.df_resid = self.model.df_resid = n_totobs - self.df_model
@cache_writable()
def sigma2(self):
model = self.model
if model.method == "cmle": # do DOF correction
return 1. / self.nobs * sumofsq(self.resid)
else:
return self.model.sigma2
@cache_writable() # for compatibility with RegressionResults
def scale(self):
return self.sigma2
@cache_readonly
def bse(self): # allow user to specify?
if self.model.method == "cmle": # uses different scale/sigma def.
resid = self.resid
ssr = np.dot(resid, resid)
ols_scale = ssr / (self.nobs - self.k_ar - self.k_trend)
return np.sqrt(np.diag(self.cov_params(scale=ols_scale)))
else:
hess = approx_hess(self.params, self.model.loglike)
return np.sqrt(np.diag(-np.linalg.inv(hess)))
@cache_readonly
def pvalues(self):
return norm.sf(np.abs(self.tvalues))*2
@cache_readonly
def aic(self):
#JP: this is based on loglike with dropped constant terms ?
# Lutkepohl
#return np.log(self.sigma2) + 1./self.model.nobs * self.k_ar
# Include constant as estimated free parameter and double the loss
return np.log(self.sigma2) + 2 * (1 + self.df_model)/self.nobs
# Stata definition
#nobs = self.nobs
#return -2 * self.llf/nobs + 2 * (self.k_ar+self.k_trend)/nobs
@cache_readonly
def hqic(self):
nobs = self.nobs
# Lutkepohl
# return np.log(self.sigma2)+ 2 * np.log(np.log(nobs))/nobs * self.k_ar
# R uses all estimated parameters rather than just lags
return (np.log(self.sigma2) + 2 * np.log(np.log(nobs))/nobs *
(1 + self.df_model))
# Stata
#nobs = self.nobs
#return -2 * self.llf/nobs + 2 * np.log(np.log(nobs))/nobs * \
# (self.k_ar + self.k_trend)
@cache_readonly
def fpe(self):
nobs = self.nobs
df_model = self.df_model
#Lutkepohl
return ((nobs+df_model)/(nobs-df_model))*self.sigma2
@cache_readonly
def bic(self):
nobs = self.nobs
# Lutkepohl
#return np.log(self.sigma2) + np.log(nobs)/nobs * self.k_ar
# Include constant as est. free parameter
return np.log(self.sigma2) + (1 + self.df_model) * np.log(nobs)/nobs
# Stata
# return -2 * self.llf/nobs + np.log(nobs)/nobs * (self.k_ar + \
# self.k_trend)
@cache_readonly
def resid(self):
#NOTE: uses fittedvalues because it calculates presample values for mle
model = self.model
endog = model.endog.squeeze()
if model.method == "cmle": # eliminate pre-sample
return endog[self.k_ar:] - self.fittedvalues
else:
return model.endog.squeeze() - self.fittedvalues
#def ssr(self):
# resid = self.resid
# return np.dot(resid, resid)
@cache_readonly
def roots(self):
k = self.k_trend
return np.roots(np.r_[1, -self.params[k:]]) ** -1
@cache_readonly
def fittedvalues(self):
return self.model.predict(self.params)
def predict(self, start=None, end=None, dynamic=False):
params = self.params
predictedvalues = self.model.predict(params, start, end, dynamic)
return predictedvalues
#start = self.model._get_predict_start(start)
#end, out_of_sample = self.model._get_predict_end(end)
##TODO: return forecast errors and confidence intervals
#from statsmodels.tsa.arima_process import arma2ma
#ma_rep = arma2ma(np.r_[1,-params[::-1]], [1], out_of_sample)
#fcasterr = np.sqrt(self.sigma2 * np.cumsum(ma_rep**2))
preddoc = AR.predict.__doc__.split('\n')
extra_doc = (""" confint : bool, float
Whether to return confidence intervals. If `confint` == True,
95 % confidence intervals are returned. Else if `confint` is a
float, then it is assumed to be the alpha value of the confidence
interval. That is confint == .05 returns a 95% confidence
interval, and .10 would return a 90% confidence interval."""
).split('\n')
#ret_doc = """
# fcasterr : array-like
# confint : array-like
#"""
predict.__doc__ = '\n'.join(preddoc[:5] + preddoc[7:20] + extra_doc +
preddoc[20:])
class ARResultsWrapper(wrap.ResultsWrapper):
_attrs = {}
_wrap_attrs = wrap.union_dicts(tsbase.TimeSeriesResultsWrapper._wrap_attrs,
_attrs)
_methods = {}
_wrap_methods = wrap.union_dicts(tsbase.TimeSeriesResultsWrapper._wrap_methods,
_methods)
wrap.populate_wrapper(ARResultsWrapper, ARResults)
if __name__ == "__main__":
import statsmodels.api as sm
sunspots = sm.datasets.sunspots.load()
# Why does R demean the data by default?
ar_ols = AR(sunspots.endog)
res_ols = ar_ols.fit(maxlag=9)
ar_mle = AR(sunspots.endog)
res_mle_bfgs = ar_mle.fit(maxlag=9, method="mle", solver="bfgs",
maxiter=500, gtol=1e-10)
# res_mle2 = ar_mle.fit(maxlag=1, method="mle", maxiter=500, penalty=True,
# tol=1e-13)
# ar_yw = AR(sunspots.endog)
# res_yw = ar_yw.fit(maxlag=4, method="yw")
# # Timings versus talkbox
# from timeit import default_timer as timer
# print "Time AR fit vs. talkbox"
# # generate a long series of AR(2) data
#
# nobs = 1000000
# y = np.empty(nobs)
# y[0:2] = 0
# for i in range(2,nobs):
# y[i] = .25 * y[i-1] - .75 * y[i-2] + np.random.rand()
#
# mod_sm = AR(y)
# t = timer()
# res_sm = mod_sm.fit(method="yw", trend="nc", demean=False, maxlag=2)
# t_end = timer()
# print str(t_end - t) + " seconds for sm.AR with yule-walker, 2 lags"
# try:
# import scikits.talkbox as tb
# except:
# raise ImportError("You need scikits.talkbox installed for timings")
# t = timer()
# mod_tb = tb.lpc(y, 2)
# t_end = timer()
# print str(t_end - t) + " seconds for talkbox.lpc"
# print """For higher lag lengths ours quickly fills up memory and starts
#thrashing the swap. Should we include talkbox C code or Cythonize the
#Levinson recursion algorithm?"""
## Try with a pandas series
import pandas
import scikits.timeseries as ts
d1 = ts.Date(year=1700, freq='A')
#NOTE: have to have yearBegin offset for annual data until parser rewrite
#should this be up to the user, or should it be done in TSM init?
#NOTE: not anymore, it's end of year now
ts_dr = ts.date_array(start_date=d1, length=len(sunspots.endog))
pandas_dr = pandas.DateRange(start=d1.datetime,
periods=len(sunspots.endog), timeRule='A@DEC')
#pandas_dr = pandas_dr.shift(-1, pandas.datetools.yearBegin)
dates = np.arange(1700, 1700 + len(sunspots.endog))
dates = ts.date_array(dates, freq='A')
#sunspots = pandas.TimeSeries(sunspots.endog, index=dates)
#NOTE: pandas only does business days for dates it looks like
import datetime
dt_dates = np.asarray(lmap(datetime.datetime.fromordinal,
ts_dr.toordinal().astype(int)))
sunspots = pandas.TimeSeries(sunspots.endog, index=dt_dates)
#NOTE: pandas can't handle pre-1900 dates
mod = AR(sunspots, freq='A')
res = mod.fit(method='mle', maxlag=9)
# some data for an example in Box Jenkins
IBM = np.asarray([460, 457, 452, 459, 462, 459, 463, 479, 493, 490.])
w = np.diff(IBM)
theta = .5
|
bsd-3-clause
|
OSSHealth/ghdata
|
augur/metrics/insight.py
|
1
|
1339
|
#SPDX-License-Identifier: MIT
"""
Metrics that provide data about insight detection and reporting
"""
import sqlalchemy as s
import pandas as pd
from augur.util import register_metric
@register_metric(type="repo_group_only")
def top_insights(self, repo_group_id, num_repos=6):
"""
Returns the top insight records for repositories in the given repo group
:return: DataFrame with top insights across all repos
"""
topInsightsSQL = s.sql.text("""
SELECT rg_name, repo.repo_group_id, repo_insights.repo_id, repo_git, ri_metric, ri_field, ri_value AS value,
ri_date AS date, ri_fresh AS discovered
FROM repo_insights JOIN repo ON repo.repo_id = repo_insights.repo_id JOIN repo_groups ON repo.repo_group_id = repo_groups.repo_group_id
WHERE repo_insights.repo_id IN (
SELECT repo_id
FROM repo
WHERE repo_group_id = :repo_group_id
AND repo_id IN (SELECT repo_id FROM repo_insights GROUP BY repo_id, ri_id HAVING 304 > count(repo_insights.repo_id) ORDER BY ri_id desc)
LIMIT :num_repos
)
""")
results = pd.read_sql(topInsightsSQL, self.database, params={'repo_group_id': repo_group_id, 'num_repos': num_repos})
return results
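# Hedged usage sketch (not part of the original module): assuming `metrics` is
# an Augur metrics instance with a SQLAlchemy engine bound to `self.database`,
# the registered metric could be invoked roughly like
#   df = metrics.top_insights(repo_group_id=20, num_repos=6)
#   df[['rg_name', 'repo_git', 'ri_metric', 'value', 'date']].head()
# where `repo_group_id=20` is a placeholder identifier.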
|
mit
|
evgchz/scikit-learn
|
sklearn/metrics/tests/test_ranking.py
|
7
|
33931
|
from __future__ import division, print_function
import numpy as np
from itertools import product
import warnings
from sklearn import datasets
from sklearn import svm
from sklearn import ensemble
from sklearn.datasets import make_multilabel_classification
from sklearn.random_projection import sparse_random_matrix
from sklearn.utils.validation import check_array, check_consistent_length
from sklearn.utils.validation import check_random_state
from sklearn.utils.testing import assert_raises, clean_warning_registry
from sklearn.utils.testing import assert_raise_message
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_warns
from sklearn.utils.testing import ignore_warnings
from sklearn.metrics import auc
from sklearn.metrics import auc_score
from sklearn.metrics import average_precision_score
from sklearn.metrics import label_ranking_average_precision_score
from sklearn.metrics import roc_curve
from sklearn.metrics import precision_recall_curve
from sklearn.metrics import roc_auc_score
from sklearn.metrics.base import UndefinedMetricWarning
###############################################################################
# Utilities for testing
def make_prediction(dataset=None, binary=False):
"""Make some classification predictions on a toy dataset using a SVC
If binary is True restrict to a binary classification problem instead of a
multiclass classification problem
"""
if dataset is None:
# import some data to play with
dataset = datasets.load_iris()
X = dataset.data
y = dataset.target
if binary:
# restrict to a binary classification task
X, y = X[y < 2], y[y < 2]
n_samples, n_features = X.shape
p = np.arange(n_samples)
rng = check_random_state(37)
rng.shuffle(p)
X, y = X[p], y[p]
half = int(n_samples / 2)
# add noisy features to make the problem harder and avoid perfect results
rng = np.random.RandomState(0)
X = np.c_[X, rng.randn(n_samples, 200 * n_features)]
# run classifier, get class probabilities and label predictions
clf = svm.SVC(kernel='linear', probability=True, random_state=0)
probas_pred = clf.fit(X[:half], y[:half]).predict_proba(X[half:])
if binary:
# only interested in probabilities of the positive case
# XXX: do we really want a special API for the binary case?
probas_pred = probas_pred[:, 1]
y_pred = clf.predict(X[half:])
y_true = y[half:]
return y_true, y_pred, probas_pred
###############################################################################
# Tests
def _auc(y_true, y_score):
"""Alternative implementation to check for correctness of
`roc_auc_score`."""
pos_label = np.unique(y_true)[1]
# Count the number of times positive samples are correctly ranked above
# negative samples.
pos = y_score[y_true == pos_label]
neg = y_score[y_true != pos_label]
diff_matrix = pos.reshape(1, -1) - neg.reshape(-1, 1)
n_correct = np.sum(diff_matrix > 0)
return n_correct / float(len(pos) * len(neg))
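# Illustrative sanity check (assumption: run manually, not part of the test
# suite): with y_true = np.array([0, 0, 1, 1]) and
# y_score = np.array([0.1, 0.4, 0.35, 0.8]), three of the four
# positive/negative pairs are ranked correctly, so _auc returns 0.75.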
def _average_precision(y_true, y_score):
"""Alternative implementation to check for correctness of
`average_precision_score`."""
pos_label = np.unique(y_true)[1]
n_pos = np.sum(y_true == pos_label)
order = np.argsort(y_score)[::-1]
y_score = y_score[order]
y_true = y_true[order]
score = 0
for i in range(len(y_score)):
if y_true[i] == pos_label:
# Compute precision up to document i
# i.e, percentage of relevant documents up to document i.
prec = 0
for j in range(0, i + 1):
if y_true[j] == pos_label:
prec += 1.0
prec /= (i + 1.0)
score += prec
return score / n_pos
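# Illustrative sanity check (assumption: run manually): for the same toy data
# y_true = [0, 0, 1, 1], y_score = [0.1, 0.4, 0.35, 0.8], the positives are
# ranked 1st and 3rd by score, so the score is (1/1 + 2/3) / 2 ~= 0.83.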
def test_roc_curve():
"""Test Area under Receiver Operating Characteristic (ROC) curve"""
y_true, _, probas_pred = make_prediction(binary=True)
fpr, tpr, thresholds = roc_curve(y_true, probas_pred)
roc_auc = auc(fpr, tpr)
expected_auc = _auc(y_true, probas_pred)
assert_array_almost_equal(roc_auc, expected_auc, decimal=2)
assert_almost_equal(roc_auc, roc_auc_score(y_true, probas_pred))
assert_almost_equal(roc_auc,
ignore_warnings(auc_score)(y_true, probas_pred))
assert_equal(fpr.shape, tpr.shape)
assert_equal(fpr.shape, thresholds.shape)
def test_roc_curve_end_points():
# Make sure that roc_curve returns a curve starting at 0 and ending at
# 1 even in corner cases
rng = np.random.RandomState(0)
y_true = np.array([0] * 50 + [1] * 50)
y_pred = rng.randint(3, size=100)
fpr, tpr, thr = roc_curve(y_true, y_pred)
assert_equal(fpr[0], 0)
assert_equal(fpr[-1], 1)
assert_equal(fpr.shape, tpr.shape)
assert_equal(fpr.shape, thr.shape)
def test_roc_returns_consistency():
"""Test whether the returned threshold matches up with tpr"""
# make small toy dataset
y_true, _, probas_pred = make_prediction(binary=True)
fpr, tpr, thresholds = roc_curve(y_true, probas_pred)
# use the given thresholds to determine the tpr
tpr_correct = []
for t in thresholds:
tp = np.sum((probas_pred >= t) & y_true)
p = np.sum(y_true)
tpr_correct.append(1.0 * tp / p)
# compare tpr and tpr_correct to see if the thresholds' order was correct
assert_array_almost_equal(tpr, tpr_correct, decimal=2)
assert_equal(fpr.shape, tpr.shape)
assert_equal(fpr.shape, thresholds.shape)
def test_roc_nonrepeating_thresholds():
"""Test to ensure that we don't return spurious repeating thresholds.
Duplicated thresholds can arise due to machine precision issues.
"""
dataset = datasets.load_digits()
X = dataset['data']
y = dataset['target']
# This random forest classifier can only return probabilities
# significant to two decimal places
clf = ensemble.RandomForestClassifier(n_estimators=100, random_state=0)
# How well can the classifier predict whether a digit is less than 5?
# This task contributes floating point roundoff errors to the probabilities
train, test = slice(None, None, 2), slice(1, None, 2)
probas_pred = clf.fit(X[train], y[train]).predict_proba(X[test])
y_score = probas_pred[:, :5].sum(axis=1) # roundoff errors begin here
y_true = [yy < 5 for yy in y[test]]
# Check for repeating values in the thresholds
fpr, tpr, thresholds = roc_curve(y_true, y_score)
assert_equal(thresholds.size, np.unique(np.round(thresholds, 2)).size)
def test_roc_curve_multi():
"""roc_curve not applicable for multi-class problems"""
y_true, _, probas_pred = make_prediction(binary=False)
assert_raises(ValueError, roc_curve, y_true, probas_pred)
def test_roc_curve_confidence():
"""roc_curve for confidence scores"""
y_true, _, probas_pred = make_prediction(binary=True)
fpr, tpr, thresholds = roc_curve(y_true, probas_pred - 0.5)
roc_auc = auc(fpr, tpr)
assert_array_almost_equal(roc_auc, 0.90, decimal=2)
assert_equal(fpr.shape, tpr.shape)
assert_equal(fpr.shape, thresholds.shape)
def test_roc_curve_hard():
"""roc_curve for hard decisions"""
y_true, pred, probas_pred = make_prediction(binary=True)
# always predict one
trivial_pred = np.ones(y_true.shape)
fpr, tpr, thresholds = roc_curve(y_true, trivial_pred)
roc_auc = auc(fpr, tpr)
assert_array_almost_equal(roc_auc, 0.50, decimal=2)
assert_equal(fpr.shape, tpr.shape)
assert_equal(fpr.shape, thresholds.shape)
# always predict zero
trivial_pred = np.zeros(y_true.shape)
fpr, tpr, thresholds = roc_curve(y_true, trivial_pred)
roc_auc = auc(fpr, tpr)
assert_array_almost_equal(roc_auc, 0.50, decimal=2)
assert_equal(fpr.shape, tpr.shape)
assert_equal(fpr.shape, thresholds.shape)
# hard decisions
fpr, tpr, thresholds = roc_curve(y_true, pred)
roc_auc = auc(fpr, tpr)
assert_array_almost_equal(roc_auc, 0.78, decimal=2)
assert_equal(fpr.shape, tpr.shape)
assert_equal(fpr.shape, thresholds.shape)
def test_roc_curve_one_label():
y_true = [1, 1, 1, 1, 1, 1, 1, 1, 1, 1]
y_pred = [0, 1, 0, 1, 0, 1, 0, 1, 0, 1]
# assert there are warnings
w = UndefinedMetricWarning
fpr, tpr, thresholds = assert_warns(w, roc_curve, y_true, y_pred)
# all true labels, all fpr should be nan
assert_array_equal(fpr,
np.nan * np.ones(len(thresholds)))
assert_equal(fpr.shape, tpr.shape)
assert_equal(fpr.shape, thresholds.shape)
# assert there are warnings
fpr, tpr, thresholds = assert_warns(w, roc_curve,
[1 - x for x in y_true],
y_pred)
# all negative labels, all tpr should be nan
assert_array_equal(tpr,
np.nan * np.ones(len(thresholds)))
assert_equal(fpr.shape, tpr.shape)
assert_equal(fpr.shape, thresholds.shape)
def test_roc_curve_toydata():
# Binary classification
y_true = [0, 1]
y_score = [0, 1]
tpr, fpr, _ = roc_curve(y_true, y_score)
roc_auc = roc_auc_score(y_true, y_score)
assert_array_almost_equal(tpr, [0, 1])
assert_array_almost_equal(fpr, [1, 1])
assert_almost_equal(roc_auc, 1.)
y_true = [0, 1]
y_score = [1, 0]
tpr, fpr, _ = roc_curve(y_true, y_score)
roc_auc = roc_auc_score(y_true, y_score)
assert_array_almost_equal(tpr, [0, 1, 1])
assert_array_almost_equal(fpr, [0, 0, 1])
assert_almost_equal(roc_auc, 0.)
y_true = [1, 0]
y_score = [1, 1]
tpr, fpr, _ = roc_curve(y_true, y_score)
roc_auc = roc_auc_score(y_true, y_score)
assert_array_almost_equal(tpr, [0, 1])
assert_array_almost_equal(fpr, [0, 1])
assert_almost_equal(roc_auc, 0.5)
y_true = [1, 0]
y_score = [1, 0]
tpr, fpr, _ = roc_curve(y_true, y_score)
roc_auc = roc_auc_score(y_true, y_score)
assert_array_almost_equal(tpr, [0, 1])
assert_array_almost_equal(fpr, [1, 1])
assert_almost_equal(roc_auc, 1.)
y_true = [1, 0]
y_score = [0.5, 0.5]
tpr, fpr, _ = roc_curve(y_true, y_score)
roc_auc = roc_auc_score(y_true, y_score)
assert_array_almost_equal(tpr, [0, 1])
assert_array_almost_equal(fpr, [0, 1])
assert_almost_equal(roc_auc, .5)
y_true = [0, 0]
y_score = [0.25, 0.75]
tpr, fpr, _ = roc_curve(y_true, y_score)
assert_raises(ValueError, roc_auc_score, y_true, y_score)
assert_array_almost_equal(tpr, [0., 0.5, 1.])
assert_array_almost_equal(fpr, [np.nan, np.nan, np.nan])
y_true = [1, 1]
y_score = [0.25, 0.75]
tpr, fpr, _ = roc_curve(y_true, y_score)
assert_raises(ValueError, roc_auc_score, y_true, y_score)
assert_array_almost_equal(tpr, [np.nan, np.nan])
assert_array_almost_equal(fpr, [0.5, 1.])
# Multi-label classification task
y_true = np.array([[0, 1], [0, 1]])
y_score = np.array([[0, 1], [0, 1]])
assert_raises(ValueError, roc_auc_score, y_true, y_score, average="macro")
assert_raises(ValueError, roc_auc_score, y_true, y_score,
average="weighted")
assert_almost_equal(roc_auc_score(y_true, y_score, average="samples"), 1.)
assert_almost_equal(roc_auc_score(y_true, y_score, average="micro"), 1.)
y_true = np.array([[0, 1], [0, 1]])
y_score = np.array([[0, 1], [1, 0]])
assert_raises(ValueError, roc_auc_score, y_true, y_score, average="macro")
assert_raises(ValueError, roc_auc_score, y_true, y_score,
average="weighted")
assert_almost_equal(roc_auc_score(y_true, y_score, average="samples"), 0.5)
assert_almost_equal(roc_auc_score(y_true, y_score, average="micro"), 0.5)
y_true = np.array([[1, 0], [0, 1]])
y_score = np.array([[0, 1], [1, 0]])
assert_almost_equal(roc_auc_score(y_true, y_score, average="macro"), 0)
assert_almost_equal(roc_auc_score(y_true, y_score, average="weighted"), 0)
assert_almost_equal(roc_auc_score(y_true, y_score, average="samples"), 0)
assert_almost_equal(roc_auc_score(y_true, y_score, average="micro"), 0)
y_true = np.array([[1, 0], [0, 1]])
y_score = np.array([[0.5, 0.5], [0.5, 0.5]])
assert_almost_equal(roc_auc_score(y_true, y_score, average="macro"), .5)
assert_almost_equal(roc_auc_score(y_true, y_score, average="weighted"), .5)
assert_almost_equal(roc_auc_score(y_true, y_score, average="samples"), .5)
assert_almost_equal(roc_auc_score(y_true, y_score, average="micro"), .5)
def test_auc():
"""Test Area Under Curve (AUC) computation"""
x = [0, 1]
y = [0, 1]
assert_array_almost_equal(auc(x, y), 0.5)
x = [1, 0]
y = [0, 1]
assert_array_almost_equal(auc(x, y), 0.5)
x = [1, 0, 0]
y = [0, 1, 1]
assert_array_almost_equal(auc(x, y), 0.5)
x = [0, 1]
y = [1, 1]
assert_array_almost_equal(auc(x, y), 1)
x = [0, 0.5, 1]
y = [0, 0.5, 1]
assert_array_almost_equal(auc(x, y), 0.5)
def test_auc_duplicate_values():
# Test Area Under Curve (AUC) computation with duplicate values
# auc() was previously sorting the x and y arrays according to the indices
# from numpy.argsort(x), which was reordering the tied 0's in this example
# and resulting in an incorrect area computation. This test detects the
# error.
x = [-2.0, 0.0, 0.0, 0.0, 1.0]
y1 = [2.0, 0.0, 0.5, 1.0, 1.0]
y2 = [2.0, 1.0, 0.0, 0.5, 1.0]
y3 = [2.0, 1.0, 0.5, 0.0, 1.0]
for y in (y1, y2, y3):
assert_array_almost_equal(auc(x, y, reorder=True), 3.0)
def test_auc_errors():
# Incompatible shapes
assert_raises(ValueError, auc, [0.0, 0.5, 1.0], [0.1, 0.2])
# Too few x values
assert_raises(ValueError, auc, [0.0], [0.1])
# x is not in order
assert_raises(ValueError, auc, [1.0, 0.0, 0.5], [0.0, 0.0, 0.0])
def test_auc_score_non_binary_class():
"""Test that roc_auc_score function returns an error when trying
to compute AUC for non-binary class values.
"""
rng = check_random_state(404)
y_pred = rng.rand(10)
# y_true contains only one class value
y_true = np.zeros(10, dtype="int")
assert_raise_message(ValueError, "ROC AUC score is not defined",
roc_auc_score, y_true, y_pred)
y_true = np.ones(10, dtype="int")
assert_raise_message(ValueError, "ROC AUC score is not defined",
roc_auc_score, y_true, y_pred)
y_true = -np.ones(10, dtype="int")
assert_raise_message(ValueError, "ROC AUC score is not defined",
roc_auc_score, y_true, y_pred)
# y_true contains three different class values
y_true = rng.randint(0, 3, size=10)
assert_raise_message(ValueError, "multiclass format is not supported",
roc_auc_score, y_true, y_pred)
clean_warning_registry()
with warnings.catch_warnings(record=True):
rng = check_random_state(404)
y_pred = rng.rand(10)
# y_true contains only one class value
y_true = np.zeros(10, dtype="int")
assert_raise_message(ValueError, "ROC AUC score is not defined",
roc_auc_score, y_true, y_pred)
y_true = np.ones(10, dtype="int")
assert_raise_message(ValueError, "ROC AUC score is not defined",
roc_auc_score, y_true, y_pred)
y_true = -np.ones(10, dtype="int")
assert_raise_message(ValueError, "ROC AUC score is not defined",
roc_auc_score, y_true, y_pred)
# y_true contains three different class values
y_true = rng.randint(0, 3, size=10)
assert_raise_message(ValueError, "multiclass format is not supported",
roc_auc_score, y_true, y_pred)
def test_precision_recall_curve():
y_true, _, probas_pred = make_prediction(binary=True)
_test_precision_recall_curve(y_true, probas_pred)
# Use {-1, 1} for labels; make sure original labels aren't modified
y_true[np.where(y_true == 0)] = -1
y_true_copy = y_true.copy()
_test_precision_recall_curve(y_true, probas_pred)
assert_array_equal(y_true_copy, y_true)
labels = [1, 0, 0, 1]
predict_probas = [1, 2, 3, 4]
p, r, t = precision_recall_curve(labels, predict_probas)
assert_array_almost_equal(p, np.array([0.5, 0.33333333, 0.5, 1., 1.]))
assert_array_almost_equal(r, np.array([1., 0.5, 0.5, 0.5, 0.]))
assert_array_almost_equal(t, np.array([1, 2, 3, 4]))
assert_equal(p.size, r.size)
assert_equal(p.size, t.size + 1)
def test_precision_recall_curve_pos_label():
y_true, _, probas_pred = make_prediction(binary=False)
pos_label = 2
p, r, thresholds = precision_recall_curve(y_true,
probas_pred[:, pos_label],
pos_label=pos_label)
p2, r2, thresholds2 = precision_recall_curve(y_true == pos_label,
probas_pred[:, pos_label])
assert_array_almost_equal(p, p2)
assert_array_almost_equal(r, r2)
assert_array_almost_equal(thresholds, thresholds2)
assert_equal(p.size, r.size)
assert_equal(p.size, thresholds.size + 1)
def _test_precision_recall_curve(y_true, probas_pred):
"""Test Precision-Recall and aread under PR curve"""
p, r, thresholds = precision_recall_curve(y_true, probas_pred)
precision_recall_auc = auc(r, p)
assert_array_almost_equal(precision_recall_auc, 0.85, 2)
assert_array_almost_equal(precision_recall_auc,
average_precision_score(y_true, probas_pred))
assert_almost_equal(_average_precision(y_true, probas_pred),
precision_recall_auc, 1)
assert_equal(p.size, r.size)
assert_equal(p.size, thresholds.size + 1)
# Smoke test in the case of proba having only one value
p, r, thresholds = precision_recall_curve(y_true,
np.zeros_like(probas_pred))
precision_recall_auc = auc(r, p)
assert_array_almost_equal(precision_recall_auc, 0.75, 3)
assert_equal(p.size, r.size)
assert_equal(p.size, thresholds.size + 1)
def test_precision_recall_curve_errors():
# Contains non-binary labels
assert_raises(ValueError, precision_recall_curve,
[0, 1, 2], [[0.0], [1.0], [1.0]])
def test_precision_recall_curve_toydata():
with np.errstate(all="raise"):
# Binary classification
y_true = [0, 1]
y_score = [0, 1]
p, r, _ = precision_recall_curve(y_true, y_score)
auc_prc = average_precision_score(y_true, y_score)
assert_array_almost_equal(p, [1, 1])
assert_array_almost_equal(r, [1, 0])
assert_almost_equal(auc_prc, 1.)
y_true = [0, 1]
y_score = [1, 0]
p, r, _ = precision_recall_curve(y_true, y_score)
auc_prc = average_precision_score(y_true, y_score)
assert_array_almost_equal(p, [0.5, 0., 1.])
assert_array_almost_equal(r, [1., 0., 0.])
assert_almost_equal(auc_prc, 0.25)
y_true = [1, 0]
y_score = [1, 1]
p, r, _ = precision_recall_curve(y_true, y_score)
auc_prc = average_precision_score(y_true, y_score)
assert_array_almost_equal(p, [0.5, 1])
assert_array_almost_equal(r, [1., 0])
assert_almost_equal(auc_prc, .75)
y_true = [1, 0]
y_score = [1, 0]
p, r, _ = precision_recall_curve(y_true, y_score)
auc_prc = average_precision_score(y_true, y_score)
assert_array_almost_equal(p, [1, 1])
assert_array_almost_equal(r, [1, 0])
assert_almost_equal(auc_prc, 1.)
y_true = [1, 0]
y_score = [0.5, 0.5]
p, r, _ = precision_recall_curve(y_true, y_score)
auc_prc = average_precision_score(y_true, y_score)
assert_array_almost_equal(p, [0.5, 1])
assert_array_almost_equal(r, [1, 0.])
assert_almost_equal(auc_prc, .75)
y_true = [0, 0]
y_score = [0.25, 0.75]
assert_raises(Exception, precision_recall_curve, y_true, y_score)
assert_raises(Exception, average_precision_score, y_true, y_score)
y_true = [1, 1]
y_score = [0.25, 0.75]
p, r, _ = precision_recall_curve(y_true, y_score)
assert_almost_equal(average_precision_score(y_true, y_score), 1.)
assert_array_almost_equal(p, [1., 1., 1.])
assert_array_almost_equal(r, [1, 0.5, 0.])
# Multi-label classification task
y_true = np.array([[0, 1], [0, 1]])
y_score = np.array([[0, 1], [0, 1]])
assert_raises(Exception, average_precision_score, y_true, y_score,
average="macro")
assert_raises(Exception, average_precision_score, y_true, y_score,
average="weighted")
assert_almost_equal(average_precision_score(y_true, y_score,
average="samples"), 1.)
assert_almost_equal(average_precision_score(y_true, y_score,
average="micro"), 1.)
y_true = np.array([[0, 1], [0, 1]])
y_score = np.array([[0, 1], [1, 0]])
assert_raises(Exception, average_precision_score, y_true, y_score,
average="macro")
assert_raises(Exception, average_precision_score, y_true, y_score,
average="weighted")
assert_almost_equal(average_precision_score(y_true, y_score,
average="samples"), 0.625)
assert_almost_equal(average_precision_score(y_true, y_score,
average="micro"), 0.625)
y_true = np.array([[1, 0], [0, 1]])
y_score = np.array([[0, 1], [1, 0]])
assert_almost_equal(average_precision_score(y_true, y_score,
average="macro"), 0.25)
assert_almost_equal(average_precision_score(y_true, y_score,
average="weighted"), 0.25)
assert_almost_equal(average_precision_score(y_true, y_score,
average="samples"), 0.25)
assert_almost_equal(average_precision_score(y_true, y_score,
average="micro"), 0.25)
y_true = np.array([[1, 0], [0, 1]])
y_score = np.array([[0.5, 0.5], [0.5, 0.5]])
assert_almost_equal(average_precision_score(y_true, y_score,
average="macro"), 0.75)
assert_almost_equal(average_precision_score(y_true, y_score,
average="weighted"), 0.75)
assert_almost_equal(average_precision_score(y_true, y_score,
average="samples"), 0.75)
assert_almost_equal(average_precision_score(y_true, y_score,
average="micro"), 0.75)
def test_score_scale_invariance():
# Test that average_precision_score and roc_auc_score are invariant by
# the scaling or shifting of probabilities
y_true, _, probas_pred = make_prediction(binary=True)
roc_auc = roc_auc_score(y_true, probas_pred)
roc_auc_scaled = roc_auc_score(y_true, 100 * probas_pred)
roc_auc_shifted = roc_auc_score(y_true, probas_pred - 10)
assert_equal(roc_auc, roc_auc_scaled)
assert_equal(roc_auc, roc_auc_shifted)
f = ignore_warnings(auc_score)
roc_auc = f(y_true, probas_pred)
roc_auc_scaled = f(y_true, 100 * probas_pred)
roc_auc_shifted = f(y_true, probas_pred - 10)
assert_equal(roc_auc, roc_auc_scaled)
assert_equal(roc_auc, roc_auc_shifted)
pr_auc = average_precision_score(y_true, probas_pred)
pr_auc_scaled = average_precision_score(y_true, 100 * probas_pred)
pr_auc_shifted = average_precision_score(y_true, probas_pred - 10)
assert_equal(pr_auc, pr_auc_scaled)
assert_equal(pr_auc, pr_auc_shifted)
def check_lrap_toy(lrap_score):
"""Check on several small example that it works """
assert_almost_equal(lrap_score([[0, 1]], [[0.25, 0.75]]), 1)
assert_almost_equal(lrap_score([[0, 1]], [[0.75, 0.25]]), 1 / 2)
assert_almost_equal(lrap_score([[1, 1]], [[0.75, 0.25]]), 1)
assert_almost_equal(lrap_score([[0, 0, 1]], [[0.25, 0.5, 0.75]]), 1)
assert_almost_equal(lrap_score([[0, 1, 0]], [[0.25, 0.5, 0.75]]), 1 / 2)
assert_almost_equal(lrap_score([[0, 1, 1]], [[0.25, 0.5, 0.75]]), 1)
assert_almost_equal(lrap_score([[1, 0, 0]], [[0.25, 0.5, 0.75]]), 1 / 3)
assert_almost_equal(lrap_score([[1, 0, 1]], [[0.25, 0.5, 0.75]]),
(2 / 3 + 1 / 1) / 2)
assert_almost_equal(lrap_score([[1, 1, 0]], [[0.25, 0.5, 0.75]]),
(2 / 3 + 1 / 2) / 2)
assert_almost_equal(lrap_score([[0, 0, 1]], [[0.75, 0.5, 0.25]]), 1 / 3)
assert_almost_equal(lrap_score([[0, 1, 0]], [[0.75, 0.5, 0.25]]), 1 / 2)
assert_almost_equal(lrap_score([[0, 1, 1]], [[0.75, 0.5, 0.25]]),
(1 / 2 + 2 / 3) / 2)
assert_almost_equal(lrap_score([[1, 0, 0]], [[0.75, 0.5, 0.25]]), 1)
assert_almost_equal(lrap_score([[1, 0, 1]], [[0.75, 0.5, 0.25]]),
(1 + 2 / 3) / 2)
assert_almost_equal(lrap_score([[1, 1, 0]], [[0.75, 0.5, 0.25]]), 1)
assert_almost_equal(lrap_score([[1, 1, 1]], [[0.75, 0.5, 0.25]]), 1)
assert_almost_equal(lrap_score([[0, 0, 1]], [[0.5, 0.75, 0.25]]), 1 / 3)
assert_almost_equal(lrap_score([[0, 1, 0]], [[0.5, 0.75, 0.25]]), 1)
assert_almost_equal(lrap_score([[0, 1, 1]], [[0.5, 0.75, 0.25]]),
(1 + 2 / 3) / 2)
assert_almost_equal(lrap_score([[1, 0, 0]], [[0.5, 0.75, 0.25]]), 1 / 2)
assert_almost_equal(lrap_score([[1, 0, 1]], [[0.5, 0.75, 0.25]]),
(1 / 2 + 2 / 3) / 2)
assert_almost_equal(lrap_score([[1, 1, 0]], [[0.5, 0.75, 0.25]]), 1)
assert_almost_equal(lrap_score([[1, 1, 1]], [[0.5, 0.75, 0.25]]), 1)
# Tie handling
assert_almost_equal(lrap_score([[1, 0]], [[0.5, 0.5]]), 0.5)
assert_almost_equal(lrap_score([[0, 1]], [[0.5, 0.5]]), 0.5)
assert_almost_equal(lrap_score([[1, 1]], [[0.5, 0.5]]), 1)
assert_almost_equal(lrap_score([[0, 0, 1]], [[0.25, 0.5, 0.5]]), 0.5)
assert_almost_equal(lrap_score([[0, 1, 0]], [[0.25, 0.5, 0.5]]), 0.5)
assert_almost_equal(lrap_score([[0, 1, 1]], [[0.25, 0.5, 0.5]]), 1)
assert_almost_equal(lrap_score([[1, 0, 0]], [[0.25, 0.5, 0.5]]), 1 / 3)
assert_almost_equal(lrap_score([[1, 0, 1]], [[0.25, 0.5, 0.5]]),
(2 / 3 + 1 / 2) / 2)
assert_almost_equal(lrap_score([[1, 1, 0]], [[0.25, 0.5, 0.5]]),
(2 / 3 + 1 / 2) / 2)
assert_almost_equal(lrap_score([[1, 1, 1]], [[0.25, 0.5, 0.5]]), 1)
assert_almost_equal(lrap_score([[1, 1, 0]], [[0.5, 0.5, 0.5]]), 2 / 3)
assert_almost_equal(lrap_score([[1, 1, 1, 0]], [[0.5, 0.5, 0.5, 0.5]]),
3 / 4)
def check_zero_or_all_relevant_labels(lrap_score):
random_state = check_random_state(0)
for n_labels in range(2, 5):
y_score = random_state.uniform(size=(1, n_labels))
y_score_ties = np.zeros_like(y_score)
# No relevant labels
y_true = np.zeros((1, n_labels))
assert_equal(lrap_score(y_true, y_score), 1.)
assert_equal(lrap_score(y_true, y_score_ties), 1.)
# Only relevant labels
y_true = np.ones((1, n_labels))
assert_equal(lrap_score(y_true, y_score), 1.)
assert_equal(lrap_score(y_true, y_score_ties), 1.)
# Degenerate case: only one label
assert_almost_equal(lrap_score([[1], [0], [1], [0]],
[[0.5], [0.5], [0.5], [0.5]]), 1.)
def check_lrap_error_raised(lrap_score):
# Raise value error if not appropriate format
assert_raises(ValueError, lrap_score,
[0, 1, 0], [0.25, 0.3, 0.2])
assert_raises(ValueError, lrap_score, [0, 1, 2],
[[0.25, 0.75, 0.0], [0.7, 0.3, 0.0], [0.8, 0.2, 0.0]])
assert_raises(ValueError, lrap_score, [(0), (1), (2)],
[[0.25, 0.75, 0.0], [0.7, 0.3, 0.0], [0.8, 0.2, 0.0]])
# Check that y_true.shape != y_score.shape raises the proper exception
assert_raises(ValueError, lrap_score, [[0, 1], [0, 1]], [0, 1])
assert_raises(ValueError, lrap_score, [[0, 1], [0, 1]], [[0, 1]])
assert_raises(ValueError, lrap_score, [[0, 1], [0, 1]], [[0], [1]])
assert_raises(ValueError, lrap_score, [[0, 1]], [[0, 1], [0, 1]])
assert_raises(ValueError, lrap_score, [[0], [1]], [[0, 1], [0, 1]])
assert_raises(ValueError, lrap_score, [[0, 1], [0, 1]], [[0], [1]])
def check_lrap_only_ties(lrap_score):
"""Check tie handling in score"""
# Basic check with only ties and increasing label space
for n_labels in range(2, 10):
y_score = np.ones((1, n_labels))
# Check for a growing number of consecutive relevant labels
for n_relevant in range(1, n_labels):
# Check for a bunch of positions
for pos in range(n_labels - n_relevant):
y_true = np.zeros((1, n_labels))
y_true[0, pos:pos + n_relevant] = 1
assert_almost_equal(lrap_score(y_true, y_score),
n_relevant / n_labels)
def check_lrap_without_tie_and_increasing_score(lrap_score):
""" Check that Label ranking average precision works for various"""
# Basic check with increasing label space size and decreasing score
for n_labels in range(2, 10):
y_score = n_labels - (np.arange(n_labels).reshape((1, n_labels)) + 1)
# First and last
y_true = np.zeros((1, n_labels))
y_true[0, 0] = 1
y_true[0, -1] = 1
assert_almost_equal(lrap_score(y_true, y_score),
(2 / n_labels + 1) / 2)
# Check for a growing number of consecutive relevant labels
for n_relevant in range(1, n_labels):
# Check for a bunch of positions
for pos in range(n_labels - n_relevant):
y_true = np.zeros((1, n_labels))
y_true[0, pos:pos + n_relevant] = 1
assert_almost_equal(lrap_score(y_true, y_score),
sum((r + 1) / ((pos + r + 1) * n_relevant)
for r in range(n_relevant)))
def _my_lrap(y_true, y_score):
"""Simple implementation of label ranking average precision"""
check_consistent_length(y_true, y_score)
y_true = check_array(y_true)
y_score = check_array(y_score)
n_samples, n_labels = y_true.shape
score = np.empty((n_samples, ))
for i in range(n_samples):
# The best rank corresponds to 1. Ranks higher than 1 are worse.
# The best inverse ranking corresponds to n_labels.
unique_rank, inv_rank = np.unique(y_score[i], return_inverse=True)
n_ranks = unique_rank.size
rank = n_ranks - inv_rank
# Ranks need to be corrected to take ties into account,
# e.g. two labels tied for rank 1 both get rank 2.
corr_rank = np.bincount(rank, minlength=n_ranks + 1).cumsum()
rank = corr_rank[rank]
relevant = y_true[i].nonzero()[0]
if relevant.size == 0 or relevant.size == n_labels:
score[i] = 1
continue
score[i] = 0.
for label in relevant:
# Count the number of relevant labels with a better
# (smaller) rank.
n_ranked_above = sum(rank[r] <= rank[label] for r in relevant)
# Weight by the rank of the actual label
score[i] += n_ranked_above / rank[label]
score[i] /= relevant.size
return score.mean()
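# Quick illustrative check (assumption: run manually): _my_lrap([[1, 0, 1]],
# [[0.25, 0.5, 0.75]]) should give (2/3 + 1/1) / 2, matching the corresponding
# case in check_lrap_toy above.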
def check_alternative_lrap_implementation(lrap_score, n_classes=5,
n_samples=20, random_state=0):
_, y_true = make_multilabel_classification(n_features=1,
allow_unlabeled=False,
return_indicator=True,
random_state=random_state,
n_classes=n_classes,
n_samples=n_samples)
# Score with ties
y_score = sparse_random_matrix(n_components=y_true.shape[0],
n_features=y_true.shape[1],
random_state=random_state)
if hasattr(y_score, "toarray"):
y_score = y_score.toarray()
score_lrap = label_ranking_average_precision_score(y_true, y_score)
score_my_lrap = _my_lrap(y_true, y_score)
assert_almost_equal(score_lrap, score_my_lrap)
# Uniform score
random_state = check_random_state(random_state)
y_score = random_state.uniform(size=(n_samples, n_classes))
score_lrap = label_ranking_average_precision_score(y_true, y_score)
score_my_lrap = _my_lrap(y_true, y_score)
assert_almost_equal(score_lrap, score_my_lrap)
def test_label_ranking_avp():
for fn in [label_ranking_average_precision_score, _my_lrap]:
yield check_lrap_toy, fn
yield check_lrap_without_tie_and_increasing_score, fn
yield check_lrap_only_ties, fn
yield check_zero_or_all_relevant_labels, fn
yield check_lrap_error_raised, label_ranking_average_precision_score
for n_samples, n_classes, random_state in product((1, 2, 8, 20),
(2, 5, 10),
range(1)):
yield (check_alternative_lrap_implementation,
label_ranking_average_precision_score,
n_classes, n_samples, random_state)
|
bsd-3-clause
|
YerevaNN/mimic3-benchmarks
|
mimic3benchmark/scripts/create_length_of_stay.py
|
1
|
3792
|
from __future__ import absolute_import
from __future__ import print_function
import os
import argparse
import numpy as np
import pandas as pd
import random
random.seed(49297)
from tqdm import tqdm
def process_partition(args, partition, sample_rate=1.0, shortest_length=4.0, eps=1e-6):
output_dir = os.path.join(args.output_path, partition)
if not os.path.exists(output_dir):
os.mkdir(output_dir)
xty_triples = []
patients = list(filter(str.isdigit, os.listdir(os.path.join(args.root_path, partition))))
for patient in tqdm(patients, desc='Iterating over patients in {}'.format(partition)):
patient_folder = os.path.join(args.root_path, partition, patient)
patient_ts_files = list(filter(lambda x: x.find("timeseries") != -1, os.listdir(patient_folder)))
for ts_filename in patient_ts_files:
with open(os.path.join(patient_folder, ts_filename)) as tsfile:
lb_filename = ts_filename.replace("_timeseries", "")
label_df = pd.read_csv(os.path.join(patient_folder, lb_filename))
# empty label file
if label_df.shape[0] == 0:
print("\n\t(empty label file)", patient, ts_filename)
continue
los = 24.0 * label_df.iloc[0]['Length of Stay'] # in hours
if pd.isnull(los):
print("\n\t(length of stay is missing)", patient, ts_filename)
continue
ts_lines = tsfile.readlines()
header = ts_lines[0]
ts_lines = ts_lines[1:]
event_times = [float(line.split(',')[0]) for line in ts_lines]
ts_lines = [line for (line, t) in zip(ts_lines, event_times)
if -eps < t < los + eps]
event_times = [t for t in event_times
if -eps < t < los + eps]
# no measurements in ICU
if len(ts_lines) == 0:
print("\n\t(no events in ICU) ", patient, ts_filename)
continue
sample_times = np.arange(0.0, los + eps, sample_rate)
sample_times = list(filter(lambda x: x > shortest_length, sample_times))
# At least one measurement
sample_times = list(filter(lambda x: x > event_times[0], sample_times))
output_ts_filename = patient + "_" + ts_filename
with open(os.path.join(output_dir, output_ts_filename), "w") as outfile:
outfile.write(header)
for line in ts_lines:
outfile.write(line)
for t in sample_times:
xty_triples.append((output_ts_filename, t, los - t))
print("Number of created samples:", len(xty_triples))
if partition == "train":
random.shuffle(xty_triples)
if partition == "test":
xty_triples = sorted(xty_triples)
with open(os.path.join(output_dir, "listfile.csv"), "w") as listfile:
listfile.write('stay,period_length,y_true\n')
for (x, t, y) in xty_triples:
listfile.write('{},{:.6f},{:.6f}\n'.format(x, t, y))
def main():
parser = argparse.ArgumentParser(description="Create data for length of stay prediction task.")
parser.add_argument('root_path', type=str, help="Path to root folder containing train and test sets.")
parser.add_argument('output_path', type=str, help="Directory where the created data should be stored.")
args, _ = parser.parse_known_args()
if not os.path.exists(args.output_path):
os.makedirs(args.output_path)
process_partition(args, "test")
process_partition(args, "train")
if __name__ == '__main__':
main()
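# Hypothetical invocation (paths are placeholders for the extracted MIMIC-III
# root folder and the desired output folder):
#   python -m mimic3benchmark.scripts.create_length_of_stay data/root/ data/length-of-stay/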
|
mit
|
JT5D/scikit-learn
|
sklearn/ensemble/partial_dependence.py
|
5
|
14809
|
"""Partial dependence plots for tree ensembles. """
# Authors: Peter Prettenhofer
# License: BSD 3 clause
from itertools import count
from sklearn.externals.six.moves import zip
import numbers
import numpy as np
from scipy.stats.mstats import mquantiles
from ..utils.extmath import cartesian
from ..externals.joblib import Parallel, delayed
from ..externals import six
from ..externals.six.moves import xrange
from ..utils import array2d
from ..tree._tree import DTYPE
from ._gradient_boosting import _partial_dependence_tree
from .gradient_boosting import BaseGradientBoosting
def _grid_from_X(X, percentiles=(0.05, 0.95), grid_resolution=100):
"""Generate a grid of points based on the ``percentiles of ``X``.
The grid is generated by placing ``grid_resolution`` equally
spaced points between the ``percentiles`` of each column
of ``X``.
Parameters
----------
X : ndarray
The data
percentiles : tuple of floats
The percentiles which are used to construct the extreme
values of the grid axes.
grid_resolution : int
The number of equally spaced points that are placed
on the grid.
Returns
-------
grid : ndarray
All data points on the grid; ``grid.shape[1] == X.shape[1]``
and ``grid.shape[0]`` equals the product of the axis lengths
(``grid_resolution ** X.shape[1]`` when no column falls back to its
unique values).
axes : seq of ndarray
The axes with which the grid has been created.
"""
if len(percentiles) != 2:
raise ValueError('percentile must be tuple of len 2')
if not all(map(lambda x: 0.0 <= x <= 1.0, percentiles)):
raise ValueError('percentile values must be in [0, 1]')
axes = []
for col in range(X.shape[1]):
uniques = np.unique(X[:, col])
if uniques.shape[0] < grid_resolution:
# feature has low resolution; use its unique values
axis = uniques
else:
emp_percentiles = mquantiles(X, prob=percentiles, axis=0)
# create axis based on percentiles and grid resolution
axis = np.linspace(emp_percentiles[0, col],
emp_percentiles[1, col],
num=grid_resolution, endpoint=True)
axes.append(axis)
return cartesian(axes), axes
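# Illustrative sketch (not part of the original module): for a feature matrix
# with two columns and grid_resolution=5, each axis holds 5 points between
# that column's 5th and 95th percentiles (or its unique values if there are
# fewer than 5), and the returned grid is their cartesian product, here of
# shape (25, 2).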
def partial_dependence(gbrt, target_variables, grid=None, X=None,
percentiles=(0.05, 0.95), grid_resolution=100):
"""Partial dependence of ``target_variables``.
Partial dependence plots show the dependence between the joint values
of the ``target_variables`` and the function represented
by the ``gbrt``.
Parameters
----------
gbrt : BaseGradientBoosting
A fitted gradient boosting model.
target_variables : array-like, dtype=int
The target features for which the partial dependency should be
computed (size should be smaller than 3 for visual renderings).
grid : array-like, shape=(n_points, len(target_variables))
The grid of ``target_variables`` values for which the
partial dependency should be evaluated (either ``grid`` or ``X``
must be specified).
X : array-like, shape=(n_samples, n_features)
The data on which ``gbrt`` was trained. It is used to generate
a ``grid`` for the ``target_variables``. The ``grid`` comprises
``grid_resolution`` equally spaced points between the two
``percentiles``.
percentiles : (low, high), default=(0.05, 0.95)
The lower and upper percentiles used to create the extreme values
for the ``grid``. Only if ``X`` is not None.
grid_resolution : int, default=100
The number of equally spaced points on the ``grid``.
Returns
-------
pdp : array, shape=(n_classes, n_points)
The partial dependence function evaluated on the ``grid``.
For regression and binary classification ``n_classes==1``.
axes : seq of ndarray or None
The axes with which the grid has been created or None if
the grid has been given.
Examples
--------
>>> samples = [[0, 0, 2], [1, 0, 0]]
>>> labels = [0, 1]
>>> from sklearn.ensemble import GradientBoostingClassifier
>>> gb = GradientBoostingClassifier(random_state=0).fit(samples, labels)
>>> kwargs = dict(X=samples, percentiles=(0, 1), grid_resolution=2)
>>> partial_dependence(gb, [0], **kwargs) # doctest: +SKIP
(array([[-4.52..., 4.52...]]), [array([ 0., 1.])])
"""
if not isinstance(gbrt, BaseGradientBoosting):
raise ValueError('gbrt has to be an instance of BaseGradientBoosting')
if gbrt.estimators_.shape[0] == 0:
raise ValueError('Call %s.fit before partial_dependence' %
gbrt.__class__.__name__)
if (grid is None and X is None) or (grid is not None and X is not None):
raise ValueError('Either grid or X must be specified')
target_variables = np.asarray(target_variables, dtype=np.int32,
order='C').ravel()
if any([not (0 <= fx < gbrt.n_features) for fx in target_variables]):
raise ValueError('target_variables must be in [0, %d]'
% (gbrt.n_features - 1))
if X is not None:
X = array2d(X, dtype=DTYPE, order='C')
grid, axes = _grid_from_X(X[:, target_variables], percentiles,
grid_resolution)
else:
assert grid is not None
# don't return axes if grid is given
axes = None
# grid must be 2d
if grid.ndim == 1:
grid = grid[:, np.newaxis]
if grid.ndim != 2:
raise ValueError('grid must be 2d but is %dd' % grid.ndim)
grid = np.asarray(grid, dtype=DTYPE, order='C')
assert grid.shape[1] == target_variables.shape[0]
n_trees_per_stage = gbrt.estimators_.shape[1]
n_estimators = gbrt.estimators_.shape[0]
pdp = np.zeros((n_trees_per_stage, grid.shape[0],), dtype=np.float64,
order='C')
for stage in xrange(n_estimators):
for k in range(n_trees_per_stage):
tree = gbrt.estimators_[stage, k].tree_
_partial_dependence_tree(tree, grid, target_variables,
gbrt.learning_rate, pdp[k])
return pdp, axes
def plot_partial_dependence(gbrt, X, features, feature_names=None,
label=None, n_cols=3, grid_resolution=100,
percentiles=(0.05, 0.95), n_jobs=1,
verbose=0, ax=None, line_kw=None,
contour_kw=None, **fig_kw):
"""Partial dependence plots for ``features``.
The ``len(features)`` plots are arranged in a grid with ``n_cols``
columns. Two-way partial dependence plots are plotted as contour
plots.
Parameters
----------
gbrt : BaseGradientBoosting
A fitted gradient boosting model.
X : array-like, shape=(n_samples, n_features)
The data on which ``gbrt`` was trained.
features : seq of tuples or ints
If seq[i] is an int or a tuple with one int value, a one-way
PDP is created; if seq[i] is a tuple of two ints, a two-way
PDP is created.
feature_names : seq of str
Name of each feature; feature_names[i] holds
the name of the feature with index i.
label : object
The class label for which the PDPs should be computed.
Only if gbrt is a multi-class model. Must be in ``gbrt.classes_``.
n_cols : int
The number of columns in the grid plot (default: 3).
percentiles : (low, high), default=(0.05, 0.95)
The lower and upper percentiles used to create the extreme values
for the PDP axes.
grid_resolution : int, default=100
The number of equally spaced points on the axes.
n_jobs : int
The number of CPUs to use to compute the PDs. -1 means 'all CPUs'.
Defaults to 1.
verbose : int
Verbose output during PD computations. Defaults to 0.
ax : Matplotlib axis object, default None
An axis object onto which the plots will be drawn.
line_kw : dict
Dict with keywords passed to the ``pylab.plot`` call.
For one-way partial dependence plots.
contour_kw : dict
Dict with keywords passed to the ``pylab.contourf`` call.
For two-way partial dependence plots.
fig_kw : dict
Dict with keywords passed to the figure() call.
Note that all keywords not recognized above will be automatically
included here.
Returns
-------
fig : figure
The Matplotlib Figure object.
axs : seq of Axis objects
A seq of Axis objects, one for each subplot.
Examples
--------
>>> from sklearn.datasets import make_friedman1
>>> from sklearn.ensemble import GradientBoostingRegressor
>>> X, y = make_friedman1()
>>> clf = GradientBoostingRegressor(n_estimators=10).fit(X, y)
>>> fig, axs = plot_partial_dependence(clf, X, [0, (0, 1)]) #doctest: +SKIP
...
"""
import matplotlib.pyplot as plt
from matplotlib import transforms
from matplotlib.ticker import MaxNLocator
from matplotlib.ticker import ScalarFormatter
if not isinstance(gbrt, BaseGradientBoosting):
raise ValueError('gbrt has to be an instance of BaseGradientBoosting')
if gbrt.estimators_.shape[0] == 0:
raise ValueError('Call %s.fit before partial_dependence' %
gbrt.__class__.__name__)
# set label_idx for multi-class GBRT
if hasattr(gbrt, 'classes_') and np.size(gbrt.classes_) > 2:
if label is None:
raise ValueError('label is not given for multi-class PDP')
label_idx = np.searchsorted(gbrt.classes_, label)
if gbrt.classes_[label_idx] != label:
raise ValueError('label %s not in ``gbrt.classes_``' % str(label))
else:
# regression and binary classification
label_idx = 0
X = array2d(X, dtype=DTYPE, order='C')
if gbrt.n_features != X.shape[1]:
raise ValueError('X.shape[1] does not match gbrt.n_features')
if line_kw is None:
line_kw = {'color': 'green'}
if contour_kw is None:
contour_kw = {}
# convert feature_names to list
if feature_names is None:
# if feature_names is not given, use feature indices as names
feature_names = map(str, range(gbrt.n_features))
elif isinstance(feature_names, np.ndarray):
feature_names = feature_names.tolist()
def convert_feature(fx):
if isinstance(fx, six.string_types):
try:
fx = feature_names.index(fx)
except ValueError:
raise ValueError('Feature %s not in feature_names' % fx)
return fx
# convert features into a seq of int tuples
tmp_features = []
for fxs in features:
if isinstance(fxs, (numbers.Integral,) + six.string_types):
fxs = (fxs,)
try:
fxs = np.array([convert_feature(fx) for fx in fxs], dtype=np.int32)
except TypeError:
raise ValueError('features must be either int, str, or tuple '
'of int/str')
if not (1 <= np.size(fxs) <= 2):
raise ValueError('target features must be either one or two')
tmp_features.append(fxs)
features = tmp_features
names = []
try:
for fxs in features:
names.append([feature_names[i] for i in fxs])
except IndexError:
raise ValueError('features[i] must be in [0, n_features) '
'but was %d' % i)
# compute PD functions
pd_result = Parallel(n_jobs=n_jobs, verbose=verbose)(
delayed(partial_dependence)(gbrt, fxs, X=X,
grid_resolution=grid_resolution)
for fxs in features)
# get global min and max values of PD grouped by plot type
pdp_lim = {}
for pdp, axes in pd_result:
min_pd, max_pd = pdp[label_idx].min(), pdp[label_idx].max()
n_fx = len(axes)
old_min_pd, old_max_pd = pdp_lim.get(n_fx, (min_pd, max_pd))
min_pd = min(min_pd, old_min_pd)
max_pd = max(max_pd, old_max_pd)
pdp_lim[n_fx] = (min_pd, max_pd)
# create contour levels for two-way plots
if 2 in pdp_lim:
Z_level = np.linspace(*pdp_lim[2], num=8)
if ax is None:
fig = plt.figure(**fig_kw)
else:
fig = ax.get_figure()
fig.clear()
n_cols = min(n_cols, len(features))
n_rows = int(np.ceil(len(features) / float(n_cols)))
axs = []
for i, fx, name, (pdp, axes) in zip(count(), features, names,
pd_result):
ax = fig.add_subplot(n_rows, n_cols, i + 1)
if len(axes) == 1:
ax.plot(axes[0], pdp[label_idx].ravel(), **line_kw)
else:
# make contour plot
assert len(axes) == 2
XX, YY = np.meshgrid(axes[0], axes[1])
Z = pdp[label_idx].reshape(map(np.size, axes)).T
CS = ax.contour(XX, YY, Z, levels=Z_level, linewidths=0.5,
colors='k')
ax.contourf(XX, YY, Z, levels=Z_level, vmax=Z_level[-1],
vmin=Z_level[0], alpha=0.75, **contour_kw)
ax.clabel(CS, fmt='%2.2f', colors='k', fontsize=10, inline=True)
# plot data deciles + axes labels
deciles = mquantiles(X[:, fx[0]], prob=np.arange(0.1, 1.0, 0.1))
trans = transforms.blended_transform_factory(ax.transData,
ax.transAxes)
ylim = ax.get_ylim()
ax.vlines(deciles, [0], 0.05, transform=trans, color='k')
ax.set_xlabel(name[0])
ax.set_ylim(ylim)
# prevent x-axis ticks from overlapping
ax.xaxis.set_major_locator(MaxNLocator(nbins=6, prune='lower'))
tick_formatter = ScalarFormatter()
tick_formatter.set_powerlimits((-3, 4))
ax.xaxis.set_major_formatter(tick_formatter)
if len(axes) > 1:
# two-way PDP - y-axis deciles + labels
deciles = mquantiles(X[:, fx[1]], prob=np.arange(0.1, 1.0, 0.1))
trans = transforms.blended_transform_factory(ax.transAxes,
ax.transData)
xlim = ax.get_xlim()
ax.hlines(deciles, [0], 0.05, transform=trans, color='k')
ax.set_ylabel(name[1])
# hline erases xlim
ax.set_xlim(xlim)
else:
ax.set_ylabel('Partial dependence')
if len(axes) == 1:
ax.set_ylim(pdp_lim[1])
axs.append(ax)
fig.subplots_adjust(bottom=0.15, top=0.7, left=0.1, right=0.95, wspace=0.4,
hspace=0.3)
return fig, axs
|
bsd-3-clause
|
and2egg/philharmonic
|
philharmonic/energy_meter/simple_visualiser.py
|
2
|
1414
|
'''
Created on Jun 15, 2012
@author: kermit
'''
#from pylab import *
import matplotlib.pyplot as plt
import numpy as np
from haley_api import Wattmeter
from continuous_energy_meter import ContinuousEnergyMeter
from runner_til_keypressed import RunnerTilKeypressed
machines = ["snowwhite", "grumpy"]
metrics = ["active_power", "apparent_power"]
def draw_current_state():
plt.hold(True)
en_meter = Wattmeter()
colors = ["r", "b"]
#machines = ["grumpy", "snowwhite", "sneezy"]
results = en_meter.multiple(machines, metrics)
mat = np.matrix(results[1:])
positions = np.arange(0,len(machines)*len(metrics),len(metrics))
print(mat)
rows = len(np.array(mat[:,0].T)[0])
for i in range(rows):
data_row = np.array(mat[i, :])[0]
plt.bar(positions, data_row, color = colors[i])
positions = [position+1 for position in positions]
plt.show()
def draw_state_long():
# gather data
#machines = ["grumpy", "snowwhite", "sneezy"]
interval = 1 # seconds
cont_en_meter = ContinuousEnergyMeter(machines, metrics, interval)
runner = RunnerTilKeypressed()
runner.run(cont_en_meter)
# draw it
print ("Gonna draw this:")
data = cont_en_meter.get_all_data()
print(data)
#plt.plot(data)
plt.figure()
data.plot()
#plt.show()
if __name__ == '__main__':
#draw_current_state()
draw_state_long()
|
gpl-3.0
|
bavardage/statsmodels
|
statsmodels/nonparametric/_kernel_base.py
|
3
|
17942
|
"""
Module containing the base object for multivariate kernel density and
regression, plus some utilities.
"""
import copy
import numpy as np
from scipy import optimize
from scipy.stats.mstats import mquantiles
try:
import joblib
has_joblib = True
except ImportError:
has_joblib = False
import kernels
kernel_func = dict(wangryzin=kernels.wang_ryzin,
aitchisonaitken=kernels.aitchison_aitken,
gaussian=kernels.gaussian,
aitchison_aitken_reg = kernels.aitchison_aitken_reg,
wangryzin_reg = kernels.wang_ryzin_reg,
gauss_convolution=kernels.gaussian_convolution,
wangryzin_convolution=kernels.wang_ryzin_convolution,
aitchisonaitken_convolution=kernels.aitchison_aitken_convolution,
gaussian_cdf=kernels.gaussian_cdf,
aitchisonaitken_cdf=kernels.aitchison_aitken_cdf,
wangryzin_cdf=kernels.wang_ryzin_cdf,
d_gaussian=kernels.d_gaussian)
def _compute_min_std_IQR(data):
"""Compute minimum of std and IQR for each variable."""
s1 = np.std(data, axis=0)
q75 = mquantiles(data, 0.75, axis=0).data[0]
q25 = mquantiles(data, 0.25, axis=0).data[0]
s2 = (q75 - q25) / 1.349 # IQR
dispersion = np.minimum(s1, s2)
return dispersion
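# Illustrative note (assumption, not from the original module): for a normal
# sample the IQR is roughly 1.349 * sigma, so s2 approximates the standard
# deviation, and taking the minimum of the two guards against heavy-tailed or
# clustered data inflating the bandwidth.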
def _compute_subset(class_type, data, bw, co, do, n_cvars, ix_ord,
ix_unord, n_sub, class_vars, randomize, bound):
""""Compute bw on subset of data.
Called from ``GenericKDE._compute_efficient_*``.
Notes
-----
Needs to be outside the class in order for joblib to be able to pickle it.
"""
if randomize:
np.random.shuffle(data)
sub_data = data[:n_sub, :]
else:
sub_data = data[bound[0]:bound[1], :]
if class_type == 'KDEMultivariate':
from kernel_density import KDEMultivariate
var_type = class_vars[0]
sub_model = KDEMultivariate(sub_data, var_type, bw=bw,
defaults=EstimatorSettings(efficient=False))
elif class_type == 'KDEMultivariateConditional':
from kernel_density import KDEMultivariateConditional
k_dep, dep_type, indep_type = class_vars
endog = sub_data[:, :k_dep]
exog = sub_data[:, k_dep:]
sub_model = KDEMultivariateConditional(endog, exog, dep_type,
indep_type, bw=bw, defaults=EstimatorSettings(efficient=False))
elif class_type == 'KernelReg':
from kernel_regression import KernelReg
var_type, k_vars, reg_type = class_vars
endog = _adjust_shape(sub_data[:, 0], 1)
exog = _adjust_shape(sub_data[:, 1:], k_vars)
sub_model = KernelReg(endog=endog, exog=exog, reg_type=reg_type,
var_type=var_type, bw=bw,
defaults=EstimatorSettings(efficient=False))
else:
raise ValueError("class_type not recognized, should be one of " \
"{KDEMultivariate, KDEMultivariateConditional, KernelReg}")
# Compute dispersion in next 4 lines
if class_type == 'KernelReg':
sub_data = sub_data[:, 1:]
dispersion = _compute_min_std_IQR(sub_data)
fct = dispersion * n_sub**(-1. / (n_cvars + co))
fct[ix_unord] = n_sub**(-2. / (n_cvars + do))
fct[ix_ord] = n_sub**(-2. / (n_cvars + do))
sample_scale_sub = sub_model.bw / fct #TODO: check if correct
bw_sub = sub_model.bw
return sample_scale_sub, bw_sub
class GenericKDE (object):
"""
Base class for density estimation and regression KDE classes.
"""
def _compute_bw(self, bw):
"""
Computes the bandwidth of the data.
Parameters
----------
bw: array_like or str
If array_like: user-specified bandwidth.
If a string, should be one of:
- cv_ml: cross validation maximum likelihood
- normal_reference: normal reference rule of thumb
- cv_ls: cross validation least squares
Notes
-----
The default value for bw is 'normal_reference'.
"""
self.bw_func = dict(normal_reference=self._normal_reference,
cv_ml=self._cv_ml, cv_ls=self._cv_ls)
if bw is None:
bwfunc = self.bw_func['normal_reference']
return bwfunc()
if not isinstance(bw, basestring):
self._bw_method = "user-specified"
res = np.asarray(bw)
else:
# The user specified a bandwidth selection method
self._bw_method = bw
bwfunc = self.bw_func[bw]
res = bwfunc()
return res
def _compute_dispersion(self, data):
"""
Computes the measure of dispersion.
The minimum of the standard deviation and interquartile range / 1.349
Notes
-----
Reimplemented in `KernelReg`, because the first column of `data` has to
be removed.
References
----------
See the user guide for the np package in R.
In the notes on bwscaling option in npreg, npudens, npcdens there is
a discussion on the measure of dispersion
"""
return _compute_min_std_IQR(data)
def _get_class_vars_type(self):
"""Helper method to be able to pass needed vars to _compute_subset.
Needs to be implemented by subclasses."""
pass
def _compute_efficient(self, bw):
"""
Computes the bandwidth by estimating the scaling factor (c)
in n_res resamples of size ``n_sub`` (in `randomize` case), or by
dividing ``nobs`` into as many ``n_sub`` blocks as needed (if
`randomize` is False).
References
----------
See p.9 in socserv.mcmaster.ca/racine/np_faq.pdf
"""
nobs = self.nobs
n_sub = self.n_sub
data = copy.deepcopy(self.data)
n_cvars = self.data_type.count('c')
co = 4 # 2*order of continuous kernel
do = 4 # 2*order of discrete kernel
_, ix_ord, ix_unord = _get_type_pos(self.data_type)
# Define bounds for slicing the data
if self.randomize:
# randomize chooses blocks of size n_sub, independent of nobs
bounds = [None] * self.n_res
else:
bounds = [(i * n_sub, (i+1) * n_sub) for i in range(nobs // n_sub)]
if nobs % n_sub > 0:
bounds.append((nobs - nobs % n_sub, nobs))
n_blocks = self.n_res if self.randomize else len(bounds)
sample_scale = np.empty((n_blocks, self.k_vars))
only_bw = np.empty((n_blocks, self.k_vars))
class_type, class_vars = self._get_class_vars_type()
if has_joblib:
# `res` is a list of tuples (sample_scale_sub, bw_sub)
res = joblib.Parallel(n_jobs=self.n_jobs) \
(joblib.delayed(_compute_subset) \
(class_type, data, bw, co, do, n_cvars, ix_ord, ix_unord, \
n_sub, class_vars, self.randomize, bounds[i]) \
for i in range(n_blocks))
else:
res = []
for i in xrange(n_blocks):
res.append(_compute_subset(class_type, data, bw, co, do,
n_cvars, ix_ord, ix_unord, n_sub,
class_vars, self.randomize,
bounds[i]))
for i in xrange(n_blocks):
sample_scale[i, :] = res[i][0]
only_bw[i, :] = res[i][1]
s = self._compute_dispersion(data)
order_func = np.median if self.return_median else np.mean
m_scale = order_func(sample_scale, axis=0)
# TODO: Check if 1/5 is correct in line below!
bw = m_scale * s * nobs**(-1. / (n_cvars + co))
bw[ix_ord] = m_scale[ix_ord] * nobs**(-2./ (n_cvars + do))
bw[ix_unord] = m_scale[ix_unord] * nobs**(-2./ (n_cvars + do))
if self.return_only_bw:
bw = np.median(only_bw, axis=0)
return bw
def _set_defaults(self, defaults):
"""Sets the default values for the efficient estimation"""
self.n_res = defaults.n_res
self.n_sub = defaults.n_sub
self.randomize = defaults.randomize
self.return_median = defaults.return_median
self.efficient = defaults.efficient
self.return_only_bw = defaults.return_only_bw
self.n_jobs = defaults.n_jobs
def _normal_reference(self):
"""
Returns Scott's normal reference rule of thumb bandwidth parameter.
Notes
-----
See p.13 in [2] for an example and discussion. The formula for the
bandwidth is
.. math:: h = 1.06 \sigma n^{-1/(4+q)}
where ``n`` is the number of observations, ``q`` is the number of
variables and :math:`\sigma` is the standard deviation of each variable.
"""
X = np.std(self.data, axis=0)
return 1.06 * X * self.nobs ** (- 1. / (4 + self.data.shape[1]))
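# Illustrative sketch (not part of the original module): for 400 observations
# of a single variable with sample standard deviation 2.0, the rule gives
# h = 1.06 * 2.0 * 400 ** (-1. / 5) ~= 0.64.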
def _set_bw_bounds(self, bw):
"""
Sets the bandwidth lower bound to effectively zero (1e-10), and the
upper bound for discrete variables to 1.
"""
bw[bw < 0] = 1e-10
_, ix_ord, ix_unord = _get_type_pos(self.data_type)
bw[ix_ord] = np.minimum(bw[ix_ord], 1.)
bw[ix_unord] = np.minimum(bw[ix_unord], 1.)
return bw
def _cv_ml(self):
"""
Returns the cross validation maximum likelihood bandwidth parameter.
Notes
-----
For more details see p.16, 18, 27 in Ref. [1] (see module docstring).
Returns the bandwidth estimate that maximizes the leave-one-out
likelihood. The leave-one-out log likelihood function is:
.. math:: \ln L=\sum_{i=1}^{n}\ln f_{-i}(X_{i})
The leave-one-out kernel estimator of :math:`f_{-i}` is:
.. math:: f_{-i}(X_{i})=\frac{1}{(n-1)h}
\sum_{j=1,j\neq i}K_{h}(X_{i},X_{j})
where :math:`K_{h}` represents the Generalized product kernel
estimator:
.. math:: K_{h}(X_{i},X_{j}) = \prod_{s=1}^{q} h_{s}^{-1}
k\left(\frac{X_{is}-X_{js}}{h_{s}}\right)
"""
# the initial value for the optimization is the normal_reference
h0 = self._normal_reference()
bw = optimize.fmin(self.loo_likelihood, x0=h0, args=(np.log, ),
maxiter=1e3, maxfun=1e3, disp=0, xtol=1e-3)
bw = self._set_bw_bounds(bw) # bound bw if necessary
return bw
def _cv_ls(self):
"""
Returns the cross-validation least squares bandwidth parameter(s).
Notes
-----
For more details see pp. 16, 27 in Ref. [1] (see module docstring).
Returns the value of the bandwidth that maximizes the integrated mean
square error between the estimated and actual distribution. The
integrated mean square error (IMSE) is given by:
.. math:: \int\left[\hat{f}(x)-f(x)\right]^{2}dx
This is the general formula for the IMSE. The IMSE differs for
conditional (``KDEMultivariateConditional``) and unconditional
(``KDEMultivariate``) kernel density estimation.
"""
h0 = self._normal_reference()
bw = optimize.fmin(self.imse, x0=h0, maxiter=1e3, maxfun=1e3, disp=0,
xtol=1e-3)
bw = self._set_bw_bounds(bw) # bound bw if necessary
return bw
def loo_likelihood(self):
raise NotImplementedError
class EstimatorSettings(object):
"""
Object to specify settings for density estimation or regression.
    `EstimatorSettings` has several properties related to how bandwidth
estimation for the `KDEMultivariate`, `KDEMultivariateConditional`,
`KernelReg` and `CensoredKernelReg` classes behaves.
Parameters
----------
efficient: bool, optional
If True, the bandwidth estimation is to be performed
efficiently -- by taking smaller sub-samples and estimating
the scaling factor of each subsample. This is useful for large
samples (nobs >> 300) and/or multiple variables (k_vars > 3).
If False (default), all data is used at the same time.
randomize: bool, optional
If True, the bandwidth estimation is to be performed by
taking `n_res` random resamples (with replacement) of size `n_sub` from
the full sample. If set to False (default), the estimation is
performed by slicing the full sample in sub-samples of size `n_sub` so
that all samples are used once.
n_sub: int, optional
Size of the sub-samples. Default is 50.
n_res: int, optional
The number of random re-samples used to estimate the bandwidth.
Only has an effect if ``randomize == True``. Default value is 25.
return_median: bool, optional
If True (default), the estimator uses the median of all scaling factors
for each sub-sample to estimate the bandwidth of the full sample.
If False, the estimator uses the mean.
return_only_bw: bool, optional
If True, the estimator is to use the bandwidth and not the
scaling factor. This is *not* theoretically justified.
Should be used only for experimenting.
n_jobs : int, optional
The number of jobs to use for parallel estimation with
        ``joblib.Parallel``. Default is -1, which uses all available CPU cores.
See the `joblib documentation
<http://packages.python.org/joblib/parallel.html>`_ for more details.
Examples
--------
>>> settings = EstimatorSettings(randomize=True, n_jobs=3)
>>> k_dens = KDEMultivariate(data, var_type, defaults=settings)
"""
def __init__(self, efficient=False, randomize=False, n_res=25, n_sub=50,
return_median=True, return_only_bw=False, n_jobs=-1):
self.efficient = efficient
self.randomize = randomize
self.n_res = n_res
self.n_sub = n_sub
self.return_median = return_median
self.return_only_bw = return_only_bw # TODO: remove this?
self.n_jobs = n_jobs
class LeaveOneOut(object):
"""
Generator to give leave-one-out views on X.
Parameters
----------
X : array-like
2-D array.
Examples
--------
>>> X = np.random.normal(0, 1, [10,2])
>>> loo = LeaveOneOut(X)
>>> for x in loo:
... print x
Notes
-----
A little lighter weight than sklearn LOO. We don't need test index.
Also passes views on X, not the index.
"""
def __init__(self, X):
self.X = np.asarray(X)
def __iter__(self):
X = self.X
nobs, k_vars = np.shape(X)
for i in xrange(nobs):
index = np.ones(nobs, dtype=np.bool)
index[i] = False
yield X[index, :]
def _get_type_pos(var_type):
ix_cont = np.array([c == 'c' for c in var_type])
ix_ord = np.array([c == 'o' for c in var_type])
ix_unord = np.array([c == 'u' for c in var_type])
return ix_cont, ix_ord, ix_unord
def _adjust_shape(dat, k_vars):
""" Returns an array of shape (nobs, k_vars) for use with `gpke`."""
dat = np.asarray(dat)
if dat.ndim > 2:
dat = np.squeeze(dat)
if dat.ndim == 1 and k_vars > 1: # one obs many vars
nobs = 1
    elif dat.ndim == 1 and k_vars == 1:  # many obs one var
nobs = len(dat)
else:
if np.shape(dat)[0] == k_vars and np.shape(dat)[1] != k_vars:
dat = dat.T
nobs = np.shape(dat)[0] # ndim >1 so many obs many vars
dat = np.reshape(dat, (nobs, k_vars))
return dat
def gpke(bw, data, data_predict, var_type, ckertype='gaussian',
okertype='wangryzin', ukertype='aitchisonaitken', tosum=True):
"""
Returns the non-normalized Generalized Product Kernel Estimator
Parameters
----------
bw: 1-D ndarray
The user-specified bandwidth parameters.
data: 1D or 2-D ndarray
The training data.
data_predict: 1-D ndarray
The evaluation points at which the kernel estimation is performed.
var_type: str, optional
The variable type (continuous, ordered, unordered).
ckertype: str, optional
The kernel used for the continuous variables.
okertype: str, optional
The kernel used for the ordered discrete variables.
ukertype: str, optional
The kernel used for the unordered discrete variables.
tosum : bool, optional
Whether or not to sum the calculated array of densities. Default is
True.
Returns
-------
dens: array-like
The generalized product kernel density estimator.
Notes
-----
The formula for the multivariate kernel estimator for the pdf is:
.. math:: f(x)=\frac{1}{nh_{1}...h_{q}}\sum_{i=1}^
{n}K\left(\frac{X_{i}-x}{h}\right)
where
.. math:: K\left(\frac{X_{i}-x}{h}\right) =
k\left( \frac{X_{i1}-x_{1}}{h_{1}}\right)\times
k\left( \frac{X_{i2}-x_{2}}{h_{2}}\right)\times...\times
k\left(\frac{X_{iq}-x_{q}}{h_{q}}\right)
"""
kertypes = dict(c=ckertype, o=okertype, u=ukertype)
#Kval = []
#for ii, vtype in enumerate(var_type):
# func = kernel_func[kertypes[vtype]]
# Kval.append(func(bw[ii], data[:, ii], data_predict[ii]))
#Kval = np.column_stack(Kval)
Kval = np.empty(data.shape)
for ii, vtype in enumerate(var_type):
func = kernel_func[kertypes[vtype]]
Kval[:, ii] = func(bw[ii], data[:, ii], data_predict[ii])
iscontinuous = np.array([c == 'c' for c in var_type])
dens = Kval.prod(axis=1) / np.prod(bw[iscontinuous])
if tosum:
return dens.sum(axis=0)
else:
return dens
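# Hedged usage sketch for `gpke` (commented out; names are illustrative only):
# for two continuous variables and one unordered discrete variable, evaluating
# the non-normalized product kernel at a single point `x0` of shape (3,)
# against training data `data` of shape (nobs, 3) would look roughly like
#   dens_x0 = gpke(bw, data=data, data_predict=x0, var_type='ccu')
# with `bw` a length-3 bandwidth array; dividing the result by nobs gives the
# usual kernel density estimate at `x0`.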
|
bsd-3-clause
|
EPFL-LCN/neuronaldynamics-exercises
|
neurodynex3/cable_equation/passive_cable.py
|
1
|
6153
|
"""
Implements compartmental model of a passive cable. See Neuronal Dynamics
`Chapter 3 Section 2 <http://neuronaldynamics.epfl.ch/online/Ch3.S2.html>`_
"""
# This file is part of the exercise code repository accompanying
# the book: Neuronal Dynamics (see http://neuronaldynamics.epfl.ch)
# located at http://github.com/EPFL-LCN/neuronaldynamics-exercises.
# This free software: you can redistribute it and/or modify it under
# the terms of the GNU General Public License 2.0 as published by the
# Free Software Foundation. You should have received a copy of the
# GNU General Public License along with the repository. If not,
# see http://www.gnu.org/licenses/.
# Should you reuse and publish the code for your own purposes,
# please cite the book or point to the webpage http://neuronaldynamics.epfl.ch.
# Wulfram Gerstner, Werner M. Kistler, Richard Naud, and Liam Paninski.
# Neuronal Dynamics: From Single Neurons to Networks and Models of Cognition.
# Cambridge University Press, 2014.
import brian2 as b2
from neurodynex3.tools import input_factory
import matplotlib.pyplot as plt
import numpy as np
# integration time step in milliseconds
b2.defaultclock.dt = 0.01 * b2.ms
# DEFAULT morphological and electrical parameters
CABLE_LENGTH = 500. * b2.um # length of dendrite
CABLE_DIAMETER = 2. * b2.um # diameter of dendrite
R_LONGITUDINAL = 0.5 * b2.kohm * b2.mm # Intracellular medium resistance
R_TRANSVERSAL = 1.25 * b2.Mohm * b2.mm ** 2 # cell membrane resistance (->leak current)
E_LEAK = -70. * b2.mV # reversal potential of the leak current (-> resting potential)
CAPACITANCE = 0.8 * b2.uF / b2.cm ** 2 # membrane capacitance
DEFAULT_INPUT_CURRENT = input_factory.get_step_current(2000, 3000, unit_time=b2.us, amplitude=0.2 * b2.namp)
DEFAULT_INPUT_LOCATION = [CABLE_LENGTH / 3] # provide an array of locations
# print("Membrane Timescale = {}".format(R_TRANSVERSAL*CAPACITANCE))
def simulate_passive_cable(current_injection_location=DEFAULT_INPUT_LOCATION, input_current=DEFAULT_INPUT_CURRENT,
length=CABLE_LENGTH, diameter=CABLE_DIAMETER,
r_longitudinal=R_LONGITUDINAL,
r_transversal=R_TRANSVERSAL, e_leak=E_LEAK, initial_voltage=E_LEAK,
capacitance=CAPACITANCE, nr_compartments=200, simulation_time=5 * b2.ms):
"""Builds a multicompartment cable and numerically approximates the cable equation.
Args:
current_injection_location (list): List [] of input locations (Quantity, Length): [123.*b2.um]
input_current (TimedArray): TimedArray of current amplitudes. One column per current_injection_location.
length (Quantity): Length of the cable: 0.8*b2.mm
diameter (Quantity): Diameter of the cable: 0.2*b2.um
r_longitudinal (Quantity): The longitudinal (axial) resistance of the cable: 0.5*b2.kohm*b2.mm
r_transversal (Quantity): The transversal resistance (=membrane resistance): 1.25*b2.Mohm*b2.mm**2
e_leak (Quantity): The reversal potential of the leak current (=resting potential): -70.*b2.mV
initial_voltage (Quantity): Value of the potential at t=0: -70.*b2.mV
capacitance (Quantity): Membrane capacitance: 0.8*b2.uF/b2.cm**2
nr_compartments (int): Number of compartments. Spatial discretization: 200
simulation_time (Quantity): Time for which the dynamics are simulated: 5*b2.ms
Returns:
(StateMonitor, SpatialNeuron): The state monitor contains the membrane voltage in a
Time x Location matrix. The SpatialNeuron object specifies the simulated neuron model
and gives access to the morphology. You may want to use those objects for
spatial indexing: myVoltageStateMonitor[mySpatialNeuron.morphology[0.123*b2.um]].v
"""
assert isinstance(input_current, b2.TimedArray), "input_current is not of type TimedArray"
assert input_current.values.shape[1] == len(current_injection_location),\
"number of injection_locations does not match nr of input currents"
cable_morphology = b2.Cylinder(diameter=diameter, length=length, n=nr_compartments)
# Im is transmembrane current
# Iext is injected current at a specific position on dendrite
EL = e_leak
RT = r_transversal
eqs = """
Iext = current(t, location_index): amp (point current)
location_index : integer (constant)
Im = (EL-v)/RT : amp/meter**2
"""
cable_model = b2.SpatialNeuron(morphology=cable_morphology, model=eqs, Cm=capacitance, Ri=r_longitudinal)
monitor_v = b2.StateMonitor(cable_model, "v", record=True)
# inject all input currents at the specified location:
nr_input_locations = len(current_injection_location)
input_current_0 = np.insert(input_current.values, 0, 0., axis=1) * b2.amp # insert default current: 0. [amp]
current = b2.TimedArray(input_current_0, dt=input_current.dt * b2.second)
for current_index in range(nr_input_locations):
insert_location = current_injection_location[current_index]
compartment_index = int(np.floor(insert_location / (length / nr_compartments)))
# next line: current_index+1 because 0 is the default current 0Amp
cable_model.location_index[compartment_index] = current_index + 1
    # set initial values and run the simulation for `simulation_time`
cable_model.v = initial_voltage
b2.run(simulation_time)
return monitor_v, cable_model
def getting_started():
"""A simple code example to get started.
"""
current = input_factory.get_step_current(500, 510, unit_time=b2.us, amplitude=3. * b2.namp)
voltage_monitor, cable_model = simulate_passive_cable(
length=0.5 * b2.mm, current_injection_location=[0.1 * b2.mm], input_current=current,
nr_compartments=100, simulation_time=2 * b2.ms)
# provide a minimal plot
plt.figure()
plt.imshow(voltage_monitor.v / b2.volt)
plt.colorbar(label="voltage")
plt.xlabel("time index")
plt.ylabel("location index")
plt.title("vm at (t,x), raw data voltage_monitor.v")
plt.show()
if __name__ == "__main__":
getting_started()
|
gpl-2.0
|
johnh2o2/cuvarbase
|
docs/source/plots/bls_example.py
|
1
|
3320
|
import cuvarbase.bls as bls
import numpy as np
import matplotlib.pyplot as plt
def phase(t, freq, phi0=0.):
phi = (t * freq - phi0)
phi -= np.floor(phi)
return phi
def transit_model(t, freq, y0=0.0, delta=1., q=0.01, phi0=0.5):
phi = phase(t, freq, phi0=phi0)
transit = phi < q
y = y0 * np.ones_like(t)
y[transit] -= delta
return y
def data(ndata=100, baseline=1, freq=10, sigma=1., **kwargs):
t = baseline * np.sort(np.random.rand(ndata))
y = transit_model(t, freq, **kwargs)
dy = sigma * np.ones_like(t)
y += dy * np.random.randn(len(t))
return t, y, dy
def plot_bls_model(ax, y0, delta, q, phi0, **kwargs):
    phi_plot = np.linspace(0, 1, int(50. / q))
y_plot = transit_model(phi_plot, 1., y0=y0,
delta=delta, q=q, phi0=phi0)
ax.plot(phi_plot, y_plot, **kwargs)
def plot_bls_sol(ax, t, y, dy, freq, q, phi0, **kwargs):
w = np.power(dy, -2)
w /= sum(w)
phi = phase(t, freq, phi0=phi0)
transit = phi < q
def ybar(mask):
return np.dot(w[mask], y[mask]) / sum(w[mask])
y0 = ybar(~transit)
delta = y0 - ybar(transit)
ax.scatter((phi[~transit] + phi0) % 1.0, y[~transit],
c='k', s=1, alpha=0.5)
ax.scatter((phi[transit] + phi0) % 1.0, y[transit],
c='r', s=1, alpha=0.5)
plot_bls_model(ax, y0, delta, q, phi0, **kwargs)
ax.set_xlim(0, 1)
ax.set_xlabel('$\phi$ ($f = %.3f$)' % (freq))
ax.set_ylabel('$y$')
# set the transit parameters
transit_kwargs = dict(freq=0.1,
q=0.1,
y0=10.,
sigma=0.002,
delta=0.05,
phi0=0.5)
# generate data with a transit
t, y, dy = data(ndata=300,
baseline=365.,
**transit_kwargs)
# set up search parameters
search_params = dict(qmin=1e-2,
qmax=0.5,
# The logarithmic spacing of q
dlogq=0.1,
# Number of overlapping phase bins
# to use for finding the best phi0
noverlap=3)
# derive baseline from the data for consistency
baseline = max(t) - min(t)
# df ~ qmin / baseline
df = search_params['qmin'] / baseline
fmin = 2. / baseline
fmax = 2.
nf = int(np.ceil((fmax - fmin) / df))
freqs = fmin + df * np.arange(nf)
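# Hedged sanity check of the grid above (approximate numbers only): with a
# baseline of roughly 365 days and qmin = 0.01, df is about 2.7e-5 / day,
# fmin about 5.5e-3 / day and fmax = 2 / day, giving on the order of 7e4
# trial frequencies.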
bls_power, sols = bls.eebls_gpu(t, y, dy, freqs,
**search_params)
# best BLS fit
q_best, phi0_best = sols[np.argmax(bls_power)]
f_best = freqs[np.argmax(bls_power)]
# Plot results
f, (ax_bls, ax_true, ax_best) = plt.subplots(1, 3, figsize=(9, 3))
# Periodogram
ax_bls.plot(freqs, bls_power)
ax_bls.axvline(transit_kwargs['freq'],
ls=':', color='k', label="$f_0$")
ax_bls.axvline(f_best, ls=':', color='r',
label='BLS $f_{\\rm best}$')
ax_bls.set_xlabel('freq.')
ax_bls.set_ylabel('BLS power')
# True solution
plot_bls_sol(ax_true, t, y, dy,
transit_kwargs['freq'],
transit_kwargs['q'],
transit_kwargs['phi0'])
# Best-fit solution
plot_bls_sol(ax_best, t, y, dy,
f_best, q_best, phi0_best)
ax_true.set_title("True parameters")
ax_best.set_title("Best BLS parameters")
f.tight_layout()
plt.show()
|
gpl-3.0
|
amueller/pystruct
|
examples/multiclass_comparision_svm_struct.py
|
4
|
6536
|
"""
=================================
Comparing PyStruct and SVM-Struct
=================================
This example compares the performance of pystruct and SVM^struct on a
multi-class problem.
For the example to work, you need to install SVM^multiclass and
set the path in this file.
We are not using SVM^python, as that would be much slower, and we would
need to implement our own model in a SVM^python compatible way.
Instead, we just call the SVM^multiclass binary.
This comparison is only meaningful in the sense that both libraries
use general structured prediction solvers to solve the task.
The specialized implementation of the Crammer-Singer SVM in LibLinear
is much faster than either one.
For SVM^struct, the plot shows CPU time as reported by SVM^struct.
For pystruct, the plot shows the time spent in the fit function
according to time.clock.
Both models have disabled constraint caching. With constraint caching,
SVM^struct is somewhat faster, but PyStruct doesn't gain anything.
"""
import tempfile
import os
from time import clock
import numpy as np
from sklearn.datasets import dump_svmlight_file
from sklearn.datasets import fetch_mldata, load_iris, load_digits
from sklearn.metrics import accuracy_score
from sklearn.cross_validation import train_test_split
import matplotlib.pyplot as plt
from pystruct.models import MultiClassClf
from pystruct.learners import OneSlackSSVM
# please set the path to the svm-struct multiclass binaries here
svmstruct_path = "/home/user/amueller/tools/svm_multiclass/"
class MultiSVM():
"""scikit-learn compatible interface for SVM^multi.
Dumps the data to a file and calls the binary.
"""
def __init__(self, C=1.):
self.C = C
def fit(self, X, y):
self.model_file = tempfile.mktemp(suffix='.svm')
train_data_file = tempfile.mktemp(suffix='.svm_dat')
dump_svmlight_file(X, y + 1, train_data_file, zero_based=False)
C = self.C * 100. * len(X)
svmstruct_process = os.popen(svmstruct_path
+ "svm_multiclass_learn -w 3 -c %f %s %s"
% (C, train_data_file, self.model_file))
self.output_ = svmstruct_process.read().split("\n")
self.runtime_ = float(self.output_[-4].split(":")[1])
def _predict(self, X, y=None):
if y is None:
y = np.ones(len(X))
train_data_file = tempfile.mktemp(suffix='.svm_dat')
dump_svmlight_file(X, y, train_data_file, zero_based=False)
prediction_file = tempfile.mktemp(suffix='.out')
os.system(svmstruct_path + "svm_multiclass_classify %s %s %s"
% (train_data_file, self.model_file, prediction_file))
return np.loadtxt(prediction_file)
def predict(self, X):
return self._predict(X)[:, 0] - 1
def score(self, X, y):
y_pred = self.predict(X)
return accuracy_score(y, y_pred)
def decision_function(self, X):
return self._predict(X)[:, 1:]
def eval_on_data(X_train, y_train, X_test, y_test, svm, Cs):
# evaluate a single svm using varying C
accuracies, times = [], []
for C in Cs:
svm.C = C
start = clock()
svm.fit(X_train, y_train)
if hasattr(svm, "runtime_"):
times.append(svm.runtime_)
else:
times.append(clock() - start)
accuracies.append(accuracy_score(y_test, svm.predict(X_test)))
return accuracies, times
def plot_curves(curve_svmstruct, curve_pystruct, Cs, title="", filename=""):
# plot nice graphs comparing a value for the two implementations
plt.figure(figsize=(7, 4))
plt.plot(curve_svmstruct, "--", label="SVM^struct", c='red', linewidth=3)
plt.plot(curve_pystruct, "-.", label="PyStruct", c='blue', linewidth=3)
plt.xlabel("C")
plt.xticks(np.arange(len(Cs)), Cs)
plt.legend(loc='best')
plt.title(title)
if filename:
plt.savefig("%s" % filename, bbox_inches='tight')
def do_comparison(X_train, y_train, X_test, y_test, dataset):
# evaluate both svms on a given datasets, generate plots
Cs = 10. ** np.arange(-4, 1)
multisvm = MultiSVM()
svm = OneSlackSSVM(MultiClassClf(), tol=0.01)
accs_pystruct, times_pystruct = eval_on_data(X_train, y_train, X_test,
y_test, svm, Cs=Cs)
accs_svmstruct, times_svmstruct = eval_on_data(X_train, y_train,
X_test, y_test,
multisvm, Cs=Cs)
plot_curves(times_svmstruct, times_pystruct, Cs=Cs,
title="learning time (s) %s" % dataset,
filename="times_%s.pdf" % dataset)
plot_curves(accs_svmstruct, accs_pystruct, Cs=Cs,
title="accuracy %s" % dataset,
filename="accs_%s.pdf" % dataset)
def main():
if not os.path.exists(svmstruct_path + "svm_multiclass_learn"):
print("Please install SVM^multi and set the svmstruct_path variable "
"to run this example.")
return
datasets = ['iris', 'digits']
#datasets = ['iris', 'digits', 'usps', 'mnist']
# IRIS
if 'iris' in datasets:
iris = load_iris()
X, y = iris.data, iris.target
X_train, X_test, y_train, y_test = train_test_split(X, y,
random_state=0)
do_comparison(X_train, y_train, X_test, y_test, "iris")
# DIGITS
if 'digits' in datasets:
digits = load_digits()
X, y = digits.data / 16., digits.target
X_train, X_test, y_train, y_test = train_test_split(X, y,
random_state=0)
do_comparison(X_train, y_train, X_test, y_test, "digits")
# USPS
if 'usps' in datasets:
digits = fetch_mldata("USPS")
X, y = digits.data, digits.target.astype(np.int) - 1
X_train, X_test, y_train, y_test = train_test_split(X, y,
random_state=0)
do_comparison(X_train, y_train, X_test, y_test, "USPS")
# MNIST
if 'mnist' in datasets:
digits = fetch_mldata("MNIST original")
X, y = digits.data / 255., digits.target.astype(np.int)
X_train, X_test = X[:60000], X[60000:]
y_train, y_test = y[:60000], y[60000:]
do_comparison(X_train, y_train, X_test, y_test, "MNIST")
plt.show()
if __name__ == "__main__":
main()
|
bsd-2-clause
|
peret/visualize-bovw
|
svm_visualization.py
|
1
|
1912
|
from visualization import Visualization
from sklearn.svm import LinearSVC
from vcd import VisualConceptDetection
import numpy as np
from datamanagers.CaltechManager import CaltechManager
from itertools import izip
import sys
import os
def get_image_title(prediction, real):
"""Returns a string that describes whether the prediction
is a true positive, false positive, etc. and with what
confidence the prediction is made.
Args:
        prediction: Decision function value (signed distance from the
            separating hyperplane) for the sample.
        real: The corresponding correct label (0 or 1).
"""
p = 1 if prediction > 0 else 0
result = ""
result += "True " if p == real else "False "
result += "positive" if p == 1 else "negative"
result += " - distance: %.5f" % prediction
return result
def get_svm_importances(coef):
"""Normalize the SVM weights."""
factor = 1.0 / np.linalg.norm(coef)
return (coef * factor).ravel()
if __name__ == "__main__":
svm = LinearSVC(C=0.1)
category = "Faces"
dataset = "all"
datamanager = CaltechManager()
datamanager.PATHS["RESULTS"] = os.path.join(datamanager.PATHS["BASE"], "results_Faces_LinearSVC_normalized")
vcd = VisualConceptDetection(svm, datamanager)
clf = vcd.load_object("Classifier", category)
importances = get_svm_importances(clf.coef_)
sample_matrix = vcd.datamanager.build_sample_matrix(dataset, category)
class_vector = vcd.datamanager.build_class_vector(dataset, category)
pred = clf.decision_function(sample_matrix)
del clf
image_titles = [get_image_title(prediction, real) for prediction, real in
izip(pred, class_vector)]
del class_vector
del sample_matrix
img_names = [f for f in vcd.datamanager.get_image_names(dataset, category)]
vis = Visualization(datamanager)
vis.visualize_images(img_names, importances, image_titles)
|
gpl-2.0
|
Remper/sociallink
|
align-train/pairwise_models/model.py
|
1
|
8491
|
import json
from os import path
from sklearn.utils import shuffle
import tensorflow as tf
import time
import numpy as np
class Model:
def __init__(self, name):
self._name = name
self._desc = name
self._graph = None
self._session = None
self._saver = None
self._ready = False
self._device = None
def desc(self, desc):
self._desc = desc
return self
def device(self, device):
self._device = device
return self
def _definition(self):
raise NotImplementedError("Definition function must be implemented for the model")
def _init(self):
if self._session is not None:
return
self._graph = self._definition()
if self._saver is None:
raise NotImplementedError("Definition wasn't properly implemented: missing saver")
gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=0.45, visible_device_list=self._device)
self._session = tf.Session(graph=self._graph, config=tf.ConfigProto(gpu_options=gpu_options))
def train(self, train_prod, eval_prod=None):
raise NotImplementedError("Training function has to be defined")
def predict(self, features):
raise NotImplementedError("Predict function has to be defined")
def restore_from_file(self, filename):
self._init()
self._saver.restore(self._session, filename+'.cpkt')
self._ready = True
def _check_if_ready(self):
if not self._ready:
raise ValueError("Model isn't ready for prediction yet, train it or restore from file first")
def save_to_file(self, filename):
self._check_if_ready()
print("Saving model")
timestamp = time.time()
self._saver.save(sess=self._session, save_path=path.join(filename,'model.cpkt'))
print("Done in %.2fs" % (time.time() - timestamp))
@staticmethod
def weight_variable(shape):
initial = tf.truncated_normal(shape, stddev=0.1)
return tf.Variable(initial)
@staticmethod
def bias_variable(shape):
initial = tf.constant(0.1, shape=shape)
return tf.Variable(initial)
class BatchProducer:
def __init__(self, filename):
self.feature_space = 0
self.labels = {}
self.set_size = 0
self.filename = filename
self.randomisation = True
print("Figuring out dataset metadata")
timestamp = time.time()
self.feature_space, self.labels, self.set_size = self._get_dataset_metadata()
print("Done in %.2fs" % (time.time() - timestamp))
self._print_stats()
def random_off(self):
print("Disabling randomisation for BatchProducer")
self.randomisation = False
def produce(self, batch_size: int) -> (np.ndarray, np.ndarray, int):
"""
Produces full batch ready to be input in NN
"""
        raise NotImplementedError("Produce function has to be defined for the batch producer")
def _print_stats(self):
print("Feature space: %s. Classes: %d. Training set size: %d (%s)"
% (", ".join(["%s(%d)" % (id, length) for id, length in self.feature_space.items()]),
len(self.labels), self.set_size,
", ".join(["%d: %d" % (id, count) for id, count in self.labels.items()])))
def _train_set_reader(self):
raise NotImplementedError("You should implement a reader function for the batch producer")
def _get_dataset_metadata(self):
"""
Figures out amount of labels and features
"""
feature_space = dict()
labels = dict()
set_size = 0
for raw_label, features in self._train_set_reader():
if raw_label not in labels:
labels[raw_label] = 0
labels[raw_label] += 1
for subspace in features:
if subspace not in feature_space:
feature_space[subspace] = len(features[subspace])
continue
if feature_space[subspace] == len(features[subspace]):
continue
raise RuntimeError("Inconsistent feature sets in the dataset (subspace: %s, recorded: %d, found: %d)"
% (subspace, feature_space[subspace], features[subspace]))
set_size += 1
return feature_space, labels, set_size
class JSONBatchProducer(BatchProducer):
def __init__(self, filename):
super().__init__(filename)
def produce(self, batch_size: int) -> (dict, np.ndarray, int):
"""
Produces full batch ready to be input in NN
"""
labels = list()
batch = dict()
reader = self._train_set_reader()
cur_sample = 0
while True:
try:
while len(labels) < batch_size:
raw_label, features = reader.__next__()
label = np.zeros(len(self.labels), dtype=np.float32)
label[raw_label] = 1.0
labels.append(label)
self._append(batch, features)
yield self._stack_and_clean(batch), np.vstack(labels), cur_sample
                cur_sample += len(labels)
labels = list()
except:
break
return self._stack_and_clean(batch), np.vstack(labels), cur_sample
def _stack_and_clean(self, bc: dict) -> dict:
stacked = dict()
for subspace in bc:
stacked[subspace] = np.vstack(bc[subspace])
bc[subspace] = []
return stacked
def _append(self, bc: dict, vector: dict) -> None:
for subspace in vector:
if subspace not in bc:
bc[subspace] = []
bc[subspace].append(vector[subspace])
def _train_set_reader(self) -> (int, dict):
"""
Reads training set one sample at the time
"""
with open(self.filename, 'r') as reader:
for line in reader:
row = line.rstrip().split('\t')
features = json.loads(row[1])
yield int(row[0]), features
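    # Hedged note on the expected file format (inferred from the reader above,
    # not a documented spec): each line appears to be a tab-separated pair of
    # an integer label and a JSON object mapping subspace names to fixed-length
    # feature lists, e.g. roughly
    #   1\t{"text": [0.12, -0.40, ...], "graph": [3, 0, 1]}
    # where the subspace names "text" and "graph" are purely illustrative.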
class PreloadedJSONBatchProducer(JSONBatchProducer):
def __init__(self, filename):
super().__init__(filename)
print("Preloading dataset")
timestamp = time.time()
self.X, self.Y = self._load_dataset()
print("Done in %.2fs" % (time.time() - timestamp))
def index(self, batch, fr, to=None):
result = dict()
for subspace in batch:
if to is not None:
result[subspace] = batch[subspace][fr:to]
else:
result[subspace] = batch[subspace][fr:]
return result
def resample(self) -> (dict, np.ndarray):
keys = self.X.keys()
arrays = []
for key in keys:
arrays.append(self.X[key])
arrays.append(self.Y)
arrays = shuffle(*arrays)
order = 0
X = dict()
for key in keys:
X[key] = arrays[order]
order += 1
Y = arrays[order]
return X, Y
def produce(self, batch_size: int) -> (dict, np.ndarray, int):
"""
Produces full batch ready to be input in NN
"""
if self.randomisation:
X, Y = self.resample()
else:
X = self.X
Y = self.Y
size = Y.shape[0]
pointer = 0
while pointer + batch_size < size:
yield self.index(X, pointer, pointer + batch_size), Y[pointer:pointer + batch_size], pointer
pointer += batch_size
yield self.index(X, pointer), Y[pointer:], pointer
def _load_dataset(self):
"""
Reads training set one by one
"""
X = dict()
Y = []
X_batch = dict()
Y_batch = []
cutoff = 100000
for raw_label, features in self._train_set_reader():
label = np.zeros(len(self.labels), dtype=np.float32)
label[raw_label] = 1.0
self._append(X_batch, features)
Y_batch.append(label)
            if len(Y_batch) >= cutoff:
                self._append(X, self._stack_and_clean(X_batch))
                Y.append(np.vstack(Y_batch))
                X_batch = dict()
                Y_batch = []
if len(X_batch) > 0:
self._append(X, self._stack_and_clean(X_batch))
Y.append(np.vstack(Y_batch))
return self._stack_and_clean(X), np.vstack(Y)
|
apache-2.0
|
ceholden/yatsm
|
bench/benchmarks/algorithms/ccdc.py
|
3
|
6321
|
import inspect
import os
import numpy as np
import sklearn.linear_model
from yatsm.algorithms import CCDCesque
from ..bench_utils.example_timeseries import PixelTimeseries
n = 50
# Hack for goof up in API previous to v0.6.0
def version_kwargs(d):
""" Fix API calls for kwargs dict ``d`` that should have key ``estimator``
"""
argspec = inspect.getargspec(CCDCesque.__init__)
if 'estimator' in argspec.args:
# Spec updated to estimator={object': ..., 'fit': {}}
idx = [i for i, arg in enumerate(argspec.args)
if arg == 'estimator'][0] - 1
if isinstance(argspec.defaults[idx], dict):
return d
else:
d['estimator'] = {'object': d['estimator'], 'fit': {}}
elif 'lm' in argspec.args:
new_key, old_key = 'lm', 'estimator'
d[new_key] = d.pop(old_key)
return d
else:
raise KeyError('Neither "lm" nor "estimator" are keys in '
'CCDCesque.__init__')
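# Hedged illustration of what `version_kwargs` does (values illustrative): a
# kwargs dict containing
#   {'estimator': sklearn.linear_model.Lasso(alpha=[20]), ...}
# is rewritten for the newer API to
#   {'estimator': {'object': sklearn.linear_model.Lasso(alpha=[20]), 'fit': {}}, ...}
# while for the pre-v0.6.0 API the 'estimator' key is simply renamed to 'lm'.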
class CCDCesquePixel263(PixelTimeseries):
""" Benchmark CCDC-esque algorithm on a single pixel with 263 observations
"""
def setup_cache(self):
super(CCDCesquePixel263, self).setup_cache()
kwargs = {
'test_indices': np.array([2, 3, 4, 5]),
'estimator': sklearn.linear_model.Lasso(alpha=[20]),
'consecutive': 5,
'threshold': 4,
'min_obs': 24,
'min_rmse': 100,
'retrain_time': 365.25,
'screening': 'RLM',
'screening_crit': 400.0,
'green_band': 1,
'swir1_band': 4,
'remove_noise': False,
'dynamic_rmse': False,
'slope_test': False,
'idx_slope': 1
}
return {'X': self.X, 'Y': self.Y, 'dates': self.dates,
'kwargs': kwargs}
def time_ccdcesque1(self, setup):
""" Bench with 'defaults' defined in setup with most tests turned off
"""
kwargs = version_kwargs(setup['kwargs'])
for i in range(n):
model = CCDCesque(**kwargs)
model.fit(setup['X'], setup['Y'], setup['dates'])
def time_ccdcesque2(self, setup):
""" Bench with remove_noise turned on
"""
kwargs = version_kwargs(setup['kwargs'])
kwargs.update({'remove_noise': True})
for i in range(n):
model = CCDCesque(**kwargs)
model.fit(setup['X'], setup['Y'], setup['dates'])
def time_ccdcesque3(self, setup):
""" Bench with remove_noise, dynamic_rmse turned on
"""
kwargs = version_kwargs(setup['kwargs'])
kwargs.update({'remove_noise': True,
'dynamic_rmse': True})
for i in range(n):
model = CCDCesque(**kwargs)
model.fit(setup['X'], setup['Y'], setup['dates'])
def time_ccdcesque4(self, setup):
""" Bench with remove_noise, dynamic_rmse, slope_test turned on
"""
kwargs = version_kwargs(setup['kwargs'])
kwargs.update({'remove_noise': True,
'dynamic_rmse': True,
'slope_test': True})
for i in range(n):
model = CCDCesque(**kwargs)
model.fit(setup['X'], setup['Y'], setup['dates'])
class CCDCesqueLine(object):
""" Benchmark CCDC-esque algorithm on a line with TODO observations
"""
example_data = os.path.join(
os.path.dirname(__file__),
'../../../tests/data/p013r030_r50_n423_b8.npz')
timeout = 360
def setup_cache(self):
dat = np.load(self.example_data)
X = dat['X']
Y = dat['Y']
dates = dat['dates']
kwargs = {
'test_indices': np.array([2, 3, 4, 5]),
'estimator': sklearn.linear_model.Lasso(alpha=[20]),
'consecutive': 5,
'threshold': 4,
'min_obs': 24,
'min_rmse': 100,
'retrain_time': 365.25,
'screening': 'RLM',
'screening_crit': 400.0,
'green_band': 1,
'swir1_band': 4,
'remove_noise': False,
'dynamic_rmse': False,
'slope_test': False,
'idx_slope': 1
}
return {'X': X, 'Y': Y, 'dates': dates, 'kwargs': kwargs}
def time_ccdcesque1(self, setup):
""" Bench with 'defaults' defined in setup with most tests turned off
"""
kwargs = version_kwargs(setup['kwargs'])
model = CCDCesque(**kwargs)
for col in range(setup['Y'].shape[-1]):
_Y, _X, _dates = setup['Y'][..., col], setup['X'], setup['dates']
mask = np.in1d(_Y[-1, :], [0, 1])
model.fit(_X[mask, :], _Y[:, mask], _dates[mask])
def time_ccdcesque2(self, setup):
""" Bench with remove_noise turned on
"""
kwargs = version_kwargs(setup['kwargs'])
kwargs.update({'remove_noise': True})
model = CCDCesque(**kwargs)
for col in range(setup['Y'].shape[-1]):
_Y, _X, _dates = setup['Y'][..., col], setup['X'], setup['dates']
mask = np.in1d(_Y[-1, :], [0, 1])
model.fit(_X[mask, :], _Y[:, mask], _dates[mask])
def time_ccdcesque3(self, setup):
""" Bench with remove_noise, dynamic_rmse turned on
"""
kwargs = version_kwargs(setup['kwargs'])
kwargs.update({'remove_noise': True,
'dynamic_rmse': True})
model = CCDCesque(**kwargs)
for col in range(setup['Y'].shape[-1]):
_Y, _X, _dates = setup['Y'][..., col], setup['X'], setup['dates']
mask = np.in1d(_Y[-1, :], [0, 1])
model.fit(_X[mask, :], _Y[:, mask], _dates[mask])
def time_ccdcesque4(self, setup):
""" Bench with remove_noise, dynamic_rmse, slope_test turned on
"""
kwargs = version_kwargs(setup['kwargs'])
kwargs.update({'remove_noise': True,
'dynamic_rmse': True,
'slope_test': True})
model = CCDCesque(**kwargs)
for col in range(setup['Y'].shape[-1]):
_Y, _X, _dates = setup['Y'][..., col], setup['X'], setup['dates']
mask = np.in1d(_Y[-1, :], [0, 1])
model.fit(_X[mask, :], _Y[:, mask], _dates[mask])
|
mit
|
ocefpaf/iris
|
lib/iris/tests/test_pandas.py
|
5
|
18054
|
# Copyright Iris contributors
#
# This file is part of Iris and is released under the LGPL license.
# See COPYING and COPYING.LESSER in the root of the repository for full
# licensing details.
# import iris tests first so that some things can be initialised before
# importing anything else
import iris.tests as tests
import copy
import datetime
import unittest
import cf_units
import cftime
import matplotlib.units
import numpy as np
# Importing pandas has the side-effect of messing with the formatters
# used by matplotlib for handling dates.
default_units_registry = copy.copy(matplotlib.units.registry)
try:
import pandas
except ImportError:
# Disable all these tests if pandas is not installed.
pandas = None
matplotlib.units.registry = default_units_registry
skip_pandas = unittest.skipIf(
pandas is None, 'Test(s) require "pandas", ' "which is not available."
)
if pandas is not None:
from iris.coords import DimCoord
from iris.cube import Cube
import iris.pandas
@skip_pandas
class TestAsSeries(tests.IrisTest):
"""Test conversion of 1D cubes to Pandas using as_series()"""
def test_no_dim_coord(self):
cube = Cube(np.array([0, 1, 2, 3, 4]), long_name="foo")
series = iris.pandas.as_series(cube)
expected_index = np.array([0, 1, 2, 3, 4])
self.assertArrayEqual(series, cube.data)
self.assertArrayEqual(series.index, expected_index)
def test_simple(self):
cube = Cube(np.array([0, 1, 2, 3, 4.4]), long_name="foo")
dim_coord = DimCoord([5, 6, 7, 8, 9], long_name="bar")
cube.add_dim_coord(dim_coord, 0)
expected_index = np.array([5, 6, 7, 8, 9])
series = iris.pandas.as_series(cube)
self.assertArrayEqual(series, cube.data)
self.assertArrayEqual(series.index, expected_index)
def test_masked(self):
data = np.ma.MaskedArray([0, 1, 2, 3, 4.4], mask=[0, 1, 0, 1, 0])
cube = Cube(data, long_name="foo")
series = iris.pandas.as_series(cube)
self.assertArrayEqual(series, cube.data.astype("f").filled(np.nan))
def test_time_gregorian(self):
cube = Cube(np.array([0, 1, 2, 3, 4]), long_name="ts")
time_coord = DimCoord(
[0, 100.1, 200.2, 300.3, 400.4],
long_name="time",
units="days since 2000-01-01 00:00",
)
cube.add_dim_coord(time_coord, 0)
expected_index = [
datetime.datetime(2000, 1, 1, 0, 0),
datetime.datetime(2000, 4, 10, 2, 24),
datetime.datetime(2000, 7, 19, 4, 48),
datetime.datetime(2000, 10, 27, 7, 12),
datetime.datetime(2001, 2, 4, 9, 36),
]
series = iris.pandas.as_series(cube)
self.assertArrayEqual(series, cube.data)
self.assertListEqual(list(series.index), expected_index)
def test_time_360(self):
cube = Cube(np.array([0, 1, 2, 3, 4]), long_name="ts")
time_unit = cf_units.Unit(
"days since 2000-01-01 00:00", calendar=cf_units.CALENDAR_360_DAY
)
time_coord = DimCoord(
[0, 100.1, 200.2, 300.3, 400.4], long_name="time", units=time_unit
)
cube.add_dim_coord(time_coord, 0)
expected_index = [
cftime.Datetime360Day(2000, 1, 1, 0, 0),
cftime.Datetime360Day(2000, 4, 11, 2, 24),
cftime.Datetime360Day(2000, 7, 21, 4, 48),
cftime.Datetime360Day(2000, 11, 1, 7, 12),
cftime.Datetime360Day(2001, 2, 11, 9, 36),
]
series = iris.pandas.as_series(cube)
self.assertArrayEqual(series, cube.data)
self.assertArrayEqual(series.index, expected_index)
def test_copy_true(self):
cube = Cube(np.array([0, 1, 2, 3, 4]), long_name="foo")
series = iris.pandas.as_series(cube)
series[0] = 99
self.assertEqual(cube.data[0], 0)
def test_copy_int32_false(self):
cube = Cube(np.array([0, 1, 2, 3, 4], dtype=np.int32), long_name="foo")
series = iris.pandas.as_series(cube, copy=False)
series[0] = 99
self.assertEqual(cube.data[0], 99)
def test_copy_int64_false(self):
        cube = Cube(np.array([0, 1, 2, 3, 4], dtype=np.int64), long_name="foo")
series = iris.pandas.as_series(cube, copy=False)
series[0] = 99
self.assertEqual(cube.data[0], 99)
def test_copy_float_false(self):
cube = Cube(np.array([0, 1, 2, 3.3, 4]), long_name="foo")
series = iris.pandas.as_series(cube, copy=False)
series[0] = 99
self.assertEqual(cube.data[0], 99)
def test_copy_masked_true(self):
data = np.ma.MaskedArray([0, 1, 2, 3, 4], mask=[0, 1, 0, 1, 0])
cube = Cube(data, long_name="foo")
series = iris.pandas.as_series(cube)
series[0] = 99
self.assertEqual(cube.data[0], 0)
def test_copy_masked_false(self):
data = np.ma.MaskedArray([0, 1, 2, 3, 4], mask=[0, 1, 0, 1, 0])
cube = Cube(data, long_name="foo")
with self.assertRaises(ValueError):
_ = iris.pandas.as_series(cube, copy=False)
@skip_pandas
class TestAsDataFrame(tests.IrisTest):
"""Test conversion of 2D cubes to Pandas using as_data_frame()"""
def test_no_dim_coords(self):
cube = Cube(
np.array([[0, 1, 2, 3, 4], [5, 6, 7, 8, 9]]), long_name="foo"
)
expected_index = [0, 1]
expected_columns = [0, 1, 2, 3, 4]
data_frame = iris.pandas.as_data_frame(cube)
self.assertArrayEqual(data_frame, cube.data)
self.assertArrayEqual(data_frame.index, expected_index)
self.assertArrayEqual(data_frame.columns, expected_columns)
def test_no_x_coord(self):
cube = Cube(
np.array([[0, 1, 2, 3, 4], [5, 6, 7, 8, 9]]), long_name="foo"
)
y_coord = DimCoord([10, 11], long_name="bar")
cube.add_dim_coord(y_coord, 0)
expected_index = [10, 11]
expected_columns = [0, 1, 2, 3, 4]
data_frame = iris.pandas.as_data_frame(cube)
self.assertArrayEqual(data_frame, cube.data)
self.assertArrayEqual(data_frame.index, expected_index)
self.assertArrayEqual(data_frame.columns, expected_columns)
def test_no_y_coord(self):
cube = Cube(
np.array([[0, 1, 2, 3, 4], [5, 6, 7, 8, 9]]), long_name="foo"
)
x_coord = DimCoord([10, 11, 12, 13, 14], long_name="bar")
cube.add_dim_coord(x_coord, 1)
expected_index = [0, 1]
expected_columns = [10, 11, 12, 13, 14]
data_frame = iris.pandas.as_data_frame(cube)
self.assertArrayEqual(data_frame, cube.data)
self.assertArrayEqual(data_frame.index, expected_index)
self.assertArrayEqual(data_frame.columns, expected_columns)
def test_simple(self):
cube = Cube(
np.array([[0, 1, 2, 3, 4], [5, 6, 7, 8, 9]]), long_name="foo"
)
x_coord = DimCoord([10, 11, 12, 13, 14], long_name="bar")
y_coord = DimCoord([15, 16], long_name="milk")
cube.add_dim_coord(x_coord, 1)
cube.add_dim_coord(y_coord, 0)
expected_index = [15, 16]
expected_columns = [10, 11, 12, 13, 14]
data_frame = iris.pandas.as_data_frame(cube)
self.assertArrayEqual(data_frame, cube.data)
self.assertArrayEqual(data_frame.index, expected_index)
self.assertArrayEqual(data_frame.columns, expected_columns)
def test_masked(self):
data = np.ma.MaskedArray(
[[0, 1, 2, 3, 4.4], [5, 6, 7, 8, 9]],
mask=[[0, 1, 0, 1, 0], [1, 0, 1, 0, 1]],
)
cube = Cube(data, long_name="foo")
expected_index = [0, 1]
expected_columns = [0, 1, 2, 3, 4]
data_frame = iris.pandas.as_data_frame(cube)
self.assertArrayEqual(data_frame, cube.data.astype("f").filled(np.nan))
self.assertArrayEqual(data_frame.index, expected_index)
self.assertArrayEqual(data_frame.columns, expected_columns)
def test_time_gregorian(self):
cube = Cube(
np.array([[0, 1, 2, 3, 4], [5, 6, 7, 8, 9]]), long_name="ts"
)
day_offsets = [0, 100.1, 200.2, 300.3, 400.4]
time_coord = DimCoord(
day_offsets, long_name="time", units="days since 2000-01-01 00:00"
)
cube.add_dim_coord(time_coord, 1)
data_frame = iris.pandas.as_data_frame(cube)
self.assertArrayEqual(data_frame, cube.data)
nanoseconds_per_day = 24 * 60 * 60 * 1000000000
days_to_2000 = 365 * 30 + 7
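        # 365 * 30 + 7 = 10957 days between the pandas epoch (1970-01-01) and
        # 2000-01-01; the "+ 7" accounts for the leap days in 1972-1996.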
# pandas Timestamp class cannot handle floats in pandas <v0.12
timestamps = [
pandas.Timestamp(
int(nanoseconds_per_day * (days_to_2000 + day_offset))
)
for day_offset in day_offsets
]
self.assertTrue(all(data_frame.columns == timestamps))
self.assertTrue(all(data_frame.index == [0, 1]))
def test_time_360(self):
cube = Cube(
np.array([[0, 1, 2, 3, 4], [5, 6, 7, 8, 9]]), long_name="ts"
)
time_unit = cf_units.Unit(
"days since 2000-01-01 00:00", calendar=cf_units.CALENDAR_360_DAY
)
time_coord = DimCoord(
[100.1, 200.2], long_name="time", units=time_unit
)
cube.add_dim_coord(time_coord, 0)
expected_index = [
cftime.Datetime360Day(2000, 4, 11, 2, 24),
cftime.Datetime360Day(2000, 7, 21, 4, 48),
]
expected_columns = [0, 1, 2, 3, 4]
data_frame = iris.pandas.as_data_frame(cube)
self.assertArrayEqual(data_frame, cube.data)
self.assertArrayEqual(data_frame.index, expected_index)
self.assertArrayEqual(data_frame.columns, expected_columns)
def test_copy_true(self):
cube = Cube(
np.array([[0, 1, 2, 3, 4], [5, 6, 7, 8, 9]]), long_name="foo"
)
data_frame = iris.pandas.as_data_frame(cube)
data_frame[0][0] = 99
self.assertEqual(cube.data[0, 0], 0)
def test_copy_int32_false(self):
cube = Cube(
np.array([[0, 1, 2, 3, 4], [5, 6, 7, 8, 9]], dtype=np.int32),
long_name="foo",
)
data_frame = iris.pandas.as_data_frame(cube, copy=False)
data_frame[0][0] = 99
self.assertEqual(cube.data[0, 0], 99)
def test_copy_int64_false(self):
cube = Cube(
np.array([[0, 1, 2, 3, 4], [5, 6, 7, 8, 9]], dtype=np.int64),
long_name="foo",
)
data_frame = iris.pandas.as_data_frame(cube, copy=False)
data_frame[0][0] = 99
self.assertEqual(cube.data[0, 0], 99)
def test_copy_float_false(self):
cube = Cube(
np.array([[0, 1, 2, 3, 4.4], [5, 6, 7, 8, 9]]), long_name="foo"
)
data_frame = iris.pandas.as_data_frame(cube, copy=False)
data_frame[0][0] = 99
self.assertEqual(cube.data[0, 0], 99)
def test_copy_masked_true(self):
data = np.ma.MaskedArray(
[[0, 1, 2, 3, 4], [5, 6, 7, 8, 9]],
mask=[[0, 1, 0, 1, 0], [1, 0, 1, 0, 1]],
)
cube = Cube(data, long_name="foo")
data_frame = iris.pandas.as_data_frame(cube)
data_frame[0][0] = 99
self.assertEqual(cube.data[0, 0], 0)
def test_copy_masked_false(self):
data = np.ma.MaskedArray(
[[0, 1, 2, 3, 4], [5, 6, 7, 8, 9]],
mask=[[0, 1, 0, 1, 0], [1, 0, 1, 0, 1]],
)
cube = Cube(data, long_name="foo")
with self.assertRaises(ValueError):
_ = iris.pandas.as_data_frame(cube, copy=False)
def test_copy_false_with_cube_view(self):
data = np.array([[0, 1, 2, 3, 4], [5, 6, 7, 8, 9]])
cube = Cube(data[:], long_name="foo")
data_frame = iris.pandas.as_data_frame(cube, copy=False)
data_frame[0][0] = 99
self.assertEqual(cube.data[0, 0], 99)
@skip_pandas
class TestSeriesAsCube(tests.IrisTest):
def test_series_simple(self):
series = pandas.Series([0, 1, 2, 3, 4], index=[5, 6, 7, 8, 9])
self.assertCML(
iris.pandas.as_cube(series),
tests.get_result_path(("pandas", "as_cube", "series_simple.cml")),
)
def test_series_object(self):
class Thing:
def __repr__(self):
return "A Thing"
series = pandas.Series(
[0, 1, 2, 3, 4],
index=[Thing(), Thing(), Thing(), Thing(), Thing()],
)
self.assertCML(
iris.pandas.as_cube(series),
tests.get_result_path(("pandas", "as_cube", "series_object.cml")),
)
def test_series_masked(self):
series = pandas.Series(
[0, float("nan"), 2, np.nan, 4], index=[5, 6, 7, 8, 9]
)
self.assertCML(
iris.pandas.as_cube(series),
tests.get_result_path(("pandas", "as_cube", "series_masked.cml")),
)
def test_series_datetime_gregorian(self):
series = pandas.Series(
[0, 1, 2, 3, 4],
index=[
datetime.datetime(2001, 1, 1, 1, 1, 1),
datetime.datetime(2002, 2, 2, 2, 2, 2),
datetime.datetime(2003, 3, 3, 3, 3, 3),
datetime.datetime(2004, 4, 4, 4, 4, 4),
datetime.datetime(2005, 5, 5, 5, 5, 5),
],
)
self.assertCML(
iris.pandas.as_cube(series),
tests.get_result_path(
("pandas", "as_cube", "series_datetime_gregorian.cml")
),
)
def test_series_cftime_360(self):
series = pandas.Series(
[0, 1, 2, 3, 4],
index=[
cftime.datetime(2001, 1, 1, 1, 1, 1),
cftime.datetime(2002, 2, 2, 2, 2, 2),
cftime.datetime(2003, 3, 3, 3, 3, 3),
cftime.datetime(2004, 4, 4, 4, 4, 4),
cftime.datetime(2005, 5, 5, 5, 5, 5),
],
)
self.assertCML(
iris.pandas.as_cube(
series, calendars={0: cf_units.CALENDAR_360_DAY}
),
tests.get_result_path(
("pandas", "as_cube", "series_netcdfimte_360.cml")
),
)
def test_copy_true(self):
series = pandas.Series([0, 1, 2, 3, 4], index=[5, 6, 7, 8, 9])
cube = iris.pandas.as_cube(series)
cube.data[0] = 99
self.assertEqual(series[5], 0)
def test_copy_false(self):
series = pandas.Series([0, 1, 2, 3, 4], index=[5, 6, 7, 8, 9])
cube = iris.pandas.as_cube(series, copy=False)
cube.data[0] = 99
self.assertEqual(series[5], 99)
@skip_pandas
class TestDataFrameAsCube(tests.IrisTest):
def test_data_frame_simple(self):
data_frame = pandas.DataFrame(
[[0, 1, 2, 3, 4], [5, 6, 7, 8, 9]],
index=[10, 11],
columns=[12, 13, 14, 15, 16],
)
self.assertCML(
iris.pandas.as_cube(data_frame),
tests.get_result_path(
("pandas", "as_cube", "data_frame_simple.cml")
),
)
def test_data_frame_nonotonic(self):
data_frame = pandas.DataFrame(
[[0, 1, 2, 3, 4], [5, 6, 7, 8, 9]],
index=[10, 10],
columns=[12, 12, 14, 15, 16],
)
self.assertCML(
iris.pandas.as_cube(data_frame),
tests.get_result_path(
("pandas", "as_cube", "data_frame_nonotonic.cml")
),
)
def test_data_frame_masked(self):
data_frame = pandas.DataFrame(
[[0, float("nan"), 2, 3, 4], [5, 6, 7, np.nan, 9]],
index=[10, 11],
columns=[12, 13, 14, 15, 16],
)
self.assertCML(
iris.pandas.as_cube(data_frame),
tests.get_result_path(
("pandas", "as_cube", "data_frame_masked.cml")
),
)
def test_data_frame_multidim(self):
data_frame = pandas.DataFrame(
[[0, 1, 2, 3, 4], [5, 6, 7, 8, 9]],
index=[0, 1],
columns=["col_1", "col_2", "col_3", "col_4", "col_5"],
)
self.assertCML(
iris.pandas.as_cube(data_frame),
tests.get_result_path(
("pandas", "as_cube", "data_frame_multidim.cml")
),
)
def test_data_frame_cftime_360(self):
data_frame = pandas.DataFrame(
[[0, 1, 2, 3, 4], [5, 6, 7, 8, 9]],
index=[
cftime.datetime(2001, 1, 1, 1, 1, 1),
cftime.datetime(2002, 2, 2, 2, 2, 2),
],
columns=[10, 11, 12, 13, 14],
)
self.assertCML(
iris.pandas.as_cube(
data_frame, calendars={0: cf_units.CALENDAR_360_DAY}
),
tests.get_result_path(
("pandas", "as_cube", "data_frame_netcdftime_360.cml")
),
)
def test_data_frame_datetime_gregorian(self):
data_frame = pandas.DataFrame(
[[0, 1, 2, 3, 4], [5, 6, 7, 8, 9]],
index=[
datetime.datetime(2001, 1, 1, 1, 1, 1),
datetime.datetime(2002, 2, 2, 2, 2, 2),
],
columns=[10, 11, 12, 13, 14],
)
self.assertCML(
iris.pandas.as_cube(data_frame),
tests.get_result_path(
("pandas", "as_cube", "data_frame_datetime_gregorian.cml")
),
)
def test_copy_true(self):
data_frame = pandas.DataFrame([[0, 1, 2, 3, 4], [5, 6, 7, 8, 9]])
cube = iris.pandas.as_cube(data_frame)
cube.data[0, 0] = 99
self.assertEqual(data_frame[0][0], 0)
def test_copy_false(self):
data_frame = pandas.DataFrame([[0, 1, 2, 3, 4], [5, 6, 7, 8, 9]])
cube = iris.pandas.as_cube(data_frame, copy=False)
cube.data[0, 0] = 99
self.assertEqual(data_frame[0][0], 99)
if __name__ == "__main__":
tests.main()
|
lgpl-3.0
|
Roboticmechart22/sms-tools
|
lectures/07-Sinusoidal-plus-residual-model/plots-code/hpsModelFrame.py
|
22
|
2075
|
import numpy as np
import matplotlib.pyplot as plt
from scipy.signal import hamming, triang, blackmanharris, resample
import math
from scipy.fftpack import fft, ifft, fftshift
import sys, os, functools, time
sys.path.append(os.path.join(os.path.dirname(os.path.realpath(__file__)), '../../../software/models/'))
import dftModel as DFT
import utilFunctions as UF
import harmonicModel as HM
(fs, x) = UF.wavread('../../../sounds/flute-A4.wav')
pos = .8*fs
M = 601
hM1 = int(math.floor((M+1)/2))
hM2 = int(math.floor(M/2))
w = np.hamming(M)
N = 1024
t = -100
nH = 40
minf0 = 420
maxf0 = 460
f0et = 5
minSineDur = .1
harmDevSlope = 0.01
Ns = 512
H = Ns/4
stocf = .2
x1 = x[pos-hM1:pos+hM2]
x2 = x[pos-Ns/2-1:pos+Ns/2-1]
mX, pX = DFT.dftAnal(x1, w, N)
ploc = UF.peakDetection(mX, t)
iploc, ipmag, ipphase = UF.peakInterp(mX, pX, ploc)
ipfreq = fs*iploc/N
f0 = UF.f0Twm(ipfreq, ipmag, f0et, minf0, maxf0)
hfreqp = []
hfreq, hmag, hphase = HM.harmonicDetection(ipfreq, ipmag, ipphase, f0, nH, hfreqp, fs, harmDevSlope)
Yh = UF.genSpecSines(hfreq, hmag, hphase, Ns, fs)
mYh = 20 * np.log10(abs(Yh[:Ns/2]))
bh=blackmanharris(Ns)
X2 = fft(fftshift(x2*bh/sum(bh)))
Xr = X2-Yh
mXr = 20 * np.log10(abs(Xr[:Ns/2]))
mYst = resample(np.maximum(-200, mXr), mXr.size*stocf) # decimate the mag spectrum
maxplotfreq = 8000.0
plt.figure(1, figsize=(9, 7))
plt.subplot(2,1,1)
binFreq = (fs/2.0)*np.arange(mX.size)/(mX.size)
plt.plot(binFreq,mX,'r', lw=1.5)
plt.axis([0,maxplotfreq,-100,max(mX)+2])
plt.plot(hfreq, hmag, marker='x', color='b', linestyle='', lw=2, markeredgewidth=1.5)
plt.title('mX + harmonics')
plt.subplot(2,1,2)
binFreq = (fs/2.0)*np.arange(mXr.size)/(mXr.size)
plt.plot(binFreq,mYh,'r', lw=.6, label='mYh')
plt.plot(binFreq,mXr,'r', lw=1.0, label='mXr')
binFreq = (fs/2.0)*np.arange(mYst.size)/(mYst.size)
plt.plot(binFreq,mYst,'r', lw=1.5, label='mYst')
plt.axis([0,maxplotfreq,-100,max(mYh)+2])
plt.legend(prop={'size':15})
plt.title('mYh + mXr + mYst')
plt.tight_layout()
plt.savefig('hpsModelFrame.png')
plt.show()
|
agpl-3.0
|
srjit/fakenewschallange
|
code/modelsv2/train.py
|
1
|
8348
|
from tqdm import tqdm, tqdm_pandas
import pandas as pd
import string
import numpy as np
import datetime
import os
from functools import reduce
import gensim.models.keyedvectors as word2vec
from sklearn.model_selection import train_test_split
tqdm.pandas(tqdm())
__author__ = "Sreejith Sreekumar"
__email__ = "sreekumar.s@husky.neu.edu"
__version__ = "0.0.1"
bodies = "../../data/train_bodies.csv"
stances = "../../data/train_stances.csv"
content = pd.read_csv(bodies, sep=",")
headlines = pd.read_csv(stances, sep=",")
data = pd.merge(content, headlines, how="left", on="Body ID")
data["index"] = data.index
def preprocess(x):
translator = str.maketrans('','',string.punctuation)
x = x.replace("“","")
x = x.replace("”","")
x = x.replace("‘","")
x = x.replace("’","")
return x.lower().translate(translator).split()
data["content_tokens"] = data["articleBody"].progress_apply(lambda x: preprocess(x))
data["headline_tokens"] = data["Headline"].progress_apply(lambda x: preprocess(x))
data["content_len"] = data["content_tokens"].progress_apply(lambda x : len(x))
data["headline_len"] = data["headline_tokens"].progress_apply(lambda x : len(x))
# print("------ Data Statistcs -----")
# l = list(data["content_len"])
# print("Mean lengths of articles", reduce(lambda x, y: x + y, l) / len(l))
# print("Median lengths of articles", np.median(l))
# l = list(data["headline_len"])
# print("Mean lengths of headlines", reduce(lambda x, y: x + y, l) / len(l))
# print("Median lengths of headlines", np.median(l))
## Let's keep the article length 400
## Headline width be 20
sentences_articlebody = list(data["content_tokens"])
sentences_headlines = list(data["headline_tokens"])
vocabulary_articlebody = [item for sublist in sentences_articlebody for item in sublist]
vocabulary_headlines = [item for sublist in sentences_headlines for item in sublist]
vocabulary = list(set(vocabulary_headlines + vocabulary_articlebody))
def get_word_vectors(_vocabulary):
wordslist = list(_vocabulary)
limit = len(wordslist)
model_path = "/home/sree/code/dl101/sentiment/amazon-reviews/GoogleNews-vectors-negative300.bin"
model = word2vec.KeyedVectors.load_word2vec_format(model_path, binary=True)
print("Word2Vec loaded...")
invalid_words = []
def get_vector(word):
try:
return model[word]
except:
invalid_words.append(word)
return limit
wordvectors = np.zeros([len(wordslist), 300], dtype=np.float32)
for i, word in tqdm(enumerate(wordslist)):
wordvectors[i] = get_vector(word)
del model
return wordvectors, invalid_words
wordvectors, invalid_words = get_word_vectors(vocabulary)
# def get_vectors_of_document(words, sequence_len = 400):
# def get_index(word):
# if word in invalid_words:
# return len(vocabulary)
# try:
# return vocabulary.index(word)
# except:
# return len(vocabulary)
# doc_vec = np.zeros(sequence_len)
# sequence = [get_index(word) for word in words][:sequence_len]
# if(len(sequence) < sequence_len):
# sequence[len(sequence):sequence_len] = [0] * (sequence_len - len(sequence))
# return np.asarray(sequence)
# data["encoded_article"] = data["content_tokens"].progress_apply(lambda x : get_vectors_of_document(x))
# data["encoded_headline"] = data["headline_tokens"].progress_apply(lambda x : get_vectors_of_document(x, 20))
print("Setting the labels...")
data["label"] = data["Stance"].apply(lambda x: [1, 0, 0, 0] if x == 'agree' else ([0, 1, 0, 0] if x == 'discuss' else ([0, 0, 1, 0] if x == 'disagree' else [0, 0, 0, 1])))
import pickle
# with open("data.bin","wb") as f:
# pickle.dump(data, f)
#load vectorized data
with open("data.bin","rb") as f:
data = pickle.load(f)
train, test = train_test_split(data, test_size=0.2, random_state=55)
num_classes = 4
batch_size = 500
seq_len_article = 400
seq_len_headline = 20
num_dimensions = 300
input_size = len(train)
input_size_test = len(test)
from random import randint
def get_train_batch():
start_index = randint(0, input_size - batch_size)
end_index = start_index + batch_size
print("Next batch to train starting index: ", start_index)
batch_headline = (train['encoded_headline'][start_index: end_index]).tolist()
batch_article = (train['encoded_article'][start_index: end_index]).tolist()
labels = train['label'][start_index: end_index].tolist()
headlines = np.zeros([batch_size, seq_len_headline])
for i in range(batch_size):
headlines[i] = batch_headline[i]
articles = np.zeros([batch_size, seq_len_article])
for i in range(batch_size):
articles[i] = batch_article[i]
return headlines, articles, labels
def get_test_batch():
start_index = randint(0, input_size_test - batch_size)
end_index = start_index + batch_size
print("Next batch to train starting index: ", start_index)
batch_headline = (test['encoded_headline'][start_index: end_index]).tolist()
batch_article = (test['encoded_article'][start_index: end_index]).tolist()
labels = test['label'][start_index: end_index].tolist()
headlines = np.zeros([batch_size, seq_len_headline])
for i in range(batch_size):
headlines[i] = batch_headline[i]
articles = np.zeros([batch_size, seq_len_article])
for i in range(batch_size):
articles[i] = batch_article[i]
return headlines, articles, labels
headlines, articles, labels = get_train_batch()
import tensorflow as tf
tf.reset_default_graph()
lstmunits = 128
labels = tf.placeholder(tf.float32, [batch_size, num_classes])
article_input = tf.placeholder(tf.int32, [batch_size, seq_len_article])
article_data = tf.Variable(tf.zeros([batch_size, seq_len_article, num_dimensions]),dtype=tf.float32)
article_data = tf.nn.embedding_lookup(wordvectors, article_input)
headline_input = tf.placeholder(tf.int32, [batch_size, seq_len_headline])
headline_data = tf.Variable(tf.zeros([batch_size, seq_len_headline, num_dimensions]),dtype=tf.float32)
headline_data = tf.nn.embedding_lookup(wordvectors, headline_input)
merged_data = tf.concat([headline_data, article_data], axis=1)
lstm_cell = tf.contrib.rnn.BasicLSTMCell(lstmunits)
lstm_cell = tf.contrib.rnn.DropoutWrapper(cell=lstm_cell, output_keep_prob=0.75)
value, _ = tf.nn.dynamic_rnn(lstm_cell, merged_data, dtype=tf.float32)
weight = tf.Variable(tf.truncated_normal([lstmunits, num_classes]), dtype=tf.float32)
bias = tf.Variable(tf.constant(0.1, shape=[num_classes]))
value = tf.transpose(value, [1, 0, 2])
last = tf.gather(value, int(value.get_shape()[0]) - 1)
prediction = (tf.matmul(last, weight) + bias)
correctPred = tf.equal(tf.argmax(prediction,1), tf.argmax(labels,1))
accuracy = tf.reduce_mean(tf.cast(correctPred, tf.float32))
accuracy = tf.reduce_mean(tf.cast(correctPred, tf.float32))
loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=prediction, labels=labels))
optimizer = tf.train.AdamOptimizer().minimize(loss)
tf.summary.scalar('Loss', loss)
#tf.summary.scalar('Train Accuracy', accuracy)
tf.summary.scalar('Test Accuracy', accuracy)
merged = tf.summary.merge_all()
logdir = "tensorboard/" + datetime.datetime.now().strftime("%Y%m%d-%H%M%S") + "/"
sess = tf.InteractiveSession()
writer = tf.summary.FileWriter(logdir, sess.graph)
saver = tf.train.Saver()
iterations = 10000
sess = tf.InteractiveSession()
sess.run(tf.global_variables_initializer())
for i in range(iterations):
    headlines, articles, _labels = get_train_batch()
    sess.run(optimizer, {article_input: articles, headline_input: headlines, labels: _labels})
    print("Iteration:", i+1)
    if (i % 50 == 0):
        headlines, articles, _labels = get_test_batch()
        # Evaluate test accuracy and the merged summaries in a single run on the test batch
        test_acc, summary = sess.run([accuracy, merged],
                                     {article_input: articles, headline_input: headlines, labels: _labels})
        print("Test accuracy:", test_acc)
writer.add_summary(summary, i)
    # Save the network every 1,000 training iterations
if (i % 1000 == 0 and i != 0):
save_path = saver.save(sess, "models/pretrained_lstm.ckpt", global_step=i)
print("saved to %s" % save_path)
writer.close()
|
gpl-3.0
|
zihua/scikit-learn
|
examples/gaussian_process/plot_compare_gpr_krr.py
|
67
|
5191
|
"""
==========================================================
Comparison of kernel ridge and Gaussian process regression
==========================================================
Both kernel ridge regression (KRR) and Gaussian process regression (GPR) learn
a target function by employing internally the "kernel trick". KRR learns a
linear function in the space induced by the respective kernel which corresponds
to a non-linear function in the original space. The linear function in the
kernel space is chosen based on the mean-squared error loss with
ridge regularization. GPR uses the kernel to define the covariance of
a prior distribution over the target functions and uses the observed training
data to define a likelihood function. Based on Bayes theorem, a (Gaussian)
posterior distribution over target functions is defined, whose mean is used
for prediction.
A major difference is that GPR can choose the kernel's hyperparameters based
on gradient-ascent on the marginal likelihood function while KRR needs to
perform a grid search on a cross-validated loss function (mean-squared error
loss). A further difference is that GPR learns a generative, probabilistic
model of the target function and can thus provide meaningful confidence
intervals and posterior samples along with the predictions while KRR only
provides predictions.
This example illustrates both methods on an artificial dataset, which
consists of a sinusoidal target function and strong noise. The figure compares
the learned models of KRR and GPR based on an ExpSineSquared kernel, which is
suited for learning periodic functions. The kernel's hyperparameters control
the smoothness (l) and periodicity of the kernel (p). Moreover, the noise level
of the data is learned explicitly by GPR by an additional WhiteKernel component
in the kernel and by the regularization parameter alpha of KRR.
The figure shows that both methods learn reasonable models of the target
function. GPR correctly identifies the periodicity of the function to be
roughly 2*pi (6.28), while KRR chooses the doubled periodicity 4*pi. Besides
that, GPR provides reasonable confidence bounds on the prediction which are not
available for KRR. A major difference between the two methods is the time
required for fitting and predicting: while fitting KRR is fast in principle,
the grid-search for hyperparameter optimization scales exponentially with the
number of hyperparameters ("curse of dimensionality"). The gradient-based
optimization of the parameters in GPR does not suffer from this exponential
scaling and is thus considerably faster on this example with its 3-dimensional
hyperparameter space. The time for predicting is similar; however, generating
the variance of the predictive distribution of GPR takes considerably longer
than just predicting the mean.
"""
print(__doc__)
# Authors: Jan Hendrik Metzen <jhm@informatik.uni-bremen.de>
# License: BSD 3 clause
import time
import numpy as np
import matplotlib.pyplot as plt
from sklearn.kernel_ridge import KernelRidge
from sklearn.model_selection import GridSearchCV
from sklearn.gaussian_process import GaussianProcessRegressor
from sklearn.gaussian_process.kernels import WhiteKernel, ExpSineSquared
rng = np.random.RandomState(0)
# Generate sample data
X = 15 * rng.rand(100, 1)
y = np.sin(X).ravel()
y += 3 * (0.5 - rng.rand(X.shape[0])) # add noise
# Fit KernelRidge with parameter selection based on 5-fold cross validation
param_grid = {"alpha": [1e0, 1e-1, 1e-2, 1e-3],
"kernel": [ExpSineSquared(l, p)
for l in np.logspace(-2, 2, 10)
for p in np.logspace(0, 2, 10)]}
kr = GridSearchCV(KernelRidge(), cv=5, param_grid=param_grid)
stime = time.time()
kr.fit(X, y)
print("Time for KRR fitting: %.3f" % (time.time() - stime))
gp_kernel = ExpSineSquared(1.0, 5.0, periodicity_bounds=(1e-2, 1e1)) \
+ WhiteKernel(1e-1)
gpr = GaussianProcessRegressor(kernel=gp_kernel)
stime = time.time()
gpr.fit(X, y)
print("Time for GPR fitting: %.3f" % (time.time() - stime))
# Predict using kernel ridge
X_plot = np.linspace(0, 20, 10000)[:, None]
stime = time.time()
y_kr = kr.predict(X_plot)
print("Time for KRR prediction: %.3f" % (time.time() - stime))
# Predict using gaussian process regressor
stime = time.time()
y_gpr = gpr.predict(X_plot, return_std=False)
print("Time for GPR prediction: %.3f" % (time.time() - stime))
stime = time.time()
y_gpr, y_std = gpr.predict(X_plot, return_std=True)
print("Time for GPR prediction with standard-deviation: %.3f"
% (time.time() - stime))
# Plot results
plt.figure(figsize=(10, 5))
lw = 2
plt.scatter(X, y, c='k', label='data')
plt.plot(X_plot, np.sin(X_plot), color='navy', lw=lw, label='True')
plt.plot(X_plot, y_kr, color='turquoise', lw=lw,
label='KRR (%s)' % kr.best_params_)
plt.plot(X_plot, y_gpr, color='darkorange', lw=lw,
label='GPR (%s)' % gpr.kernel_)
plt.fill_between(X_plot[:, 0], y_gpr - y_std, y_gpr + y_std, color='darkorange',
alpha=0.2)
plt.xlabel('data')
plt.ylabel('target')
plt.xlim(0, 20)
plt.ylim(-4, 4)
plt.title('GPR versus Kernel Ridge')
plt.legend(loc="best", scatterpoints=1, prop={'size': 8})
plt.show()
|
bsd-3-clause
|
raoulbq/scipy
|
doc/source/tutorial/stats/plots/kde_plot3.py
|
132
|
1229
|
import numpy as np
import matplotlib.pyplot as plt
from scipy import stats
np.random.seed(12456)
x1 = np.random.normal(size=200) # random data, normal distribution
xs = np.linspace(x1.min()-1, x1.max()+1, 200)
kde1 = stats.gaussian_kde(x1)
kde2 = stats.gaussian_kde(x1, bw_method='silverman')
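# Two bandwidth selectors are compared: the default Scott's rule (kde1) and
# Silverman's rule (kde2); both estimates are evaluated on the same grid below.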
fig = plt.figure(figsize=(8, 6))
ax1 = fig.add_subplot(211)
ax1.plot(x1, np.zeros(x1.shape), 'b+', ms=12) # rug plot
ax1.plot(xs, kde1(xs), 'k-', label="Scott's Rule")
ax1.plot(xs, kde2(xs), 'b-', label="Silverman's Rule")
ax1.plot(xs, stats.norm.pdf(xs), 'r--', label="True PDF")
ax1.set_xlabel('x')
ax1.set_ylabel('Density')
ax1.set_title("Normal (top) and Student's T$_{df=5}$ (bottom) distributions")
ax1.legend(loc=1)
x2 = stats.t.rvs(5, size=200) # random data, T distribution
xs = np.linspace(x2.min() - 1, x2.max() + 1, 200)
kde3 = stats.gaussian_kde(x2)
kde4 = stats.gaussian_kde(x2, bw_method='silverman')
ax2 = fig.add_subplot(212)
ax2.plot(x2, np.zeros(x2.shape), 'b+', ms=12) # rug plot
ax2.plot(xs, kde3(xs), 'k-', label="Scott's Rule")
ax2.plot(xs, kde4(xs), 'b-', label="Silverman's Rule")
ax2.plot(xs, stats.t.pdf(xs, 5), 'r--', label="True PDF")
ax2.set_xlabel('x')
ax2.set_ylabel('Density')
plt.show()
|
bsd-3-clause
|
justinbois/bebi103_utils
|
bebi103/deprecated/emcee.py
|
2
|
15081
|
import collections
import warnings
import numpy as np
import pandas as pd
import emcee
import ptemcee
def generic_log_posterior(log_prior, log_likelihood, params, logpargs=(),
loglargs=()):
"""
Generic log posterior for MCMC calculations
Parameters
----------
log_prior : function
Function to compute the log prior.
Call signature: log_prior(params, *logpargs)
log_likelihood : function
        Function to compute the log likelihood.
        Call signature: log_likelihood(params, *loglargs)
params : ndarray
Numpy array containing the parameters of the posterior.
logpargs : tuple, default ()
Tuple of parameters to be passed to log_prior.
loglargs : tuple, default ()
Tuple of parameters to be passed to log_likelihood.
Returns
-------
output : float
The logarithm of the posterior evaluated at `params`.
"""
# Compute log prior
lp = log_prior(params, *logpargs)
# If log prior is -inf, return that
if lp == -np.inf:
return -np.inf
# Compute and return posterior
return lp + log_likelihood(params, *loglargs)
def sampler_to_dataframe(sampler, columns=None):
"""
Convert output of an emcee sampler to a Pandas DataFrame.
Parameters
----------
    sampler : emcee.EnsembleSampler or ptemcee.Sampler instance
        Sampler instance from which MCMC has already been run.
Returns
-------
output : DataFrame
Pandas DataFrame containing the samples. Each column is
a variable, except: 'lnprob' and 'chain' for an
EnsembleSampler, and 'lnlike', 'lnprob', 'beta_ind',
'beta', and 'chain' for a PTSampler. These contain obvious
values.
"""
    # Default column names must be set before they can be validated
    if columns is None:
        columns = list(range(sampler.chain.shape[-1]))
    invalid_column_names = ['lnprob', 'chain', 'lnlike', 'beta',
                            'beta_ind']
    if np.any([x in columns for x in invalid_column_names]):
        raise RuntimeError('You cannot name columns with any of these: '
                           + ' '.join(invalid_column_names))
if isinstance(sampler, emcee.EnsembleSampler):
n_walkers, n_steps, n_dim = sampler.chain.shape
df = pd.DataFrame(data=sampler.flatchain, columns=columns)
df['lnprob'] = sampler.flatlnprobability
df['chain'] = np.concatenate([i * np.ones(n_steps, dtype=int)
for i in range(n_walkers)])
elif isinstance(sampler, ptemcee.sampler.Sampler):
n_temps, n_walkers, n_steps, n_dim = sampler.chain.shape
df = pd.DataFrame(
data=sampler.flatchain.reshape(
(n_temps * n_walkers * n_steps, n_dim)),
columns=columns)
df['lnlike'] = sampler.loglikelihood.flatten()
df['lnprob'] = sampler.logprobability.flatten()
beta_inds = [i * np.ones(n_steps * n_walkers, dtype=int)
for i, _ in enumerate(sampler.betas)]
df['beta_ind'] = np.concatenate(beta_inds)
df['beta'] = sampler.betas[df['beta_ind']]
chain_inds = [j * np.ones(n_steps, dtype=int)
for i, _ in enumerate(sampler.betas)
for j in range(n_walkers)]
df['chain'] = np.concatenate(chain_inds)
else:
        raise RuntimeError('Invalid sampler input.')
return df
def run_ensemble_emcee(log_post=None, n_burn=100, n_steps=100,
n_walkers=None, p_dict=None, p0=None, columns=None,
args=(), threads=None, thin=1, return_sampler=False,
return_pos=False):
"""
Run emcee.
Parameters
----------
log_post : function
The function that computes the log posterior. Must be of
the form log_post(p, *args), where p is a NumPy array of
parameters that are sampled by the MCMC sampler.
n_burn : int, default 100
Number of burn steps
n_steps : int, default 100
Number of MCMC samples to take
n_walkers : int
Number of walkers, ignored if p0 is None
p_dict : collections.OrderedDict
Each entry is a tuple with the function used to generate
starting points for the parameter and the arguments for
the function. The starting point function must have the
call signature f(*args_for_function, n_walkers). Ignored
if p0 is not None.
p0 : array
n_walkers by n_dim array of initial starting values.
p0[i,j] is the starting point for walk i along variable j.
If provided, p_dict is ignored.
columns : list of strings
Name of parameters. These will be the column headings in the
returned DataFrame. If None, either inferred from p_dict or
assigned sequential integers.
args : tuple
Arguments passed to log_post
threads : int
Number of cores to use in calculation
thin : int
The number of iterations to perform between saving the
state to the internal chain.
return_sampler : bool, default False
If True, return sampler as well as DataFrame with results.
return_pos : bool, default False
If True, additionally return position of the sampler.
Returns
-------
df : pandas.DataFrame
First columns give flattened MCMC chains, with columns
named with the variable being sampled as a string.
Other columns are:
'chain': ID of chain
'lnprob': Log posterior probability
sampler : emcee.EnsembleSampler instance, optional
The sampler instance.
pos : ndarray, shape (nwalkers, ndim), optional
Last position of the walkers.
"""
if p0 is None and p_dict is None:
raise RuntimeError('Must supply either p0 or p_dict.')
# Infer n_dim and n_walkers (and check inputs)
if p0 is None:
if n_walkers is None:
raise RuntimeError('n_walkers must be specified if p0 is None')
if type(p_dict) is not collections.OrderedDict:
raise RuntimeError('p_dict must be collections.OrderedDict.')
n_dim = len(p_dict)
else:
n_walkers, n_dim = p0.shape
if p_dict is not None:
            warnings.warn('p_dict is being ignored.', RuntimeWarning)
# Infer columns
if columns is None:
if p_dict is not None:
columns = list(p_dict.keys())
else:
columns = list(range(n_dim))
elif len(columns) != n_dim:
raise RuntimeError('len(columns) must equal number of parameters.')
# Check for invalid column names
invalid_column_names = ['lnprob', 'chain', 'lnlike', 'beta',
'beta_ind']
if np.any([x in columns for x in invalid_column_names]):
raise RuntimeError('You cannot name columns with any of these: '
+ ' '.join(invalid_column_names))
# Build starting points of walkers
if p0 is None:
p0 = np.empty((n_walkers, n_dim))
for i, key in enumerate(p_dict):
p0[:, i] = p_dict[key][0](*(p_dict[key][1] + (n_walkers,)))
# Set up the EnsembleSampler instance
if threads is not None:
sampler = emcee.EnsembleSampler(n_walkers, n_dim, log_post,
args=args, threads=threads)
else:
sampler = emcee.EnsembleSampler(n_walkers, n_dim, log_post,
args=args)
# Do burn-in
if n_burn > 0:
pos, _, _ = sampler.run_mcmc(p0, n_burn, storechain=False)
else:
pos = p0
# Sample again, starting from end burn-in state
pos, _, _ = sampler.run_mcmc(pos, n_steps, thin=thin)
# Make DataFrame for results
df = sampler_to_dataframe(sampler, columns=columns)
# Set up return
return_vals = (df, sampler, pos)
return_bool = (True, return_sampler, return_pos)
ret = tuple([rv for rv, rb in zip(return_vals, return_bool) if rb])
if len(ret) == 1:
return ret[0]
return ret
def run_pt_emcee(log_like, log_prior, n_burn, n_steps, n_temps=None,
n_walkers=None, p_dict=None, p0=None, columns=None,
loglargs=(), logpargs=(), threads=None, thin=1,
return_lnZ=False, return_sampler=False, return_pos=False):
"""
    Run parallel tempering MCMC with ptemcee.
Parameters
----------
log_like : function
The function that computes the log likelihood. Must be of
the form log_like(p, *llargs), where p is a NumPy array of
parameters that are sampled by the MCMC sampler.
log_prior : function
The function that computes the log prior. Must be of
        the form log_prior(p, *lpargs), where p is a NumPy array of
parameters that are sampled by the MCMC sampler.
n_burn : int
Number of burn steps
n_steps : int
Number of MCMC samples to take
n_temps : int
The number of temperatures to use in PT sampling.
n_walkers : int
Number of walkers
p_dict : collections.OrderedDict
Each entry is a tuple with the function used to generate
starting points for the parameter and the arguments for
the function. The starting point function must have the
call signature f(*args_for_function, n_walkers). Ignored
if p0 is not None.
p0 : array
n_walkers by n_dim array of initial starting values.
p0[k,i,j] is the starting point for walk i along variable j
for temperature k. If provided, p_dict is ignored.
columns : list of strings
Name of parameters. These will be the column headings in the
returned DataFrame. If None, either inferred from p_dict or
assigned sequential integers.
    loglargs : tuple
        Arguments passed to log_like.
    logpargs : tuple
        Arguments passed to log_prior.
threads : int
Number of cores to use in calculation
thin : int
The number of iterations to perform between saving the
state to the internal chain.
return_lnZ : bool, default False
If True, additionally return lnZ and dlnZ.
return_sampler : bool, default False
If True, additionally return sampler.
return_pos : bool, default False
If True, additionally return position of the sampler.
Returns
-------
df : pandas.DataFrame
First columns give flattened MCMC chains, with columns
named with the variable being sampled as a string.
Other columns are:
'chain': ID of chain
'beta': Inverse temperature
'beta_ind': Index of beta in list of betas
'lnlike': Log likelihood
'lnprob': Log posterior probability (with beta multiplying
log likelihood)
lnZ : float, optional
ln Z(1), which is equal to the evidence of the
parameter estimation problem.
dlnZ : float, optional
The estimated error in the lnZ calculation.
sampler : emcee.PTSampler instance, optional
The sampler instance.
pos : ndarray, shape (ntemps, nwalkers, ndim), optional
Last position of the walkers.
"""
if p0 is None and p_dict is None:
raise RuntimeError('Must supply either p0 or p_dict.')
# Infer n_dim and n_walkers (and check inputs)
if p0 is None:
if n_walkers is None:
raise RuntimeError('n_walkers must be specified if p0 is None')
if type(p_dict) is not collections.OrderedDict:
raise RuntimeError('p_dict must be collections.OrderedDict.')
n_dim = len(p_dict)
else:
n_temps, n_walkers, n_dim = p0.shape
if p_dict is not None:
            warnings.warn('p_dict is being ignored.', RuntimeWarning)
# Infer columns
if columns is None:
if p_dict is not None:
columns = list(p_dict.keys())
else:
columns = list(range(n_dim))
elif len(columns) != n_dim:
raise RuntimeError('len(columns) must equal number of parameters.')
# Check for invalid column names
invalid_column_names = ['lnprob', 'chain', 'lnlike', 'beta',
'beta_ind']
if np.any([x in columns for x in invalid_column_names]):
raise RuntimeError('You cannot name columns with any of these: '
+ ' '.join(invalid_column_names))
# Build starting points of walkers
if p0 is None:
p0 = np.empty((n_temps, n_walkers, n_dim))
for i, key in enumerate(p_dict):
p0[:, :, i] = p_dict[key][0](
*(p_dict[key][1] + ((n_temps, n_walkers),)))
# Set up the PTSampler instance
if threads is not None:
sampler = ptemcee.Sampler(n_walkers, n_dim, log_like, log_prior,
ntemps=n_temps, loglargs=loglargs,
logpargs=logpargs, threads=threads)
else:
sampler = ptemcee.Sampler(n_walkers, n_dim, log_like, log_prior,
ntemps=n_temps, loglargs=loglargs,
logpargs=logpargs)
# Do burn-in
if n_burn > 0:
pos, _, _ = sampler.run_mcmc(p0, iterations=n_burn, storechain=False)
else:
pos = p0
# Sample again, starting from end burn-in state
pos, _, _ = sampler.run_mcmc(pos, iterations=n_steps, thin=thin)
# Compute thermodynamic integral
lnZ, dlnZ = sampler.log_evidence_estimate(fburnin=0)
# Make DataFrame for results
df = sampler_to_dataframe(sampler, columns=columns)
# Set up return
return_vals = (df, lnZ, dlnZ, sampler, pos)
return_bool = (True, return_lnZ, return_lnZ, return_sampler, return_pos)
ret = tuple([rv for rv, rb in zip(return_vals, return_bool) if rb])
if len(ret) == 1:
return ret[0]
return ret
def lnZ(df_mcmc):
"""
Compute log Z(1) from PTMCMC traces stored in DataFrame.
Parameters
----------
    df_mcmc : pandas DataFrame, as output by run_pt_emcee.
        DataFrame containing the output of a parallel tempering MCMC
        run. It only needs to contain the columns pertinent to computing
        ln Z, which are 'beta_ind', 'lnlike', and 'beta'.
Returns
-------
output : float
ln Z as computed by thermodynamic integration. This is
equivalent to what is obtained by calling
`sampler.thermodynamic_integration_log_evidence(fburnin=0)`
where `sampler` is an emcee.PTSampler instance.
Notes
-----
.. This is useful when the DataFrame from a PTSampler is too
large to store in RAM.
"""
# Average the log likelihood over the samples
log_mean = np.zeros(len(df_mcmc['beta_ind'].unique()))
for i, b in enumerate(df_mcmc['beta_ind'].unique()):
log_mean[i] = df_mcmc['lnlike'][df_mcmc['beta_ind']==b].mean()
# Set of betas (temperatures)
betas = np.concatenate((np.array(df_mcmc['beta'].unique()), (0,)))
# Approximate quadrature
return np.dot(log_mean, -np.diff(betas))
|
mit
|
alekz112/statsmodels
|
examples/python/robust_models_0.py
|
33
|
2992
|
## Robust Linear Models
from __future__ import print_function
import numpy as np
import statsmodels.api as sm
import matplotlib.pyplot as plt
from statsmodels.sandbox.regression.predstd import wls_prediction_std
# ## Estimation
#
# Load data:
data = sm.datasets.stackloss.load()
data.exog = sm.add_constant(data.exog)
# Huber's T norm with the (default) median absolute deviation scaling
huber_t = sm.RLM(data.endog, data.exog, M=sm.robust.norms.HuberT())
hub_results = huber_t.fit()
print(hub_results.params)
print(hub_results.bse)
print(hub_results.summary(yname='y',
xname=['var_%d' % i for i in range(len(hub_results.params))]))
# Huber's T norm with 'H2' covariance matrix
hub_results2 = huber_t.fit(cov="H2")
print(hub_results2.params)
print(hub_results2.bse)
# Andrew's Wave norm with Huber's Proposal 2 scaling and 'H3' covariance matrix
andrew_mod = sm.RLM(data.endog, data.exog, M=sm.robust.norms.AndrewWave())
andrew_results = andrew_mod.fit(scale_est=sm.robust.scale.HuberScale(), cov="H3")
print('Parameters: ', andrew_results.params)
# See ``help(sm.RLM.fit)`` for more options and ``module sm.robust.scale`` for scale options
#
# ## Comparing OLS and RLM
#
# Artificial data with outliers:
nsample = 50
x1 = np.linspace(0, 20, nsample)
X = np.column_stack((x1, (x1-5)**2))
X = sm.add_constant(X)
sig = 0.3 # smaller error variance makes OLS<->RLM contrast bigger
beta = [5, 0.5, -0.0]
y_true2 = np.dot(X, beta)
y2 = y_true2 + sig*1. * np.random.normal(size=nsample)
y2[[39,41,43,45,48]] -= 5 # add some outliers (10% of nsample)
# ### Example 1: quadratic function with linear truth
#
# Note that the quadratic term in OLS regression will capture outlier effects.
res = sm.OLS(y2, X).fit()
print(res.params)
print(res.bse)
print(res.predict())
# Estimate RLM:
resrlm = sm.RLM(y2, X).fit()
print(resrlm.params)
print(resrlm.bse)
# Draw a plot to compare OLS estimates to the robust estimates:
fig = plt.figure(figsize=(12,8))
ax = fig.add_subplot(111)
ax.plot(x1, y2, 'o',label="data")
ax.plot(x1, y_true2, 'b-', label="True")
prstd, iv_l, iv_u = wls_prediction_std(res)
ax.plot(x1, res.fittedvalues, 'r-', label="OLS")
ax.plot(x1, iv_u, 'r--')
ax.plot(x1, iv_l, 'r--')
ax.plot(x1, resrlm.fittedvalues, 'g.-', label="RLM")
ax.legend(loc="best")
# ### Example 2: linear function with linear truth
#
# Fit a new OLS model using only the linear term and the constant:
X2 = X[:,[0,1]]
res2 = sm.OLS(y2, X2).fit()
print(res2.params)
print(res2.bse)
# Estimate RLM:
resrlm2 = sm.RLM(y2, X2).fit()
print(resrlm2.params)
print(resrlm2.bse)
# Draw a plot to compare OLS estimates to the robust estimates:
prstd, iv_l, iv_u = wls_prediction_std(res2)
fig, ax = plt.subplots()
ax.plot(x1, y2, 'o', label="data")
ax.plot(x1, y_true2, 'b-', label="True")
ax.plot(x1, res2.fittedvalues, 'r-', label="OLS")
ax.plot(x1, iv_u, 'r--')
ax.plot(x1, iv_l, 'r--')
ax.plot(x1, resrlm2.fittedvalues, 'g.-', label="RLM")
ax.legend(loc="best")
|
bsd-3-clause
|
rohanp/scikit-learn
|
examples/neighbors/plot_approximate_nearest_neighbors_hyperparameters.py
|
102
|
5177
|
"""
=================================================
Hyper-parameters of Approximate Nearest Neighbors
=================================================
This example demonstrates the behaviour of the
accuracy of the nearest neighbor queries of Locality Sensitive Hashing
Forest as the number of candidates and the number of estimators (trees)
vary.
In the first plot, accuracy is measured as a function of the number of
candidates. Here, the term "number of candidates" refers to the maximum number
of distinct points retrieved from each tree to calculate the distances. Nearest
neighbors are selected from this pool of candidates. The number of estimators
is held at three fixed levels (1, 5, 10).
In the second plot, the number of candidates is fixed at 50. The number of
trees is varied and the accuracy is plotted against those values. To measure the
accuracy, the true nearest neighbors are required, therefore
:class:`sklearn.neighbors.NearestNeighbors` is used to compute the exact
neighbors.
"""
from __future__ import division
print(__doc__)
# Author: Maheshakya Wijewardena <maheshakya.10@cse.mrt.ac.lk>
#
# License: BSD 3 clause
###############################################################################
import numpy as np
from sklearn.datasets.samples_generator import make_blobs
from sklearn.neighbors import LSHForest
from sklearn.neighbors import NearestNeighbors
import matplotlib.pyplot as plt
# Initialize size of the database, iterations and required neighbors.
n_samples = 10000
n_features = 100
n_queries = 30
rng = np.random.RandomState(42)
# Generate sample data
X, _ = make_blobs(n_samples=n_samples + n_queries,
n_features=n_features, centers=10,
random_state=0)
X_index = X[:n_samples]
X_query = X[n_samples:]
# Get exact neighbors
nbrs = NearestNeighbors(n_neighbors=1, algorithm='brute',
metric='cosine').fit(X_index)
neighbors_exact = nbrs.kneighbors(X_query, return_distance=False)
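# Accuracy below is the fraction of queries whose approximate nearest neighbor
# returned by LSHForest matches the exact neighbor computed above.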
# Set `n_candidate` values
n_candidates_values = np.linspace(10, 500, 5).astype(np.int)
n_estimators_for_candidate_value = [1, 5, 10]
n_iter = 10
stds_accuracies = np.zeros((len(n_estimators_for_candidate_value),
n_candidates_values.shape[0]),
dtype=float)
accuracies_c = np.zeros((len(n_estimators_for_candidate_value),
n_candidates_values.shape[0]), dtype=float)
# LSH Forest is a stochastic index: perform several iterations to estimate
# expected accuracy and standard deviation displayed as error bars in
# the plots
for j, value in enumerate(n_estimators_for_candidate_value):
for i, n_candidates in enumerate(n_candidates_values):
accuracy_c = []
for seed in range(n_iter):
lshf = LSHForest(n_estimators=value,
n_candidates=n_candidates, n_neighbors=1,
random_state=seed)
# Build the LSH Forest index
lshf.fit(X_index)
# Get neighbors
neighbors_approx = lshf.kneighbors(X_query,
return_distance=False)
accuracy_c.append(np.sum(np.equal(neighbors_approx,
neighbors_exact)) /
n_queries)
stds_accuracies[j, i] = np.std(accuracy_c)
accuracies_c[j, i] = np.mean(accuracy_c)
# Set `n_estimators` values
n_estimators_values = [1, 5, 10, 20, 30, 40, 50]
accuracies_trees = np.zeros(len(n_estimators_values), dtype=float)
# Calculate average accuracy for each value of `n_estimators`
for i, n_estimators in enumerate(n_estimators_values):
lshf = LSHForest(n_estimators=n_estimators, n_neighbors=1)
# Build the LSH Forest index
lshf.fit(X_index)
# Get neighbors
neighbors_approx = lshf.kneighbors(X_query, return_distance=False)
accuracies_trees[i] = np.sum(np.equal(neighbors_approx,
neighbors_exact))/n_queries
###############################################################################
# Plot the accuracy variation with `n_candidates`
plt.figure()
colors = ['c', 'm', 'y']
for i, n_estimators in enumerate(n_estimators_for_candidate_value):
label = 'n_estimators = %d ' % n_estimators
plt.plot(n_candidates_values, accuracies_c[i, :],
'o-', c=colors[i], label=label)
plt.errorbar(n_candidates_values, accuracies_c[i, :],
stds_accuracies[i, :], c=colors[i])
plt.legend(loc='upper left', prop=dict(size='small'))
plt.ylim([0, 1.2])
plt.xlim(min(n_candidates_values), max(n_candidates_values))
plt.ylabel("Accuracy")
plt.xlabel("n_candidates")
plt.grid(which='both')
plt.title("Accuracy variation with n_candidates")
# Plot the accuracy variation with `n_estimators`
plt.figure()
plt.scatter(n_estimators_values, accuracies_trees, c='k')
plt.plot(n_estimators_values, accuracies_trees, c='g')
plt.ylim([0, 1.2])
plt.xlim(min(n_estimators_values), max(n_estimators_values))
plt.ylabel("Accuracy")
plt.xlabel("n_estimators")
plt.grid(which='both')
plt.title("Accuracy variation with n_estimators")
plt.show()
|
bsd-3-clause
|
IshankGulati/scikit-learn
|
benchmarks/bench_mnist.py
|
38
|
6799
|
"""
=======================
MNIST dataset benchmark
=======================
Benchmark on the MNIST dataset. The dataset comprises 70,000 samples
and 784 features. Here, we consider the task of predicting
10 classes - digits from 0 to 9 - from their raw images. In contrast to the
covertype dataset, the feature space is homogeneous.
Example of output :
[..]
Classification performance:
===========================
Classifier train-time test-time error-rate
------------------------------------------------------------
MLP_adam 53.46s 0.11s 0.0224
Nystroem-SVM 112.97s 0.92s 0.0228
MultilayerPerceptron 24.33s 0.14s 0.0287
ExtraTrees 42.99s 0.57s 0.0294
RandomForest 42.70s 0.49s 0.0318
SampledRBF-SVM 135.81s 0.56s 0.0486
LinearRegression-SAG 16.67s 0.06s 0.0824
CART 20.69s 0.02s 0.1219
dummy 0.00s 0.01s 0.8973
"""
from __future__ import division, print_function
# Author: Issam H. Laradji
# Arnaud Joly <arnaud.v.joly@gmail.com>
# License: BSD 3 clause
import os
from time import time
import argparse
import numpy as np
from sklearn.datasets import fetch_mldata
from sklearn.datasets import get_data_home
from sklearn.ensemble import ExtraTreesClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.dummy import DummyClassifier
from sklearn.externals.joblib import Memory
from sklearn.kernel_approximation import Nystroem
from sklearn.kernel_approximation import RBFSampler
from sklearn.metrics import zero_one_loss
from sklearn.pipeline import make_pipeline
from sklearn.svm import LinearSVC
from sklearn.tree import DecisionTreeClassifier
from sklearn.utils import check_array
from sklearn.linear_model import LogisticRegression
from sklearn.neural_network import MLPClassifier
# Memoize the data extraction and memory map the resulting
# train / test splits in readonly mode
memory = Memory(os.path.join(get_data_home(), 'mnist_benchmark_data'),
mmap_mode='r')
@memory.cache
def load_data(dtype=np.float32, order='F'):
"""Load the data, then cache and memmap the train/test split"""
######################################################################
# Load dataset
print("Loading dataset...")
data = fetch_mldata('MNIST original')
X = check_array(data['data'], dtype=dtype, order=order)
y = data["target"]
# Normalize features
X = X / 255
# Create train-test split (as [Joachims, 2006])
print("Creating train-test split...")
n_train = 60000
X_train = X[:n_train]
y_train = y[:n_train]
X_test = X[n_train:]
y_test = y[n_train:]
return X_train, X_test, y_train, y_test
ESTIMATORS = {
"dummy": DummyClassifier(),
'CART': DecisionTreeClassifier(),
'ExtraTrees': ExtraTreesClassifier(n_estimators=100),
'RandomForest': RandomForestClassifier(n_estimators=100),
'Nystroem-SVM': make_pipeline(
Nystroem(gamma=0.015, n_components=1000), LinearSVC(C=100)),
'SampledRBF-SVM': make_pipeline(
RBFSampler(gamma=0.015, n_components=1000), LinearSVC(C=100)),
'LinearRegression-SAG': LogisticRegression(solver='sag', tol=1e-1, C=1e4),
'MultilayerPerceptron': MLPClassifier(
hidden_layer_sizes=(100, 100), max_iter=400, alpha=1e-4,
algorithm='sgd', learning_rate_init=0.2, momentum=0.9, verbose=1,
tol=1e-4, random_state=1),
'MLP-adam': MLPClassifier(
hidden_layer_sizes=(100, 100), max_iter=400, alpha=1e-4,
algorithm='adam', learning_rate_init=0.001, verbose=1,
tol=1e-4, random_state=1)
}
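# Note: despite its key, 'LinearRegression-SAG' is a LogisticRegression
# classifier using the SAG solver; the key only controls how it is labelled
# in the results table.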
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('--classifiers', nargs="+",
choices=ESTIMATORS, type=str,
default=['ExtraTrees', 'Nystroem-SVM'],
help="list of classifiers to benchmark.")
parser.add_argument('--n-jobs', nargs="?", default=1, type=int,
help="Number of concurrently running workers for "
"models that support parallelism.")
parser.add_argument('--order', nargs="?", default="C", type=str,
choices=["F", "C"],
help="Allow to choose between fortran and C ordered "
"data")
parser.add_argument('--random-seed', nargs="?", default=0, type=int,
help="Common seed used by random number generator.")
args = vars(parser.parse_args())
print(__doc__)
X_train, X_test, y_train, y_test = load_data(order=args["order"])
print("")
print("Dataset statistics:")
print("===================")
print("%s %d" % ("number of features:".ljust(25), X_train.shape[1]))
print("%s %d" % ("number of classes:".ljust(25), np.unique(y_train).size))
print("%s %s" % ("data type:".ljust(25), X_train.dtype))
print("%s %d (size=%dMB)" % ("number of train samples:".ljust(25),
X_train.shape[0], int(X_train.nbytes / 1e6)))
print("%s %d (size=%dMB)" % ("number of test samples:".ljust(25),
X_test.shape[0], int(X_test.nbytes / 1e6)))
print()
print("Training Classifiers")
print("====================")
error, train_time, test_time = {}, {}, {}
for name in sorted(args["classifiers"]):
print("Training %s ... " % name, end="")
estimator = ESTIMATORS[name]
estimator_params = estimator.get_params()
estimator.set_params(**{p: args["random_seed"]
for p in estimator_params
if p.endswith("random_state")})
if "n_jobs" in estimator_params:
estimator.set_params(n_jobs=args["n_jobs"])
time_start = time()
estimator.fit(X_train, y_train)
train_time[name] = time() - time_start
time_start = time()
y_pred = estimator.predict(X_test)
test_time[name] = time() - time_start
error[name] = zero_one_loss(y_test, y_pred)
print("done")
print()
print("Classification performance:")
print("===========================")
print("{0: <24} {1: >10} {2: >11} {3: >12}"
"".format("Classifier ", "train-time", "test-time", "error-rate"))
print("-" * 60)
for name in sorted(args["classifiers"], key=error.get):
print("{0: <23} {1: >10.2f}s {2: >10.2f}s {3: >12.4f}"
"".format(name, train_time[name], test_time[name], error[name]))
print()
|
bsd-3-clause
|
gdementen/larray
|
larray/inout/pandas.py
|
2
|
15308
|
from __future__ import absolute_import, print_function
from itertools import product
import numpy as np
import pandas as pd
from larray.core.array import Array
from larray.core.axis import Axis, AxisCollection
from larray.core.constants import nan
from larray.util.misc import unique
from larray.util.compat import basestring, decode, bytes
def parse(s):
r"""
Used to parse the "folded" axis ticks (usually periods).
"""
# parameters can be strings or numbers
if isinstance(s, basestring):
s = s.strip()
low = s.lower()
if low == 'true':
return True
elif low == 'false':
return False
elif s.isdigit():
return int(s)
else:
try:
return float(s)
except ValueError:
return s
else:
return s
def index_to_labels(idx, sort=True):
r"""
Returns unique labels for each dimension.
"""
if isinstance(idx, pd.core.index.MultiIndex):
if sort:
return list(idx.levels)
else:
return [list(unique(idx.get_level_values(l))) for l in range(idx.nlevels)]
else:
assert isinstance(idx, pd.core.index.Index)
labels = list(idx.values)
return [sorted(labels) if sort else labels]
def cartesian_product_df(df, sort_rows=False, sort_columns=False, fill_value=nan, **kwargs):
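    # Reindex `df` so that its rows cover the full cartesian product of the
    # per-level labels (filling missing combinations with `fill_value`), which is
    # what Array needs; if the frame is already complete it is returned unchanged.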
idx = df.index
labels = index_to_labels(idx, sort=sort_rows)
if isinstance(idx, pd.core.index.MultiIndex):
if sort_rows:
new_index = pd.MultiIndex.from_product(labels)
else:
new_index = pd.MultiIndex.from_tuples(list(product(*labels)))
else:
if sort_rows:
new_index = pd.Index(labels[0], name=idx.name)
else:
new_index = idx
columns = sorted(df.columns) if sort_columns else list(df.columns)
# the prodlen test is meant to avoid the more expensive array_equal test
prodlen = np.prod([len(axis_labels) for axis_labels in labels])
if prodlen == len(df) and columns == list(df.columns) and np.array_equal(idx.values, new_index.values):
return df, labels
return df.reindex(index=new_index, columns=columns, fill_value=fill_value, **kwargs), labels
def from_series(s, sort_rows=False, fill_value=nan, meta=None, **kwargs):
r"""
Converts Pandas Series into Array.
Parameters
----------
s : Pandas Series
Input Pandas Series.
sort_rows : bool, optional
Whether or not to sort the rows alphabetically. Defaults to False.
fill_value : scalar, optional
Value used to fill cells corresponding to label combinations which are not present in the input Series.
Defaults to NaN.
meta : list of pairs or dict or OrderedDict or Metadata, optional
Metadata (title, description, author, creation_date, ...) associated with the array.
Keys must be strings. Values must be of type string, int, float, date, time or datetime.
Returns
-------
Array
See Also
--------
Array.to_series
Examples
--------
>>> from larray import ndtest
>>> s = ndtest((2, 2, 2), dtype=float).to_series()
>>> s # doctest: +NORMALIZE_WHITESPACE
a b c
a0 b0 c0 0.0
c1 1.0
b1 c0 2.0
c1 3.0
a1 b0 c0 4.0
c1 5.0
b1 c0 6.0
c1 7.0
dtype: float64
>>> from_series(s)
a b\c c0 c1
a0 b0 0.0 1.0
a0 b1 2.0 3.0
a1 b0 4.0 5.0
a1 b1 6.0 7.0
"""
if isinstance(s.index, pd.core.index.MultiIndex):
# TODO: use argument sort=False when it will be available
# (see https://github.com/pandas-dev/pandas/issues/15105)
df = s.unstack(level=-1, fill_value=fill_value)
# pandas (un)stack and pivot(_table) methods return a Dataframe/Series with sorted index and columns
if not sort_rows:
labels = index_to_labels(s.index, sort=False)
if isinstance(df.index, pd.core.index.MultiIndex):
index = pd.MultiIndex.from_tuples(list(product(*labels[:-1])), names=s.index.names[:-1])
else:
index = labels[0]
columns = labels[-1]
df = df.reindex(index=index, columns=columns, fill_value=fill_value)
return from_frame(df, sort_rows=sort_rows, sort_columns=sort_rows, fill_value=fill_value, meta=meta, **kwargs)
else:
name = decode(s.name, 'utf8') if s.name is not None else decode(s.index.name, 'utf8')
if sort_rows:
s = s.sort_index()
return Array(s.values, Axis(s.index.values, name), meta=meta)
def from_frame(df, sort_rows=False, sort_columns=False, parse_header=False, unfold_last_axis_name=False,
fill_value=nan, meta=None, cartesian_prod=True, **kwargs):
r"""
Converts Pandas DataFrame into Array.
Parameters
----------
df : pandas.DataFrame
Input dataframe. By default, name and labels of the last axis are defined by the name and labels of the
columns Index of the dataframe unless argument unfold_last_axis_name is set to True.
sort_rows : bool, optional
Whether or not to sort the rows alphabetically (sorting is more efficient than not sorting).
        Must be False if `cartesian_prod` is set to False.
Defaults to False.
sort_columns : bool, optional
Whether or not to sort the columns alphabetically (sorting is more efficient than not sorting).
        Must be False if `cartesian_prod` is set to False.
Defaults to False.
parse_header : bool, optional
Whether or not to parse columns labels. Pandas treats column labels as strings.
If True, column labels are converted into int, float or boolean when possible. Defaults to False.
unfold_last_axis_name : bool, optional
Whether or not to extract the names of the last two axes by splitting the name of the last index column of the
dataframe using ``\``. Defaults to False.
fill_value : scalar, optional
Value used to fill cells corresponding to label combinations which are not present in the input DataFrame.
Defaults to NaN.
meta : list of pairs or dict or OrderedDict or Metadata, optional
Metadata (title, description, author, creation_date, ...) associated with the array.
Keys must be strings. Values must be of type string, int, float, date, time or datetime.
cartesian_prod : bool, optional
Whether or not to expand the dataframe to a cartesian product dataframe as needed by Array.
This is an expensive operation but is absolutely required if you cannot guarantee your dataframe is already
        well formed. If False, arguments `sort_rows` and `sort_columns` must be set to False.
Defaults to True.
Returns
-------
Array
See Also
--------
Array.to_frame
Examples
--------
>>> from larray import ndtest
>>> df = ndtest((2, 2, 2)).to_frame()
>>> df # doctest: +NORMALIZE_WHITESPACE
c c0 c1
a b
a0 b0 0 1
b1 2 3
a1 b0 4 5
b1 6 7
>>> from_frame(df)
a b\c c0 c1
a0 b0 0 1
a0 b1 2 3
a1 b0 4 5
a1 b1 6 7
Names of the last two axes written as ``before_last_axis_name\\last_axis_name``
>>> df = ndtest((2, 2, 2)).to_frame(fold_last_axis_name=True)
>>> df # doctest: +NORMALIZE_WHITESPACE
c0 c1
a b\c
a0 b0 0 1
b1 2 3
a1 b0 4 5
b1 6 7
>>> from_frame(df, unfold_last_axis_name=True)
a b\c c0 c1
a0 b0 0 1
a0 b1 2 3
a1 b0 4 5
a1 b1 6 7
"""
axes_names = [decode(name, 'utf8') if isinstance(name, bytes) else name
for name in df.index.names]
# handle 2 or more dimensions with the last axis name given using \
if unfold_last_axis_name:
if isinstance(axes_names[-1], basestring) and '\\' in axes_names[-1]:
last_axes = [name.strip() for name in axes_names[-1].split('\\')]
axes_names = axes_names[:-1] + last_axes
else:
axes_names += [None]
else:
axes_names += [df.columns.name]
if cartesian_prod:
df, axes_labels = cartesian_product_df(df, sort_rows=sort_rows, sort_columns=sort_columns,
fill_value=fill_value, **kwargs)
else:
if sort_rows or sort_columns:
            raise ValueError('sort_rows and sort_columns cannot be used when cartesian_prod is set to False. '
                             'Please call the method sort_axes on the returned array to sort rows or columns')
axes_labels = index_to_labels(df.index, sort=False)
# Pandas treats column labels as column names (strings) so we need to convert them to values
last_axis_labels = [parse(cell) for cell in df.columns.values] if parse_header else list(df.columns.values)
axes_labels.append(last_axis_labels)
axes = AxisCollection([Axis(labels, name) for labels, name in zip(axes_labels, axes_names)])
data = df.values.reshape(axes.shape)
return Array(data, axes, meta=meta)
def set_dataframe_index_by_position(df, index_col_indices):
"""
equivalent to Dataframe.set_index but with column indices, not column labels
This is necessary to support creating an index from columns without a name or with duplicate names.
Returns a new Dataframe
"""
if not isinstance(index_col_indices, list):
index_col_indices = [index_col_indices]
index_col_indices_set = set(index_col_indices)
index_col_values = [df.iloc[:, i] for i in index_col_indices]
non_index_col_indices = [i for i in range(len(df.columns)) if i not in index_col_indices_set]
# drop the index columns from the "normal" columns of the dataframe
df = df.iloc[:, non_index_col_indices]
# add them back as index columns
df.set_index(index_col_values, inplace=True)
return df
def df_asarray(df, sort_rows=False, sort_columns=False, raw=False, parse_header=True, wide=True, cartesian_prod=True,
**kwargs):
r"""
Prepare Pandas DataFrame and then convert it into Array.
Parameters
----------
df : Pandas DataFrame
Input dataframe.
sort_rows : bool, optional
Whether or not to sort the rows alphabetically (sorting is more efficient than not sorting).
        Must be False if `cartesian_prod` is set to False.
Defaults to False.
sort_columns : bool, optional
Whether or not to sort the columns alphabetically (sorting is more efficient than not sorting).
        Must be False if `cartesian_prod` is set to False.
Defaults to False.
raw : bool, optional
Whether or not to consider the input dataframe as a raw dataframe, i.e. read without index at all.
If True, build the first N-1 axes of the output array from the first N-1 dataframe columns. Defaults to False.
parse_header : bool, optional
Whether or not to parse columns labels. Pandas treats column labels as strings.
If True, column labels are converted into int, float or boolean when possible. Defaults to True.
wide : bool, optional
Whether or not to assume the array is stored in "wide" format.
If False, the array is assumed to be stored in "narrow" format: one column per axis plus one value column.
Defaults to True.
cartesian_prod : bool, optional
Whether or not to expand the dataframe to a cartesian product dataframe as needed by Array.
This is an expensive operation but is absolutely required if you cannot guarantee your dataframe is already
        well formed. If False, arguments `sort_rows` and `sort_columns` must be set to False.
Defaults to True.
Returns
-------
Array
"""
# we could inline df_asarray into the functions that use it, so that the original (non-cartesian) df is freed from
# memory at this point, but it would be much uglier and would not lower the peak memory usage which happens during
# cartesian_product_df.reindex
# raw = True: the dataframe was read without index at all (ie 2D dataframe),
# irrespective of the actual data dimensionality
if raw:
columns = df.columns.values.tolist()
if wide:
try:
# take the first column which contains '\'
pos_last = next(i for i, v in enumerate(columns) if isinstance(v, basestring) and '\\' in v)
except StopIteration:
# we assume first column will not contain data
pos_last = 0
# This is required to handle int column names (otherwise we can simply use column positions in set_index).
# This is NOT the same as df.columns[list(range(...))] !
df = set_dataframe_index_by_position(df, list(range(pos_last + 1)))
else:
df = set_dataframe_index_by_position(df, list(range(len(df.columns) - 1)))
series = df.iloc[:, -1]
series.name = df.index.name
return from_series(series, sort_rows=sort_columns, **kwargs)
# handle 1D arrays
if len(df) == 1 and (pd.isnull(df.index.values[0]) or
(isinstance(df.index.values[0], basestring) and df.index.values[0].strip() == '')):
if parse_header:
df.columns = pd.Index([parse(cell) for cell in df.columns.values], name=df.columns.name)
series = df.iloc[0]
series.name = df.index.name
if sort_rows:
raise ValueError('sort_rows=True is not valid for 1D arrays. Please use sort_columns instead.')
res = from_series(series, sort_rows=sort_columns)
else:
def parse_axis_name(name):
if isinstance(name, bytes):
name = decode(name, 'utf8')
if not name:
name = None
return name
axes_names = [parse_axis_name(name) for name in df.index.names]
unfold_last_axis_name = isinstance(axes_names[-1], basestring) and '\\' in axes_names[-1]
res = from_frame(df, sort_rows=sort_rows, sort_columns=sort_columns, parse_header=parse_header,
unfold_last_axis_name=unfold_last_axis_name, cartesian_prod=cartesian_prod, **kwargs)
# ugly hack to avoid anonymous axes converted as axes with name 'Unnamed: x' by pandas
# we also take the opportunity to change axes with empty name to real anonymous axes (name is None) to
    # make them roundtrip correctly, based on the assumption that in an in-memory LArray an anonymous axis is more
# likely and useful than an Axis with an empty name.
# TODO : find a more robust and elegant solution
res = res.rename({axis: None for axis in res.axes if isinstance(axis.name, basestring) and
(axis.name == '' or 'Unnamed:' in axis.name)})
return res
|
gpl-3.0
|
ambikeshwar1991/gnuradio
|
gr-filter/examples/decimate.py
|
13
|
5841
|
#!/usr/bin/env python
#
# Copyright 2009,2012 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# GNU Radio is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# GNU Radio is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GNU Radio; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#
from gnuradio import gr
from gnuradio import filter
import sys, time
try:
import scipy
from scipy import fftpack
except ImportError:
print "Error: Program requires scipy (see: www.scipy.org)."
sys.exit(1)
try:
import pylab
from pylab import mlab
except ImportError:
print "Error: Program requires matplotlib (see: matplotlib.sourceforge.net)."
sys.exit(1)
class pfb_top_block(gr.top_block):
def __init__(self):
gr.top_block.__init__(self)
self._N = 10000000 # number of samples to use
self._fs = 10000 # initial sampling rate
self._decim = 20 # Decimation rate
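        # With fs = 10 kHz and a decimation of 20, the output sample rate is 500 Hz.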
# Generate the prototype filter taps for the decimators with a 200 Hz bandwidth
self._taps = filter.firdes.low_pass_2(1, self._fs,
200, 150,
attenuation_dB=120,
window=filter.firdes.WIN_BLACKMAN_hARRIS)
# Calculate the number of taps per channel for our own information
tpc = scipy.ceil(float(len(self._taps)) / float(self._decim))
print "Number of taps: ", len(self._taps)
print "Number of filters: ", self._decim
print "Taps per channel: ", tpc
# Build the input signal source
# We create a list of freqs, and a sine wave is generated and added to the source
# for each one of these frequencies.
self.signals = list()
self.add = gr.add_cc()
freqs = [10, 20, 2040]
for i in xrange(len(freqs)):
self.signals.append(gr.sig_source_c(self._fs, gr.GR_SIN_WAVE, freqs[i], 1))
self.connect(self.signals[i], (self.add,i))
self.head = gr.head(gr.sizeof_gr_complex, self._N)
# Construct a PFB decimator filter
self.pfb = filter.pfb.decimator_ccf(self._decim, self._taps, 0)
# Construct a standard FIR decimating filter
self.dec = filter.fir_filter_ccf(self._decim, self._taps)
self.snk_i = gr.vector_sink_c()
# Connect the blocks
self.connect(self.add, self.head, self.pfb)
self.connect(self.add, self.snk_i)
        # Create the sink for the decimated signal
self.snk = gr.vector_sink_c()
self.connect(self.pfb, self.snk)
def main():
tb = pfb_top_block()
tstart = time.time()
tb.run()
tend = time.time()
print "Run time: %f" % (tend - tstart)
if 1:
fig1 = pylab.figure(1, figsize=(16,9))
fig2 = pylab.figure(2, figsize=(16,9))
Ns = 10000
Ne = 10000
fftlen = 8192
winfunc = scipy.blackman
fs = tb._fs
# Plot the input to the decimator
d = tb.snk_i.data()[Ns:Ns+Ne]
sp1_f = fig1.add_subplot(2, 1, 1)
X,freq = mlab.psd(d, NFFT=fftlen, noverlap=fftlen/4, Fs=fs,
window = lambda d: d*winfunc(fftlen),
scale_by_freq=True)
X_in = 10.0*scipy.log10(abs(fftpack.fftshift(X)))
f_in = scipy.arange(-fs/2.0, fs/2.0, fs/float(X_in.size))
p1_f = sp1_f.plot(f_in, X_in, "b")
sp1_f.set_xlim([min(f_in), max(f_in)+1])
sp1_f.set_ylim([-200.0, 50.0])
sp1_f.set_title("Input Signal", weight="bold")
sp1_f.set_xlabel("Frequency (Hz)")
sp1_f.set_ylabel("Power (dBW)")
Ts = 1.0/fs
Tmax = len(d)*Ts
t_in = scipy.arange(0, Tmax, Ts)
x_in = scipy.array(d)
sp1_t = fig1.add_subplot(2, 1, 2)
p1_t = sp1_t.plot(t_in, x_in.real, "b")
p1_t = sp1_t.plot(t_in, x_in.imag, "r")
sp1_t.set_ylim([-tb._decim*1.1, tb._decim*1.1])
sp1_t.set_xlabel("Time (s)")
sp1_t.set_ylabel("Amplitude")
# Plot the output of the decimator
fs_o = tb._fs / tb._decim
sp2_f = fig2.add_subplot(2, 1, 1)
d = tb.snk.data()[Ns:Ns+Ne]
X,freq = mlab.psd(d, NFFT=fftlen, noverlap=fftlen/4, Fs=fs_o,
window = lambda d: d*winfunc(fftlen),
scale_by_freq=True)
X_o = 10.0*scipy.log10(abs(fftpack.fftshift(X)))
f_o = scipy.arange(-fs_o/2.0, fs_o/2.0, fs_o/float(X_o.size))
p2_f = sp2_f.plot(f_o, X_o, "b")
sp2_f.set_xlim([min(f_o), max(f_o)+1])
sp2_f.set_ylim([-200.0, 50.0])
sp2_f.set_title("PFB Decimated Signal", weight="bold")
sp2_f.set_xlabel("Frequency (Hz)")
sp2_f.set_ylabel("Power (dBW)")
Ts_o = 1.0/fs_o
Tmax_o = len(d)*Ts_o
x_o = scipy.array(d)
t_o = scipy.arange(0, Tmax_o, Ts_o)
sp2_t = fig2.add_subplot(2, 1, 2)
p2_t = sp2_t.plot(t_o, x_o.real, "b-o")
p2_t = sp2_t.plot(t_o, x_o.imag, "r-o")
sp2_t.set_ylim([-2.5, 2.5])
sp2_t.set_xlabel("Time (s)")
sp2_t.set_ylabel("Amplitude")
pylab.show()
if __name__ == "__main__":
try:
main()
except KeyboardInterrupt:
pass
|
gpl-3.0
|
datapythonista/pandas
|
pandas/tests/indexes/test_indexing.py
|
1
|
8571
|
"""
test_indexing tests the following Index methods:
__getitem__
get_loc
get_value
__contains__
take
where
get_indexer
slice_locs
asof_locs
The corresponding tests.indexes.[index_type].test_indexing files
contain tests for the corresponding methods specific to those Index subclasses.
"""
import numpy as np
import pytest
from pandas.errors import InvalidIndexError
from pandas import (
DatetimeIndex,
Float64Index,
Index,
Int64Index,
IntervalIndex,
MultiIndex,
PeriodIndex,
Series,
TimedeltaIndex,
UInt64Index,
)
import pandas._testing as tm
class TestTake:
def test_take_invalid_kwargs(self, index):
indices = [1, 2]
msg = r"take\(\) got an unexpected keyword argument 'foo'"
with pytest.raises(TypeError, match=msg):
index.take(indices, foo=2)
msg = "the 'out' parameter is not supported"
with pytest.raises(ValueError, match=msg):
index.take(indices, out=indices)
msg = "the 'mode' parameter is not supported"
with pytest.raises(ValueError, match=msg):
index.take(indices, mode="clip")
def test_take(self, index):
indexer = [4, 3, 0, 2]
if len(index) < 5:
# not enough elements; ignore
return
result = index.take(indexer)
expected = index[indexer]
assert result.equals(expected)
if not isinstance(index, (DatetimeIndex, PeriodIndex, TimedeltaIndex)):
# GH 10791
msg = r"'(.*Index)' object has no attribute 'freq'"
with pytest.raises(AttributeError, match=msg):
index.freq
def test_take_minus1_without_fill(self, index):
# -1 does not get treated as NA unless allow_fill=True is passed
if len(index) == 0:
# Test is not applicable
return
result = index.take([0, 0, -1])
expected = index.take([0, 0, len(index) - 1])
tm.assert_index_equal(result, expected)
class TestContains:
@pytest.mark.parametrize(
"index,val",
[
(Index([0, 1, 2]), 2),
(Index([0, 1, "2"]), "2"),
(Index([0, 1, 2, np.inf, 4]), 4),
(Index([0, 1, 2, np.nan, 4]), 4),
(Index([0, 1, 2, np.inf]), np.inf),
(Index([0, 1, 2, np.nan]), np.nan),
],
)
def test_index_contains(self, index, val):
assert val in index
@pytest.mark.parametrize(
"index,val",
[
(Index([0, 1, 2]), "2"),
(Index([0, 1, "2"]), 2),
(Index([0, 1, 2, np.inf]), 4),
(Index([0, 1, 2, np.nan]), 4),
(Index([0, 1, 2, np.inf]), np.nan),
(Index([0, 1, 2, np.nan]), np.inf),
# Checking if np.inf in Int64Index should not cause an OverflowError
# Related to GH 16957
(Int64Index([0, 1, 2]), np.inf),
(Int64Index([0, 1, 2]), np.nan),
(UInt64Index([0, 1, 2]), np.inf),
(UInt64Index([0, 1, 2]), np.nan),
],
)
def test_index_not_contains(self, index, val):
assert val not in index
@pytest.mark.parametrize(
"index,val", [(Index([0, 1, "2"]), 0), (Index([0, 1, "2"]), "2")]
)
def test_mixed_index_contains(self, index, val):
# GH#19860
assert val in index
@pytest.mark.parametrize(
"index,val", [(Index([0, 1, "2"]), "1"), (Index([0, 1, "2"]), 2)]
)
def test_mixed_index_not_contains(self, index, val):
# GH#19860
assert val not in index
def test_contains_with_float_index(self):
# GH#22085
integer_index = Int64Index([0, 1, 2, 3])
uinteger_index = UInt64Index([0, 1, 2, 3])
float_index = Float64Index([0.1, 1.1, 2.2, 3.3])
for index in (integer_index, uinteger_index):
assert 1.1 not in index
assert 1.0 in index
assert 1 in index
assert 1.1 in float_index
assert 1.0 not in float_index
assert 1 not in float_index
def test_contains_requires_hashable_raises(self, index):
if isinstance(index, MultiIndex):
return # TODO: do we want this to raise?
msg = "unhashable type: 'list'"
with pytest.raises(TypeError, match=msg):
[] in index
msg = "|".join(
[
r"unhashable type: 'dict'",
r"must be real number, not dict",
r"an integer is required",
r"\{\}",
r"pandas\._libs\.interval\.IntervalTree' is not iterable",
]
)
with pytest.raises(TypeError, match=msg):
{} in index._engine
class TestGetValue:
@pytest.mark.parametrize(
"index", ["string", "int", "datetime", "timedelta"], indirect=True
)
def test_get_value(self, index):
# TODO: Remove function? GH#19728
values = np.random.randn(100)
value = index[67]
with pytest.raises(AttributeError, match="has no attribute '_values'"):
# Index.get_value requires a Series, not an ndarray
with tm.assert_produces_warning(FutureWarning):
index.get_value(values, value)
with tm.assert_produces_warning(FutureWarning):
result = index.get_value(Series(values, index=values), value)
tm.assert_almost_equal(result, values[67])
class TestGetIndexer:
def test_get_indexer_base(self, index):
if index._index_as_unique:
expected = np.arange(index.size, dtype=np.intp)
actual = index.get_indexer(index)
tm.assert_numpy_array_equal(expected, actual)
else:
msg = "Reindexing only valid with uniquely valued Index objects"
with pytest.raises(InvalidIndexError, match=msg):
index.get_indexer(index)
with pytest.raises(ValueError, match="Invalid fill method"):
index.get_indexer(index, method="invalid")
def test_get_indexer_consistency(self, index):
# See GH#16819
if index._index_as_unique:
indexer = index.get_indexer(index[0:2])
assert isinstance(indexer, np.ndarray)
assert indexer.dtype == np.intp
else:
msg = "Reindexing only valid with uniquely valued Index objects"
with pytest.raises(InvalidIndexError, match=msg):
index.get_indexer(index[0:2])
indexer, _ = index.get_indexer_non_unique(index[0:2])
assert isinstance(indexer, np.ndarray)
assert indexer.dtype == np.intp
class TestConvertSliceIndexer:
def test_convert_almost_null_slice(self, index):
# slice with None at both ends, but not step
key = slice(None, None, "foo")
if isinstance(index, IntervalIndex):
msg = "label-based slicing with step!=1 is not supported for IntervalIndex"
with pytest.raises(ValueError, match=msg):
index._convert_slice_indexer(key, "loc")
else:
msg = "'>=' not supported between instances of 'str' and 'int'"
with pytest.raises(TypeError, match=msg):
index._convert_slice_indexer(key, "loc")
class TestPutmask:
def test_putmask_with_wrong_mask(self, index):
# GH#18368
if not len(index):
return
fill = index[0]
msg = "putmask: mask and data must be the same size"
with pytest.raises(ValueError, match=msg):
index.putmask(np.ones(len(index) + 1, np.bool_), fill)
with pytest.raises(ValueError, match=msg):
index.putmask(np.ones(len(index) - 1, np.bool_), fill)
with pytest.raises(ValueError, match=msg):
index.putmask("foo", fill)
@pytest.mark.parametrize(
"idx", [Index([1, 2, 3]), Index([0.1, 0.2, 0.3]), Index(["a", "b", "c"])]
)
def test_getitem_deprecated_float(idx):
# https://github.com/pandas-dev/pandas/issues/34191
with tm.assert_produces_warning(FutureWarning):
result = idx[1.0]
expected = idx[1]
assert result == expected
def test_maybe_cast_slice_bound_kind_deprecated(index):
if not len(index):
return
with tm.assert_produces_warning(FutureWarning):
# passed as keyword
index._maybe_cast_slice_bound(index[0], "left", kind="loc")
with tm.assert_produces_warning(FutureWarning):
# pass as positional
index._maybe_cast_slice_bound(index[0], "left", "loc")
|
bsd-3-clause
|
rdipietro/pyrvm
|
rvm.py
|
1
|
8538
|
import itertools
import numpy as np
from sklearn.metrics import pairwise
import pulp
class RVM(object):
"""Ranking Vector Machine
Learn and predict orderings of vectors using large-margin criteria.
This is an implementation of the ranking-vector-machine algorithm from
Yu, Hwanjo and Kim, Sungchul. "SVM Tutorial: Classification, Regression and
Ranking." Handbook of Natural Computing. Springer Berlin Heidelberg, 2012.
Parameters
----------
C : float, optional (default=1.0)
Slack parameter.
kernel : string, optional (default='linear')
Specifies the kernel type to be used in the algorithm. 'linear',
'rbf', 'chi2', or a callable are common options. See
`sklearn.metrics.pairwise.pairwise_kernels`.
solver : pulp.solvers.LpSolver, optional (default=pulp.solvers.GLPK(msg=0))
The solver used for the linear program. See `pulp.solvers`.
verbose : boolean, optional (default=False)
Examples
--------
>>> from math import cos, sin, pi
>>> import numpy as np
>>> from pyrvm import RVM
>>> # Create points that spiral along the z axis, with higher rankings at lower
>>> # values of z.
>>> n_points = 50
>>> points = [[cos(2*pi*5*t), sin(2*pi*5*t), t] for t in np.linspace(0, 1, n_points)]
>>> X = np.array(points)
    >>> # This algorithm is sensitive to constant shifts. To see this, remove
    >>> # the mean subtraction below; the algorithm will then fail.
    >>> X = X - X.mean(0)
>>> y = range(n_points)
>>> # Train a linear RVM using half of the data. We keep the slack penalty C high
>>> # here because we know that the points can be ranked linearly with no errors.
>>> ranker = RVM(C=100.0)
>>> ranker.fit(X[0::2, :], y[0::2])
    <rvm.RVM at 0x116f2d0d0>
>>> # Since we used a linear kernel, we can determine the weight vector in the
>>> # original space that determines ranking. It should be in the direction
>>> # of -z.
    >>> print sum(ranker._alpha[ranker._alpha != 0, np.newaxis] * ranker._rank_vectors, 0)
[ 1.88497489e-05 1.58543033e-05 -2.45000255e+01]
>>> # Now let's see how we do on the other half of the data.
>>> print ranker.predict(X[1::2, :])
[ 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24]
"""
def __init__(self, C=1.0, kernel='linear',
solver=pulp.solvers.GLPK(msg=0), verbose=False):
self.C = C
self.kernel = kernel
self.solver = solver
self.verbose = verbose
self._linprog = None
self._alpha = None
self._rank_vectors = None
def decision_function(self, X):
"""Scores related to the ordering of the samples X.
Note that higher scores correspond to higher rankings. For example,
for three ordered samples (say ranks 1, 2, 3) we would expect the
corresponding scores to decrease (say 9.5, 6.2, 3.5).
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Training vectors.
Returns
-------
scores : array-like, shape = [n_samples]
The higher the score, the higher the rank. For example,
if the x_1's rank is 1 and x_2's rank is 2, then
x_1's score will be higher than x_2's score.
"""
if self._rank_vectors is None:
raise Exception('Attempted to predict before fitting model')
alpha = self._alpha
gram_matrix = pairwise.pairwise_kernels(self._rank_vectors, X, metric=self.kernel)
scores = np.sum(alpha[alpha != 0, np.newaxis] * gram_matrix, 0)
return scores
def fit(self, X, y):
"""Fit the RVM model to the given training data.
Pairs of unequal ordering are used for training. For example, if
rank(x_1) = rank(x_2) = 1 and rank(x_3) = 2, then the pairs
(x_1, x_3) and (x_2, x_3) will be used to train the model.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Training vectors.
y : array-like, shape = [n_samples]
Training ordering with one rank per sample.
Returns
-------
self : object
Returns self.
"""
gram_matrix = pairwise.pairwise_kernels(X, metric=self.kernel)
index_pairs = ranked_index_pairs(y)
n_points = X.shape[0]
n_pairs = len(index_pairs)
if self.verbose:
print 'rvm: Setting up the linear program and its objective..'
# Set up the linear program and specify the variables, their domains,
# and the objective. (We only specify lower bounds because the upper
# bound is infinity by default.)
self._linprog = pulp.LpProblem('RVM', pulp.LpMinimize)
alpha = [pulp.LpVariable('alpha%d' % i, 0) for i in xrange(n_points)]
alpha_coefs = [1.0]*n_points
xi = [pulp.LpVariable('xi%d' % i, 0) for i in xrange(n_pairs)]
xi_coefs = [self.C]*n_pairs
self._linprog += pulp.lpDot(alpha_coefs + xi_coefs, alpha + xi)
if self.verbose:
print 'rvm: Adding constraints to the linear program..'
# Add the constraints, one for each pair formed above.
for l, (u, v) in enumerate(index_pairs):
variables = alpha + [xi[l]]
coefs = list(gram_matrix[:, u] - gram_matrix[:, v]) + [1.0]
# LpAffineExpression isn't as clear as using lpDot, but for some
# reason it's faster.
self._linprog += pulp.LpAffineExpression(zip(variables, coefs)) >= 1
if self.verbose:
print "rvm: Solving linear program.."
status = self._linprog.solve(self.solver)
if status != 1:
raise Exception('rvm: Unknown error occurred while trying to solve linear program')
self._alpha = np.array([pulp.value(a) for a in alpha])
self._rank_vectors = X[self._alpha != 0, :]
if self.verbose:
print 'rvm: Fit complete.'
return self
def predict(self, X):
"""Compute the ordering of the samples X.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Returns
-------
y : array-like, shape = [n_samples]
"""
scores = self.decision_function(X)
return np.argsort(-scores)
def score(self, X, y):
"""Performance metric based on Kendall's tau metric.
This is (the number of true pairs we predicted correctly) / (the total
number of true pairs).
Parameters
----------
X : array-like, shape = [n_samples, n_features]
y : array-like, shape = [n_samples]
True ordering for X.
Returns
-------
score : float
"""
y_pred = self.predict(X)
return kendall_tau_metric(y, y_pred)
def ranked_index_pairs(y):
"""Return all index pairs that satisfy y[i] < y[j].
Parameters
----------
y : array-like, shape = [n_samples]
An ordering
Returns
-------
index_pairs : list
List of tuples, each being one index pair (i, j)
"""
index_pairs = []
for ranking in sorted(np.unique(y)):
index_pairs += itertools.product(np.flatnonzero(y == ranking), np.flatnonzero(ranking < y))
return index_pairs
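# Illustrative example (added for clarity, not part of the original module): for
# y = np.array([1, 1, 2]) this returns [(0, 2), (1, 2)], i.e. every index pair
# (i, j) with y[i] < y[j].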
def kendall_tau_metric(y_true, y_pred):
"""Performance metric based on Kendall's tau metric.
This is (the number of true pairs we predicted correctly) / (the total
number of true pairs).
Parameters
----------
y_true : array-like, shape = [n_samples]
True rankings.
y_pred : array-like, shape = [n_samples]
Predicted rankings.
Returns
-------
score : float
"""
true_pairs = ranked_index_pairs(y_true)
predicted_pairs = ranked_index_pairs(y_pred)
incorrect_pairs = list(set(true_pairs) - set(predicted_pairs))
return 1 - float(len(incorrect_pairs))/len(true_pairs)
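# Illustrative example (added for clarity, not part of the original module): with
# y_true = np.array([1, 2, 3]) and y_pred = np.array([1, 3, 2]) the true pairs are
# {(0, 1), (0, 2), (1, 2)} and the predicted pairs are {(0, 1), (0, 2), (2, 1)},
# so one true pair is missed and the score is 1 - 1/3 ~= 0.667.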
|
mit
|
lbeltrame/bcbio-nextgen
|
bcbio/qc/qualimap.py
|
1
|
18318
|
"""Quality control using Qualimap.
http://qualimap.bioinfo.cipf.es/
"""
import glob
import os
import shutil
import pandas as pd
import pybedtools
import toolz as tz
import toolz.dicttoolz as dtz
from bcbio.log import logger
from bcbio import bam, utils
from bcbio.bam import readstats
from bcbio.ngsalign import postalign
from bcbio.provenance import do
from bcbio.pipeline import datadict as dd
from bcbio.pipeline import config_utils
from bcbio.distributed.transaction import file_transaction
from bcbio.rnaseq import gtf, salmon
from bcbio.variation import bedutils
# ## Standard Qualimap
def run(bam_file, data, out_dir):
"""Run qualimap to assess alignment quality metrics.
"""
    # Qualimap results should be saved to a directory named after the sample.
    # MultiQC (for parsing additional data) picks the sample name from the directory as follows:
# <sample name>/raw_data_qualimapReport/insert_size_histogram.txt
results_dir = os.path.join(out_dir, dd.get_sample_name(data))
resources = config_utils.get_resources("qualimap", data["config"])
options = " ".join(resources.get("options", ""))
results_file = os.path.join(results_dir, "genome_results.txt")
report_file = os.path.join(results_dir, "qualimapReport.html")
utils.safe_makedir(results_dir)
pdf_file = "qualimapReport.pdf"
if not utils.file_exists(results_file) and not utils.file_exists(os.path.join(results_dir, pdf_file)):
if "qualimap_full" in tz.get_in(("config", "algorithm", "tools_on"), data, []):
logger.info("Full qualimap analysis for %s may be slow." % bam_file)
ds_bam = bam_file
else:
ds_bam = bam.downsample(bam_file, data, 1e7, work_dir=out_dir)
bam_file = ds_bam if ds_bam else bam_file
if options.find("PDF") > -1:
options = "%s -outfile %s" % (options, pdf_file)
num_cores = data["config"]["algorithm"].get("num_cores", 1)
qualimap = config_utils.get_program("qualimap", data["config"])
max_mem = config_utils.adjust_memory(resources.get("memory", "1G"),
num_cores)
with file_transaction(data, results_dir) as tx_results_dir:
utils.safe_makedir(tx_results_dir)
export = "%s%s export JAVA_OPTS='-Xms32m -Xmx%s -Djava.io.tmpdir=%s' && " % (
utils.java_freetype_fix(), utils.local_path_export(), max_mem, tx_results_dir)
cmd = ("unset DISPLAY && {export} {qualimap} bamqc -bam {bam_file} -outdir {tx_results_dir} "
"--skip-duplicated --skip-dup-mode 0 "
"-nt {num_cores} {options}")
species = None
if (tz.get_in(("genome_resources", "aliases", "human"), data, "")
or dd.get_genome_build(data).startswith(("hg", "GRCh"))):
species = "HUMAN"
elif dd.get_genome_build(data).startswith(("mm", "GRCm")):
species = "MOUSE"
if species in ["HUMAN", "MOUSE"]:
cmd += " -gd {species}"
regions = (dd.get_coverage(data) if dd.get_coverage(data) not in [None, False, "None"]
else dd.get_variant_regions_merged(data))
if regions:
regions = bedutils.merge_overlaps(bedutils.clean_file(regions, data), data)
bed6_regions = _bed_to_bed6(regions, out_dir)
cmd += " -gff {bed6_regions}"
bcbio_env = utils.get_bcbio_env()
do.run(cmd.format(**locals()), "Qualimap: %s" % dd.get_sample_name(data), env=bcbio_env)
tx_results_file = os.path.join(tx_results_dir, "genome_results.txt")
cmd = "sed -i 's/bam file = .*/bam file = %s.bam/' %s" % (dd.get_sample_name(data), tx_results_file)
do.run(cmd, "Fix Name Qualimap for {}".format(dd.get_sample_name(data)))
# Qualimap output folder (results_dir) needs to be named after the sample (see comments above). However, in order
# to keep its name after upload, we need to put the base QC file (results_file) into the root directory (out_dir):
base_results_file = os.path.join(out_dir, os.path.basename(results_file))
shutil.copyfile(results_file, base_results_file)
return {"base": base_results_file,
"secondary": _find_qualimap_secondary_files(results_dir, base_results_file)}
def _parse_qualimap_metrics(report_file, data):
"""Extract useful metrics from the qualimap HTML report file.
"""
if not utils.file_exists(report_file):
return {}
from bs4 import BeautifulSoup
out = {}
parsers = {"Globals": _parse_qualimap_globals,
"Globals (inside of regions)": _parse_qualimap_globals_inregion,
"Coverage": _parse_qualimap_coverage,
"Coverage (inside of regions)": _parse_qualimap_coverage,
"Insert size": _parse_qualimap_insertsize,
"Insert size (inside of regions)": _parse_qualimap_insertsize}
with open(report_file) as in_handle:
root = BeautifulSoup(in_handle.read(), "html.parser")
for table in root.find_all("div", class_="table-summary"):
h3 = table.find("h3")
if h3.text in parsers:
out.update(parsers[h3.text](table.find("table")))
new_names = []
for metric in out:
if "qualimap_full" not in tz.get_in(("config", "algorithm", "tools_on"), data, []):
metric += "_qualimap_1e7reads_est"
new_names.append(metric)
out = dict(zip(new_names, out.values()))
return out
def _parse_num_pct(k, v):
num, pct = v.split(" / ")
return {k: num.replace(",", "").strip(), "%s pct" % k: pct.strip()}
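# Illustrative example (hypothetical values, added for clarity):
# _parse_num_pct("Mapped reads", "1,000 / 50%") returns
# {"Mapped reads": "1000", "Mapped reads pct": "50%"}.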
def _parse_qualimap_globals(table):
"""Retrieve metrics of interest from globals table.
"""
out = {}
want = {"Mapped reads": _parse_num_pct,
"Duplication rate": lambda k, v: {k: v}}
for row in table.find_all("tr"):
col, val = [x.text for x in row.find_all("td")]
if col in want:
out.update(want[col](col, val))
return out
def _parse_qualimap_globals_inregion(table):
"""Retrieve metrics from the global targeted region table.
"""
out = {}
for row in table.find_all("tr"):
col, val = [x.text for x in row.find_all("td")]
if col == "Mapped reads":
out.update(_parse_num_pct("%s (in regions)" % col, val))
return out
def _parse_qualimap_coverage(table):
"""Parse summary qualimap coverage metrics.
"""
out = {}
for row in table.find_all("tr"):
col, val = [x.text for x in row.find_all("td")]
if col == "Mean":
out["Coverage (Mean)"] = val
return out
def _parse_qualimap_insertsize(table):
"""Parse insert size metrics.
"""
out = {}
for row in table.find_all("tr"):
col, val = [x.text for x in row.find_all("td")]
if col == "Median":
out["Insert size (Median)"] = val
return out
def _bed_to_bed6(orig_file, out_dir):
"""Convert bed to required bed6 inputs.
"""
bed6_file = os.path.join(out_dir, "%s-bed6%s" % os.path.splitext(os.path.basename(orig_file)))
if not utils.file_exists(bed6_file):
with open(bed6_file, "w") as out_handle:
for i, region in enumerate(list(x) for x in pybedtools.BedTool(orig_file)):
region = [x for x in list(region) if x]
fillers = [str(i), "1.0", "+"]
full = region + fillers[:6 - len(region)]
out_handle.write("\t".join(full) + "\n")
return bed6_file
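# Illustrative example (hypothetical input, added for clarity): a three-column BED line
# such as "chr1 100 200" in the first region becomes "chr1 100 200 0 1.0 +",
# padded with the row index, a placeholder score and a strand.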
# ## RNAseq Qualimap
def _parse_metrics(metrics):
# skipped metrics can sometimes be in unicode, replace unicode with NA if it exists
# This is removing correct values
# metrics = dtz.valmap(lambda x: 'nan' if isinstance(x, unicode) else x, metrics)
# missing = set(["Genes Detected", "Transcripts Detected", "Mean Per Base Cov."])
correct = set(["rRNA", "rRNA_rate"])
percentages = set(["Intergenic pct", "Intronic pct", "Exonic pct"])
to_change = dict({"5'-3' bias": 1,
"Intergenic pct": "Intergenic Rate",
"Intronic pct": "Intronic Rate",
"Exonic pct": "Exonic Rate",
"Duplication Rate of Mapped": 1,
"Average_insert_size": 1,
})
total = ["Not aligned", "Aligned to genes", "No feature assigned"]
out = {}
def _safe_int(x):
"""Handle non integer values like nan
"""
try:
return int(x)
except ValueError:
return 0
total_reads = sum([_safe_int(metrics[name]) for name in total])
out.update({key: val for key, val in metrics.items() if key in correct})
[metrics.update({name: 1.0 * float(metrics[name]) / 100}) for name in
percentages]
for name in to_change:
if not to_change[name] or metrics[name] is None:
continue
try:
if to_change[name] == 1:
out.update({name: float(metrics[name])})
else:
out.update({to_change[name]: float(metrics[name])})
# if we can't convert metrics[name] to float (?'s or other non-floats)
except ValueError:
continue
return out
def _detect_duplicates(bam_file, out_dir, data):
"""
count duplicate percentage
"""
out_file = os.path.join(out_dir, "dup_metrics.txt")
if not utils.file_exists(out_file):
dup_align_bam = postalign.dedup_bam(bam_file, data)
logger.info("Detecting duplicates in %s." % dup_align_bam)
dup_count = readstats.number_of_mapped_reads(data, dup_align_bam, keep_dups=False)
tot_count = readstats.number_of_mapped_reads(data, dup_align_bam, keep_dups=True)
with file_transaction(data, out_file) as tx_out_file:
with open(tx_out_file, "w") as out_handle:
out_handle.write("%s\n%s\n" % (dup_count, tot_count))
with open(out_file) as in_handle:
dupes = float(next(in_handle).strip())
total = float(next(in_handle).strip())
if total == 0:
rate = "NA"
else:
rate = dupes / total
return {"Duplication Rate of Mapped": rate}
def _transform_browser_coor(rRNA_interval, rRNA_coor):
"""
transform interval format to browser coord: chr:start-end
"""
with open(rRNA_coor, 'w') as out_handle:
with open(rRNA_interval, 'r') as in_handle:
for line in in_handle:
c, bio, source, s, e = line.split("\t")[:5]
if bio.startswith("rRNA"):
out_handle.write(("{0}:{1}-{2}\n").format(c, s, e))
def _detect_rRNA(data, out_dir):
out_file = os.path.join(out_dir, "rRNA_metrics.txt")
if not utils.file_exists(out_file):
gtf_file = dd.get_transcriptome_gtf(data, default=dd.get_gtf_file(data))
quant = tz.get_in(["quant", "tsv"], data)
if not quant:
salmon_dir = dd.get_salmon_dir(data)
if salmon_dir:
quant = os.path.join(salmon_dir, "quant.sf")
logger.info("Calculating RNA-seq rRNA metrics for %s." % quant)
rrna_features = gtf.get_rRNA(gtf_file)
transcripts = set([x[1] for x in rrna_features if x])
if not (transcripts and quant and utils.file_exists(quant)):
return {'rRNA': "NA", "rRNA_rate": "NA"}
sample_table = pd.read_csv(quant, sep="\t")
rrna_exp = list(map(float, sample_table[sample_table["Name"].isin(transcripts)]["NumReads"]))
total_exp = list(map(float, sample_table["NumReads"]))
rrna = sum(rrna_exp)
if sum(total_exp) == 0:
rrna_rate = "NA"
else:
rrna_rate = float(rrna) / sum(total_exp)
with file_transaction(out_file) as tx_out_file:
with open(tx_out_file, "w") as out_handle:
out_handle.write(",".join(["rRNA", str(rrna)]) + "\n")
out_handle.write(",".join(["rRNA_rate", str(rrna_rate)]) + "\n")
return _read_memoized_rrna(out_file)
def _read_memoized_rrna(rrna_file):
rrna_dict = {}
with open(rrna_file) as in_handle:
for line in in_handle:
tokens = line.strip().split(",")
rrna_dict[tokens[0]] = tokens[1]
return rrna_dict
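# Illustrative example (hypothetical values, added for clarity): a memoized
# rRNA_metrics.txt written by _detect_rRNA contains lines such as
#   rRNA,250.0
#   rRNA_rate,0.0012
# which _read_memoized_rrna turns into {"rRNA": "250.0", "rRNA_rate": "0.0012"}.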
def _parse_qualimap_rnaseq(table):
"""
Retrieve metrics of interest from globals table.
"""
out = {}
for row in table.find_all("tr"):
col, val = [x.text for x in row.find_all("td")]
col = col.replace(":", "").strip()
val = val.replace(",", "")
m = {col: val}
if val.find("/") > -1:
m = _parse_num_pct(col, val.replace("%", ""))
out.update(m)
return out
def _parse_rnaseq_qualimap_metrics(report_file):
"""Extract useful metrics from the qualimap HTML report file.
"""
from bs4 import BeautifulSoup
out = {}
parsers = ["Reads alignment", "Reads genomic origin", "Transcript coverage profile"]
with open(report_file) as in_handle:
root = BeautifulSoup(in_handle.read(), "html.parser")
for table in root.find_all("div", class_="table-summary"):
h3 = table.find("h3")
if h3.text in parsers:
out.update(_parse_qualimap_rnaseq(table.find("table")))
return out
def run_rnaseq(bam_file, data, out_dir):
"""
Run qualimap for a rnaseq bam file and parse results
"""
strandedness = {"firststrand": "strand-specific-forward",
"secondstrand": "strand-specific-reverse",
"unstranded": "non-strand-specific"}
    # Qualimap results should be saved to a directory named after the sample.
    # MultiQC (for parsing additional data) picks the sample name from the directory as follows:
# <sample name>/raw_data_qualimapReport/insert_size_histogram.txt
results_dir = os.path.join(out_dir, dd.get_sample_name(data))
results_file = os.path.join(results_dir, "rnaseq_qc_results.txt")
report_file = os.path.join(results_dir, "qualimapReport.html")
config = data["config"]
gtf_file = dd.get_transcriptome_gtf(data, default=dd.get_gtf_file(data))
library = strandedness[dd.get_strandedness(data)]
# don't run qualimap on the full bam by default
if "qualimap_full" in tz.get_in(("config", "algorithm", "tools_on"), data, []):
logger.info(f"Full qualimap analysis for {bam_file} may be slow.")
ds_bam = bam_file
else:
logger.info(f"Downsampling {bam_file} for Qualimap run.")
ds_bam = bam.downsample(bam_file, data, 1e7, work_dir=out_dir)
bam_file = ds_bam if ds_bam else bam_file
if not utils.file_exists(results_file):
with file_transaction(data, results_dir) as tx_results_dir:
utils.safe_makedir(tx_results_dir)
bam.index(bam_file, config)
cmd = _rnaseq_qualimap_cmd(data, bam_file, tx_results_dir, gtf_file, library)
do.run(cmd, "Qualimap for {}".format(dd.get_sample_name(data)))
tx_results_file = os.path.join(tx_results_dir, "rnaseq_qc_results.txt")
cmd = "sed -i 's/bam file = .*/bam file = %s.bam/' %s" % (dd.get_sample_name(data), tx_results_file)
do.run(cmd, "Fix Name Qualimap for {}".format(dd.get_sample_name(data)))
metrics = _parse_rnaseq_qualimap_metrics(report_file)
metrics.update(_detect_duplicates(bam_file, results_dir, data))
metrics.update(_detect_rRNA(data, results_dir))
metrics.update({"Average_insert_size": salmon.estimate_fragment_size(data)})
metrics = _parse_metrics(metrics)
# Qualimap output folder (results_dir) needs to be named after the sample (see comments above). However, in order
# to keep its name after upload, we need to put the base QC file (results_file) into the root directory (out_dir):
base_results_file = os.path.join(out_dir, os.path.basename(results_file))
shutil.copyfile(results_file, base_results_file)
return {"base": base_results_file,
"secondary": _find_qualimap_secondary_files(results_dir, base_results_file),
"metrics": metrics}
def _rnaseq_qualimap_cmd(data, bam_file, out_dir, gtf_file=None, library="non-strand-specific"):
"""
Create command lines for qualimap
"""
config = data["config"]
qualimap = config_utils.get_program("qualimap", config)
resources = config_utils.get_resources("qualimap", config)
num_cores = resources.get("cores", dd.get_num_cores(data))
max_mem = config_utils.adjust_memory(resources.get("memory", "2G"),
num_cores)
export = "%s%s" % (utils.java_freetype_fix(), utils.local_path_export())
export = "%s%s export JAVA_OPTS='-Xms32m -Xmx%s -Djava.io.tmpdir=%s' && " % (
utils.java_freetype_fix(), utils.local_path_export(), max_mem, out_dir)
if library != "non-strand-specific":
logger.info("Qualimap can get the orientation wrong for stranded reads, so we run it in unstranded mode. This gives comparable results to unstranded for RNA-seq data (see https://groups.google.com/forum/#!topic/qualimap/ZGo-k8LGmHQ) for a further explanation.")
library = "non-strand-specific"
paired = " --paired" if bam.is_paired(bam_file) else ""
cmd = ("unset DISPLAY && {export} {qualimap} rnaseq -outdir {out_dir} "
"-a proportional -bam {bam_file} -p {library}{paired} "
"-gtf {gtf_file}").format(**locals())
return cmd
def _find_qualimap_secondary_files(results_dir, base_file):
"""Retrieve additional files, avoiding double uploading the base file.
"""
def not_dup(x):
is_dup = (os.path.basename(x) == os.path.basename(base_file) and
os.path.getsize(x) == os.path.getsize(base_file))
return not is_dup
return list(filter(not_dup,
glob.glob(os.path.join(results_dir, 'qualimapReport.html')) +
glob.glob(os.path.join(results_dir, '*.txt')) +
glob.glob(os.path.join(results_dir, "css", "*")) +
glob.glob(os.path.join(results_dir, "raw_data_qualimapReport", "*")) +
glob.glob(os.path.join(results_dir, "images_qualimapReport", "*"))))
|
mit
|
iaklampanos/bde-pilot-2
|
backend/api_methods.py
|
1
|
39958
|
"""
CLASS INFO
---------------------------------------------------------------------------
This module acts as the model (from the MVC framework) for the SC5 #3 pilot.
---------------------------------------------------------------------------
"""
from Dataset_transformations import Dataset_transformations
from Detection import Detection
import dataset_utils as utils
import numpy as np
from netCDF4 import Dataset
import urllib
import getpass
import os
import math
import datetime
import time
from geojson import Feature, Point, MultiPoint, MultiLineString, LineString, FeatureCollection
import cPickle
from sklearn.preprocessing import maxabs_scale, scale, minmax_scale
from shapely.geometry import shape, Point, Polygon, mapping, MultiPolygon, MultiPoint
import scipy.misc
import json
import threading
import Queue
import itertools
from dbconn import DBConn
from itertools import chain
import re
APPS_ROOT = os.path.dirname(os.path.abspath(__file__))
conn = DBConn().engine
semagrow_batch_size = 100
# TODO: Update function with gdal python API
# This function returns a 72-hour dispersion collapsed into a single frame.
# For this purpose we calculate the integral of the dispersion over time.
def dispersion_integral(dataset_name):
# Load NetCDF file from local path
dataset = Dataset(APPS_ROOT + '/' + dataset_name, 'r')
dsout = Dataset(APPS_ROOT + '/' + 'int_' + dataset_name,
'w', format='NETCDF3_CLASSIC')
# Retrieve both pollutants
c137 = dataset.variables['C137'][:]
i131 = dataset.variables['I131'][:]
# Calculate their sum
c137 = np.sum(c137, axis=0).reshape(501, 501)
i131 = np.sum(i131, axis=0).reshape(501, 501)
    # Write the dispersion integrals to disk in NetCDF format.
    # We need the dispersion integrals in NetCDF format because we use the
    # integrals for visualization: to visualize geographical information in our
    # application we use a set of tools accessible through the OS, such as
    # gdal_translate.
# Copy attributes from original file
for gattr in dataset.ncattrs():
gvalue = dataset.getncattr(gattr)
dsout.setncattr(gattr, gvalue)
# Copy dimensions from original file
for dname, dim in dataset.dimensions.iteritems():
if dname == 'time':
dsout.createDimension(dname, 1 if not dim.isunlimited() else None)
else:
dsout.createDimension(dname, len(
dim) if not dim.isunlimited() else None)
print dsout.dimensions
    # Copy every variable from the original file; for the pollutant variables
    # write the computed integrals instead of the raw values
for v_name, varin in dataset.variables.iteritems():
if v_name == 'C137':
outVar = dsout.createVariable(
v_name, varin.datatype, varin.dimensions)
outVar.setncatts({k: varin.getncattr(k)
for k in varin.ncattrs()})
outVar[:] = c137[:]
elif v_name == 'I131':
outVar = dsout.createVariable(
v_name, varin.datatype, varin.dimensions)
outVar.setncatts({k: varin.getncattr(k)
for k in varin.ncattrs()})
outVar[:] = i131[:]
else:
try:
outVar = dsout.createVariable(
v_name, varin.datatype, varin.dimensions)
outVar.setncatts({k: varin.getncattr(k)
for k in varin.ncattrs()})
outVar[:] = varin[:]
# Catch exception on time variable
except:
outVar[:] = varin[0]
# Finish writing process
dsout.close()
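# Illustrative usage (hypothetical filename, added for clarity):
#   dispersion_integral('station_dispersion.nc')
# writes 'int_station_dispersion.nc' next to the input file, with C137 and I131
# each collapsed over the time axis into a single 501x501 frame.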
# This function calculates the wind direction from a NetCDF file.
# The wind direction is obtained by combining the U and V wind components,
# summed over time and normalized. From the resulting vectors we create
# arrows that represent the wind direction over the whole grid.
def calc_winddir(dataset_name, level):
# Load NetCDF file from local file
dataset = Dataset(APPS_ROOT + '/' + dataset_name, 'r')
# Retrieve U Wind direction
u = dataset.variables['UU'][:, level, :, range(0, 64)].reshape(13, 4096)
# Retrieve V Wind direction
v = dataset.variables['VV'][:, level, range(0, 64), :].reshape(13, 4096)
# Retrieve Latitude and Longitude values
lat = dataset.variables['XLAT_M'][0, :, :].flatten()
lon = dataset.variables['XLONG_M'][0, :, :].flatten()
# Calculate sum of all time frames for U and V wind direction
u = np.sum(u, axis=0)
v = np.sum(v, axis=0)
    # Normalize the stacked UV vectors by their global maximum value
uv = np.vstack((u, v))
uv = np.divide(uv, np.max(uv))
# Create 2 Points for each (lat,lon) pair in the grid
# Point1 with coordinates of (latN,lonN)
# Point2 with coordinates of (latN+UV[N],lonN+UV[N])
x1 = lon
y1 = lat
x1 = [float(i) for i in lon]
y1 = [float(i) for i in lat]
x2 = []
y2 = []
# Calculate Point2
for i in range(0, uv.shape[1]):
x2.append(float(x1[i] + uv[0][i]))
y2.append(float(y1[i] + uv[1][i]))
# Placeholder for every pair of points
arr = []
    # Calculate the arrow between Point1 and Point2 based on
    # https://math.stackexchange.com/questions/1314006/drawing-an-arrow
for i in range(0, uv.shape[1]):
L1 = math.sqrt((x1[i] - x2[i]) * (x1[i] - x2[i]) +
(y2[i] - y1[i]) * (y2[i] - y1[i]))
L2 = float(L1 / 3.5)
x3 = x2[i] + (L2 / L1) * ((x1[i] - x2[i]) * math.cos((math.pi / 6)
) + (y1[i] - y2[i]) * math.sin((math.pi) / 6))
y3 = y2[i] + (L2 / L1) * ((y1[i] - y2[i]) * math.cos((math.pi / 6)
) - (x1[i] - x2[i]) * math.sin((math.pi) / 6))
x4 = x2[i] + (L2 / L1) * ((x1[i] - x2[i]) * math.cos((math.pi / 6)
) - (y1[i] - y2[i]) * math.sin((math.pi) / 6))
y4 = y2[i] + (L2 / L1) * ((y1[i] - y2[i]) * math.cos((math.pi / 6)
) + (x1[i] - x2[i]) * math.sin((math.pi) / 6))
a = (x1[i], y1[i])
b = (x2[i], y2[i])
c = (x3, y3)
d = (x4, y4)
# Add each point to placeholder array
temp = []
temp.append(a)
temp.append(b)
temp2 = []
temp2.append(b)
temp2.append(c)
temp3 = []
temp3.append(b)
temp3.append(d)
arr.append(temp)
arr.append(temp2)
arr.append(temp3)
feature = Feature(geometry=MultiLineString(arr))
dataset.close()
return json.dumps(feature)
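# Illustrative usage (hypothetical arguments, added for clarity):
#   calc_winddir('weather_file.nc', 26)
# returns a GeoJSON Feature (as a JSON string) whose MultiLineString segments draw
# one small arrow per grid point in the direction of the summed U/V wind components.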
# This function retrieves the weather file closest to a date given by the user.
# The weather is retrieved from POSTGRES in the form of a pickled object.
# Weather files are used to extract the GHT variable that was used in our
# experiments.
def load_class_weather(cur, date, origin):
# Safe query is used due to multiple workers accessing the same database
# and connection.
resp = DBConn().safequery("select filename,hdfs_path,GHT,EXTRACT(EPOCH FROM TIMESTAMP '" +
date + "' - date)/3600/24 as diff from weather group by date\
having EXTRACT(EPOCH FROM TIMESTAMP '" + date + "' - date)/3600/24 >= 0 order by diff;")
row = resp.fetchone()
# If model has mult in its title then we need multiple levels (500,700,900 hPa)
# of the GHT variable.
if 'mult' in origin:
# Load weather as a pickled object and scale the variable
items = cPickle.loads(str(row[2]))
items = items.reshape(items.shape[0], -1)
items = minmax_scale(items.sum(axis=0))
else:
items = cPickle.loads(str(row[2]))
# GHT 700hPa
items = items[:, 1, :, :]
items = minmax_scale(items.sum(axis=0))
return items
# This function returns the top-3 predictions for classification models. It needs
# to be a separate function because classification models receive both
# dispersions (in the form of detection maps) and weather data as input.
def get_cprediction(det_map, items, models, origin):
# Loop through every ingested model
for m in models:
# Get selected model
if origin == m[0]:
# Check if model accepts multiple levels of GHT var
if not('mult' in origin):
# Reshape dispersion and weather data in accepted form
items = items.reshape(1, 1, items.shape[0], items.shape[1])
det_map = det_map.reshape(
1, 1, det_map.shape[0], det_map.shape[1])
# Get predictions
cl = m[1].get_output(items, det_map)[0].argsort()
cl = list(cl)
cl = [int(c) for c in cl if c < 18]
# Return top3 stations
cl = cl[:3]
else:
# Reshape dispersion and weather data in accepted form
items = items.reshape(1, 1, 3, 64, 64)
det_map = det_map.reshape(
1, 1, det_map.shape[0], det_map.shape[1])
# Get predictions
cl = m[1].get_output(items, det_map)[0].argsort()
cl = list(cl)
cl = [int(c) for c in cl if c < 18]
# Return top3 stations
cl = cl[:3]
return cl
# This function is used for parallel processing of every ingested dispersion
# in order to find the closest representation for visualization purposes.
def worker(batch,q,pollutant,det_map):
disp_results = []
# For each dispersion in given batch
for row in batch:
# Load dispersion
if pollutant == 'C137':
det = cPickle.loads(str(row[2]))
else:
det = cPickle.loads(str(row[3]))
# Preprocessing of dispersion
det = scipy.misc.imresize(det, (167, 167))
det_shape = det.shape
det = maxabs_scale(det.flatten(),axis=1)
det = det.reshape(det_shape)
# Get distance between real dispersion and detection points
disp_results.append(
(row[0], 1 - scipy.spatial.distance.cosine(det.flatten(), det_map.flatten())))
# Return batch distances
q.put(disp_results)
# This function calculates the distance between the dispersions of the top-3 predicted
# stations and the detection points (for visualization). This is necessary because when
# we treat the source estimation problem as classification there are no exported
# byproducts such as clusters, and therefore no clear candidate dispersion for
# visualization.
def calc_scores(cur, items, cln, pollutant,det_map,origin):
# Get all dispersions for the predicted stations
resp = DBConn().safequery(
"SELECT date,hdfs_path,c137_pickle,i131_pickle from class where station=\'" + cln + "\';")
res = resp.fetchall()
# Calculate distances in 4 batches
batch_size = len(res) / 4
idx = xrange(0,len(res),batch_size)
queue = Queue.Queue()
disp_results = []
threads = []
# Split dispersions into 4 Threads
for i in range(4):
t = threading.Thread(target=worker, args=(res[idx[i]:idx[i]+batch_size],queue,pollutant,det_map))
threads.append(t)
t.start()
disp_results.append(queue.get())
disp_results = list(itertools.chain.from_iterable(disp_results))
print len(disp_results)
disp_results = sorted(disp_results, key=lambda k: k[1], reverse=True)
    # Having all the distances between the detection points and the real dispersions,
    # we need to select the dispersion that not only intersects with the detection
    # points but whose originating weather is also close to the real weather.
resp = DBConn().safequery("SELECT date,GHT from weather;")
res = resp.fetchall()
weather_results = []
# For each weather in given date
for row in res:
# Check if model expects multiple levels of GHT var
if 'mult' in origin:
citems = cPickle.loads(str(row[1]))
citems = citems.reshape(citems.shape[0], -1)
citems = minmax_scale(citems.sum(axis=0))
else:
citems = cPickle.loads(str(row[1]))
citems = citems[:, 1, :, :]
citems = minmax_scale(citems.sum(axis=0))
# Get distance
weather_results.append(
(row[0], 1 - scipy.spatial.distance.cosine(items.flatten(), citems.flatten())))
# Return all distances
return disp_results, weather_results
# This function returns the estimated station dispersion in the GeoJSON form that
# is expected by the visualization mechanism.
def get_disp_frame(cur, cln, pollutant, results):
dispersions = []
scores = []
print results[0]
# Get selected dispersion for a certain station
resp = DBConn().safequery("select filename,hdfs_path,date,c137,i131 from class where date=TIMESTAMP \'" +
datetime.datetime.strftime(results[0], '%m-%d-%Y %H:%M:%S') + "\' and station='" + cln + "';")
row = resp.fetchone()
# Check if dispersion has already been turned into GeoJSON (already been cached)
if (row[3] == None) or (row[4] == None):
# Save dispersion NetCDF file locally
urllib.urlretrieve(row[1], row[0])
        # Turn the 72-hour dispersion into a single frame
dispersion_integral(row[0])
        # Convert dispersion frame to tiff (the new tiff still retains the geographical info)
os.system('gdal_translate NETCDF:\\"' + APPS_ROOT + '/' + 'int_' +
row[0] + '\\":C137 ' + row[0].split('.')[0] + '_c137.tiff')
os.system('gdal_translate NETCDF:\\"' + APPS_ROOT + '/' + 'int_' +
row[0] + '\\":I131 ' + row[0].split('.')[0] + '_i131.tiff')
# Turn HYSPLIT grid into EPSG:4326 projection and use gdal_polygonize
# in order to turn tiff into GeoJSON
os.system('make png TIFF_IN=' +
row[0].split('.')[0] + '_c137.tiff')
os.system('make png TIFF_IN=' +
row[0].split('.')[0] + '_i131.tiff')
# Delete used files
os.system('make clean')
# Load JSON Files
with open(APPS_ROOT+ '/' + row[0].split('.')[0] + '_c137.json', 'r') as c137:
c137_json = json.load(c137)
with open(APPS_ROOT + '/' + row[0].split('.')[0] + '_i131.json', 'r') as i131:
i131_json = json.load(i131)
# Update record for caching purposes
DBConn().safequery("UPDATE class SET c137=\'" +
json.dumps(c137_json) + "\' WHERE filename=\'" + row[0] + "\'")
DBConn().safequery("UPDATE class SET i131=\'" +
json.dumps(i131_json) + "\' WHERE filename=\'" + row[0] + "\'")
# Delete used files
os.system('rm ' + APPS_ROOT + '/' +
row[0].split('.')[0] + '_c137.json')
os.system('rm ' + APPS_ROOT + '/' +
row[0].split('.')[0] + '_i131.json')
os.system('rm ' + APPS_ROOT + '/' + row[0])
os.system('rm ' + APPS_ROOT + '/' + 'int_' + row[0])
# os.system('rm ' + APPS_ROOT + '/' + res[0])
# Choose selected pollutant
if pollutant == 'C137':
dispersion = json.dumps(c137_json)
else:
dispersion = json.dumps(i131_json)
dispersions.append(dispersion)
scores.append(round(results[1], 3))
# If dispersion has been cached
else:
# os.system('rm ' + APPS_ROOT + '/' + res[0])
# Choose selected pollutant
if pollutant == 'C137':
dispersion = json.dumps(row[3])
else:
dispersion = json.dumps(row[4])
dispersions.append(dispersion)
scores.append(round(results[1], 3))
return scores, dispersions
# This function retrieves grid geographical information such as latitude and
# longitude values. Lat/lon values are needed to create the detection
# maps that are used to estimate the source location.
def load_lat_lon(lat_lon):
llat = []
llon = []
for llobj in lat_lon:
llat.append(float(llobj['lat']))
llon.append(float(llobj['lon']))
urllib.urlretrieve(
'http://namenode:50070/webhdfs/v1/sc5/clusters/lat.npy?op=OPEN', 'lat.npy')
urllib.urlretrieve(
'http://namenode:50070/webhdfs/v1/sc5/clusters/lon.npy?op=OPEN', 'lon.npy')
filelat = np.load('lat.npy')
filelon = np.load('lon.npy')
os.system('rm ' + APPS_ROOT + '/' + 'lat.npy')
os.system('rm ' + APPS_ROOT + '/' + 'lon.npy')
return filelat, filelon, llat, llon
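# Note (added for clarity): filelat/filelon are the full model-grid coordinate arrays
# fetched from HDFS, while llat/llon hold the latitudes/longitudes of the user-supplied
# detection points; Detection needs both to build a detection map on the model grid.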
# This function is the one called by the controller when classification models
# have been selected. It relies on the functions defined above.
def cdetections(cur, models, lat_lon, date, pollutant, metric, origin):
# Load weather variables
items = load_class_weather(cur, date, origin)
# Load GRID
(filelat, filelon, llat, llon) = load_lat_lon(lat_lon)
# Initialize detection map
det_obj = Detection(np.zeros(shape=(501, 501)),
filelat, filelon, llat, llon)
det_obj.get_indices()
det_obj.create_detection_map(resize=True)
det_map = det_obj._det_map
# Get prediction
cl = get_cprediction(det_map, items, models, origin)
# Get station names
resp = DBConn().safequery("SELECT station from class group by station order by station;")
res = resp.fetchall()
res = [i for i in res]
print res
print cl
class_name = [str(res[i][0]) for i in cl]
print class_name
# For each station
for cln in class_name:
# Find closest REAL dispersion representation
(disp_results, weather_results) = calc_scores(cur, items, cln, pollutant, det_map, origin)
for w in weather_results:
if w[0] == disp_results[0][0]:
d = disp_results[0]
results = (d[0],w[1]*d[1])
try:
# Create visualization friendly form of real dispersion
scores, dispersions = get_disp_frame(cur, cln, pollutant, results)
except:
d = disp_results[0]
results = (d[0],d[1])
scores, dispersions = get_disp_frame(cur, cln, pollutant, results)
# Build results as JSON
scores, dispersions, class_name = zip(
*sorted(zip(scores, dispersions, class_name), key=lambda k: k[0], reverse=True))
print scores
send = {}
send['stations'] = class_name
send['scores'] = scores
send['dispersions'] = dispersions
return json.dumps(send)
# This function loads weather data. There are two different functions for loading
# weather data because clustering and classification methods expect the
# weather data in different shapes or need different preprocessing.
def load_weather_data(cur, date, origin):
# Safe query is used due to multiple workers accessing the same database
# and connection.
resp = DBConn().safequery("select filename,hdfs_path,GHT,EXTRACT(EPOCH FROM TIMESTAMP '" +
date + "' - date)/3600/24 as diff from weather group by date\
having EXTRACT(EPOCH FROM TIMESTAMP '" + date + "' - date)/3600/24 >= 0 order by diff;")
res = resp.fetchone()
# If model has mult in its title then we need multiple levels (500,700,900 hPa)
# of the GHT variable.
if 'mult' in origin:
# Load weather as a pickled object
items = cPickle.loads(str(res[2]))
items = items.reshape(1, 1, items.shape[0], items.shape[
1], items.shape[2], items.shape[3])
else:
# GHT 700hPa
items = cPickle.loads(str(res[2]))
items = items[:, 1, :, :]
items = items.reshape(
1, 1, items.shape[0], 1, items.shape[1], items.shape[2])
return items,res
# This function returns the closest cluster based on the real weather chosen by
# the user.
def load_cluster_date(items, models, origin):
# Convert weather data to 2 dimensions
ds = Dataset_transformations(items, 1000, items.shape)
x = items.shape[2]
y = items.shape[3]
ds.twod_transformation()
# Normalization
ds.normalize()
# Find selected model
for m in models:
        # If anything other than k-means was selected, we need to rerun the clustering
        # model and get the output of the hidden layer for centroid comparison
if origin == m[0]:
if 'kmeans' not in m[0]:
clust_obj = m[2]
ds._items = m[1].get_hidden(ds._items.T)
# Select closest centroid
cd = clust_obj.centroids_distance(ds, features_first=False)
# Return centroid as date
cluster_date = utils.reconstruct_date(
clust_obj._desc_date[cd[0][0]])
            # If k-means was selected, perform the centroid comparison on the raw data
else:
clust_obj = m[1]
cd = clust_obj.centroids_distance(ds, features_first=True)
cluster_date = utils.reconstruct_date(
clust_obj._desc_date[cd[0][0]])
return cluster_date
# This function calculates scores for each station of a certain cluster by comparing
# every dispersion with the constructed detection point map.
def calc_station_scores(cur, lat_lon, timestamp, origin, descriptor, pollutant):
# Load GRID
(filelat, filelon, llat, llon) = load_lat_lon(lat_lon)
results = []
# Get all dispersion of every station for a certain cluster
res = DBConn().safequery("select filename,hdfs_path,station,c137_pickle,i131_pickle from cluster where date=TIMESTAMP \'" +
datetime.datetime.strftime(timestamp, '%m-%d-%Y %H:%M:%S') + "\' and origin='" + origin + "' and descriptor='" + descriptor + "'")
# For each dispersion/station
for row in res:
# Create detection point map
if pollutant == 'C137':
det_obj = Detection(cPickle.loads(
str(row[3])), filelat, filelon, llat, llon)
det_obj.get_indices()
det_obj.create_detection_map()
else:
det_obj = Detection(cPickle.loads(
str(row[4])), filelat, filelon, llat, llon)
det_obj.get_indices()
det_obj.create_detection_map()
# Compare detection map to real dispersion
if det_obj.calc() != 0:
results.append((row[2], det_obj.cosine()))
else:
results.append((row[2], 0))
# Return all results
return results
# This function selects the top-3 stations closest to a dispersion and converts
# their dispersions into GeoJSON form for visualization.
def get_top3_stations(cur, top3, timestamp, origin, pollutant):
# Get top 3 names and scores
top3_names = [top[0] for top in top3]
top3_scores = [round(top[1], 3) for top in top3]
stations = []
scores = []
dispersions = []
# Find their database records
resp = DBConn().safequery("select filename,hdfs_path,station,c137,i131 from cluster where date=TIMESTAMP \'" +
datetime.datetime.strftime(timestamp, '%m-%d-%Y %H:%M:%S') + "\' and origin='" + origin + "'")
rows = resp.fetchall()
# For each dispersion in a certain cluster
for row in rows:
# If the name is in top 3
if row[2] in top3_names:
# Check if dispersion visualization form has already been cached
if (row[3] == None) or (row[4] == None):
# If not, retrieve original NetCDF file
urllib.urlretrieve(row[1], row[0])
# Convert 72 hour dispersion into a single frame
dispersion_integral(row[0])
                # Convert dispersion frame to tiff (the new tiff still retains the geographical info)
os.system('gdal_translate NETCDF:\\"' + APPS_ROOT + '/' + 'int_' +
row[0] + '\\":C137 ' + row[0].split('.')[0] + '_c137.tiff')
os.system('gdal_translate NETCDF:\\"' + APPS_ROOT + '/' + 'int_' +
row[0] + '\\":I131 ' + row[0].split('.')[0] + '_i131.tiff')
# Turn HYSPLIT grid into EPSG:4326 projection and use gdal_polygonize
# in order to turn tiff into GeoJSON
os.system('make png TIFF_IN=' +
row[0].split('.')[0] + '_c137.tiff')
os.system('make png TIFF_IN=' +
row[0].split('.')[0] + '_i131.tiff')
# Delete used files
os.system('make clean')
# Load JSON Files
with open(row[0].split('.')[0] + '_c137.json', 'r') as c137:
c137_json = json.load(c137)
with open(row[0].split('.')[0] + '_i131.json', 'r') as i131:
i131_json = json.load(i131)
# Update record for caching purposes
DBConn().safequery("UPDATE cluster SET c137=\'" +
json.dumps(c137_json) + "\' WHERE filename=\'" + row[0] + "\'")
DBConn().safequery("UPDATE cluster SET i131=\'" +
json.dumps(i131_json) + "\' WHERE filename=\'" + row[0] + "\'")
# Delete used files
os.system('rm ' + APPS_ROOT + '/' +
row[0].split('.')[0] + '_c137.json')
os.system('rm ' + APPS_ROOT + '/' +
row[0].split('.')[0] + '_i131.json')
os.system('rm ' + APPS_ROOT + '/' + row[0])
os.system('rm ' + APPS_ROOT + '/' + 'int_' + row[0])
# os.system('rm ' + APPS_ROOT + '/' + res[0])
stations.append(str(row[2]))
scores.append(top3_scores[top3_names.index(row[2])])
# Choose selected pollutant
if pollutant == 'C137':
dispersions.append(json.dumps(c137_json))
else:
dispersions.append(json.dumps(i131_json))
# If dispersion has been cached
else:
# os.system('rm ' + APPS_ROOT + '/' + res[0])
stations.append(str(row[2]))
scores.append(top3_scores[top3_names.index(row[2])])
if pollutant == 'C137':
dispersions.append(json.dumps(row[3]))
else:
dispersions.append(json.dumps(row[4]))
return scores, dispersions, stations
# This function is the one called by the controller when clustering models
# have been selected. It relies on the functions defined above.
def detections(cur, models, lat_lon, date, pollutant, metric, origin):
# Load weather variable
(items,res) = load_weather_data(cur, date, origin)
# Get best cluster candidate
cluster_date = load_cluster_date(items, models, origin)
descriptor = origin.split('_')
descriptor = descriptor[len(descriptor) - 1]
timestamp = datetime.datetime.strptime(cluster_date, '%y-%m-%d-%H')
# Get scores for each station for the best cluster candidate
results = calc_station_scores(cur, lat_lon, timestamp, origin, descriptor, pollutant)
# Sort scores
results = sorted(results, key=lambda k: k[1] if k[
1] > 0 else float('inf'), reverse=False)
# Get top 3 stations
top3 = results[:3]
print top3
# Turn top 3 station dispersion to visualization friendly form
scores, dispersions, stations = get_top3_stations(cur, top3, timestamp, origin, pollutant)
# Convert results to JSON form
scores, dispersions, stations = zip(
*sorted(zip(scores, dispersions, stations), key=lambda k: k[0] if k[0] > 0 else float('inf'), reverse=False))
send = {}
send['stations'] = stations
send['scores'] = scores
send['dispersions'] = dispersions
return json.dumps(send)
# This function returns all ingested models/methods available to the user.
def get_methods(cur):
res = DBConn().safequery("select origin,html from models;")
origins = []
for row in res:
origin = {}
origin['html'] = row[1]
origin['origin'] = row[0]
origins.append(origin)
return json.dumps(origins)
# This method returns the best real weather data candidate for a given date
def get_closest_weather(cur, date, level):
level = int(level)
# Query based on weather pressure level and date
if level == 22:
resp = DBConn().safequery("select filename,hdfs_path,wind_dir500,EXTRACT(EPOCH FROM TIMESTAMP '" +
date + "' - date)/3600/24 as diff from weather group by date\
having EXTRACT(EPOCH FROM TIMESTAMP '" + date + "' - date)/3600/24 >= 0 order by diff;")
elif level == 26:
resp = DBConn().safequery("select filename,hdfs_path,wind_dir700,EXTRACT(EPOCH FROM TIMESTAMP '" +
date + "' - date)/3600/24 as diff from weather group by date\
having EXTRACT(EPOCH FROM TIMESTAMP '" + date + "' - date)/3600/24 >= 0 order by diff;")
elif level == 33:
resp = DBConn().safequery("select filename,hdfs_path,wind_dir900,EXTRACT(EPOCH FROM TIMESTAMP '" +
date + "' - date)/3600/24 as diff from weather group by date\
having EXTRACT(EPOCH FROM TIMESTAMP '" + date + "' - date)/3600/24 >= 0 order by diff;")
res = resp.fetchone()
# Check if date result is in bounds
if res[3] > 5:
return json.dumps({'error': 'date is out of bounds'})
# Check if weather data has already been cached
if res[2] == None:
# If not, get original NetCDF file
urllib.urlretrieve(res[1], res[0])
# Convert NetCDF to GeoJSON format for visualization purposes
json_dir = calc_winddir(res[0], level)
# Delete used files
os.system('rm ' + APPS_ROOT + '/' + res[0])
        # Update the database record, thereby caching the visualization format for
        # this weather file
if level == 22:
DBConn().safequery("UPDATE weather SET wind_dir500=\'" +
json_dir + "\' WHERE filename=\'" + res[0] + "\'")
elif level == 26:
DBConn().safequery("UPDATE weather SET wind_dir700=\'" +
json_dir + "\' WHERE filename=\'" + res[0] + "\'")
elif level == 33:
DBConn().safequery("UPDATE weather SET wind_dir900=\'" +
json_dir + "\' WHERE filename=\'" + res[0] + "\'")
return json_dir
# If already cached
else:
return json.dumps(res[2])
# This function loads the dispersion grid used in combination with SEMAGROW in
# order to return affected areas. The difference from load_lat_lon is that
# this grid is split into cells rather than (lat, lon) points.
def load_gridcells():
with open('dispersion_grid.json') as ff:
cells = json.load(ff)
cell_pols = []
for cell in cells:
points = []
points.append(Point(float(cell['bottom_left']['lon']),float(cell['bottom_left']['lat'])))
points.append(Point(float(cell['top_left']['lon']),float(cell['top_left']['lat'])))
points.append(Point(float(cell['top_right']['lon']),float(cell['top_right']['lat'])))
points.append(Point(float(cell['bottom_right']['lon']),float(cell['bottom_right']['lat'])))
        # Convert them into Shapely Polygons for convenient manipulation
pol = Polygon([[p.x, p.y] for p in points])
cell_pol = {}
cell_pol['id'] = cell['id']
cell_pol['obj'] = pol
cell_pols.append(cell_pol)
return cell_pols
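# Illustrative note (added for clarity): each entry of the returned list has the form
# {'id': <cell id from dispersion_grid.json>, 'obj': <shapely Polygon>}, so callers such
# as pop() can test multi.intersects(cell_pol['obj']) for every grid cell.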
# This function is used in order to log the computation time of various functions
def timing(start, end):
hours, rem = divmod(end - start, 3600)
minutes, seconds = divmod(rem, 60)
print("{:0>2}:{:0>2}:{:05.2f}".format(int(hours), int(minutes), seconds))
from SPARQLWrapper import SPARQLWrapper, JSON
# This function performs the necessary queries in order to get affected areas,
# given an endpoint and a single id of the grid cell
def single_query(semagrow, cell_id):
semagrow.setQuery("""
PREFIX strdf: <http://strdf.di.uoa.gr/ontology#>
SELECT ?geoname ?lat ?long ?name ?population
WHERE
{ <http://iit.demokritos.gr/%s> strdf:hasGeometry ?geometry .
?geoname <http://www.opengis.net/ont/geosparql#asWKT> ?point ;
<http://www.w3.org/2003/01/geo/wgs84_pos#lat> ?lat ;
<http://www.w3.org/2003/01/geo/wgs84_pos#long> ?long ;
<http://www.geonames.org/ontology#name> ?name ;
<http://www.geonames.org/ontology#population> ?population .
FILTER strdf:within(?point, ?geometry)
}
"""%cell_id)
semagrow.setReturnFormat(JSON)
results = semagrow.queryAndConvert()
return results
# This function performs the necessary queries in order to get affected areas,
# given an endpoint and multiple cell id's (batches).
def query(semagrow, cell_id):
values = ""
for id in cell_id:
values = values + "<http://iit.demokritos.gr/"+str(id)+"> "
semagrow.setQuery("""
PREFIX strdf: <http://strdf.di.uoa.gr/ontology#>
SELECT ?geoname ?lat ?long ?name ?population
WHERE
{ ?cellid strdf:hasGeometry ?geometry .
?geoname <http://www.opengis.net/ont/geosparql#asWKT> ?point ;
<http://www.w3.org/2003/01/geo/wgs84_pos#lat> ?lat ;
<http://www.w3.org/2003/01/geo/wgs84_pos#long> ?long ;
<http://www.geonames.org/ontology#name> ?name ;
<http://www.geonames.org/ontology#population> ?population .
VALUES ?cellid { %s }
FILTER strdf:within(?point, ?geometry)
}
"""%values)
semagrow.setReturnFormat(JSON)
results = semagrow.queryAndConvert()
return results
def hospital_query(semagrow, cell_id):
values = ""
for id in cell_id:
values = values + "<http://iit.demokritos.gr/"+str(id)+"> "
semagrow.setQuery("""
PREFIX strdf: <http://strdf.di.uoa.gr/ontology#>
SELECT ?osm ?point ?tags WHERE {
?cellid strdf:hasGeometry ?geometry .
?osm <http://www.opengis.net/ont/geosparql#asWKT> ?point .
?osm <http://openstreetmap.org/id> ?id .
?s <http://cassandra.semagrow.eu/openstreetmap/hospitals#node_id> ?id .
?s <http://cassandra.semagrow.eu/openstreetmap/hospitals#tags> ?tags .
VALUES ?cellid { %s }
FILTER strdf:within(?point, ?geometry)
}
"""%values)
semagrow.setReturnFormat(JSON)
results = semagrow.queryAndConvert()
return results
# This is the function called by the controller in order to return the areas affected
# by the dispersion (querying each cell id individually).
def single_pop(cell_pols,disp):
start = time.time()
# Load dispersion in the JSON format
disp = json.loads(disp)
# Turn JSON into shapely polygons
multi = MultiPolygon([shape(pol['geometry']) for pol in disp['features']])
    # Get the intersection with our grid, i.e. which cells are affected
    # by the dispersion
affected_ids = [pol['id'] for pol in cell_pols if multi.intersects(pol['obj'])]
# Remove duplicate entries
affected_ids = list(set(affected_ids))
multi_points = []
# For each id query SEMAGROW to get more info
for id in affected_ids:
try:
semagrow = SPARQLWrapper('http://10.0.10.11:9999/SemaGrow/sparql')
results = single_query(semagrow,id)
points = [(Point(float(res['long']['value']),float(res['lat']['value'])),int(res['population']['value']),res['geoname']['value'],res['name']['value']) for res in results['results']['bindings']]
multi_points.append(points)
except:
pass
# Collapse multi points into single list
multi_points = list(chain.from_iterable(multi_points))
jpols = []
timing(start,time.time())
start = time.time()
# Build response in JSON format
for p,point in enumerate(multi_points):
jpols.append(dict(type='Feature', properties={"POP":unicode(point[1]),"URI":unicode(point[2]),"NAME":unicode(point[3])}, geometry=mapping(point[0])))
end_res = dict(type='FeatureCollection', crs={ "type": "name", "properties": { "name":"urn:ogc:def:crs:OGC:1.3:CRS84" }},features=jpols)
timing(start,time.time())
return json.dumps(end_res)
# This is the function called by the controller in order to return the areas affected
# by the dispersion (batch id querying).
def pop(cell_pols,disp):
start = time.time()
# Load dispersion in the JSON format
disp = json.loads(disp)
# Turn JSON into shapely polygons
multi = MultiPolygon([shape(pol['geometry']) for pol in disp['features']])
    # Get the intersection with our grid, i.e. which cells are affected
    # by the dispersion
affected_ids = [pol['id'] for pol in cell_pols if multi.intersects(pol['obj'])]
# Remove duplicate entries
affected_ids = list(set(affected_ids))
multi_points = []
# Open endpoint
semagrow = SPARQLWrapper('http://10.0.10.11:9999/SemaGrow/sparql')
# Batch query
for batch in range(0,len(affected_ids),semagrow_batch_size):
results = query(semagrow,affected_ids[batch:batch+semagrow_batch_size])
points = [(Point(float(res['long']['value']),float(res['lat']['value'])),int(res['population']['value']),res['geoname']['value'],res['name']['value']) for res in results['results']['bindings']]
multi_points.append(points)
# Collapse multi points into single list
multi_points = list(chain.from_iterable(multi_points))
jpols = []
timing(start,time.time())
# Build response in JSON format
for p,point in enumerate(multi_points):
jpols.append(dict(type='Feature', properties={"POP":unicode(point[1]),"URI":unicode(point[2]),"NAME":unicode(point[3])}, geometry=mapping(point[0])))
end_res = dict(type='FeatureCollection', crs={ "type": "name", "properties": { "name":"urn:ogc:def:crs:OGC:1.3:CRS84" }},features=jpols)
return json.dumps(end_res)
# This is the function called by the controller in order to return hospitals in the
# areas affected by the dispersion (batch id querying).
def hosp(cell_pols,disp):
start = time.time()
# Load dispersion in the JSON format
disp = json.loads(disp)
# Turn JSON into shapely polygons
multi = MultiPolygon([shape(pol['geometry']) for pol in disp['features']])
    # Get the intersection with our grid, i.e. which cells are affected
    # by the dispersion
affected_ids = [pol['id'] for pol in cell_pols if multi.intersects(pol['obj'])]
# Remove duplicate entries
affected_ids = list(set(affected_ids))
multi_points = []
# Open endpoint
semagrow = SPARQLWrapper('http://10.0.10.14:9999/SemaGrow/sparql')
# Batch query
for batch in range(0,len(affected_ids),semagrow_batch_size):
results = hospital_query(semagrow,affected_ids[batch:batch+semagrow_batch_size])
for res in results['results']['bindings']:
tup = re.match(r'POINT(.*)',res['point']['value']).group(1)
tup = tup.replace(')','')
tup = tup.replace('(','')
lon = tup.split(' ')[1]
lat = tup.split(' ')[2]
points = [(Point(float(lon),float(lat)),res['tags']['value'].encode('utf-8'))]
multi_points.append(points)
# Collapse multi points into single list
multi_points = list(chain.from_iterable(multi_points))
jpols = []
timing(start,time.time())
# Build response in JSON format
for p,point in enumerate(multi_points):
jpols.append(dict(type='Feature', properties={"TAGS":point[1]}, geometry=mapping(point[0])))
end_res = dict(type='FeatureCollection', crs={ "type": "name", "properties": { "name":"urn:ogc:def:crs:OGC:1.3:CRS84" }},features=jpols)
return json.dumps(end_res)
|
apache-2.0
|
jstoxrocky/statsmodels
|
statsmodels/examples/ex_univar_kde.py
|
34
|
5127
|
"""
This example tests the nonparametric estimator
for several popular univariate distributions with the different
bandwidth selection methods - CV-ML; CV-LS; Scott's rule of thumb.
Produces six different plots, one for each distribution:
1) Beta
2) f
3) Pareto
4) Laplace
5) Weibull
6) Poisson
"""
from __future__ import print_function
import numpy as np
import scipy.stats as stats
import matplotlib.pyplot as plt
import statsmodels.api as sm
KDEMultivariate = sm.nonparametric.KDEMultivariate
np.random.seed(123456)
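# Illustrative sketch only (not used by the code below): each of the six blocks that
# follow repeats the same fit-and-plot pattern, which could be factored into a helper
# roughly like this. The helper name and signature are hypothetical.
def compare_bandwidth_selectors(support, rv, fig_num, dist_name, var_type='c', density_attr='pdf'):
    ix = np.argsort(support)
    plt.figure(fig_num)
    # True density (pdf) or mass function (pmf) of the generating distribution
    plt.plot(support[ix], getattr(rv, density_attr)(support[ix]), label='Actual')
    for bw, label in [('normal_reference', 'Scott'), ('cv_ls', 'CV_LS'), ('cv_ml', 'CV_ML')]:
        dens = KDEMultivariate(data=[support], var_type=var_type, bw=bw)
        plt.plot(support[ix], dens.pdf()[ix], label=label)
    plt.title("Nonparametric Estimation of the Density of %s Distributed "
              "Random Variable" % dist_name)
    plt.legend()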
# Beta distribution
# Parameters
a = 2
b = 5
nobs = 250
support = np.random.beta(a, b, size=nobs)
rv = stats.beta(a, b)
ix = np.argsort(support)
dens_normal = KDEMultivariate(data=[support], var_type='c', bw='normal_reference')
dens_cvls = KDEMultivariate(data=[support], var_type='c', bw='cv_ls')
dens_cvml = KDEMultivariate(data=[support], var_type='c', bw='cv_ml')
plt.figure(1)
plt.plot(support[ix], rv.pdf(support[ix]), label='Actual')
plt.plot(support[ix], dens_normal.pdf()[ix], label='Scott')
plt.plot(support[ix], dens_cvls.pdf()[ix], label='CV_LS')
plt.plot(support[ix], dens_cvml.pdf()[ix], label='CV_ML')
plt.title("Nonparametric Estimation of the Density of Beta Distributed " \
"Random Variable")
plt.legend(('Actual', 'Scott', 'CV_LS', 'CV_ML'))
# f distribution
df = 100
dn = 100
nobs = 250
support = np.random.f(dn, df, size=nobs)
rv = stats.f(dn, df)
ix = np.argsort(support)
dens_normal = KDEMultivariate(data=[support], var_type='c', bw='normal_reference')
dens_cvls = KDEMultivariate(data=[support], var_type='c', bw='cv_ls')
dens_cvml = KDEMultivariate(data=[support], var_type='c', bw='cv_ml')
plt.figure(2)
plt.plot(support[ix], rv.pdf(support[ix]), label='Actual')
plt.plot(support[ix], dens_normal.pdf()[ix], label='Scott')
plt.plot(support[ix], dens_cvls.pdf()[ix], label='CV_LS')
plt.plot(support[ix], dens_cvml.pdf()[ix], label='CV_ML')
plt.title("Nonparametric Estimation of the Density of f Distributed " \
"Random Variable")
plt.legend(('Actual', 'Scott', 'CV_LS', 'CV_ML'))
# Pareto distribution
a = 2
nobs = 150
support = np.random.pareto(a, size=nobs)
rv = stats.pareto(a)
ix = np.argsort(support)
dens_normal = KDEMultivariate(data=[support], var_type='c', bw='normal_reference')
dens_cvls = KDEMultivariate(data=[support], var_type='c', bw='cv_ls')
dens_cvml = KDEMultivariate(data=[support], var_type='c', bw='cv_ml')
plt.figure(3)
plt.plot(support[ix], rv.pdf(support[ix]), label='Actual')
plt.plot(support[ix], dens_normal.pdf()[ix], label='Scott')
plt.plot(support[ix], dens_cvls.pdf()[ix], label='CV_LS')
plt.plot(support[ix], dens_cvml.pdf()[ix], label='CV_ML')
plt.title("Nonparametric Estimation of the Density of Pareto " \
"Distributed Random Variable")
plt.legend(('Actual', 'Scott', 'CV_LS', 'CV_ML'))
# Laplace Distribution
mu = 0
s = 1
nobs = 250
support = np.random.laplace(mu, s, size=nobs)
rv = stats.laplace(mu, s)
ix = np.argsort(support)
dens_normal = KDEMultivariate(data=[support], var_type='c', bw='normal_reference')
dens_cvls = KDEMultivariate(data=[support], var_type='c', bw='cv_ls')
dens_cvml = KDEMultivariate(data=[support], var_type='c', bw='cv_ml')
plt.figure(4)
plt.plot(support[ix], rv.pdf(support[ix]), label='Actual')
plt.plot(support[ix], dens_normal.pdf()[ix], label='Scott')
plt.plot(support[ix], dens_cvls.pdf()[ix], label='CV_LS')
plt.plot(support[ix], dens_cvml.pdf()[ix], label='CV_ML')
plt.title("Nonparametric Estimation of the Density of Laplace " \
"Distributed Random Variable")
plt.legend(('Actual', 'Scott', 'CV_LS', 'CV_ML'))
# Weibull Distribution
a = 1
nobs = 250
support = np.random.weibull(a, size=nobs)
rv = stats.weibull_min(a)
ix = np.argsort(support)
dens_normal = KDEMultivariate(data=[support], var_type='c', bw='normal_reference')
dens_cvls = KDEMultivariate(data=[support], var_type='c', bw='cv_ls')
dens_cvml = KDEMultivariate(data=[support], var_type='c', bw='cv_ml')
plt.figure(5)
plt.plot(support[ix], rv.pdf(support[ix]), label='Actual')
plt.plot(support[ix], dens_normal.pdf()[ix], label='Scott')
plt.plot(support[ix], dens_cvls.pdf()[ix], label='CV_LS')
plt.plot(support[ix], dens_cvml.pdf()[ix], label='CV_ML')
plt.title("Nonparametric Estimation of the Density of Weibull " \
"Distributed Random Variable")
plt.legend(('Actual', 'Scott', 'CV_LS', 'CV_ML'))
# Poisson Distribution
a = 2
nobs = 250
support = np.random.poisson(a, size=nobs)
rv = stats.poisson(a)
ix = np.argsort(support)
dens_normal = KDEMultivariate(data=[support], var_type='o', bw='normal_reference')
dens_cvls = KDEMultivariate(data=[support], var_type='o', bw='cv_ls')
dens_cvml = KDEMultivariate(data=[support], var_type='o', bw='cv_ml')
plt.figure(6)
plt.plot(support[ix], rv.pmf(support[ix]), label='Actual')
plt.plot(support[ix], dens_normal.pdf()[ix], label='Scott')
plt.plot(support[ix], dens_cvls.pdf()[ix], label='CV_LS')
plt.plot(support[ix], dens_cvml.pdf()[ix], label='CV_ML')
plt.title("Nonparametric Estimation of the Density of Poisson " \
"Distributed Random Variable")
plt.legend(('Actual', 'Scott', 'CV_LS', 'CV_ML'))
plt.show()
|
bsd-3-clause
|
imaculate/scikit-learn
|
sklearn/metrics/tests/test_classification.py
|
2
|
54750
|
from __future__ import division, print_function
import numpy as np
from scipy import linalg
from functools import partial
from itertools import product
import warnings
from sklearn import datasets
from sklearn import svm
from sklearn.datasets import make_multilabel_classification
from sklearn.preprocessing import label_binarize
from sklearn.utils.fixes import np_version
from sklearn.utils.validation import check_random_state
from sklearn.utils.testing import assert_raises, clean_warning_registry
from sklearn.utils.testing import assert_raise_message
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_warns
from sklearn.utils.testing import assert_no_warnings
from sklearn.utils.testing import assert_warns_message
from sklearn.utils.testing import assert_not_equal
from sklearn.utils.testing import ignore_warnings
from sklearn.utils.mocking import MockDataFrame
from sklearn.metrics import accuracy_score
from sklearn.metrics import average_precision_score
from sklearn.metrics import classification_report
from sklearn.metrics import cohen_kappa_score
from sklearn.metrics import confusion_matrix
from sklearn.metrics import f1_score
from sklearn.metrics import fbeta_score
from sklearn.metrics import hamming_loss
from sklearn.metrics import hinge_loss
from sklearn.metrics import jaccard_similarity_score
from sklearn.metrics import log_loss
from sklearn.metrics import matthews_corrcoef
from sklearn.metrics import precision_recall_fscore_support
from sklearn.metrics import precision_score
from sklearn.metrics import recall_score
from sklearn.metrics import zero_one_loss
from sklearn.metrics import brier_score_loss
from sklearn.metrics.classification import _check_targets
from sklearn.exceptions import UndefinedMetricWarning
from scipy.spatial.distance import hamming as sp_hamming
###############################################################################
# Utilities for testing
def make_prediction(dataset=None, binary=False):
"""Make some classification predictions on a toy dataset using a SVC
If binary is True restrict to a binary classification problem instead of a
multiclass classification problem
"""
if dataset is None:
# import some data to play with
dataset = datasets.load_iris()
X = dataset.data
y = dataset.target
if binary:
# restrict to a binary classification task
X, y = X[y < 2], y[y < 2]
n_samples, n_features = X.shape
p = np.arange(n_samples)
rng = check_random_state(37)
rng.shuffle(p)
X, y = X[p], y[p]
half = int(n_samples / 2)
# add noisy features to make the problem harder and avoid perfect results
rng = np.random.RandomState(0)
X = np.c_[X, rng.randn(n_samples, 200 * n_features)]
# run classifier, get class probabilities and label predictions
clf = svm.SVC(kernel='linear', probability=True, random_state=0)
probas_pred = clf.fit(X[:half], y[:half]).predict_proba(X[half:])
if binary:
# only interested in probabilities of the positive case
# XXX: do we really want a special API for the binary case?
probas_pred = probas_pred[:, 1]
y_pred = clf.predict(X[half:])
y_true = y[half:]
return y_true, y_pred, probas_pred
###############################################################################
# Tests
def test_multilabel_accuracy_score_subset_accuracy():
# Dense label indicator matrix format
y1 = np.array([[0, 1, 1], [1, 0, 1]])
y2 = np.array([[0, 0, 1], [1, 0, 1]])
assert_equal(accuracy_score(y1, y2), 0.5)
assert_equal(accuracy_score(y1, y1), 1)
assert_equal(accuracy_score(y2, y2), 1)
assert_equal(accuracy_score(y2, np.logical_not(y2)), 0)
assert_equal(accuracy_score(y1, np.logical_not(y1)), 0)
assert_equal(accuracy_score(y1, np.zeros(y1.shape)), 0)
assert_equal(accuracy_score(y2, np.zeros(y1.shape)), 0)
def test_precision_recall_f1_score_binary():
# Test Precision Recall and F1 Score for binary classification task
y_true, y_pred, _ = make_prediction(binary=True)
# detailed measures for each class
p, r, f, s = precision_recall_fscore_support(y_true, y_pred, average=None)
assert_array_almost_equal(p, [0.73, 0.85], 2)
assert_array_almost_equal(r, [0.88, 0.68], 2)
assert_array_almost_equal(f, [0.80, 0.76], 2)
assert_array_equal(s, [25, 25])
# individual scoring function that can be used for grid search: in the
# binary class case the score is the value of the measure for the positive
# class (e.g. label == 1). This is deprecated for average != 'binary'.
assert_dep_warning = partial(assert_warns, DeprecationWarning)
for kwargs, my_assert in [({}, assert_no_warnings),
({'average': 'binary'}, assert_no_warnings),
({'average': 'micro'}, assert_dep_warning)]:
ps = my_assert(precision_score, y_true, y_pred, **kwargs)
assert_array_almost_equal(ps, 0.85, 2)
rs = my_assert(recall_score, y_true, y_pred, **kwargs)
assert_array_almost_equal(rs, 0.68, 2)
fs = my_assert(f1_score, y_true, y_pred, **kwargs)
assert_array_almost_equal(fs, 0.76, 2)
assert_almost_equal(my_assert(fbeta_score, y_true, y_pred, beta=2,
**kwargs),
(1 + 2 ** 2) * ps * rs / (2 ** 2 * ps + rs), 2)
def test_precision_recall_f_binary_single_class():
# Test precision, recall and F1 score behave with a single positive or
# negative class
# Such a case may occur with non-stratified cross-validation
assert_equal(1., precision_score([1, 1], [1, 1]))
assert_equal(1., recall_score([1, 1], [1, 1]))
assert_equal(1., f1_score([1, 1], [1, 1]))
assert_equal(0., precision_score([-1, -1], [-1, -1]))
assert_equal(0., recall_score([-1, -1], [-1, -1]))
assert_equal(0., f1_score([-1, -1], [-1, -1]))
@ignore_warnings
def test_precision_recall_f_extra_labels():
# Test handling of explicit additional (not in input) labels to PRF
y_true = [1, 3, 3, 2]
y_pred = [1, 1, 3, 2]
y_true_bin = label_binarize(y_true, classes=np.arange(5))
y_pred_bin = label_binarize(y_pred, classes=np.arange(5))
data = [(y_true, y_pred),
(y_true_bin, y_pred_bin)]
for i, (y_true, y_pred) in enumerate(data):
# No average: zeros in array
actual = recall_score(y_true, y_pred, labels=[0, 1, 2, 3, 4],
average=None)
assert_array_almost_equal([0., 1., 1., .5, 0.], actual)
# Macro average is changed
actual = recall_score(y_true, y_pred, labels=[0, 1, 2, 3, 4],
average='macro')
assert_array_almost_equal(np.mean([0., 1., 1., .5, 0.]), actual)
# No effect otherwise
for average in ['micro', 'weighted', 'samples']:
if average == 'samples' and i == 0:
continue
assert_almost_equal(recall_score(y_true, y_pred,
labels=[0, 1, 2, 3, 4],
average=average),
recall_score(y_true, y_pred, labels=None,
average=average))
# Error when introducing invalid label in multilabel case
# (although it would only affect performance if average='macro'/None)
for average in [None, 'macro', 'micro', 'samples']:
assert_raises(ValueError, recall_score, y_true_bin, y_pred_bin,
labels=np.arange(6), average=average)
assert_raises(ValueError, recall_score, y_true_bin, y_pred_bin,
labels=np.arange(-1, 4), average=average)
@ignore_warnings
def test_precision_recall_f_ignored_labels():
# Test a subset of labels may be requested for PRF
y_true = [1, 1, 2, 3]
y_pred = [1, 3, 3, 3]
y_true_bin = label_binarize(y_true, classes=np.arange(5))
y_pred_bin = label_binarize(y_pred, classes=np.arange(5))
data = [(y_true, y_pred),
(y_true_bin, y_pred_bin)]
for i, (y_true, y_pred) in enumerate(data):
recall_13 = partial(recall_score, y_true, y_pred, labels=[1, 3])
recall_all = partial(recall_score, y_true, y_pred, labels=None)
assert_array_almost_equal([.5, 1.], recall_13(average=None))
assert_almost_equal((.5 + 1.) / 2, recall_13(average='macro'))
assert_almost_equal((.5 * 2 + 1. * 1) / 3,
recall_13(average='weighted'))
assert_almost_equal(2. / 3, recall_13(average='micro'))
# ensure the above were meaningful tests:
for average in ['macro', 'weighted', 'micro']:
assert_not_equal(recall_13(average=average),
recall_all(average=average))
def test_average_precision_score_score_non_binary_class():
# Test that average_precision_score function returns an error when trying
# to compute average_precision_score for multiclass task.
rng = check_random_state(404)
y_pred = rng.rand(10)
# y_true contains three different class values
y_true = rng.randint(0, 3, size=10)
assert_raise_message(ValueError, "multiclass format is not supported",
average_precision_score, y_true, y_pred)
def test_average_precision_score_duplicate_values():
# Duplicate values with precision-recall require different
# processing than when computing the AUC of a ROC, because the
# precision-recall curve is a decreasing curve
# The following situation corresponds to a perfect
# test statistic, the average_precision_score should be 1
y_true = [0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1]
y_score = [0, .1, .1, .4, .5, .6, .6, .9, .9, 1, 1]
assert_equal(average_precision_score(y_true, y_score), 1)
def test_average_precision_score_tied_values():
# Here if we go from left to right in y_true, the 0 values are
# separated from the 1 values, so it appears that we've
# correctly sorted our classifications. But in fact the first two
# values have the same score (0.5) and so the first two values
# could be swapped around, creating an imperfect sorting. This
# imperfection should come through in the end score, making it less
# than one.
y_true = [0, 1, 1]
y_score = [.5, .5, .6]
assert_not_equal(average_precision_score(y_true, y_score), 1.)
@ignore_warnings
def test_precision_recall_fscore_support_errors():
y_true, y_pred, _ = make_prediction(binary=True)
# Bad beta
assert_raises(ValueError, precision_recall_fscore_support,
y_true, y_pred, beta=0.0)
# Bad pos_label
assert_raises(ValueError, precision_recall_fscore_support,
y_true, y_pred, pos_label=2, average='macro')
# Bad average option
assert_raises(ValueError, precision_recall_fscore_support,
[0, 1, 2], [1, 2, 0], average='mega')
def test_confusion_matrix_binary():
# Test confusion matrix - binary classification case
y_true, y_pred, _ = make_prediction(binary=True)
def test(y_true, y_pred):
cm = confusion_matrix(y_true, y_pred)
assert_array_equal(cm, [[22, 3], [8, 17]])
tp, fp, fn, tn = cm.flatten()
num = (tp * tn - fp * fn)
den = np.sqrt((tp + fp) * (tp + fn) * (tn + fp) * (tn + fn))
true_mcc = 0 if den == 0 else num / den
mcc = matthews_corrcoef(y_true, y_pred)
assert_array_almost_equal(mcc, true_mcc, decimal=2)
assert_array_almost_equal(mcc, 0.57, decimal=2)
test(y_true, y_pred)
test([str(y) for y in y_true],
[str(y) for y in y_pred])
def test_cohen_kappa():
# These label vectors reproduce the contingency matrix from Artstein and
# Poesio (2008), Table 1: np.array([[20, 20], [10, 50]]).
y1 = np.array([0] * 40 + [1] * 60)
y2 = np.array([0] * 20 + [1] * 20 + [0] * 10 + [1] * 50)
kappa = cohen_kappa_score(y1, y2)
assert_almost_equal(kappa, .348, decimal=3)
assert_equal(kappa, cohen_kappa_score(y2, y1))
# Add spurious labels and ignore them.
y1 = np.append(y1, [2] * 4)
y2 = np.append(y2, [2] * 4)
assert_equal(cohen_kappa_score(y1, y2, labels=[0, 1]), kappa)
assert_almost_equal(cohen_kappa_score(y1, y1), 1.)
# Multiclass example: Artstein and Poesio, Table 4.
y1 = np.array([0] * 46 + [1] * 44 + [2] * 10)
y2 = np.array([0] * 52 + [1] * 32 + [2] * 16)
assert_almost_equal(cohen_kappa_score(y1, y2), .8013, decimal=4)
# Weighting example: none, linear, quadratic.
y1 = np.array([0] * 46 + [1] * 44 + [2] * 10)
y2 = np.array([0] * 50 + [1] * 40 + [2] * 10)
assert_almost_equal(cohen_kappa_score(y1, y2), .9315, decimal=4)
assert_almost_equal(cohen_kappa_score(y1, y2, weights="linear"), .9412, decimal=4)
assert_almost_equal(cohen_kappa_score(y1, y2, weights="quadratic"), .9541, decimal=4)
@ignore_warnings
def test_matthews_corrcoef_nan():
assert_equal(matthews_corrcoef([0], [1]), 0.0)
assert_equal(matthews_corrcoef([0, 0], [0, 1]), 0.0)
def test_matthews_corrcoef_against_numpy_corrcoef():
rng = np.random.RandomState(0)
y_true = rng.randint(0, 2, size=20)
y_pred = rng.randint(0, 2, size=20)
assert_almost_equal(matthews_corrcoef(y_true, y_pred),
np.corrcoef(y_true, y_pred)[0, 1], 10)
def test_matthews_corrcoef():
rng = np.random.RandomState(0)
y_true = ["a" if i == 0 else "b" for i in rng.randint(0, 2, size=20)]
# corrcoef of same vectors must be 1
assert_almost_equal(matthews_corrcoef(y_true, y_true), 1.0)
# corrcoef, when the two vectors are opposites of each other, should be -1
y_true_inv = ["b" if i == "a" else "a" for i in y_true]
assert_almost_equal(matthews_corrcoef(y_true, y_true_inv), -1)
y_true_inv2 = label_binarize(y_true, ["a", "b"]) * -1
assert_almost_equal(matthews_corrcoef(y_true, y_true_inv2), -1)
# For the zero vector case, the corrcoef cannot be calculated and should
# result in a RuntimeWarning
mcc = assert_warns_message(RuntimeWarning, 'invalid value encountered',
matthews_corrcoef, [0, 0, 0, 0], [0, 0, 0, 0])
# But will output 0
assert_almost_equal(mcc, 0.)
# And also for any other vector with 0 variance
mcc = assert_warns_message(RuntimeWarning, 'invalid value encountered',
matthews_corrcoef, y_true,
rng.randint(-100, 100) * np.ones(20, dtype=int))
# But will output 0
assert_almost_equal(mcc, 0.)
# These two vectors have 0 correlation and hence mcc should be 0
y_1 = [1, 0, 1, 1, 0, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1]
y_2 = [1, 1, 1, 0, 0, 1, 1, 1, 1, 0, 1, 1, 1, 0, 1, 1, 1, 0, 1, 1]
assert_almost_equal(matthews_corrcoef(y_1, y_2), 0.)
# Check that sample weight is able to selectively exclude samples
mask = [1] * 10 + [0] * 10
# Now only the first half of the vector elements is given a weight of 1
# and hence the mcc will not be a perfect 0 as in the previous case
assert_raises(AssertionError, assert_almost_equal,
matthews_corrcoef(y_1, y_2, sample_weight=mask), 0.)
def test_precision_recall_f1_score_multiclass():
# Test Precision Recall and F1 Score for multiclass classification task
y_true, y_pred, _ = make_prediction(binary=False)
# compute scores with default labels introspection
p, r, f, s = precision_recall_fscore_support(y_true, y_pred, average=None)
assert_array_almost_equal(p, [0.83, 0.33, 0.42], 2)
assert_array_almost_equal(r, [0.79, 0.09, 0.90], 2)
assert_array_almost_equal(f, [0.81, 0.15, 0.57], 2)
assert_array_equal(s, [24, 31, 20])
# averaging tests
ps = precision_score(y_true, y_pred, pos_label=1, average='micro')
assert_array_almost_equal(ps, 0.53, 2)
rs = recall_score(y_true, y_pred, average='micro')
assert_array_almost_equal(rs, 0.53, 2)
fs = f1_score(y_true, y_pred, average='micro')
assert_array_almost_equal(fs, 0.53, 2)
ps = precision_score(y_true, y_pred, average='macro')
assert_array_almost_equal(ps, 0.53, 2)
rs = recall_score(y_true, y_pred, average='macro')
assert_array_almost_equal(rs, 0.60, 2)
fs = f1_score(y_true, y_pred, average='macro')
assert_array_almost_equal(fs, 0.51, 2)
ps = precision_score(y_true, y_pred, average='weighted')
assert_array_almost_equal(ps, 0.51, 2)
rs = recall_score(y_true, y_pred, average='weighted')
assert_array_almost_equal(rs, 0.53, 2)
fs = f1_score(y_true, y_pred, average='weighted')
assert_array_almost_equal(fs, 0.47, 2)
assert_raises(ValueError, precision_score, y_true, y_pred,
average="samples")
assert_raises(ValueError, recall_score, y_true, y_pred, average="samples")
assert_raises(ValueError, f1_score, y_true, y_pred, average="samples")
assert_raises(ValueError, fbeta_score, y_true, y_pred, average="samples",
beta=0.5)
# same prediction but with an explicit label ordering
p, r, f, s = precision_recall_fscore_support(
y_true, y_pred, labels=[0, 2, 1], average=None)
assert_array_almost_equal(p, [0.83, 0.41, 0.33], 2)
assert_array_almost_equal(r, [0.79, 0.90, 0.10], 2)
assert_array_almost_equal(f, [0.81, 0.57, 0.15], 2)
assert_array_equal(s, [24, 20, 31])
def test_precision_refcall_f1_score_multilabel_unordered_labels():
# test that labels need not be sorted in the multilabel case
y_true = np.array([[1, 1, 0, 0]])
y_pred = np.array([[0, 0, 1, 1]])
for average in ['samples', 'micro', 'macro', 'weighted', None]:
p, r, f, s = precision_recall_fscore_support(
y_true, y_pred, labels=[3, 0, 1, 2], warn_for=[], average=average)
assert_array_equal(p, 0)
assert_array_equal(r, 0)
assert_array_equal(f, 0)
if average is None:
assert_array_equal(s, [0, 1, 1, 0])
def test_precision_recall_f1_score_multiclass_pos_label_none():
# Test Precision Recall and F1 Score for multiclass classification task
# GH Issue #1296
# initialize data
y_true = np.array([0, 1, 0, 0, 1, 1, 0, 1, 0, 0, 1, 0, 1, 0, 1])
y_pred = np.array([1, 1, 0, 1, 0, 1, 1, 1, 1, 0, 1, 0, 1, 0, 1])
# compute scores with default labels introspection
p, r, f, s = precision_recall_fscore_support(y_true, y_pred,
pos_label=None,
average='macro')
def test_zero_precision_recall():
# Check that pathological cases do not bring NaNs
old_error_settings = np.seterr(all='raise')
try:
y_true = np.array([0, 1, 2, 0, 1, 2])
y_pred = np.array([2, 0, 1, 1, 2, 0])
assert_almost_equal(precision_score(y_true, y_pred,
average='macro'), 0.0, 2)
assert_almost_equal(recall_score(y_true, y_pred, average='macro'),
0.0, 2)
assert_almost_equal(f1_score(y_true, y_pred, average='macro'),
0.0, 2)
finally:
np.seterr(**old_error_settings)
def test_confusion_matrix_multiclass():
# Test confusion matrix - multi-class case
y_true, y_pred, _ = make_prediction(binary=False)
def test(y_true, y_pred, string_type=False):
# compute confusion matrix with default labels introspection
cm = confusion_matrix(y_true, y_pred)
assert_array_equal(cm, [[19, 4, 1],
[4, 3, 24],
[0, 2, 18]])
# compute confusion matrix with explicit label ordering
labels = ['0', '2', '1'] if string_type else [0, 2, 1]
cm = confusion_matrix(y_true,
y_pred,
labels=labels)
assert_array_equal(cm, [[19, 1, 4],
[0, 18, 2],
[4, 24, 3]])
test(y_true, y_pred)
test(list(str(y) for y in y_true),
list(str(y) for y in y_pred),
string_type=True)
def test_confusion_matrix_sample_weight():
"""Test confusion matrix - case with sample_weight"""
y_true, y_pred, _ = make_prediction(binary=False)
weights = [.1] * 25 + [.2] * 25 + [.3] * 25
cm = confusion_matrix(y_true, y_pred, sample_weight=weights)
true_cm = (.1 * confusion_matrix(y_true[:25], y_pred[:25]) +
.2 * confusion_matrix(y_true[25:50], y_pred[25:50]) +
.3 * confusion_matrix(y_true[50:], y_pred[50:]))
assert_array_almost_equal(cm, true_cm)
assert_raises(
ValueError, confusion_matrix, y_true, y_pred,
sample_weight=weights[:-1])
def test_confusion_matrix_multiclass_subset_labels():
# Test confusion matrix - multi-class case with subset of labels
y_true, y_pred, _ = make_prediction(binary=False)
# compute confusion matrix with only first two labels considered
cm = confusion_matrix(y_true, y_pred, labels=[0, 1])
assert_array_equal(cm, [[19, 4],
[4, 3]])
# compute confusion matrix with explicit label ordering for only subset
# of labels
cm = confusion_matrix(y_true, y_pred, labels=[2, 1])
assert_array_equal(cm, [[18, 2],
[24, 3]])
def test_classification_report_multiclass():
# Test performance report
iris = datasets.load_iris()
y_true, y_pred, _ = make_prediction(dataset=iris, binary=False)
# print classification report with class names
expected_report = """\
precision recall f1-score support
setosa 0.83 0.79 0.81 24
versicolor 0.33 0.10 0.15 31
virginica 0.42 0.90 0.57 20
avg / total 0.51 0.53 0.47 75
"""
report = classification_report(
y_true, y_pred, labels=np.arange(len(iris.target_names)),
target_names=iris.target_names)
assert_equal(report, expected_report)
# print classification report with label detection
expected_report = """\
precision recall f1-score support
0 0.83 0.79 0.81 24
1 0.33 0.10 0.15 31
2 0.42 0.90 0.57 20
avg / total 0.51 0.53 0.47 75
"""
report = classification_report(y_true, y_pred)
assert_equal(report, expected_report)
def test_classification_report_multiclass_with_digits():
# Test performance report with added digits in floating point values
iris = datasets.load_iris()
y_true, y_pred, _ = make_prediction(dataset=iris, binary=False)
# print classification report with class names
expected_report = """\
precision recall f1-score support
setosa 0.82609 0.79167 0.80851 24
versicolor 0.33333 0.09677 0.15000 31
virginica 0.41860 0.90000 0.57143 20
avg / total 0.51375 0.53333 0.47310 75
"""
report = classification_report(
y_true, y_pred, labels=np.arange(len(iris.target_names)),
target_names=iris.target_names, digits=5)
assert_equal(report, expected_report)
# print classification report with label detection
expected_report = """\
precision recall f1-score support
0 0.83 0.79 0.81 24
1 0.33 0.10 0.15 31
2 0.42 0.90 0.57 20
avg / total 0.51 0.53 0.47 75
"""
report = classification_report(y_true, y_pred)
assert_equal(report, expected_report)
def test_classification_report_multiclass_with_string_label():
y_true, y_pred, _ = make_prediction(binary=False)
y_true = np.array(["blue", "green", "red"])[y_true]
y_pred = np.array(["blue", "green", "red"])[y_pred]
expected_report = """\
precision recall f1-score support
blue 0.83 0.79 0.81 24
green 0.33 0.10 0.15 31
red 0.42 0.90 0.57 20
avg / total 0.51 0.53 0.47 75
"""
report = classification_report(y_true, y_pred)
assert_equal(report, expected_report)
expected_report = """\
precision recall f1-score support
a 0.83 0.79 0.81 24
b 0.33 0.10 0.15 31
c 0.42 0.90 0.57 20
avg / total 0.51 0.53 0.47 75
"""
report = classification_report(y_true, y_pred,
target_names=["a", "b", "c"])
assert_equal(report, expected_report)
def test_classification_report_multiclass_with_unicode_label():
y_true, y_pred, _ = make_prediction(binary=False)
labels = np.array([u"blue\xa2", u"green\xa2", u"red\xa2"])
y_true = labels[y_true]
y_pred = labels[y_pred]
expected_report = u"""\
precision recall f1-score support
blue\xa2 0.83 0.79 0.81 24
green\xa2 0.33 0.10 0.15 31
red\xa2 0.42 0.90 0.57 20
avg / total 0.51 0.53 0.47 75
"""
if np_version[:3] < (1, 7, 0):
expected_message = ("NumPy < 1.7.0 does not implement"
" searchsorted on unicode data correctly.")
assert_raise_message(RuntimeError, expected_message,
classification_report, y_true, y_pred)
else:
report = classification_report(y_true, y_pred)
assert_equal(report, expected_report)
def test_classification_report_multiclass_with_long_string_label():
y_true, y_pred, _ = make_prediction(binary=False)
labels = np.array(["blue", "green"*5, "red"])
y_true = labels[y_true]
y_pred = labels[y_pred]
expected_report = """\
precision recall f1-score support
blue 0.83 0.79 0.81 24
greengreengreengreengreen 0.33 0.10 0.15 31
red 0.42 0.90 0.57 20
avg / total 0.51 0.53 0.47 75
"""
report = classification_report(y_true, y_pred)
assert_equal(report, expected_report)
def test_multilabel_classification_report():
n_classes = 4
n_samples = 50
_, y_true = make_multilabel_classification(n_features=1,
n_samples=n_samples,
n_classes=n_classes,
random_state=0)
_, y_pred = make_multilabel_classification(n_features=1,
n_samples=n_samples,
n_classes=n_classes,
random_state=1)
expected_report = """\
precision recall f1-score support
0 0.50 0.67 0.57 24
1 0.51 0.74 0.61 27
2 0.29 0.08 0.12 26
3 0.52 0.56 0.54 27
avg / total 0.45 0.51 0.46 104
"""
report = classification_report(y_true, y_pred)
assert_equal(report, expected_report)
def test_multilabel_zero_one_loss_subset():
# Dense label indicator matrix format
y1 = np.array([[0, 1, 1], [1, 0, 1]])
y2 = np.array([[0, 0, 1], [1, 0, 1]])
assert_equal(zero_one_loss(y1, y2), 0.5)
assert_equal(zero_one_loss(y1, y1), 0)
assert_equal(zero_one_loss(y2, y2), 0)
assert_equal(zero_one_loss(y2, np.logical_not(y2)), 1)
assert_equal(zero_one_loss(y1, np.logical_not(y1)), 1)
assert_equal(zero_one_loss(y1, np.zeros(y1.shape)), 1)
assert_equal(zero_one_loss(y2, np.zeros(y1.shape)), 1)
def test_multilabel_hamming_loss():
# Dense label indicator matrix format
y1 = np.array([[0, 1, 1], [1, 0, 1]])
y2 = np.array([[0, 0, 1], [1, 0, 1]])
w = np.array([1, 3])
assert_equal(hamming_loss(y1, y2), 1 / 6)
assert_equal(hamming_loss(y1, y1), 0)
assert_equal(hamming_loss(y2, y2), 0)
assert_equal(hamming_loss(y2, 1 - y2), 1)
assert_equal(hamming_loss(y1, 1 - y1), 1)
assert_equal(hamming_loss(y1, np.zeros(y1.shape)), 4 / 6)
assert_equal(hamming_loss(y2, np.zeros(y1.shape)), 0.5)
assert_equal(hamming_loss(y1, y2, sample_weight=w), 1. / 12)
assert_equal(hamming_loss(y1, 1-y2, sample_weight=w), 11. / 12)
assert_equal(hamming_loss(y1, np.zeros_like(y1), sample_weight=w), 2. / 3)
# sp_hamming only works with 1-D arrays
assert_equal(hamming_loss(y1[0], y2[0]), sp_hamming(y1[0], y2[0]))
def test_multilabel_jaccard_similarity_score():
# Dense label indicator matrix format
y1 = np.array([[0, 1, 1], [1, 0, 1]])
y2 = np.array([[0, 0, 1], [1, 0, 1]])
# size(y1 \inter y2) = [1, 2]
# size(y1 \union y2) = [2, 2]
assert_equal(jaccard_similarity_score(y1, y2), 0.75)
assert_equal(jaccard_similarity_score(y1, y1), 1)
assert_equal(jaccard_similarity_score(y2, y2), 1)
assert_equal(jaccard_similarity_score(y2, np.logical_not(y2)), 0)
assert_equal(jaccard_similarity_score(y1, np.logical_not(y1)), 0)
assert_equal(jaccard_similarity_score(y1, np.zeros(y1.shape)), 0)
assert_equal(jaccard_similarity_score(y2, np.zeros(y1.shape)), 0)
@ignore_warnings
def test_precision_recall_f1_score_multilabel_1():
# Test precision_recall_f1_score on a crafted multilabel example
# First crafted example
y_true = np.array([[1, 0, 0, 0], [0, 1, 0, 0], [0, 0, 1, 1]])
y_pred = np.array([[0, 1, 0, 0], [0, 1, 0, 0], [1, 0, 1, 0]])
p, r, f, s = precision_recall_fscore_support(y_true, y_pred, average=None)
# tp = [0, 1, 1, 0]
# fn = [1, 0, 0, 1]
# fp = [1, 1, 0, 0]
# Check per class
assert_array_almost_equal(p, [0.0, 0.5, 1.0, 0.0], 2)
assert_array_almost_equal(r, [0.0, 1.0, 1.0, 0.0], 2)
assert_array_almost_equal(f, [0.0, 1 / 1.5, 1, 0.0], 2)
assert_array_almost_equal(s, [1, 1, 1, 1], 2)
f2 = fbeta_score(y_true, y_pred, beta=2, average=None)
support = s
assert_array_almost_equal(f2, [0, 0.83, 1, 0], 2)
# Check macro
p, r, f, s = precision_recall_fscore_support(y_true, y_pred,
average="macro")
assert_almost_equal(p, 1.5 / 4)
assert_almost_equal(r, 0.5)
assert_almost_equal(f, 2.5 / 1.5 * 0.25)
assert_equal(s, None)
assert_almost_equal(fbeta_score(y_true, y_pred, beta=2, average="macro"),
np.mean(f2))
# Check micro
p, r, f, s = precision_recall_fscore_support(y_true, y_pred,
average="micro")
assert_almost_equal(p, 0.5)
assert_almost_equal(r, 0.5)
assert_almost_equal(f, 0.5)
assert_equal(s, None)
assert_almost_equal(fbeta_score(y_true, y_pred, beta=2,
average="micro"),
(1 + 4) * p * r / (4 * p + r))
# Check weighted
p, r, f, s = precision_recall_fscore_support(y_true, y_pred,
average="weighted")
assert_almost_equal(p, 1.5 / 4)
assert_almost_equal(r, 0.5)
assert_almost_equal(f, 2.5 / 1.5 * 0.25)
assert_equal(s, None)
assert_almost_equal(fbeta_score(y_true, y_pred, beta=2,
average="weighted"),
np.average(f2, weights=support))
# Check samples
# |h(x_i) inter y_i | = [0, 1, 1]
# |y_i| = [1, 1, 2]
# |h(x_i)| = [1, 1, 2]
p, r, f, s = precision_recall_fscore_support(y_true, y_pred,
average="samples")
assert_almost_equal(p, 0.5)
assert_almost_equal(r, 0.5)
assert_almost_equal(f, 0.5)
assert_equal(s, None)
assert_almost_equal(fbeta_score(y_true, y_pred, beta=2, average="samples"),
0.5)
@ignore_warnings
def test_precision_recall_f1_score_multilabel_2():
# Test precision_recall_f1_score on a crafted multilabel example 2
# Second crafted example
y_true = np.array([[1, 0, 0, 0], [0, 1, 0, 0], [0, 1, 1, 0]])
y_pred = np.array([[0, 0, 0, 1], [0, 0, 0, 1], [1, 1, 0, 0]])
# tp = [ 0. 1. 0. 0.]
# fp = [ 1. 0. 0. 2.]
# fn = [ 1. 1. 1. 0.]
p, r, f, s = precision_recall_fscore_support(y_true, y_pred,
average=None)
assert_array_almost_equal(p, [0.0, 1.0, 0.0, 0.0], 2)
assert_array_almost_equal(r, [0.0, 0.5, 0.0, 0.0], 2)
assert_array_almost_equal(f, [0.0, 0.66, 0.0, 0.0], 2)
assert_array_almost_equal(s, [1, 2, 1, 0], 2)
f2 = fbeta_score(y_true, y_pred, beta=2, average=None)
support = s
assert_array_almost_equal(f2, [0, 0.55, 0, 0], 2)
p, r, f, s = precision_recall_fscore_support(y_true, y_pred,
average="micro")
assert_almost_equal(p, 0.25)
assert_almost_equal(r, 0.25)
assert_almost_equal(f, 2 * 0.25 * 0.25 / 0.5)
assert_equal(s, None)
assert_almost_equal(fbeta_score(y_true, y_pred, beta=2,
average="micro"),
(1 + 4) * p * r / (4 * p + r))
p, r, f, s = precision_recall_fscore_support(y_true, y_pred,
average="macro")
assert_almost_equal(p, 0.25)
assert_almost_equal(r, 0.125)
assert_almost_equal(f, 2 / 12)
assert_equal(s, None)
assert_almost_equal(fbeta_score(y_true, y_pred, beta=2,
average="macro"),
np.mean(f2))
p, r, f, s = precision_recall_fscore_support(y_true, y_pred,
average="weighted")
assert_almost_equal(p, 2 / 4)
assert_almost_equal(r, 1 / 4)
assert_almost_equal(f, 2 / 3 * 2 / 4)
assert_equal(s, None)
assert_almost_equal(fbeta_score(y_true, y_pred, beta=2,
average="weighted"),
np.average(f2, weights=support))
p, r, f, s = precision_recall_fscore_support(y_true, y_pred,
average="samples")
# Check samples
# |h(x_i) inter y_i | = [0, 0, 1]
# |y_i| = [1, 1, 2]
# |h(x_i)| = [1, 1, 2]
assert_almost_equal(p, 1 / 6)
assert_almost_equal(r, 1 / 6)
assert_almost_equal(f, 2 / 4 * 1 / 3)
assert_equal(s, None)
assert_almost_equal(fbeta_score(y_true, y_pred, beta=2,
average="samples"),
0.1666, 2)
@ignore_warnings
def test_precision_recall_f1_score_with_an_empty_prediction():
y_true = np.array([[0, 1, 0, 0], [1, 0, 0, 0], [0, 1, 1, 0]])
y_pred = np.array([[0, 0, 0, 0], [0, 0, 0, 1], [0, 1, 1, 0]])
# true_pos = [ 0. 1. 1. 0.]
# false_pos = [ 0. 0. 0. 1.]
# false_neg = [ 1. 1. 0. 0.]
p, r, f, s = precision_recall_fscore_support(y_true, y_pred,
average=None)
assert_array_almost_equal(p, [0.0, 1.0, 1.0, 0.0], 2)
assert_array_almost_equal(r, [0.0, 0.5, 1.0, 0.0], 2)
assert_array_almost_equal(f, [0.0, 1 / 1.5, 1, 0.0], 2)
assert_array_almost_equal(s, [1, 2, 1, 0], 2)
f2 = fbeta_score(y_true, y_pred, beta=2, average=None)
support = s
assert_array_almost_equal(f2, [0, 0.55, 1, 0], 2)
p, r, f, s = precision_recall_fscore_support(y_true, y_pred,
average="macro")
assert_almost_equal(p, 0.5)
assert_almost_equal(r, 1.5 / 4)
assert_almost_equal(f, 2.5 / (4 * 1.5))
assert_equal(s, None)
assert_almost_equal(fbeta_score(y_true, y_pred, beta=2,
average="macro"),
np.mean(f2))
p, r, f, s = precision_recall_fscore_support(y_true, y_pred,
average="micro")
assert_almost_equal(p, 2 / 3)
assert_almost_equal(r, 0.5)
assert_almost_equal(f, 2 / 3 / (2 / 3 + 0.5))
assert_equal(s, None)
assert_almost_equal(fbeta_score(y_true, y_pred, beta=2,
average="micro"),
(1 + 4) * p * r / (4 * p + r))
p, r, f, s = precision_recall_fscore_support(y_true, y_pred,
average="weighted")
assert_almost_equal(p, 3 / 4)
assert_almost_equal(r, 0.5)
assert_almost_equal(f, (2 / 1.5 + 1) / 4)
assert_equal(s, None)
assert_almost_equal(fbeta_score(y_true, y_pred, beta=2,
average="weighted"),
np.average(f2, weights=support))
p, r, f, s = precision_recall_fscore_support(y_true, y_pred,
average="samples")
# |h(x_i) inter y_i | = [0, 0, 2]
# |y_i| = [1, 1, 2]
# |h(x_i)| = [0, 1, 2]
assert_almost_equal(p, 1 / 3)
assert_almost_equal(r, 1 / 3)
assert_almost_equal(f, 1 / 3)
assert_equal(s, None)
assert_almost_equal(fbeta_score(y_true, y_pred, beta=2,
average="samples"),
0.333, 2)
def test_precision_recall_f1_no_labels():
y_true = np.zeros((20, 3))
y_pred = np.zeros_like(y_true)
# tp = [0, 0, 0]
# fn = [0, 0, 0]
# fp = [0, 0, 0]
# support = [0, 0, 0]
# |y_hat_i inter y_i | = [0, 0, 0]
# |y_i| = [0, 0, 0]
# |y_hat_i| = [0, 0, 0]
for beta in [1]:
p, r, f, s = assert_warns(UndefinedMetricWarning,
precision_recall_fscore_support,
y_true, y_pred, average=None, beta=beta)
assert_array_almost_equal(p, [0, 0, 0], 2)
assert_array_almost_equal(r, [0, 0, 0], 2)
assert_array_almost_equal(f, [0, 0, 0], 2)
assert_array_almost_equal(s, [0, 0, 0], 2)
fbeta = assert_warns(UndefinedMetricWarning, fbeta_score,
y_true, y_pred, beta=beta, average=None)
assert_array_almost_equal(fbeta, [0, 0, 0], 2)
for average in ["macro", "micro", "weighted", "samples"]:
p, r, f, s = assert_warns(UndefinedMetricWarning,
precision_recall_fscore_support,
y_true, y_pred, average=average,
beta=beta)
assert_almost_equal(p, 0)
assert_almost_equal(r, 0)
assert_almost_equal(f, 0)
assert_equal(s, None)
fbeta = assert_warns(UndefinedMetricWarning, fbeta_score,
y_true, y_pred,
beta=beta, average=average)
assert_almost_equal(fbeta, 0)
def test_prf_warnings():
# average of per-label scores
f, w = precision_recall_fscore_support, UndefinedMetricWarning
my_assert = assert_warns_message
for average in [None, 'weighted', 'macro']:
msg = ('Precision and F-score are ill-defined and '
'being set to 0.0 in labels with no predicted samples.')
my_assert(w, msg, f, [0, 1, 2], [1, 1, 2], average=average)
msg = ('Recall and F-score are ill-defined and '
'being set to 0.0 in labels with no true samples.')
my_assert(w, msg, f, [1, 1, 2], [0, 1, 2], average=average)
# average of per-sample scores
msg = ('Precision and F-score are ill-defined and '
'being set to 0.0 in samples with no predicted labels.')
my_assert(w, msg, f, np.array([[1, 0], [1, 0]]),
np.array([[1, 0], [0, 0]]), average='samples')
msg = ('Recall and F-score are ill-defined and '
'being set to 0.0 in samples with no true labels.')
my_assert(w, msg, f, np.array([[1, 0], [0, 0]]),
np.array([[1, 0], [1, 0]]),
average='samples')
# single score: micro-average
msg = ('Precision and F-score are ill-defined and '
'being set to 0.0 due to no predicted samples.')
my_assert(w, msg, f, np.array([[1, 1], [1, 1]]),
np.array([[0, 0], [0, 0]]), average='micro')
msg = ('Recall and F-score are ill-defined and '
'being set to 0.0 due to no true samples.')
my_assert(w, msg, f, np.array([[0, 0], [0, 0]]),
np.array([[1, 1], [1, 1]]), average='micro')
# single positive label
msg = ('Precision and F-score are ill-defined and '
'being set to 0.0 due to no predicted samples.')
my_assert(w, msg, f, [1, 1], [-1, -1], average='macro')
msg = ('Recall and F-score are ill-defined and '
'being set to 0.0 due to no true samples.')
my_assert(w, msg, f, [-1, -1], [1, 1], average='macro')
def test_recall_warnings():
assert_no_warnings(recall_score,
np.array([[1, 1], [1, 1]]),
np.array([[0, 0], [0, 0]]),
average='micro')
clean_warning_registry()
with warnings.catch_warnings(record=True) as record:
warnings.simplefilter('always')
recall_score(np.array([[0, 0], [0, 0]]),
np.array([[1, 1], [1, 1]]),
average='micro')
assert_equal(str(record.pop().message),
'Recall is ill-defined and '
'being set to 0.0 due to no true samples.')
def test_precision_warnings():
clean_warning_registry()
with warnings.catch_warnings(record=True) as record:
warnings.simplefilter('always')
precision_score(np.array([[1, 1], [1, 1]]),
np.array([[0, 0], [0, 0]]),
average='micro')
assert_equal(str(record.pop().message),
'Precision is ill-defined and '
'being set to 0.0 due to no predicted samples.')
assert_no_warnings(precision_score,
np.array([[0, 0], [0, 0]]),
np.array([[1, 1], [1, 1]]),
average='micro')
def test_fscore_warnings():
clean_warning_registry()
with warnings.catch_warnings(record=True) as record:
warnings.simplefilter('always')
for score in [f1_score, partial(fbeta_score, beta=2)]:
score(np.array([[1, 1], [1, 1]]),
np.array([[0, 0], [0, 0]]),
average='micro')
assert_equal(str(record.pop().message),
'F-score is ill-defined and '
'being set to 0.0 due to no predicted samples.')
score(np.array([[0, 0], [0, 0]]),
np.array([[1, 1], [1, 1]]),
average='micro')
assert_equal(str(record.pop().message),
'F-score is ill-defined and '
'being set to 0.0 due to no true samples.')
def test_prf_average_compat():
# Ensure warning if f1_score et al.'s average is implicit for multiclass
y_true = [1, 2, 3, 3]
y_pred = [1, 2, 3, 1]
y_true_bin = [0, 1, 1]
y_pred_bin = [0, 1, 0]
for metric in [precision_score, recall_score, f1_score,
partial(fbeta_score, beta=2)]:
score = assert_warns(DeprecationWarning, metric, y_true, y_pred)
score_weighted = assert_no_warnings(metric, y_true, y_pred,
average='weighted')
assert_equal(score, score_weighted,
'average does not act like "weighted" by default')
# check binary passes without warning
assert_no_warnings(metric, y_true_bin, y_pred_bin)
# but binary with pos_label=None should behave like multiclass
score = assert_warns(DeprecationWarning, metric,
y_true_bin, y_pred_bin, pos_label=None)
score_weighted = assert_no_warnings(metric, y_true_bin, y_pred_bin,
pos_label=None, average='weighted')
assert_equal(score, score_weighted,
'average does not act like "weighted" by default with '
'binary data and pos_label=None')
def test__check_targets():
# Check that _check_targets correctly merges target types, squeezes
# output and fails if input lengths differ.
IND = 'multilabel-indicator'
MC = 'multiclass'
BIN = 'binary'
CNT = 'continuous'
MMC = 'multiclass-multioutput'
MCN = 'continuous-multioutput'
# all of length 3
EXAMPLES = [
(IND, np.array([[0, 1, 1], [1, 0, 0], [0, 0, 1]])),
# must not be considered binary
(IND, np.array([[0, 1], [1, 0], [1, 1]])),
(MC, [2, 3, 1]),
(BIN, [0, 1, 1]),
(CNT, [0., 1.5, 1.]),
(MC, np.array([[2], [3], [1]])),
(BIN, np.array([[0], [1], [1]])),
(CNT, np.array([[0.], [1.5], [1.]])),
(MMC, np.array([[0, 2], [1, 3], [2, 3]])),
(MCN, np.array([[0.5, 2.], [1.1, 3.], [2., 3.]])),
]
# expected type given input types, or None for error
# (types will be tried in either order)
EXPECTED = {
(IND, IND): IND,
(MC, MC): MC,
(BIN, BIN): BIN,
(MC, IND): None,
(BIN, IND): None,
(BIN, MC): MC,
# Disallowed types
(CNT, CNT): None,
(MMC, MMC): None,
(MCN, MCN): None,
(IND, CNT): None,
(MC, CNT): None,
(BIN, CNT): None,
(MMC, CNT): None,
(MCN, CNT): None,
(IND, MMC): None,
(MC, MMC): None,
(BIN, MMC): None,
(MCN, MMC): None,
(IND, MCN): None,
(MC, MCN): None,
(BIN, MCN): None,
}
for (type1, y1), (type2, y2) in product(EXAMPLES, repeat=2):
try:
expected = EXPECTED[type1, type2]
except KeyError:
expected = EXPECTED[type2, type1]
if expected is None:
assert_raises(ValueError, _check_targets, y1, y2)
if type1 != type2:
assert_raise_message(
ValueError,
"Can't handle mix of {0} and {1}".format(type1, type2),
_check_targets, y1, y2)
else:
if type1 not in (BIN, MC, IND):
assert_raise_message(ValueError,
"{0} is not supported".format(type1),
_check_targets, y1, y2)
else:
merged_type, y1out, y2out = _check_targets(y1, y2)
assert_equal(merged_type, expected)
if merged_type.startswith('multilabel'):
assert_equal(y1out.format, 'csr')
assert_equal(y2out.format, 'csr')
else:
assert_array_equal(y1out, np.squeeze(y1))
assert_array_equal(y2out, np.squeeze(y2))
assert_raises(ValueError, _check_targets, y1[:-1], y2)
# Make sure seq of seq is not supported
y1 = [(1, 2,), (0, 2, 3)]
y2 = [(2,), (0, 2,)]
msg = ('You appear to be using a legacy multi-label data representation. '
'Sequence of sequences are no longer supported; use a binary array'
' or sparse matrix instead.')
assert_raise_message(ValueError, msg, _check_targets, y1, y2)
def test_hinge_loss_binary():
y_true = np.array([-1, 1, 1, -1])
pred_decision = np.array([-8.5, 0.5, 1.5, -0.3])
assert_equal(hinge_loss(y_true, pred_decision), 1.2 / 4)
y_true = np.array([0, 2, 2, 0])
pred_decision = np.array([-8.5, 0.5, 1.5, -0.3])
assert_equal(hinge_loss(y_true, pred_decision), 1.2 / 4)
def test_hinge_loss_multiclass():
pred_decision = np.array([
[+0.36, -0.17, -0.58, -0.99],
[-0.54, -0.37, -0.48, -0.58],
[-1.45, -0.58, -0.38, -0.17],
[-0.54, -0.38, -0.48, -0.58],
[-2.36, -0.79, -0.27, +0.24],
[-1.45, -0.58, -0.38, -0.17]
])
y_true = np.array([0, 1, 2, 1, 3, 2])
dummy_losses = np.array([
1 - pred_decision[0][0] + pred_decision[0][1],
1 - pred_decision[1][1] + pred_decision[1][2],
1 - pred_decision[2][2] + pred_decision[2][3],
1 - pred_decision[3][1] + pred_decision[3][2],
1 - pred_decision[4][3] + pred_decision[4][2],
1 - pred_decision[5][2] + pred_decision[5][3]
])
dummy_losses[dummy_losses <= 0] = 0
dummy_hinge_loss = np.mean(dummy_losses)
assert_equal(hinge_loss(y_true, pred_decision),
dummy_hinge_loss)
def test_hinge_loss_multiclass_missing_labels_with_labels_none():
y_true = np.array([0, 1, 2, 2])
pred_decision = np.array([
[+1.27, 0.034, -0.68, -1.40],
[-1.45, -0.58, -0.38, -0.17],
[-2.36, -0.79, -0.27, +0.24],
[-2.36, -0.79, -0.27, +0.24]
])
error_message = ("Please include all labels in y_true "
"or pass labels as third argument")
assert_raise_message(ValueError,
error_message,
hinge_loss, y_true, pred_decision)
def test_hinge_loss_multiclass_with_missing_labels():
pred_decision = np.array([
[+0.36, -0.17, -0.58, -0.99],
[-0.55, -0.38, -0.48, -0.58],
[-1.45, -0.58, -0.38, -0.17],
[-0.55, -0.38, -0.48, -0.58],
[-1.45, -0.58, -0.38, -0.17]
])
y_true = np.array([0, 1, 2, 1, 2])
labels = np.array([0, 1, 2, 3])
dummy_losses = np.array([
1 - pred_decision[0][0] + pred_decision[0][1],
1 - pred_decision[1][1] + pred_decision[1][2],
1 - pred_decision[2][2] + pred_decision[2][3],
1 - pred_decision[3][1] + pred_decision[3][2],
1 - pred_decision[4][2] + pred_decision[4][3]
])
dummy_losses[dummy_losses <= 0] = 0
dummy_hinge_loss = np.mean(dummy_losses)
assert_equal(hinge_loss(y_true, pred_decision, labels=labels),
dummy_hinge_loss)
def test_hinge_loss_multiclass_invariance_lists():
# Currently, invariance of string and integer labels cannot be tested
# in common invariance tests because invariance tests for multiclass
# decision functions are not implemented yet.
y_true = ['blue', 'green', 'red',
'green', 'white', 'red']
pred_decision = [
[+0.36, -0.17, -0.58, -0.99],
[-0.55, -0.38, -0.48, -0.58],
[-1.45, -0.58, -0.38, -0.17],
[-0.55, -0.38, -0.48, -0.58],
[-2.36, -0.79, -0.27, +0.24],
[-1.45, -0.58, -0.38, -0.17]]
dummy_losses = np.array([
1 - pred_decision[0][0] + pred_decision[0][1],
1 - pred_decision[1][1] + pred_decision[1][2],
1 - pred_decision[2][2] + pred_decision[2][3],
1 - pred_decision[3][1] + pred_decision[3][2],
1 - pred_decision[4][3] + pred_decision[4][2],
1 - pred_decision[5][2] + pred_decision[5][3]
])
dummy_losses[dummy_losses <= 0] = 0
dummy_hinge_loss = np.mean(dummy_losses)
assert_equal(hinge_loss(y_true, pred_decision),
dummy_hinge_loss)
def test_log_loss():
# binary case with symbolic labels ("no" < "yes")
y_true = ["no", "no", "no", "yes", "yes", "yes"]
y_pred = np.array([[0.5, 0.5], [0.1, 0.9], [0.01, 0.99],
[0.9, 0.1], [0.75, 0.25], [0.001, 0.999]])
loss = log_loss(y_true, y_pred)
assert_almost_equal(loss, 1.8817971)
# multiclass case; adapted from http://bit.ly/RJJHWA
y_true = [1, 0, 2]
y_pred = [[0.2, 0.7, 0.1], [0.6, 0.2, 0.2], [0.6, 0.1, 0.3]]
loss = log_loss(y_true, y_pred, normalize=True)
assert_almost_equal(loss, 0.6904911)
# check that we got all the shapes and axes right
# by doubling the length of y_true and y_pred
y_true *= 2
y_pred *= 2
loss = log_loss(y_true, y_pred, normalize=False)
assert_almost_equal(loss, 0.6904911 * 6, decimal=6)
# check eps and handling of absolute zero and one probabilities
y_pred = np.asarray(y_pred) > .5
loss = log_loss(y_true, y_pred, normalize=True, eps=.1)
assert_almost_equal(loss, log_loss(y_true, np.clip(y_pred, .1, .9)))
# raise an error if the number of classes is not equal.
y_true = [1, 0, 2]
y_pred = [[0.2, 0.7], [0.6, 0.5], [0.4, 0.1]]
assert_raises(ValueError, log_loss, y_true, y_pred)
# case when y_true is a string array object
y_true = ["ham", "spam", "spam", "ham"]
y_pred = [[0.2, 0.7], [0.6, 0.5], [0.4, 0.1], [0.7, 0.2]]
loss = log_loss(y_true, y_pred)
assert_almost_equal(loss, 1.0383217, decimal=6)
def test_log_loss_pandas_input():
# case when input is a pandas series and dataframe gh-5715
y_tr = np.array(["ham", "spam", "spam", "ham"])
y_pr = np.array([[0.2, 0.7], [0.6, 0.5], [0.4, 0.1], [0.7, 0.2]])
types = [(MockDataFrame, MockDataFrame)]
try:
from pandas import Series, DataFrame
types.append((Series, DataFrame))
except ImportError:
pass
for TrueInputType, PredInputType in types:
# y_pred dataframe, y_true series
y_true, y_pred = TrueInputType(y_tr), PredInputType(y_pr)
loss = log_loss(y_true, y_pred)
assert_almost_equal(loss, 1.0383217, decimal=6)
def test_brier_score_loss():
# Check brier_score_loss function
y_true = np.array([0, 1, 1, 0, 1, 1])
y_pred = np.array([0.1, 0.8, 0.9, 0.3, 1., 0.95])
true_score = linalg.norm(y_true - y_pred) ** 2 / len(y_true)
assert_almost_equal(brier_score_loss(y_true, y_true), 0.0)
assert_almost_equal(brier_score_loss(y_true, y_pred), true_score)
assert_almost_equal(brier_score_loss(1. + y_true, y_pred),
true_score)
assert_almost_equal(brier_score_loss(2 * y_true - 1, y_pred),
true_score)
assert_raises(ValueError, brier_score_loss, y_true, y_pred[1:])
assert_raises(ValueError, brier_score_loss, y_true, y_pred + 1.)
assert_raises(ValueError, brier_score_loss, y_true, y_pred - 1.)
|
bsd-3-clause
|
kfogel/batman
|
batman/tests.py
|
1
|
1757
|
# The batman package: fast computation of exoplanet transit light curves
# Copyright (C) 2015 Laura Kreidberg
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from __future__ import print_function
import numpy as np
import math
import matplotlib.pyplot as plt
from .transitmodel import *
def test():
print("Starting tests...")
failures = 0
params = TransitParams()
params.t0 = 0.
params.per = 1.0
params.rp = 0.1
params.a = 15.23
params.inc = 1.555*180./math.pi
params.ecc = 0.
params.w = 90.
params.u = np.array([0.0, 0.7, 0.0, -0.3])
params.limb_dark = "nonlinear"
t = np.linspace(0.01, 0.05, 1000)
err_max = 0.7
m = TransitModel(params, t, err_max)
nonlinear_lc = m.LightCurve(params)
err = m.calc_err()
if err > err_max: failures += 1
params.limb_dark = "quadratic"
params.u = [0.1,0.3]
m = TransitModel(params, t, err_max)
quadratic_lc = m.LightCurve(params)
if np.max(np.abs(quadratic_lc-nonlinear_lc))*1.0e6 > err_max: failures += 1
# print(np.max(np.abs(quadratic_lc-nonlinear_lc))*1.0e6)
# plt.plot((quadratic_lc - nonlinear_lc)*1.0e6)
# plt.show()
print("Tests finished with " + "{0}".format(failures) + " failures")
|
gpl-3.0
|
smartscheduling/scikit-learn-categorical-tree
|
examples/model_selection/plot_learning_curve.py
|
250
|
4171
|
"""
========================
Plotting Learning Curves
========================
On the left side the learning curve of a naive Bayes classifier is shown for
the digits dataset. Note that the training score and the cross-validation score
are both not very good at the end. However, the shape of the curve can be found
in more complex datasets very often: the training score is very high at the
beginning and decreases and the cross-validation score is very low at the
beginning and increases. On the right side we see the learning curve of an SVM
with RBF kernel. We can see clearly that the training score is still around
the maximum and the validation score could be increased with more training
samples.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn import cross_validation
from sklearn.naive_bayes import GaussianNB
from sklearn.svm import SVC
from sklearn.datasets import load_digits
from sklearn.learning_curve import learning_curve
def plot_learning_curve(estimator, title, X, y, ylim=None, cv=None,
n_jobs=1, train_sizes=np.linspace(.1, 1.0, 5)):
"""
Generate a simple plot of the test and training learning curve.
Parameters
----------
estimator : object type that implements the "fit" and "predict" methods
An object of that type which is cloned for each validation.
title : string
Title for the chart.
X : array-like, shape (n_samples, n_features)
Training vector, where n_samples is the number of samples and
n_features is the number of features.
y : array-like, shape (n_samples) or (n_samples, n_features), optional
Target relative to X for classification or regression;
None for unsupervised learning.
ylim : tuple, shape (ymin, ymax), optional
Defines the minimum and maximum y-values plotted.
cv : integer, cross-validation generator, optional
If an integer is passed, it is the number of folds (defaults to 3).
Specific cross-validation objects can be passed, see
sklearn.cross_validation module for the list of possible objects
n_jobs : integer, optional
Number of jobs to run in parallel (default 1).
"""
plt.figure()
plt.title(title)
if ylim is not None:
plt.ylim(*ylim)
plt.xlabel("Training examples")
plt.ylabel("Score")
train_sizes, train_scores, test_scores = learning_curve(
estimator, X, y, cv=cv, n_jobs=n_jobs, train_sizes=train_sizes)
train_scores_mean = np.mean(train_scores, axis=1)
train_scores_std = np.std(train_scores, axis=1)
test_scores_mean = np.mean(test_scores, axis=1)
test_scores_std = np.std(test_scores, axis=1)
plt.grid()
plt.fill_between(train_sizes, train_scores_mean - train_scores_std,
train_scores_mean + train_scores_std, alpha=0.1,
color="r")
plt.fill_between(train_sizes, test_scores_mean - test_scores_std,
test_scores_mean + test_scores_std, alpha=0.1, color="g")
plt.plot(train_sizes, train_scores_mean, 'o-', color="r",
label="Training score")
plt.plot(train_sizes, test_scores_mean, 'o-', color="g",
label="Cross-validation score")
plt.legend(loc="best")
return plt
digits = load_digits()
X, y = digits.data, digits.target
title = "Learning Curves (Naive Bayes)"
# Cross validation with 100 iterations to get smoother mean test and train
# score curves, each time with 20% data randomly selected as a validation set.
cv = cross_validation.ShuffleSplit(digits.data.shape[0], n_iter=100,
test_size=0.2, random_state=0)
estimator = GaussianNB()
plot_learning_curve(estimator, title, X, y, ylim=(0.7, 1.01), cv=cv, n_jobs=4)
title = "Learning Curves (SVM, RBF kernel, $\gamma=0.001$)"
# SVC is more expensive so we do a lower number of CV iterations:
cv = cross_validation.ShuffleSplit(digits.data.shape[0], n_iter=10,
test_size=0.2, random_state=0)
estimator = SVC(gamma=0.001)
plot_learning_curve(estimator, title, X, y, (0.7, 1.01), cv=cv, n_jobs=4)
plt.show()
|
bsd-3-clause
|
amirchohan/HDR
|
img_hist.py
|
1
|
3159
|
#!/usr/bin/python
# RGB Histogram
# This script will create a histogram image based on the RGB content of
# an image. It uses PIL to do most of the donkey work but then we just
# draw a pretty graph out of it.
#
# May 2009, Scott McDonough, www.scottmcdonough.co.uk
#
import os
import sys
import Image, ImageDraw, ImageStat
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.offsetbox import OffsetImage, AnnotationBbox
imagepath = sys.argv[1] # The image to build the histogram of
histHeight = 120 # Height of the histogram
histWidth = 256 # Width of the histogram
multiplerValue = 1 # The multiplier value basically increases
# the histogram height so that low values
# are easier to see; this in effect chops off
# the top of the histogram.
showFstopLines = True # True/False to show the f-stop marker lines
fStopLines = 5
# Colours to be used
backgroundColor = (51,51,51) # Background color
lineColor = (102,102,102) # Line color of fStop Markers
red = (255,60,60) # Color for the red lines
green = (51,204,51) # Color for the green lines
blue = (0,102,255) # Color for the blue lines
##################################################################################
img = Image.open(imagepath)
if len(sys.argv) > 2 and sys.argv[2] == "brightness_hist":
backgroundColor = (255,255,255) # Background color
red = (51,51,51) # Line color (grayscale) for the brightness histogram
green = (51,51,51) # Line color (grayscale) for the brightness histogram
blue = (51,51,51) # Line color (grayscale) for the brightness histogram
width, height = img.size
pix = img.load()
for x in range(width):
for y in range(height):
brightness = max(pix[x, y])
pix[x, y] = (brightness, brightness, brightness)
hist = img.histogram()
histMax = max(hist) # most common value (histogram peak)
#average = sum(hist)/len(hist)
#histMax = 0
#for i in hist:
# if int(i) > 3*average: pass
# elif int(i) > histMax:
# histMax = int(i)
#histHeight = histMax
xScale = float(histWidth)/len(hist) # xScaling
yScale = float((histHeight)*multiplerValue)/histMax # yScaling
im = Image.new("RGBA", (histWidth, histHeight), backgroundColor)
draw = ImageDraw.Draw(im)
# Draw outline if required
if showFstopLines:
xmarker = histWidth/fStopLines
x =0
for i in range(1,fStopLines+1):
draw.line((x, 0, x, histHeight), fill=lineColor)
x+=xmarker
    draw.line((histWidth-1, 0, histWidth-1, histHeight), fill=lineColor)
draw.line((0, 0, 0, histHeight), fill=lineColor)
# Draw the RGB histogram lines
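# For an RGB image, img.histogram() returns 256 bins per band concatenated
# (R, then G, then B), so the line colour switches every 256 entries while
# x wraps back to 0 for each band.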
x = 0; c = 0
for i in hist:
if int(i)==0: pass
else:
color = red
if c>255: color = green
if c>511: color = blue
draw.line((x, histHeight, x, histHeight-i*yScale), fill=color)
if x>255: x=0
else: x+=1
c+=1
#resize the image
#resize_factor = 1
#im = im.resize((histWidth*resize_factor, histHeight*resize_factor), Image.NEAREST)
x,y = np.random.rand(2,10)
fig = plt.figure()
ax = fig.add_subplot(111)
im = ax.imshow(im,extent=[0,histWidth, 0,histHeight])
# Now save and show the histogram
output_file_name = os.path.splitext(imagepath)[0] + "_histogram.png"
plt.savefig(output_file_name)
|
bsd-3-clause
|
amozie/amozie
|
studzie/time_series_feature_select.py
|
1
|
1046
|
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from statsmodels.graphics.tsaplots import plot_acf
from statsmodels.graphics.tsaplots import plot_pacf
from sklearn.ensemble import RandomForestRegressor as rfr
from sklearn.feature_selection import RFE
# Load the data
series = pd.Series.from_csv('./dataset/monthly-car-sales-in-quebec-1960.csv', header=0)
# Make the series stationary (seasonal differencing, lag 12)
diff = series.diff(12)[12:]
# Autocorrelation and partial autocorrelation plots
plot_acf(diff)
plot_pacf(diff)
# Create a set of lagged features
df = pd.DataFrame()
df['t'] = diff
for i in range(1, 13):
df['t-{0}'.format(str(i))] = diff.shift(i)
df = df.iloc[12:, :]
# Feature importances from a random forest
X = df.values[:, 1:]
y = df.values[:, 0]
model = rfr(500, random_state=1)
model.fit(X, y)
fi = model.feature_importances_
plt.bar(np.arange(1, fi.size+1), fi)
# Select features with RFE (recursive feature elimination)
rfe = RFE(rfr(500, random_state=1), 4)
fit = rfe.fit(X, y)
print(df.columns[1:][fit.support_])
plt.bar(np.arange(1, fit.support_.size+1), fit.support_)
plt.bar(np.arange(1, fit.ranking_.size+1), fit.ranking_)
|
apache-2.0
|
planetarymike/IDL-Colorbars
|
IDL_py_test/011_BLUE-RED.py
|
1
|
5973
|
from matplotlib.colors import LinearSegmentedColormap
from numpy import nan, inf
cm_data = [[0., 0., 0.],
[0., 0.00392157, 0.00392157],
[0., 0.00784314, 0.00784314],
[0., 0.0117647, 0.0117647],
[0., 0.0156863, 0.0156863],
[0., 0.0313725, 0.0313725],
[0., 0.0470588, 0.0470588],
[0., 0.0627451, 0.0627451],
[0., 0.0823529, 0.0823529],
[0., 0.0980392, 0.0980392],
[0., 0.113725, 0.113725],
[0., 0.129412, 0.129412],
[0., 0.14902, 0.14902],
[0., 0.164706, 0.164706],
[0., 0.180392, 0.180392],
[0., 0.196078, 0.196078],
[0., 0.215686, 0.215686],
[0., 0.231373, 0.231373],
[0., 0.247059, 0.247059],
[0., 0.262745, 0.262745],
[0., 0.282353, 0.282353],
[0., 0.298039, 0.298039],
[0., 0.313725, 0.313725],
[0., 0.329412, 0.329412],
[0., 0.34902, 0.34902],
[0., 0.364706, 0.364706],
[0., 0.380392, 0.380392],
[0., 0.396078, 0.396078],
[0., 0.415686, 0.415686],
[0., 0.431373, 0.431373],
[0., 0.447059, 0.447059],
[0., 0.462745, 0.462745],
[0., 0.482353, 0.482353],
[0., 0.498039, 0.498039],
[0., 0.513725, 0.513725],
[0., 0.529412, 0.529412],
[0., 0.54902, 0.54902],
[0., 0.564706, 0.564706],
[0., 0.580392, 0.580392],
[0., 0.596078, 0.596078],
[0., 0.615686, 0.615686],
[0., 0.631373, 0.631373],
[0., 0.647059, 0.647059],
[0., 0.662745, 0.662745],
[0., 0.682353, 0.682353],
[0., 0.698039, 0.698039],
[0., 0.713725, 0.713725],
[0., 0.729412, 0.729412],
[0., 0.74902, 0.74902],
[0., 0.764706, 0.764706],
[0., 0.780392, 0.780392],
[0., 0.796078, 0.796078],
[0., 0.815686, 0.815686],
[0., 0.831373, 0.831373],
[0., 0.847059, 0.847059],
[0., 0.862745, 0.862745],
[0., 0.882353, 0.882353],
[0., 0.898039, 0.898039],
[0., 0.913725, 0.913725],
[0., 0.929412, 0.929412],
[0., 0.94902, 0.94902],
[0., 0.964706, 0.964706],
[0., 0.980392, 0.980392],
[0., 1., 1.],
[0., 1., 1.],
[0., 0.984314, 1.],
[0., 0.968627, 1.],
[0., 0.952941, 1.],
[0., 0.937255, 1.],
[0., 0.921569, 1.],
[0., 0.905882, 1.],
[0., 0.890196, 1.],
[0., 0.87451, 1.],
[0., 0.858824, 1.],
[0., 0.843137, 1.],
[0., 0.827451, 1.],
[0., 0.811765, 1.],
[0., 0.796078, 1.],
[0., 0.780392, 1.],
[0., 0.764706, 1.],
[0., 0.74902, 1.],
[0., 0.733333, 1.],
[0., 0.717647, 1.],
[0., 0.701961, 1.],
[0., 0.686275, 1.],
[0., 0.666667, 1.],
[0., 0.65098, 1.],
[0., 0.635294, 1.],
[0., 0.619608, 1.],
[0., 0.603922, 1.],
[0., 0.588235, 1.],
[0., 0.572549, 1.],
[0., 0.556863, 1.],
[0., 0.541176, 1.],
[0., 0.52549, 1.],
[0., 0.509804, 1.],
[0., 0.494118, 1.],
[0., 0.478431, 1.],
[0., 0.462745, 1.],
[0., 0.447059, 1.],
[0., 0.431373, 1.],
[0., 0.415686, 1.],
[0., 0.4, 1.],
[0., 0.384314, 1.],
[0., 0.368627, 1.],
[0., 0.352941, 1.],
[0., 0.333333, 1.],
[0., 0.317647, 1.],
[0., 0.301961, 1.],
[0., 0.286275, 1.],
[0., 0.270588, 1.],
[0., 0.254902, 1.],
[0., 0.239216, 1.],
[0., 0.223529, 1.],
[0., 0.207843, 1.],
[0., 0.192157, 1.],
[0., 0.176471, 1.],
[0., 0.160784, 1.],
[0., 0.145098, 1.],
[0., 0.129412, 1.],
[0., 0.113725, 1.],
[0., 0.0980392, 1.],
[0., 0.0823529, 1.],
[0., 0.0666667, 1.],
[0., 0.0509804, 1.],
[0., 0.0352941, 1.],
[0., 0.0196078, 1.],
[0., 0., 1.],
[0., 0., 1.],
[0.0156863, 0., 1.],
[0.0313725, 0., 1.],
[0.0470588, 0., 1.],
[0.0627451, 0., 1.],
[0.0784314, 0., 1.],
[0.0941176, 0., 1.],
[0.109804, 0., 1.],
[0.12549, 0., 1.],
[0.141176, 0., 1.],
[0.156863, 0., 1.],
[0.172549, 0., 1.],
[0.188235, 0., 1.],
[0.203922, 0., 1.],
[0.219608, 0., 1.],
[0.235294, 0., 1.],
[0.25098, 0., 1.],
[0.266667, 0., 1.],
[0.282353, 0., 1.],
[0.298039, 0., 1.],
[0.313725, 0., 1.],
[0.333333, 0., 1.],
[0.34902, 0., 1.],
[0.364706, 0., 1.],
[0.380392, 0., 1.],
[0.396078, 0., 1.],
[0.411765, 0., 1.],
[0.427451, 0., 1.],
[0.443137, 0., 1.],
[0.458824, 0., 1.],
[0.47451, 0., 1.],
[0.490196, 0., 1.],
[0.505882, 0., 1.],
[0.521569, 0., 1.],
[0.537255, 0., 1.],
[0.552941, 0., 1.],
[0.568627, 0., 1.],
[0.584314, 0., 1.],
[0.6, 0., 1.],
[0.615686, 0., 1.],
[0.631373, 0., 1.],
[0.647059, 0., 1.],
[0.666667, 0., 1.],
[0.682353, 0., 1.],
[0.698039, 0., 1.],
[0.713725, 0., 1.],
[0.729412, 0., 1.],
[0.745098, 0., 1.],
[0.760784, 0., 1.],
[0.776471, 0., 1.],
[0.792157, 0., 1.],
[0.807843, 0., 1.],
[0.823529, 0., 1.],
[0.839216, 0., 1.],
[0.854902, 0., 1.],
[0.870588, 0., 1.],
[0.886275, 0., 1.],
[0.901961, 0., 1.],
[0.917647, 0., 1.],
[0.933333, 0., 1.],
[0.94902, 0., 1.],
[0.964706, 0., 1.],
[0.980392, 0., 1.],
[1., 0., 1.],
[1., 0., 1.],
[1., 0., 0.984314],
[1., 0., 0.968627],
[1., 0., 0.952941],
[1., 0., 0.937255],
[1., 0., 0.921569],
[1., 0., 0.905882],
[1., 0., 0.890196],
[1., 0., 0.87451],
[1., 0., 0.854902],
[1., 0., 0.839216],
[1., 0., 0.823529],
[1., 0., 0.807843],
[1., 0., 0.792157],
[1., 0., 0.776471],
[1., 0., 0.760784],
[1., 0., 0.745098],
[1., 0., 0.729412],
[1., 0., 0.709804],
[1., 0., 0.694118],
[1., 0., 0.678431],
[1., 0., 0.662745],
[1., 0., 0.647059],
[1., 0., 0.631373],
[1., 0., 0.615686],
[1., 0., 0.6],
[1., 0., 0.584314],
[1., 0., 0.564706],
[1., 0., 0.54902],
[1., 0., 0.533333],
[1., 0., 0.517647],
[1., 0., 0.501961],
[1., 0., 0.486275],
[1., 0., 0.470588],
[1., 0., 0.454902],
[1., 0., 0.439216],
[1., 0., 0.419608],
[1., 0., 0.403922],
[1., 0., 0.388235],
[1., 0., 0.372549],
[1., 0., 0.356863],
[1., 0., 0.341176],
[1., 0., 0.32549],
[1., 0., 0.309804],
[1., 0., 0.294118],
[1., 0., 0.27451],
[1., 0., 0.258824],
[1., 0., 0.243137],
[1., 0., 0.227451],
[1., 0., 0.211765],
[1., 0., 0.196078],
[1., 0., 0.180392],
[1., 0., 0.164706],
[1., 0., 0.14902],
[1., 0., 0.129412],
[1., 0., 0.113725],
[1., 0., 0.0980392],
[1., 0., 0.0823529],
[1., 0., 0.0666667],
[1., 0., 0.0509804],
[1., 0., 0.0352941],
[1., 0., 0.0196078],
[1., 0., 0.],
[1., 0., 0.]]
test_cm = LinearSegmentedColormap.from_list(__file__, cm_data)
if __name__ == "__main__":
import matplotlib.pyplot as plt
import numpy as np
try:
from pycam02ucs.cm.viscm import viscm
viscm(test_cm)
except ImportError:
print("pycam02ucs not found, falling back on simple display")
plt.imshow(np.linspace(0, 100, 256)[None, :], aspect='auto',
cmap=test_cm)
plt.show()
|
gpl-2.0
|
hunering/demo-code
|
python/books/DLWP/5.4.3-cam.py
|
1
|
1670
|
from keras import models
from keras.preprocessing import image
from keras.applications.vgg16 import VGG16, preprocess_input, decode_predictions
from keras import backend as K
import numpy as np
import matplotlib.pyplot as plt
import cv2
from utils import init_keras
init_keras()
img_path = r"C:\Users\huxiaomi\Downloads\deep-learning\data\kaggle-dogs-vs-cats\small\test\cats\cat.1502.jpg"
img = image.load_img(img_path, target_size=(224, 224))
x = image.img_to_array(img)
x = np.expand_dims(x, axis=0)
x = preprocess_input(x)
model = VGG16(weights='imagenet')
preds = model.predict(x)
print('Predicted:', decode_predictions(preds, top=3)[0])
idx_of_max = np.argmax(preds[0])
hit_output = model.output[:, idx_of_max]
last_conv_layer = model.get_layer('block5_conv3')
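# Grad-CAM: take the gradient of the winning class score w.r.t. the last
# convolutional feature map, average it over the spatial axes to get one
# weight per channel, then weight the channels and average them into a heatmap.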
grads = K.gradients(hit_output, last_conv_layer.output)[0]
pooled_grads = K.mean(grads, axis=(0, 1, 2))
iterate = K.function([model.input], [pooled_grads, last_conv_layer.output[0]])
pooled_grads_value, last_conv_layer_output_value = iterate([x])
for i in range(512):
last_conv_layer_output_value[:, :, i] *= pooled_grads_value[i]
heatmap = np.mean(last_conv_layer_output_value, axis=2)
heatmap = np.maximum(heatmap, 0)
heatmap /= np.max(heatmap)
plt.matshow(heatmap)
plt.show()
img = cv2.imread(img_path)
heatmap = cv2.resize(heatmap, (img.shape[1], img.shape[0]))
heatmap = np.uint8(255*heatmap)
heatmap = cv2.applyColorMap(heatmap, cv2.COLORMAP_JET)
superimposed_img = heatmap * 0.4 + img
cv2.imwrite(r'C:\Users\huxiaomi\Downloads\deep-learning\data\kaggle-dogs-vs-cats\heatmap.jpg', superimposed_img)
superimposed_img = np.uint8(superimposed_img)
plt.imshow(superimposed_img)
plt.show()
|
gpl-3.0
|
rbrecheisen/pyminer
|
pyminer/network/classifiers.py
|
1
|
9893
|
__author__ = 'Ralph'
import os
import numpy as np
import pandas as pd
from sklearn.svm import SVC
from sklearn.grid_search import GridSearchCV
from sklearn.cross_validation import StratifiedKFold
from sklearn.externals import joblib
from sklearn.metrics import accuracy_score
from sklearn.metrics import precision_score
from sklearn.metrics import recall_score
from sklearn.metrics import f1_score
from sklearn.metrics import confusion_matrix
from base import Node
from base import InputPort
from base import OutputPort
class Classifier(Node):
def __init__(self, name):
super(Classifier, self).__init__(name)
self.add_input_port(
InputPort(name='input', data_type=pd.DataFrame))
self.add_output_port(
OutputPort(name='model', data_type=str))
self.add_output_port(
OutputPort(name='performance', data_type=float))
class SupportVectorMachine(Classifier):
def __init__(self):
super(SupportVectorMachine, self).__init__('SupportVectorMachine')
# Set configuration items
self.get_config().set('kernel_type', None)
self.get_config().set('kernel_types', ['linear', 'rbf'])
self.get_config().set('auto_detect', True)
self.get_config().set('target', None)
self.get_config().set('performance_measure', 'accuracy')
self.get_config().set('n_grid_folds', 5)
self.get_config().set('n_folds', 10)
self.get_config().set('C', 1.0)
self.get_config().set('gamma', 0.0)
self.get_config().set('model_output_dir', None)
def execute(self):
# Get data from input port
data = self.get_input_port('input').get_data()
if data is None:
return
# Get kernel types
kernel_types = self.get_config().get_list('kernel_types')
if kernel_types is None:
raise RuntimeError('Kernel types not available')
# Get SVM kernel type. Only 'linear' and 'rbf' are currently supported
kernel_type = self.get_config().get('kernel_type')
if kernel_type not in kernel_types:
raise RuntimeError('Kernel type ' + kernel_type + ' not supported')
# Check if hyper parameters should be auto-detected or not. For the
# RBF kernel we need a C and gamma parameter. For the linear kernel
# only the C parameter needs to be optimized
auto_detect = self.get_config().get_bool('auto_detect', True)
# Get target feature name
target = self.get_config().get('target')
if target is None:
raise RuntimeError('Property \'target\' missing')
# Get performance measure to calculate
perf_measure = self.get_config().get('performance_measure')
if perf_measure is None:
print('WARNING: no performance measure configured, using accuracy')
perf_measure = 'accuracy'
# Get nr. grid search CV folds. Set to 5 if not configured
n_grid_folds = self.get_config().get_int('n_grid_folds', 5)
# Check if target feature exists in input data
if target not in data.columns:
raise RuntimeError('Feature \'' + target + '\' does not exist')
# Split data into X,y
predictors = list(data.columns)
predictors.remove(target)
X = data[predictors].as_matrix()
y = data[target].as_matrix()
C = 1
gamma = 0
if kernel_type == 'rbf':
if auto_detect:
grid_search = GridSearchCV(
estimator=SVC(),
param_grid=self._get_param_grid(kernel_type),
scoring=perf_measure,
cv=n_grid_folds,
refit=True)
grid_search.fit(X, y)
C = grid_search.best_params_['C']
gamma = grid_search.best_params_['gamma']
else:
C = self.get_config().get_float('C')
if C is None:
raise RuntimeError('Property \'C\' missing')
gamma = self.get_config().get_float('gamma')
if gamma is None:
raise RuntimeError('Property \'gamma\' missing')
elif kernel_type == 'linear':
if auto_detect:
grid_search = GridSearchCV(
estimator=SVC(),
param_grid=self._get_param_grid(kernel_type),
scoring=perf_measure,
cv=n_grid_folds,
refit=True)
grid_search.fit(X, y)
C = grid_search.best_params_['C']
else:
C = self.get_config().get_float('C')
if C is None:
raise RuntimeError('Property \'C\' missing')
else:
raise RuntimeError('Unknown kernel type \'' + kernel_type + '\'')
# Get nr. cross validation folds to estimate performance
n_folds = self.get_config().get_int('n_folds', 10)
# Train SVM on cross-validated data to estimate its performance. If the
# hyper parameters were given, use these. If not, apply grid search in
# each fold before calculating the score.
scores = []
for i, (train, test) in enumerate(StratifiedKFold(y, n_folds=n_folds)):
if auto_detect:
# Hyper parameters were already optimized earlier so just
# plug them into the classifier
classifier = SVC(kernel=kernel_type, C=C, gamma=gamma)
classifier.fit(X[train], y[train])
else:
# Optimize hyper parameter to get most realistic
# generalization performance
classifier = GridSearchCV(
estimator=SVC(kernel=kernel_type),
param_grid=self._get_param_grid(kernel_type),
cv=n_grid_folds,
refit=True)
classifier.fit(X[train], y[train])
# Calculate performance measure
y_pred = classifier.predict(X[test])
y_true = y[test]
            score = self._calculate_score(perf_measure, y_true, y_pred)
scores.append(score)
# Calculate mean performance score
mean_score = np.mean(np.asarray(scores))
# Train SVM model on the full data set
if auto_detect:
classifier = GridSearchCV(
estimator=SVC(kernel=kernel_type),
param_grid=self._get_param_grid(kernel_type),
cv=5,
refit=True)
classifier.fit(X, y)
else:
classifier = SVC(kernel=kernel_type, C=C, gamma=gamma)
classifier.fit(X, y)
# Save model to output if configured to do so
model_output_dir = self.get_config().get('model_output_dir')
if model_output_dir is not None:
if model_output_dir.endswith(os.sep):
                model_output_dir = model_output_dir[:-1]
if not os.path.isdir(model_output_dir):
os.makedirs(model_output_dir)
model_output_file = os.path.join(model_output_dir, 'classifier.pkl')
joblib.dump(classifier, model_output_file)
self.get_output_port('model').set_data(model_output_file)
# Set output port performance data
self.get_output_port('performance').set_data(mean_score)
@staticmethod
def _get_param_grid(kernel):
if kernel == 'linear':
return [{
'C': [2**x for x in range(-5, 15, 2)],
'kernel': ['linear']}]
if kernel == 'rbf':
return [{
'C': [2**x for x in range(-5, 15, 2)],
'gamma': [2**x for x in range(-15, 4, 2)],
'kernel': ['rbf']}]
return None
@staticmethod
def _calculate_score(perf_measure, y_true, y_pred):
if perf_measure == 'accuracy':
return accuracy_score(y_true, y_pred)
if perf_measure == 'precision':
return precision_score(y_true, y_pred)
if perf_measure == 'recall':
return recall_score(y_true, y_pred)
if perf_measure == 'f1_score':
return f1_score(y_true, y_pred)
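        # The remaining measures are derived from the confusion matrix; note
        # that this treats row/column 0 as the positive class, which assumes
        # a binary problem with the labels ordered accordingly.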
cm = confusion_matrix(y_true, y_pred)
tp = float(cm[0][0])
tn = float(cm[1][1])
fp = float(cm[1][0])
fn = float(cm[0][1])
if perf_measure == 'sensitivity':
sensitivity = tp / (tp + fn)
return sensitivity
if perf_measure == 'specificity':
specificity = tn / (tn + fp)
return specificity
if perf_measure == 'likelihood_ratio':
sensitivity = tp / (tp + fn)
specificity = tn / (tn + fp)
return sensitivity / (1 - specificity)
raise RuntimeError('Unsupported performance measure ' + perf_measure)
class ApplyModel(Node):
def __init__(self):
super(ApplyModel, self).__init__('ApplyModel')
self.add_input_port(
InputPort(name='input', data_type=pd.DataFrame))
self.add_input_port(
InputPort(name='model', data_type=str))
self.add_output_port(
OutputPort(name='output', data_type=pd.Series))
def execute(self):
self.check_config()
data = self.get_input_port('input').get_data()
if data is None:
return
model_input_file = self.get_input_port('model').get_data()
if model_input_file is None:
return
# Load classifier from file
classifier = joblib.load(model_input_file)
# Perform prediction on each example in X
X = data.as_matrix()
y_pred = pd.Series(classifier.predict(X))
# Set output port data
self.get_output_port('output').set_data(y_pred)
|
apache-2.0
|
robwarm/gpaw-symm
|
gpaw/atom/generator2.py
|
1
|
40677
|
# -*- coding: utf-8 -*-
import sys
from math import pi, exp, sqrt, log
import numpy as np
from scipy.optimize import fsolve
from scipy import __version__ as scipy_version
from ase.utils import prnt, devnull
from ase.units import Hartree
from ase.data import atomic_numbers, chemical_symbols
from gpaw.spline import Spline
from gpaw.setup import BaseSetup
from gpaw.version import version
from gpaw.basis_data import Basis
from gpaw.gaunt import make_gaunt
from gpaw.utilities import erf, pack2
from gpaw.setup_data import SetupData
from gpaw.atom.configurations import configurations
from gpaw.utilities.lapack import general_diagonalize
from gpaw.atom.aeatom import AllElectronAtom, Channel, parse_ld_str, colors, \
GaussianBasis
parameters = {
'H': ('1s,s,p', 0.9, {}),
'He': ('1s,s,p', 1.5, {}),
'Li': ('1s,2s,2p,p,d', 1.5, {}),
'Be': ('1s,2s,2p,p,d', 1.4, {}),
'B': ('2s,s,2p,p,d', 1.2, {}),
'C': ('2s,s,2p,p,d', 1.2, {}),
'N': ('2s,s,2p,p,d', [1.2, 1.3], {'r0': 1.1}),
'O': ('2s,s,2p,p,d', [1.2, 1.4], {}),
'F': ('2s,s,2p,p,d', [1.2,1.4], {}),
'Ne': ('2s,s,2p,p,d', 1.8, {}), # 10
'Na': ('2s,3s,2p,3p,d', 2.3, {'local': 'f'}),
'Mg': ('2s,3s,2p,3p,d', [2.0, 1.8], {'local': 'f'}),
'Al': ('3s,s,3p,p,d', 2.1, {'local': 'f'}),
'Si': ('3s,s,3p,p,d', 1.9, {'local': 'f'}),
'P': ('3s,s,3p,p,d', 1.7, {'local': 'f'}),
'S': ('3s,s,3p,p,d', 1.6, {'local': 'f'}),
'Cl': ('3s,s,3p,p,d', 1.5, {'local': 'f'}),
'Ar': ('3s,s,3p,p,d', 1.5, {'local': 'f'}),
'K': ('3s,4s,3p,4p,d,d', 2.1, {'local': 'f'}),
'Ca': ('3s,4s,3p,4p,3d,d', 2.1, {'local': 'f'}), # 20
'Sc': ('3s,4s,3p,4p,3d,d', 2.3, {'local': 'f'}),
'Ti': ('3s,4s,3p,4p,3d,d', [2.2, 2.2, 2.3], {'local': 'f'}),
'V': ('3s,4s,3p,4p,3d,d', [2.1, 2.1, 2.3], {'local': 'f'}),
'Cr': ('3s,4s,3p,4p,3d,d', [2.1, 2.1, 2.3], {'local': 'f'}),
'Mn': ('3s,4s,3p,4p,3d,d', [2.0, 2.0, 2.2], {'local': 'f'}),
'Fe': ('3s,4s,3p,4p,3d,d', 2.1, {'local': 'f'}),
'Co': ('3s,4s,3p,4p,3d,d', 2.1, {'local': 'f'}),
'Ni': ('3s,4s,3p,4p,3d,d', 2.0, {'local': 'f'}),
'Cu': ('3s,4s,3p,4p,3d,d', 1.9, {'local': 'f'}),
'Zn': ('3s,4s,3p,4p,3d,d', 1.9, {'local': 'f'}), # 30
'Ga': ('4s,s,4p,p,3d,d', 2.2, {'local': 'f'}),
'Ge': ('4s,s,4p,p,3d,d', 2.1, {'local': 'f'}),
'As': ('4s,s,4p,p,d', 2.0, {'local': 'f'}),
'Se': ('4s,s,4p,p,d', 2.1, {'local': 'f'}),
'Br': ('4s,s,4p,p,d', 2.1, {'local': 'f'}),
'Kr': ('4s,s,4p,p,d', 2.1, {'local': 'f'}),
'Rb': ('4s,5s,4p,5p,d,d', 2.5, {'local': 'f'}),
'Sr': ('4s,5s,4p,5p,4d,d', 2.5, {'local': 'f'}),
'Y': ('4s,5s,4p,5p,4d,d', 2.5, {'local': 'f'}),
'Zr': ('4s,5s,4p,5p,4d,d', 2.5, {'local': 'f'}), # 40
'Nb': ('4s,5s,4p,5p,4d,d', [2.4,2.4,2.5], {'local': 'f'}),
'Mo': ('4s,5s,4p,5p,4d,d', 2.3, {'local': 'f'}),
'Tc': ('4s,5s,4p,5p,4d,d', 2.3, {'local': 'f'}),
'Ru': ('4s,5s,4p,5p,4d,d', 2.3, {'local': 'f'}),
'Rh': ('4s,5s,4p,5p,4d,d', 2.3, {'local': 'f'}),
'Pd': ('4s,5s,4p,5p,4d,d', 2.3, {'local': 'f'}),
'Ag': ('4s,5s,4p,5p,4d,d', 2.3, {'local': 'f'}),
'Cd': ('4s,5s,4p,5p,4d,d', 2.3, {'local': 'f'}),
'In': ('5s,s,5p,p,4d,d', 2.6, {'local': 'f'}),
'Sn': ('5s,s,5p,p,4d,d', 2.5, {'local': 'f'}),
'Sb': ('5s,s,5p,p,4d,d', 2.5, {'local': 'f'}),
'Te': ('5s,6s,5p,p,d,d', 2.5, {'local': 'f'}),
'I': ('5s,s,5p,p,d', 2.4, {'local': 'f'}),
'Xe': ('5s,s,5p,p,d', 2.3, {'local': 'f'}),
'Cs': ('5s,6s,5p,6p,5d', [1.9, 2.2], {}), # 55
'Ba': ('5s,6s,5p,6p,5d', [1.8, 2.2], {}),
'La': ('5s,6s,5p,6p,5d,d,4f,f', 2.5, {'local': 'g'}),
'Ce': ('5s,6s,5p,6p,5d,d,4f,f', 2.4, {'local': 'g'}),
'Pr': ('5s,6s,5p,6p,5d,d,4f,f', 2.3, {'local': 'g'}),
'Nd': ('5s,6s,5p,6p,5d,d,4f,f', 2.3, {'local': 'g'}), # 60
'Pm': ('5s,6s,5p,6p,5d,d,4f,f', 2.3, {'local': 'g'}),
'Sm': ('5s,6s,5p,6p,5d,d,4f,f', 2.2, {'local': 'g'}),
'Eu': ('5s,6s,5p,6p,5d,d,4f,f', 2.2, {'local': 'g'}),
'Gd': ('5s,6s,5p,6p,5d,d,4f,f', 2.2, {'local': 'g'}),
'Tb': ('5s,6s,5p,6p,5d,d,4f,f', 2.2, {'local': 'g'}), # 65
'Dy': ('5s,6s,5p,6p,5d,d,4f,f', 2.1, {'local': 'g'}),
'Ho': ('5s,6s,5p,6p,5d,d,4f,f', 2.2, {'local': 'g'}),
'Er': ('5s,6s,5p,6p,5d,d,4f,f', 2.2, {'local': 'g'}),
'Tm': ('5s,6s,5p,6p,5d,d,4f,f', 2.2, {'local': 'g'}),
'Yb': ('5s,6s,5p,6p,5d,d,4f,f', 2.2, {'local': 'g'}), # 70
'Lu': ('5s,6s,5p,6p,5d,d,4f,f', 2.2, {'local': 'g'}),
'Hf': ('5s,6s,5p,6p,5d,d', 2.4, {'local': 'f'}),
'Ta': ('5s,6s,5p,6p,5d,d', 2.4, {'local': 'f'}),
'W': ('5s,6s,5p,6p,5d,d', 2.4, {'local': 'f'}),
'Re': ('5s,6s,5p,6p,5d,d', 2.4, {'local': 'f'}), # 75
'Os': ('5s,6s,5p,6p,5d,d', 2.4, {'local': 'f'}),
'Ir': ('5s,6s,5p,6p,5d,d', 2.4, {'local': 'f'}),
'Pt': ('5s,6s,5p,6p,5d,d', 2.3, {'local': 'f'}),
'Au': ('5s,6s,5p,6p,5d,d', 2.3, {'local': 'f'}),
'Hg': ('5s,6s,5p,6p,5d,d', 2.3, {'local': 'f'}), # 80
'Tl': ('6s,s,6p,p,5d,d', 2.8, {'local': 'f'}),
'Pb': ('6s,s,6p,p,5d,d', 2.6, {'local': 'f'}),
'Bi': ('6s,s,6p,p,5d,d', 2.6, {'local': 'f'}),
'Po': ('6s,s,6p,p,d', 2.7, {'local': 'f'}),
'At': ('6s,s,6p,p,d', 2.6, {'local': 'f'}),
'Rn': ('6s,s,6p,p,d', 2.6, {'local': 'f'}),
}
class PAWWaves:
def __init__(self, rgd, l, rcut):
self.rgd = rgd
self.l = l
self.rcut = rcut
self.n_n = []
self.e_n = []
self.f_n = []
self.phi_ng = []
self.phit_ng = None
self.pt_ng = None
def __len__(self):
return len(self.n_n)
def add(self, phi_g, n, e, f):
self.phi_ng.append(phi_g)
self.n_n.append(n)
self.e_n.append(e)
self.f_n.append(f)
def pseudize(self, type, nderiv):
rgd = self.rgd
if type == 'poly':
ps = rgd.pseudize
elif type == 'bessel':
ps = rgd.jpseudize
phi_ng = self.phi_ng = np.array(self.phi_ng)
N = len(phi_ng)
phit_ng = self.phit_ng = rgd.empty(N)
gcut = rgd.ceil(self.rcut)
self.nt_g = 0
self.c_n = []
for n in range(N):
phit_ng[n], c = ps(phi_ng[n], gcut, self.l, nderiv)
self.c_n.append(c)
self.nt_g += self.f_n[n] / 4 / pi * phit_ng[n]**2
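        # PAW overlap corrections inside the augmentation sphere:
        # dS_nn = <phi_n1|phi_n2> - <phit_n1|phit_n2>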
self.dS_nn = np.empty((N, N))
for n1 in range(N):
for n2 in range(N):
self.dS_nn[n1, n2] = rgd.integrate(
phi_ng[n1] * phi_ng[n2] -
phit_ng[n1] * phit_ng[n2]) / (4 * pi)
self.Q = np.dot(self.f_n, self.dS_nn.diagonal())
def construct_projectors(self, vtr_g, rcmax):
N = len(self)
if N == 0:
self.pt_ng = []
return
rgd = self.rgd
phit_ng = self.phit_ng
gcmax = rgd.ceil(rcmax)
r_g = rgd.r_g
l = self.l
dgdr_g = 1 / rgd.dr_g
d2gdr2_g = rgd.d2gdr2()
q_ng = rgd.zeros(N)
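        # Apply (vt - e_n) plus the kinetic-energy operator to each pseudo
        # partial wave; the resulting q_n are the raw projector functions,
        # before they are made dual to the pseudo partial waves below.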
for n in range(N):
a_g, dadg_g, d2adg2_g = rgd.zeros(3)
a_g[1:] = self.phit_ng[n, 1:] / r_g[1:]**l
a_g[0] = self.c_n[n]
dadg_g[1:-1] = 0.5 * (a_g[2:] - a_g[:-2])
d2adg2_g[1:-1] = a_g[2:] - 2 * a_g[1:-1] + a_g[:-2]
q_g = (vtr_g - self.e_n[n] * r_g) * self.phit_ng[n]
q_g -= 0.5 * r_g**l * (
(2 * (l + 1) * dgdr_g + r_g * d2gdr2_g) * dadg_g +
r_g * d2adg2_g * dgdr_g**2)
q_g[gcmax:] = 0
q_g[1:] /= r_g[1:]
if l == 0:
q_g[0] = q_g[1]
q_ng[n] = q_g
A_nn = rgd.integrate(phit_ng[:, None] * q_ng) / (4 * pi)
self.dH_nn = self.e_n * self.dS_nn - A_nn
L_nn = np.eye(N)
U_nn = A_nn.copy()
if N - self.n_n.count(-1) == 1:
assert self.n_n[0] != -1
# We have a single bound-state projector.
for n1 in range(N):
for n2 in range(n1 + 1, N):
L_nn[n2, n1] = U_nn[n2, n1] / U_nn[n1, n1]
U_nn[n2] -= U_nn[n1] * L_nn[n2, n1]
iL_nn = np.linalg.inv(L_nn)
phit_ng[:] = np.dot(iL_nn, phit_ng)
self.phi_ng[:] = np.dot(iL_nn, self.phi_ng)
self.dS_nn = np.dot(np.dot(iL_nn, self.dS_nn), iL_nn.T)
self.dH_nn = np.dot(np.dot(iL_nn, self.dH_nn), iL_nn.T)
self.pt_ng = np.dot(np.linalg.inv(U_nn.T), q_ng)
def calculate_kinetic_energy_correction(self, vr_g, vtr_g):
if len(self) == 0:
return
self.dekin_nn = (self.rgd.integrate(self.phit_ng[:, None] *
self.phit_ng *
vtr_g, -1) / (4 * pi) -
self.rgd.integrate(self.phi_ng[:, None] *
self.phi_ng *
vr_g, -1) / (4 * pi) +
self.dH_nn)
class PAWSetupGenerator:
def __init__(self, aea, projectors,
scalar_relativistic=False,
fd=sys.stdout):
"""fd: stream
Text output."""
self.aea = aea
if fd is None:
fd = devnull
self.fd = fd
self.lmax = -1
self.states = {}
for s in projectors.split(','):
l = 'spdf'.find(s[-1])
if len(s) == 1:
n = None
elif '.' in s:
n = float(s[:-1])
else:
n = int(s[:-1])
if l in self.states:
self.states[l].append(n)
else:
self.states[l] = [n]
if l > self.lmax:
self.lmax = l
# Add empty bound states:
for l, nn in self.states.items():
for n in nn:
if (isinstance(n, int) and
(l not in aea.f_lsn or n - l > len(aea.f_lsn[l][0]))):
aea.add(n, l, 0)
for l in range(self.lmax):
if l not in self.states:
                self.states[l] = []
aea.initialize()
aea.run()
aea.scalar_relativistic = scalar_relativistic
aea.refine()
self.rgd = aea.rgd
self.vtr_g = None
self.log('\nGenerating PAW', aea.xc.name, 'setup for', aea.symbol)
def construct_shape_function(self, alpha=None, rc=None, eps=1e-10):
"""Build shape-function for compensation charge."""
self.alpha = alpha
if self.alpha is None:
if isinstance(rc, list):
rc = min(rc)
rc = 1.5 * rc
def spillage(alpha):
"""Fraction of gaussian charge outside rc."""
x = alpha * rc**2
return 1 - erf(sqrt(x)) + 2 * sqrt(x / pi) * exp(-x)
def f(alpha):
return log(spillage(alpha)) - log(eps)
if scipy_version < '0.8':
self.alpha = fsolve(f, 7.0)
else:
self.alpha = fsolve(f, 7.0)[0]
self.alpha = round(self.alpha, 1)
self.log('Shape function: exp(-alpha*r^2), alpha=%.1f Bohr^-2' %
self.alpha)
self.ghat_g = (np.exp(-self.alpha * self.rgd.r_g**2) *
(self.alpha / pi)**1.5)
def log(self, *args, **kwargs):
prnt(file=self.fd, *args, **kwargs)
def calculate_core_density(self):
self.nc_g = self.rgd.zeros()
self.ncore = 0
self.nvalence = 0
self.ekincore = 0.0
for l, ch in enumerate(self.aea.channels):
for n, f in enumerate(ch.f_n):
if l <= self.lmax and n + l + 1 in self.states[l]:
self.nvalence += f
else:
self.nc_g += f * ch.calculate_density(n)
self.ncore += f
self.ekincore += f * ch.e_n[n]
self.ekincore -= self.rgd.integrate(self.nc_g * self.aea.vr_sg[0], -1)
self.log('Core electrons:', self.ncore)
self.log('Valence electrons:', self.nvalence)
def add_waves(self, rc):
if isinstance(rc, float):
radii = [rc]
else:
radii = rc
self.rcmax = max(radii)
if self.lmax >= 0:
radii += [radii[-1]] * (self.lmax + 1 - len(radii))
self.waves_l = []
for l in range(self.lmax + 1):
rcut = radii[l]
waves = PAWWaves(self.rgd, l, rcut)
e = -1.0
for n in self.states[l]:
if isinstance(n, int):
# Bound state:
ch = self.aea.channels[l]
e = ch.e_n[n - l - 1]
f = ch.f_n[n - l - 1]
phi_g = ch.phi_ng[n - l - 1]
else:
if n is None:
e += 1.0
else:
e = n
n = -1
f = 0.0
phi_g = self.rgd.zeros()
gc = self.rgd.round(1.5 * rcut)
ch = Channel(l)
ch.integrate_outwards(phi_g, self.rgd, self.aea.vr_sg[0], gc, e,
self.aea.scalar_relativistic)
phi_g[1:gc + 1] /= self.rgd.r_g[1:gc + 1]
if l == 0:
phi_g[0] = phi_g[1]
phi_g /= (self.rgd.integrate(phi_g**2) / (4*pi))**0.5
waves.add(phi_g, n, e, f)
self.waves_l.append(waves)
def pseudize(self, type='poly', nderiv=6, rcore=None):
self.Q = -self.aea.Z + self.ncore
self.nt_g = self.rgd.zeros()
for waves in self.waves_l:
waves.pseudize(type, nderiv)
self.nt_g += waves.nt_g
self.Q += waves.Q
if rcore is None:
rcore = self.rcmax * 0.8
else:
assert rcore <= self.rcmax
# Make sure pseudo density is monotonically decreasing:
while 1:
gcore = self.rgd.round(rcore)
self.nct_g = self.rgd.pseudize(self.nc_g, gcore)[0]
nt_g = self.nt_g + self.nct_g
dntdr_g = self.rgd.derivative(nt_g)[:gcore]
if dntdr_g.max() < 0.0:
break
rcore -= 0.01
if 1:
rcore *= 1.2
print rcore, '1.200000000000000000000000000'
gcore = self.rgd.round(rcore)
self.nct_g = self.rgd.pseudize(self.nc_g, gcore)[0]
nt_g = self.nt_g + self.nct_g
self.log('Constructing smooth pseudo core density for r < %.3f' %
rcore)
self.nt_g = nt_g
if 0:
            # Construct a function that decreases smoothly from
            # f(0)=1 to f(rcmax)=0:
x_g = self.rgd.r_g[:gcore] / self.rcmax
f_g = self.rgd.zeros()
f_g[:gcore] = (1 - x_g**2 * (3 - 2 * x_g))**2
# Add enough of f to nct to make nt monotonically decreasing:
dfdr_g = self.rgd.derivative(f_g)
A = (-dntdr_g / dfdr_g[:gcore]).max() * 1.5
self.nt_g += A * f_g
self.nct_g += A * f_g
self.log('Adding to nct ...')
self.npseudocore = self.rgd.integrate(self.nct_g)
self.log('Pseudo core electrons: %.6f' % self.npseudocore)
self.Q -= self.npseudocore
self.rhot_g = self.nt_g + self.Q * self.ghat_g
self.vHtr_g = self.rgd.poisson(self.rhot_g)
self.vxct_g = self.rgd.zeros()
exct_g = self.rgd.zeros()
self.exct = self.aea.xc.calculate_spherical(
self.rgd, self.nt_g.reshape((1, -1)), self.vxct_g.reshape((1, -1)))
self.v0r_g = self.vtr_g - self.vHtr_g - self.vxct_g * self.rgd.r_g
self.v0r_g[self.rgd.round(self.rcmax):] = 0.0
self.log('\nProjectors:')
self.log(' state occ energy norm rcut')
self.log(' nl [Hartree] [eV] [electrons] [Bohr]')
self.log('----------------------------------------------------------')
for l, waves in enumerate(self.waves_l):
for n, e, f, ds in zip(waves.n_n, waves.e_n, waves.f_n,
waves.dS_nn.diagonal()):
if n == -1:
self.log(' %s %10.6f %10.5f %19.2f' %
('spdf'[l], e, e * Hartree, waves.rcut))
else:
self.log(
' %d%s %2d %10.6f %10.5f %5.3f %9.2f' %
(n, 'spdf'[l], f, e, e * Hartree, 1 - ds,
waves.rcut))
self.log()
def find_local_potential(self, l0, r0, P, e0):
if l0 is None:
self.find_polynomial_potential(r0, P)
else:
self.match_local_potential(l0, r0, P, e0)
def find_polynomial_potential(self, r0, P, e0=None):
self.log('Constructing smooth local potential for r < %.3f' % r0)
g0 = self.rgd.ceil(r0)
assert e0 is None
self.vtr_g = self.rgd.pseudize(self.aea.vr_sg[0], g0, 1, P)[0]
self.l0 = None
self.e0 = None
self.r0 = r0
self.nderiv0 = P
def match_local_potential(self, l0, r0, P, e0):
self.log('Local potential matching %s-scattering at e=%.3f eV' %
('spdfg'[l0], e0 * Hartree) +
' and r=%.2f Bohr' % r0)
g0 = self.rgd.ceil(r0)
gc = g0 + 20
ch = Channel(l0)
phi_g = self.rgd.zeros()
ch.integrate_outwards(phi_g, self.rgd, self.aea.vr_sg[0], gc, e0,
self.aea.scalar_relativistic)
phi_g[1:gc] /= self.rgd.r_g[1:gc]
if l0 == 0:
phi_g[0] = phi_g[1]
#phit_g, c = self.rgd.pseudize_normalized(phi_g, g0, l=l0, points=P)
phit_g, c = self.rgd.pseudize(phi_g, g0, l=l0, points=P)
r_g = self.rgd.r_g[1:g0]
dgdr_g = 1 / self.rgd.dr_g
d2gdr2_g = self.rgd.d2gdr2()
a_g = phit_g.copy()
a_g[1:] /= self.rgd.r_g[1:]**l0
a_g[0] = c
dadg_g = self.rgd.zeros()
d2adg2_g = self.rgd.zeros()
dadg_g[1:-1] = 0.5 * (a_g[2:] - a_g[:-2])
d2adg2_g[1:-1] = a_g[2:] - 2 * a_g[1:-1] + a_g[:-2]
q_g = (((l0 + 1) * dgdr_g + 0.5 * self.rgd.r_g * d2gdr2_g) * dadg_g +
0.5 * self.rgd.r_g * d2adg2_g * dgdr_g**2)
q_g[:g0] /= a_g[:g0]
q_g += e0 * self.rgd.r_g
q_g[0] = 0.0
self.vtr_g = self.aea.vr_sg[0].copy()
self.vtr_g[0] = 0.0
self.vtr_g[1:g0] = q_g[1:g0]#e0 * r_g - t_g * r_g**(l0 + 1) / phit_g[1:g0]
self.l0 = l0
self.e0 = e0
self.r0 = r0
self.nderiv0 = P
def construct_projectors(self):
for waves in self.waves_l:
waves.construct_projectors(self.vtr_g, self.rcmax)
waves.calculate_kinetic_energy_correction(self.aea.vr_sg[0],
self.vtr_g)
def check_all(self):
self.log(('Checking eigenvalues of %s pseudo atom using ' +
'a Gaussian basis set:') % self.aea.symbol)
self.log(' AE [eV] PS [eV] error [eV]')
ok = True
for l in range(4):
try:
e_b, n0 = self.check(l)
except RuntimeError:
self.log('Singular overlap matrix!')
ok = False
continue
nbound = (e_b < -0.002).sum()
if l < len(self.aea.channels):
e0_b = self.aea.channels[l].e_n
nbound0 = (e0_b < -0.002).sum()
extra = 6
for n in range(1 + l, nbound0 + 1 + l + extra):
if n - 1 - l < len(self.aea.channels[l].f_n):
f = self.aea.channels[l].f_n[n - 1 - l]
self.log('%2d%s %2d' % (n, 'spdf'[l], f), end='')
else:
self.log(' ', end='')
self.log(' %15.3f' % (e0_b[n - 1 - l] * Hartree), end='')
if n - 1 - l - n0 >= 0:
self.log('%15.3f' * 2 %
(e_b[n - 1 - l - n0] * Hartree,
(e_b[n - 1 - l - n0] - e0_b[n - 1 - l]) *
Hartree))
else:
self.log()
if nbound != nbound0 - n0:
self.log('Wrong number of %s-states!' % 'spdf'[l])
ok = False
elif (nbound > 0 and
abs(e_b[:nbound] - e0_b[n0:nbound0]).max() > 1e-3):
self.log('Error in bound %s-states!' % 'spdf'[l])
ok = False
elif (abs(e_b[nbound:nbound + extra] -
e0_b[nbound0:nbound0 + extra]).max() > 2e-2):
self.log('Error in %s-states!' % 'spdf'[l])
ok = False
elif nbound > 0:
self.log('Wrong number of %s-states!' % 'spdf'[l])
ok = False
return ok
def check(self, l):
basis = self.aea.channels[0].basis
eps = basis.eps
alpha_B = basis.alpha_B
basis = GaussianBasis(l, alpha_B, self.rgd, eps)
H_bb = basis.calculate_potential_matrix(self.vtr_g)
H_bb += basis.T_bb
S_bb = np.eye(len(basis))
n0 = 0
if l < len(self.waves_l):
waves = self.waves_l[l]
if len(waves) > 0:
P_bn = self.rgd.integrate(basis.basis_bg[:, None] *
waves.pt_ng) / (4 * pi)
H_bb += np.dot(np.dot(P_bn, waves.dH_nn), P_bn.T)
S_bb += np.dot(np.dot(P_bn, waves.dS_nn), P_bn.T)
n0 = waves.n_n[0] - l - 1
if n0 < 0 and l < len(self.aea.channels):
n0 = (self.aea.channels[l].f_n > 0).sum()
elif l < len(self.aea.channels):
n0 = (self.aea.channels[l].f_n > 0).sum()
e_b = np.empty(len(basis))
general_diagonalize(H_bb, e_b, S_bb)
return e_b, n0
def test_convergence(self):
rgd = self.rgd
r_g = rgd.r_g
G_k, nt_k = self.rgd.fft(self.nt_g * r_g)
rhot_k = self.rgd.fft(self.rhot_g * r_g)[1]
ghat_k = self.rgd.fft(self.ghat_g * r_g)[1]
v0_k = self.rgd.fft(self.v0r_g)[1]
vt_k = self.rgd.fft(self.vtr_g)[1]
phi_k = self.rgd.fft(self.waves_l[0].phit_ng[0] * r_g)[1]
eee_k = 0.5 * nt_k**2 * (4 * pi)**2 / (2 * pi)**3
ecc_k = 0.5 * rhot_k**2 * (4 * pi)**2 / (2 * pi)**3
egg_k = 0.5 * ghat_k**2 * (4 * pi)**2 / (2 * pi)**3
ekin_k = 0.5 * phi_k**2 * G_k**4 / (2 * pi)**3
evt_k = nt_k * vt_k * G_k**2 * 4 * pi / (2 * pi)**3
eee = 0.5 * rgd.integrate(self.nt_g * rgd.poisson(self.nt_g), -1)
ecc = 0.5 * rgd.integrate(self.rhot_g * self.vHtr_g, -1)
egg = 0.5 * rgd.integrate(self.ghat_g * rgd.poisson(self.ghat_g), -1)
ekin = self.aea.ekin - self.ekincore - self.waves_l[0].dekin_nn[0, 0]
print self.aea.ekin, self.ekincore, self.waves_l[0].dekin_nn[0, 0]
evt = rgd.integrate(self.nt_g * self.vtr_g, -1)
import pylab as p
errors = 10.0**np.arange(-4, 0) / Hartree
self.log('\nConvergence of energy:')
self.log('plane-wave cutoff (wave-length) [ev (Bohr)]\n ', end='')
for de in errors:
self.log('%14.4f' % (de * Hartree), end='')
for label, e_k, e0 in [
('e-e', eee_k, eee),
('c-c', ecc_k, ecc),
('g-g', egg_k, egg),
('kin', ekin_k, ekin),
('vt', evt_k, evt)]:
self.log('\n%3s: ' % label, end='')
e_k = (np.add.accumulate(e_k) - 0.5 * e_k[0] - 0.5 * e_k) * G_k[1]
print e_k[-1],e0, e_k[-1]-e0
k = len(e_k) - 1
for de in errors:
while abs(e_k[k] - e_k[-1]) < de:
k -= 1
G = k * G_k[1]
ecut = 0.5 * G**2
h = pi / G
self.log(' %6.1f (%4.2f)' % (ecut * Hartree, h), end='')
p.semilogy(G_k, abs(e_k - e_k[-1]) * Hartree, label=label)
self.log()
p.axis(xmax=20)
p.xlabel('G')
p.ylabel('[eV]')
p.legend()
p.show()
def plot(self):
import matplotlib.pyplot as plt
r_g = self.rgd.r_g
plt.figure()
plt.plot(r_g, self.vxct_g, label='xc')
plt.plot(r_g[1:], self.v0r_g[1:] / r_g[1:], label='0')
plt.plot(r_g[1:], self.vHtr_g[1:] / r_g[1:], label='H')
plt.plot(r_g[1:], self.vtr_g[1:] / r_g[1:], label='ps')
plt.plot(r_g[1:], self.aea.vr_sg[0, 1:] / r_g[1:], label='ae')
plt.axis(xmax=2 * self.rcmax,
ymin=self.vtr_g[1] / r_g[1],
ymax=max(0, (self.v0r_g[1:] / r_g[1:]).max()))
plt.xlabel('radius [Bohr]')
plt.ylabel('potential [Ha]')
plt.legend()
plt.figure()
i = 0
for l, waves in enumerate(self.waves_l):
for n, e, phi_g, phit_g in zip(waves.n_n, waves.e_n,
waves.phi_ng, waves.phit_ng):
if n == -1:
gc = self.rgd.ceil(waves.rcut)
name = '*%s (%.2f Ha)' % ('spdf'[l], e)
else:
gc = len(self.rgd)
name = '%d%s (%.2f Ha)' % (n, 'spdf'[l], e)
plt.plot(r_g[:gc], (phi_g * r_g)[:gc], color=colors[i],
label=name)
plt.plot(r_g[:gc], (phit_g * r_g)[:gc], '--', color=colors[i])
i += 1
plt.axis(xmax=3 * self.rcmax)
plt.xlabel('radius [Bohr]')
plt.ylabel(r'$r\phi_{n\ell}(r)$')
plt.legend()
plt.figure()
i = 0
for l, waves in enumerate(self.waves_l):
for n, e, pt_g in zip(waves.n_n, waves.e_n, waves.pt_ng):
if n == -1:
name = '*%s (%.2f Ha)' % ('spdf'[l], e)
else:
name = '%d%s (%.2f Ha)' % (n, 'spdf'[l], e)
plt.plot(r_g, pt_g * r_g, color=colors[i], label=name)
i += 1
plt.axis(xmax=self.rcmax)
plt.legend()
def logarithmic_derivative(self, l, energies, rcut):
rgd = self.rgd
ch = Channel(l)
gcut = rgd.round(rcut)
N = 0
if l < len(self.waves_l):
# Nonlocal PAW stuff:
waves = self.waves_l[l]
if len(waves) > 0:
pt_ng = waves.pt_ng
dH_nn = waves.dH_nn
dS_nn = waves.dS_nn
N = len(pt_ng)
u_g = rgd.zeros()
u_ng = rgd.zeros(N)
dudr_n = np.empty(N)
logderivs = []
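        # For each energy, integrate the radial equation outwards to rcut and
        # record u'(rcut)/u(rcut); when projectors exist for this l, the
        # nonlocal PAW correction is added to u and du/dr first.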
for e in energies:
dudr = ch.integrate_outwards(u_g, rgd, self.vtr_g, gcut, e)
u = u_g[gcut]
if N:
for n in range(N):
dudr_n[n] = ch.integrate_outwards(u_ng[n], rgd,
self.vtr_g, gcut, e,
pt_g=pt_ng[n])
A_nn = (dH_nn - e * dS_nn) / (4 * pi)
B_nn = rgd.integrate(pt_ng[:, None] * u_ng, -1)
c_n = rgd.integrate(pt_ng * u_g, -1)
d_n = np.linalg.solve(np.dot(A_nn, B_nn) + np.eye(N),
np.dot(A_nn, c_n))
u -= np.dot(u_ng[:, gcut], d_n)
dudr -= np.dot(dudr_n, d_n)
logderivs.append(dudr / u)
return logderivs
def make_paw_setup(self, tag=None):
aea = self.aea
setup = SetupData(aea.symbol, aea.xc.name, tag, readxml=False)
nj = sum(len(waves) for waves in self.waves_l)
setup.e_kin_jj = np.zeros((nj, nj))
setup.id_j = []
j1 = 0
for l, waves in enumerate(self.waves_l):
ne = 0
for n, f, e, phi_g, phit_g, pt_g in zip(waves.n_n, waves.f_n,
waves.e_n, waves.phi_ng,
waves.phit_ng,
waves.pt_ng):
setup.append(n, l, f, e, waves.rcut, phi_g, phit_g, pt_g)
if n == -1:
ne += 1
id = '%s-%s%d' % (aea.symbol, 'spdf'[l], ne)
else:
id = '%s-%d%s' % (aea.symbol, n, 'spdf'[l])
setup.id_j.append(id)
j2 = j1 + len(waves)
setup.e_kin_jj[j1:j2, j1:j2] = waves.dekin_nn
j1 = j2
setup.nc_g = self.nc_g * sqrt(4 * pi)
setup.nct_g = self.nct_g * sqrt(4 * pi)
setup.e_kinetic_core = self.ekincore
setup.vbar_g = self.v0r_g * sqrt(4 * pi)
setup.vbar_g[1:] /= self.rgd.r_g[1:]
setup.vbar_g[0] = setup.vbar_g[1]
setup.Z = aea.Z
setup.Nc = self.ncore
setup.Nv = self.nvalence
setup.e_kinetic = aea.ekin
setup.e_xc = aea.exc
setup.e_electrostatic = aea.eH + aea.eZ
setup.e_total = aea.exc + aea.ekin + aea.eH + aea.eZ
setup.rgd = self.rgd
setup.rcgauss = 1 / sqrt(self.alpha)
self.calculate_exx_integrals()
setup.ExxC = self.exxcc
setup.X_p = pack2(self.exxcv_ii)
setup.tauc_g = self.rgd.zeros()
setup.tauct_g = self.rgd.zeros()
#print 'no tau!!!!!!!!!!!'
if self.aea.scalar_relativistic:
reltype = 'scalar-relativistic'
else:
reltype = 'non-relativistic'
attrs = [('type', reltype),
('version', 2),
('name', 'gpaw-%s' % version)]
setup.generatorattrs = attrs
setup.l0 = self.l0
setup.e0 = self.e0
setup.r0 = self.r0
setup.nderiv0 = self.nderiv0
return setup
def calculate_exx_integrals(self):
# Find core states:
core = []
lmax = 0
for l, ch in enumerate(self.aea.channels):
for n, phi_g in enumerate(ch.phi_ng):
if (l >= len(self.waves_l) or
(l < len(self.waves_l) and
n + l + 1 not in self.waves_l[l].n_n)):
core.append((l, phi_g))
if l > lmax:
lmax = l
lmax = max(lmax, len(self.waves_l) - 1)
G_LLL = make_gaunt(lmax)
# Calculate core contribution to EXX energy:
self.exxcc = 0.0
j1 = 0
for l1, phi1_g in core:
f = 1.0
for l2, phi2_g in core[j1:]:
n_g = phi1_g * phi2_g
for l in range((l1 + l2) % 2, l1 + l2 + 1, 2):
G = (G_LLL[l1**2:(l1 + 1)**2,
l2**2:(l2 + 1)**2,
l**2:(l + 1)**2]**2).sum()
vr_g = self.rgd.poisson(n_g, l)
e = f * self.rgd.integrate(vr_g * n_g, -1) / 4 / pi
self.exxcc -= e * G
f = 2.0
j1 += 1
self.log('EXX (core-core):', self.exxcc, 'Hartree')
# Calculate core-valence contribution to EXX energy:
nj = sum(len(waves) for waves in self.waves_l)
ni = sum(len(waves) * (2 * l + 1)
for l, waves in enumerate(self.waves_l))
self.exxcv_ii = np.zeros((ni, ni))
i1 = 0
for l1, waves1 in enumerate(self.waves_l):
for phi1_g in waves1.phi_ng:
i2 = 0
for l2, waves2 in enumerate(self.waves_l):
for phi2_g in waves2.phi_ng:
X_mm = self.exxcv_ii[i1:i1 + 2 * l1 + 1,
i2:i2 + 2 * l2 + 1]
if (l1 + l2) % 2 == 0:
for lc, phi_g in core:
n_g = phi1_g * phi_g
for l in range((l1 + lc) % 2,
max(l1, l2) + lc + 1, 2):
vr_g = self.rgd.poisson(phi2_g * phi_g, l)
e = (self.rgd.integrate(vr_g * n_g, -1) /
(4 * pi))
for mc in range(2 * lc + 1):
for m in range(2 * l + 1):
G_L = G_LLL[:,
lc**2 + mc,
l**2 + m]
X_mm += np.outer(
G_L[l1**2:(l1 + 1)**2],
G_L[l2**2:(l2 + 1)**2]) * e
i2 += 2 * l2 + 1
i1 += 2 * l1 + 1
def str2z(x):
if isinstance(x, int):
return x
if x[0].isdigit():
return int(x)
return atomic_numbers[x]
def generate(argv=None):
from optparse import OptionParser
parser = OptionParser(usage='%prog [options] element',
version='%prog 0.1')
parser.add_option('-f', '--xc-functional', type='string', default='LDA',
help='Exchange-Correlation functional ' +
'(default value LDA)',
metavar='<XC>')
parser.add_option('-P', '--projectors',
help='Projector functions - use comma-separated - ' +
                      'nl values, where n can be a principal quantum number ' +
'(integer) or energy (floating point number). ' +
'Example: 2s,0.5s,2p,0.5p,0.0d.')
parser.add_option('-r', '--radius',
help='1.2 or 1.2,1.1,1.1')
parser.add_option('-0', '--zero-potential',
metavar='type,nderivs,radius,e0',
help='Parameters for zero potential.')
parser.add_option('-c', '--pseudo-core-density-radius', type=float,
metavar='radius',
help='Radius for pseudizing core density.')
parser.add_option('-z', '--pseudize',
metavar='type,nderivs',
help='Parameters for pseudizing wave functions.')
parser.add_option('-p', '--plot', action='store_true')
parser.add_option('-l', '--logarithmic-derivatives',
metavar='spdfg,e1:e2:de,radius',
help='Plot logarithmic derivatives. ' +
'Example: -l spdf,-1:1:0.05,1.3. ' +
'Energy range and/or radius can be left out.')
parser.add_option('-w', '--write', action='store_true')
parser.add_option('-s', '--scalar-relativistic', action='store_true')
parser.add_option('--no-check', action='store_true')
parser.add_option('-t', '--tag', type='string')
parser.add_option('-a', '--alpha', type=float)
opt, args = parser.parse_args(argv)
if len(args) == 0:
symbols = [symbol for symbol in chemical_symbols
if symbol in parameters]
elif len(args) == 1 and '-' in args[0]:
Z1, Z2 = args[0].split('-')
Z1 = str2z(Z1)
if Z2:
Z2 = str2z(Z2)
else:
Z2 = 86
symbols = range(Z1, Z2 + 1)
else:
symbols = args
for symbol in symbols:
Z = str2z(symbol)
symbol = chemical_symbols[Z]
kwargs = get_parameters(symbol, opt)
print kwargs
gen = _generate(**kwargs)
if opt.no_check:
ok = True
else:
ok = gen.check_all()
#gen.test_convergence()
if opt.write or opt.tag:
gen.make_paw_setup(opt.tag).write_xml()
if opt.logarithmic_derivatives or opt.plot:
import matplotlib.pyplot as plt
if opt.logarithmic_derivatives:
r = 1.1 * gen.rcmax
emin = min(min(wave.e_n) for wave in gen.waves_l) - 0.8
emax = max(max(wave.e_n) for wave in gen.waves_l) + 0.8
lvalues, energies, r = parse_ld_str(opt.logarithmic_derivatives,
(emin, emax, 0.05), r)
ldmax = 0.0
for l in lvalues:
ld = gen.aea.logarithmic_derivative(l, energies, r)
plt.plot(energies, ld, colors[l], label='spdfg'[l])
ld = gen.logarithmic_derivative(l, energies, r)
plt.plot(energies, ld, '--' + colors[l])
# Fixed points:
if l < len(gen.waves_l):
efix = gen.waves_l[l].e_n
ldfix = gen.logarithmic_derivative(l, efix, r)
plt.plot(efix, ldfix, 'x' + colors[l])
ldmax = max(ldmax, max(abs(ld) for ld in ldfix))
if l == gen.l0:
efix = [gen.e0]
ldfix = gen.logarithmic_derivative(l, efix, r)
plt.plot(efix, ldfix, 'x' + colors[l])
ldmax = max(ldmax, max(abs(ld) for ld in ldfix))
if ldmax != 0.0:
plt.axis(ymin=-3 * ldmax, ymax=3 * ldmax)
plt.xlabel('energy [Ha]')
plt.ylabel(r'$d\phi_{\ell\epsilon}(r)/dr/\phi_{\ell\epsilon}' +
r'(r)|_{r=r_c}$')
plt.legend(loc='best')
if opt.plot:
gen.plot()
try:
plt.show()
except KeyboardInterrupt:
pass
return gen
def get_parameters(symbol, opt):
if symbol in parameters:
projectors, radii, extra = parameters[symbol]
else:
projectors, radii, extra = None, 1.0, {}
if opt.projectors:
projectors = opt.projectors
if opt.radius:
radii = [float(r) for r in opt.radius.split(',')]
if isinstance(radii, float):
radii = [radii]
    scale = 1.0  # 0.9
radii = [scale * r for r in radii]
if opt.pseudize:
type, nderiv = opt.pseudize.split(',')
pseudize = (type, int(nderiv))
else:
pseudize = ('poly', 4)
l0 = None
if opt.zero_potential:
x = opt.zero_potential.split(',')
type = x[0]
if len(x) == 1:
# select only zero_potential type (with defaults)
# i.e. on the command line: -0 {f,poly}
nderiv0 = 6
r0 = max(radii)
elif len(x) == 2:
# select zero_potential type, nderivs
# i.e. on the command line: -0 f,nderivs
nderiv0 = int(x[1])
r0 = max(radii)
else:
if x[2] in ['min', 'max']:
# select zero_potential type, nderivs, min/max
# i.e. on the command line: -0 f,nderivs,{min,max}
nderiv0 = int(x[1])
r0 = eval(x[2] + '(radii)')
else:
nderiv0 = int(x[1])
r0 = float(x[2])
if len(x) == 4:
e0 = float(x[3])
elif type == 'poly':
e0 = None
else:
e0 = 0.0
if type != 'poly':
l0 = 'spdfg'.find(type)
else:
if 'local' not in extra:
nderiv0 = 2
#nderiv0 = 3
e0 = None
r0 = extra.get('r0', min(radii) / scale) * scale
else:
nderiv0 = 5
#nderiv0 = 6
r0 = extra.get('r0', min(radii) * 0.9 / scale) * scale
l0 = 'spdfg'.find(extra['local'])
e0 = 0.0
return dict(symbol=symbol,
xc=opt.xc_functional,
projectors=projectors,
radii=radii,
scalar_relativistic=opt.scalar_relativistic, alpha=opt.alpha,
l0=l0, r0=r0, nderiv0=nderiv0, e0=e0,
pseudize=pseudize, rcore=opt.pseudo_core_density_radius)
def _generate(symbol, xc, projectors, radii,
scalar_relativistic, alpha,
l0, r0, nderiv0, e0,
pseudize, rcore):
aea = AllElectronAtom(symbol, xc)
gen = PAWSetupGenerator(aea, projectors,
scalar_relativistic)#, fd=None)
gen.construct_shape_function(alpha, radii, eps=1e-10)
gen.calculate_core_density()
gen.find_local_potential(l0, r0, nderiv0, e0)
gen.add_waves(radii)
gen.pseudize(pseudize[0], pseudize[1], rcore=rcore)
gen.construct_projectors()
return gen
if __name__ == '__main__':
generate()
|
gpl-3.0
|
macronucleus/chromagnon
|
setup.py
|
1
|
4536
|
#!/usr/bin/env python
"""
Usage:
python setup.py [py2app/sdist/install]
on Mac 20180202:
modify python3.6.sysconfig._get_sysconfigdata_name(check_exists=None)
ln -s /Users/USER/miniconda3/lib/libpython3.6m.dylib libpython3.6.dylib
then pythonw setup.py py2app to import javabridge
install numpy with nomkl like this
conda install nomkl numpy scipy
see https://github.com/pyinstaller/pyinstaller/issues/2175
The code was tested with the Miniconda version of Python
"""
import os, sys, stat
from setuptools import setup
from shutil import rmtree
pyversion = sys.version_info.major
#---------- prepare to build the distribution packages ---------------
if sys.argv[1] == 'py2app':
suff = 'Mac'
try:
from Chromagnon import version as chv
except ImportError:
import version as chv
version = chv.version
folder = 'ChromagnonV%s%s' % (version.replace('.', ''), suff)
#---------- remove previous build ---------------
if os.path.exists(folder):
# http://stackoverflow.com/questions/4829043/how-to-remove-read-only-attrib-directory-with-python-in-windows
def on_rm_error( func, path, exc_info):
# path contains the path of the file that couldn't be removed
# let's just assume that it's read-only and unlink it.
os.chmod( path, stat.S_IWRITE )
os.unlink( path )
rmtree(folder, onerror=on_rm_error)
if os.path.exists('build'):
rmtree('./build')
#---------- Get file system info -----------------
home = os.path.expanduser('~')
script = os.path.join('py', 'Chromagnon', 'chromagnon.py')
mini = 'miniconda%i' % pyversion
mainscript = os.path.join(home, 'codes', script)
conda = os.path.join(home, mini)
# ------- options ----------------------
excludes = ['matplotlib', 'pylab', 'Tkinter', 'Tkconstants', 'tcl', 'doctest', 'pydoc', 'pdb', 'pyqt5', 'pyqtgraph', 'pytz', 'opencv', 'reikna', 'pycuda', 'skcuda', 'wx.py', 'distutils', 'setuptools'] # email is required for bioformats
# ------- Platform ----------------------
# ------- MacOSX options py2app (v0.9)----------------
if sys.platform == 'darwin' and sys.argv[1] == 'py2app':
packages = ['tifffile', 'javabridge', 'bioformats', 'html'] # py2app cannot find some packages without help, html is for bioformats
libdir = os.path.join(conda, 'lib')
os.environ['DYLD_FALLBACK_LIBRARY_PATH'] = libdir
# http://beckism.com/2009/03/pyobjc_tips/
OPTIONS = {'argv_emulation': False, # to enable ndviewer window
'site_packages': True,
'use_pythonpath': True,
'packages': packages,
'excludes': excludes}
extra_options = dict(
setup_requires=['py2app'],
app=[mainscript],
options={'py2app': OPTIONS},
)
# --------------- python setup.py sdist/install ------------------
# Normally unix-like platforms will use "setup.py install"
else:
# http://stackoverflow.com/questions/2159211/use-distribute-setuptools-to-create-symlink-or-run-script
from setuptools.command.install import install
class CustomInstallCommand(install):
"""Customized setuptools install command."""
def run(self):
install.run(self)
mainscript = os.path.join('Chromagnon', 'chromagnon.py')
h = open('Chromagnon/version.py')
line = h.readline()
exec(line)
packages = ['Chromagnon', 'Chromagnon.Priithon', 'Chromagnon.Priithon.plt', 'Chromagnon.PriCommon', 'Chromagnon.ndviewer', 'Chromagnon.imgio', 'Chromagnon.imgio.mybioformats']
extra_options = dict(
install_requires=['numpy', 'scipy', 'wxpython>=3.0', 'pyopengl', 'pillow', 'six', 'tifffile<=0.15.1'],
packages=packages,
cmdclass={
'install': CustomInstallCommand}
)
if sys.platform.startswith('darwin'):
extra_options['entry_points'] = {'console_scripts': ['chromagnon=Chromagnon.chromagnon:command_line']}
extra_options['scripts'] = [mainscript]
else:
extra_options['entry_points'] = {'console_scripts': ['chromagnon=Chromagnon.chromagnon:command_line']}
if not sys.platform.startswith('linux'):
extra_options['install_requires'].append('PyPubSub')
# -------- Execute -----------------
setup(
name="Chromagnon",
author='Atsushi Matsuda',
version=version,
**extra_options
)
if sys.argv[1].startswith('py2app'):
# rename the dist folder
os.rename('dist', folder)
|
mit
|
TomAugspurger/pandas
|
pandas/tests/scalar/interval/test_arithmetic.py
|
1
|
1477
|
from datetime import timedelta
import numpy as np
import pytest
from pandas import Interval, Timedelta, Timestamp
@pytest.mark.parametrize("method", ["__add__", "__sub__"])
@pytest.mark.parametrize(
"interval",
[
Interval(Timestamp("2017-01-01 00:00:00"), Timestamp("2018-01-01 00:00:00")),
Interval(Timedelta(days=7), Timedelta(days=14)),
],
)
@pytest.mark.parametrize(
"delta", [Timedelta(days=7), timedelta(7), np.timedelta64(7, "D")]
)
def test_time_interval_add_subtract_timedelta(interval, delta, method):
# https://github.com/pandas-dev/pandas/issues/32023
result = getattr(interval, method)(delta)
left = getattr(interval.left, method)(delta)
right = getattr(interval.right, method)(delta)
expected = Interval(left, right)
assert result == expected
@pytest.mark.parametrize("interval", [Interval(1, 2), Interval(1.0, 2.0)])
@pytest.mark.parametrize(
"delta", [Timedelta(days=7), timedelta(7), np.timedelta64(7, "D")]
)
def test_numeric_interval_add_timedelta_raises(interval, delta):
# https://github.com/pandas-dev/pandas/issues/32023
msg = "|".join(
[
"unsupported operand",
"cannot use operands",
"Only numeric, Timestamp and Timedelta endpoints are allowed",
]
)
with pytest.raises((TypeError, ValueError), match=msg):
interval + delta
with pytest.raises((TypeError, ValueError), match=msg):
delta + interval
|
bsd-3-clause
|
bert9bert/statsmodels
|
examples/python/predict.py
|
33
|
1580
|
## Prediction (out of sample)
from __future__ import print_function
import numpy as np
import statsmodels.api as sm
# ## Artificial data
nsample = 50
sig = 0.25
x1 = np.linspace(0, 20, nsample)
X = np.column_stack((x1, np.sin(x1), (x1-5)**2))
X = sm.add_constant(X)
beta = [5., 0.5, 0.5, -0.02]
y_true = np.dot(X, beta)
y = y_true + sig * np.random.normal(size=nsample)
# ## Estimation
olsmod = sm.OLS(y, X)
olsres = olsmod.fit()
print(olsres.summary())
# ## In-sample prediction
ypred = olsres.predict(X)
print(ypred)
# ## Create a new sample of explanatory variables Xnew, predict and plot
x1n = np.linspace(20.5,25, 10)
Xnew = np.column_stack((x1n, np.sin(x1n), (x1n-5)**2))
Xnew = sm.add_constant(Xnew)
ynewpred = olsres.predict(Xnew) # predict out of sample
print(ynewpred)
# ## Plot comparison
import matplotlib.pyplot as plt
fig, ax = plt.subplots()
ax.plot(x1, y, 'o', label="Data")
ax.plot(x1, y_true, 'b-', label="True")
ax.plot(np.hstack((x1, x1n)), np.hstack((ypred, ynewpred)), 'r', label="OLS prediction")
ax.legend(loc="best");
### Predicting with Formulas
# Using formulas can make both estimation and prediction a lot easier
from statsmodels.formula.api import ols
data = {"x1" : x1, "y" : y}
res = ols("y ~ x1 + np.sin(x1) + I((x1-5)**2)", data=data).fit()
# We use `I` to indicate use of the Identity transform. I.e., we don't want any expansion magic from using `**2`.
res.params
# Now we only have to pass the single variable and we get the transformed right-hand side variables automatically
res.predict(exog=dict(x1=x1n))
|
bsd-3-clause
|
kylerbrown/scikit-learn
|
sklearn/decomposition/tests/test_truncated_svd.py
|
240
|
6055
|
"""Test truncated SVD transformer."""
import numpy as np
import scipy.sparse as sp
from sklearn.decomposition import TruncatedSVD
from sklearn.utils import check_random_state
from sklearn.utils.testing import (assert_array_almost_equal, assert_equal,
assert_raises, assert_greater,
assert_array_less)
# Make an X that looks somewhat like a small tf-idf matrix.
# XXX newer versions of SciPy have scipy.sparse.rand for this.
shape = 60, 55
n_samples, n_features = shape
rng = check_random_state(42)
X = rng.randint(-100, 20, np.product(shape)).reshape(shape)
X = sp.csr_matrix(np.maximum(X, 0), dtype=np.float64)
X.data[:] = 1 + np.log(X.data)
Xdense = X.A
def test_algorithms():
svd_a = TruncatedSVD(30, algorithm="arpack")
svd_r = TruncatedSVD(30, algorithm="randomized", random_state=42)
Xa = svd_a.fit_transform(X)[:, :6]
Xr = svd_r.fit_transform(X)[:, :6]
assert_array_almost_equal(Xa, Xr)
comp_a = np.abs(svd_a.components_)
comp_r = np.abs(svd_r.components_)
# All elements are equal, but some elements are more equal than others.
assert_array_almost_equal(comp_a[:9], comp_r[:9])
assert_array_almost_equal(comp_a[9:], comp_r[9:], decimal=3)
def test_attributes():
for n_components in (10, 25, 41):
tsvd = TruncatedSVD(n_components).fit(X)
assert_equal(tsvd.n_components, n_components)
assert_equal(tsvd.components_.shape, (n_components, n_features))
def test_too_many_components():
for algorithm in ["arpack", "randomized"]:
for n_components in (n_features, n_features+1):
tsvd = TruncatedSVD(n_components=n_components, algorithm=algorithm)
assert_raises(ValueError, tsvd.fit, X)
def test_sparse_formats():
for fmt in ("array", "csr", "csc", "coo", "lil"):
Xfmt = Xdense if fmt == "dense" else getattr(X, "to" + fmt)()
tsvd = TruncatedSVD(n_components=11)
Xtrans = tsvd.fit_transform(Xfmt)
assert_equal(Xtrans.shape, (n_samples, 11))
Xtrans = tsvd.transform(Xfmt)
assert_equal(Xtrans.shape, (n_samples, 11))
def test_inverse_transform():
for algo in ("arpack", "randomized"):
# We need a lot of components for the reconstruction to be "almost
# equal" in all positions. XXX Test means or sums instead?
tsvd = TruncatedSVD(n_components=52, random_state=42)
Xt = tsvd.fit_transform(X)
Xinv = tsvd.inverse_transform(Xt)
assert_array_almost_equal(Xinv, Xdense, decimal=1)
def test_integers():
Xint = X.astype(np.int64)
tsvd = TruncatedSVD(n_components=6)
Xtrans = tsvd.fit_transform(Xint)
assert_equal(Xtrans.shape, (n_samples, tsvd.n_components))
def test_explained_variance():
# Test sparse data
svd_a_10_sp = TruncatedSVD(10, algorithm="arpack")
svd_r_10_sp = TruncatedSVD(10, algorithm="randomized", random_state=42)
svd_a_20_sp = TruncatedSVD(20, algorithm="arpack")
svd_r_20_sp = TruncatedSVD(20, algorithm="randomized", random_state=42)
X_trans_a_10_sp = svd_a_10_sp.fit_transform(X)
X_trans_r_10_sp = svd_r_10_sp.fit_transform(X)
X_trans_a_20_sp = svd_a_20_sp.fit_transform(X)
X_trans_r_20_sp = svd_r_20_sp.fit_transform(X)
# Test dense data
svd_a_10_de = TruncatedSVD(10, algorithm="arpack")
svd_r_10_de = TruncatedSVD(10, algorithm="randomized", random_state=42)
svd_a_20_de = TruncatedSVD(20, algorithm="arpack")
svd_r_20_de = TruncatedSVD(20, algorithm="randomized", random_state=42)
X_trans_a_10_de = svd_a_10_de.fit_transform(X.toarray())
X_trans_r_10_de = svd_r_10_de.fit_transform(X.toarray())
X_trans_a_20_de = svd_a_20_de.fit_transform(X.toarray())
X_trans_r_20_de = svd_r_20_de.fit_transform(X.toarray())
# helper arrays for tests below
svds = (svd_a_10_sp, svd_r_10_sp, svd_a_20_sp, svd_r_20_sp, svd_a_10_de,
svd_r_10_de, svd_a_20_de, svd_r_20_de)
svds_trans = (
(svd_a_10_sp, X_trans_a_10_sp),
(svd_r_10_sp, X_trans_r_10_sp),
(svd_a_20_sp, X_trans_a_20_sp),
(svd_r_20_sp, X_trans_r_20_sp),
(svd_a_10_de, X_trans_a_10_de),
(svd_r_10_de, X_trans_r_10_de),
(svd_a_20_de, X_trans_a_20_de),
(svd_r_20_de, X_trans_r_20_de),
)
svds_10_v_20 = (
(svd_a_10_sp, svd_a_20_sp),
(svd_r_10_sp, svd_r_20_sp),
(svd_a_10_de, svd_a_20_de),
(svd_r_10_de, svd_r_20_de),
)
svds_sparse_v_dense = (
(svd_a_10_sp, svd_a_10_de),
(svd_a_20_sp, svd_a_20_de),
(svd_r_10_sp, svd_r_10_de),
(svd_r_20_sp, svd_r_20_de),
)
# Assert that the first 10 explained variance ratios agree between the 10- and 20-component fits
for svd_10, svd_20 in svds_10_v_20:
assert_array_almost_equal(
svd_10.explained_variance_ratio_,
svd_20.explained_variance_ratio_[:10],
decimal=5,
)
# Assert that the 20-component fit explains more variance than the 10-component fit
for svd_10, svd_20 in svds_10_v_20:
assert_greater(
svd_20.explained_variance_ratio_.sum(),
svd_10.explained_variance_ratio_.sum(),
)
# Assert that all the values are greater than 0
for svd in svds:
assert_array_less(0.0, svd.explained_variance_ratio_)
# Assert that total explained variance is less than 1
for svd in svds:
assert_array_less(svd.explained_variance_ratio_.sum(), 1.0)
# Compare sparse vs. dense
for svd_sparse, svd_dense in svds_sparse_v_dense:
assert_array_almost_equal(svd_sparse.explained_variance_ratio_,
svd_dense.explained_variance_ratio_)
# Test that explained_variance is correct
for svd, transformed in svds_trans:
total_variance = np.var(X.toarray(), axis=0).sum()
variances = np.var(transformed, axis=0)
true_explained_variance_ratio = variances / total_variance
assert_array_almost_equal(
svd.explained_variance_ratio_,
true_explained_variance_ratio,
)
|
bsd-3-clause
|
mahak/spark
|
python/pyspark/pandas/series.py
|
9
|
197528
|
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
A wrapper class for Spark Column to behave similar to pandas Series.
"""
import datetime
import re
import inspect
import sys
from collections.abc import Mapping
from functools import partial, wraps, reduce
from typing import (
Any,
Callable,
Dict,
Generic,
IO,
Iterable,
List,
Optional,
Sequence,
Tuple,
Type,
Union,
cast,
no_type_check,
overload,
TYPE_CHECKING,
)
import numpy as np
import pandas as pd
from pandas.core.accessor import CachedAccessor
from pandas.io.formats.printing import pprint_thing
from pandas.api.types import is_list_like, is_hashable
from pandas.api.extensions import ExtensionDtype
from pandas.tseries.frequencies import DateOffset
from pyspark.sql import functions as F, Column, DataFrame as SparkDataFrame
from pyspark.sql.types import (
ArrayType,
BooleanType,
DataType,
DoubleType,
FloatType,
IntegerType,
IntegralType,
LongType,
NumericType,
Row,
StructType,
)
from pyspark.sql.window import Window
from pyspark import pandas as ps # For running doctests and reference resolution in PyCharm.
from pyspark.pandas._typing import Axis, Dtype, Label, Name, Scalar, T
from pyspark.pandas.accessors import PandasOnSparkSeriesMethods
from pyspark.pandas.categorical import CategoricalAccessor
from pyspark.pandas.config import get_option
from pyspark.pandas.base import IndexOpsMixin
from pyspark.pandas.exceptions import SparkPandasIndexingError
from pyspark.pandas.frame import DataFrame
from pyspark.pandas.generic import Frame
from pyspark.pandas.internal import (
InternalField,
InternalFrame,
DEFAULT_SERIES_NAME,
NATURAL_ORDER_COLUMN_NAME,
SPARK_DEFAULT_INDEX_NAME,
SPARK_DEFAULT_SERIES_NAME,
)
from pyspark.pandas.missing.series import MissingPandasLikeSeries
from pyspark.pandas.plot import PandasOnSparkPlotAccessor
from pyspark.pandas.ml import corr
from pyspark.pandas.utils import (
combine_frames,
is_name_like_tuple,
is_name_like_value,
name_like_string,
same_anchor,
scol_for,
sql_conf,
validate_arguments_and_invoke_function,
validate_axis,
validate_bool_kwarg,
verify_temp_column_name,
SPARK_CONF_ARROW_ENABLED,
)
from pyspark.pandas.datetimes import DatetimeMethods
from pyspark.pandas.spark import functions as SF
from pyspark.pandas.spark.accessors import SparkSeriesMethods
from pyspark.pandas.strings import StringMethods
from pyspark.pandas.typedef import (
infer_return_type,
spark_type_to_pandas_dtype,
ScalarType,
SeriesType,
)
if TYPE_CHECKING:
from pyspark.sql._typing import ColumnOrName # noqa: F401 (SPARK-34943)
from pyspark.pandas.groupby import SeriesGroupBy # noqa: F401 (SPARK-34943)
from pyspark.pandas.indexes import Index # noqa: F401 (SPARK-34943)
# This regular expression pattern is compiled and defined here to avoid compiling the same
# pattern every time it is used in __repr__ in Series.
# This pattern basically seeks the footer string from pandas' string representation.
REPR_PATTERN = re.compile(r"Length: (?P<length>[0-9]+)")
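# Hedged illustration (not part of the original module): the pattern pulls the length out of
# a truncated pandas repr footer, e.g.
#   REPR_PATTERN.search("Name: x, Length: 100, dtype: int64").group("length") == "100"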
_flex_doc_SERIES = """
Return {desc} of series and other, element-wise (binary operator `{op_name}`).
Equivalent to ``{equiv}``
Parameters
----------
other : Series or scalar value
Returns
-------
Series
The result of the operation.
See Also
--------
Series.{reverse}
{series_examples}
"""
_add_example_SERIES = """
Examples
--------
>>> df = ps.DataFrame({'a': [2, 2, 4, np.nan],
... 'b': [2, np.nan, 2, np.nan]},
... index=['a', 'b', 'c', 'd'], columns=['a', 'b'])
>>> df
a b
a 2.0 2.0
b 2.0 NaN
c 4.0 2.0
d NaN NaN
>>> df.a.add(df.b)
a 4.0
b NaN
c 6.0
d NaN
dtype: float64
>>> df.a.radd(df.b)
a 4.0
b NaN
c 6.0
d NaN
dtype: float64
"""
_sub_example_SERIES = """
Examples
--------
>>> df = ps.DataFrame({'a': [2, 2, 4, np.nan],
... 'b': [2, np.nan, 2, np.nan]},
... index=['a', 'b', 'c', 'd'], columns=['a', 'b'])
>>> df
a b
a 2.0 2.0
b 2.0 NaN
c 4.0 2.0
d NaN NaN
>>> df.a.subtract(df.b)
a 0.0
b NaN
c 2.0
d NaN
dtype: float64
>>> df.a.rsub(df.b)
a 0.0
b NaN
c -2.0
d NaN
dtype: float64
"""
_mul_example_SERIES = """
Examples
--------
>>> df = ps.DataFrame({'a': [2, 2, 4, np.nan],
... 'b': [2, np.nan, 2, np.nan]},
... index=['a', 'b', 'c', 'd'], columns=['a', 'b'])
>>> df
a b
a 2.0 2.0
b 2.0 NaN
c 4.0 2.0
d NaN NaN
>>> df.a.multiply(df.b)
a 4.0
b NaN
c 8.0
d NaN
dtype: float64
>>> df.a.rmul(df.b)
a 4.0
b NaN
c 8.0
d NaN
dtype: float64
"""
_div_example_SERIES = """
Examples
--------
>>> df = ps.DataFrame({'a': [2, 2, 4, np.nan],
... 'b': [2, np.nan, 2, np.nan]},
... index=['a', 'b', 'c', 'd'], columns=['a', 'b'])
>>> df
a b
a 2.0 2.0
b 2.0 NaN
c 4.0 2.0
d NaN NaN
>>> df.a.divide(df.b)
a 1.0
b NaN
c 2.0
d NaN
dtype: float64
>>> df.a.rdiv(df.b)
a 1.0
b NaN
c 0.5
d NaN
dtype: float64
"""
_pow_example_SERIES = """
Examples
--------
>>> df = ps.DataFrame({'a': [2, 2, 4, np.nan],
... 'b': [2, np.nan, 2, np.nan]},
... index=['a', 'b', 'c', 'd'], columns=['a', 'b'])
>>> df
a b
a 2.0 2.0
b 2.0 NaN
c 4.0 2.0
d NaN NaN
>>> df.a.pow(df.b)
a 4.0
b NaN
c 16.0
d NaN
dtype: float64
>>> df.a.rpow(df.b)
a 4.0
b NaN
c 16.0
d NaN
dtype: float64
"""
_mod_example_SERIES = """
Examples
--------
>>> df = ps.DataFrame({'a': [2, 2, 4, np.nan],
... 'b': [2, np.nan, 2, np.nan]},
... index=['a', 'b', 'c', 'd'], columns=['a', 'b'])
>>> df
a b
a 2.0 2.0
b 2.0 NaN
c 4.0 2.0
d NaN NaN
>>> df.a.mod(df.b)
a 0.0
b NaN
c 0.0
d NaN
dtype: float64
>>> df.a.rmod(df.b)
a 0.0
b NaN
c 2.0
d NaN
dtype: float64
"""
_floordiv_example_SERIES = """
Examples
--------
>>> df = ps.DataFrame({'a': [2, 2, 4, np.nan],
... 'b': [2, np.nan, 2, np.nan]},
... index=['a', 'b', 'c', 'd'], columns=['a', 'b'])
>>> df
a b
a 2.0 2.0
b 2.0 NaN
c 4.0 2.0
d NaN NaN
>>> df.a.floordiv(df.b)
a 1.0
b NaN
c 2.0
d NaN
dtype: float64
>>> df.a.rfloordiv(df.b)
a 1.0
b NaN
c 0.0
d NaN
dtype: float64
"""
# Needed to disambiguate Series.str and str type
str_type = str
def _create_type_for_series_type(param: Any) -> Type[SeriesType]:
from pyspark.pandas.typedef import NameTypeHolder
if isinstance(param, ExtensionDtype):
new_class = type("NameType", (NameTypeHolder,), {}) # type: Type[NameTypeHolder]
new_class.tpe = param
else:
new_class = param.type if isinstance(param, np.dtype) else param
return SeriesType[new_class] # type: ignore
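# Hedged illustration (assumption, not in the original source): for a NumPy dtype the
# underlying scalar type is used, so _create_type_for_series_type(np.dtype("int64"))
# resolves to SeriesType[np.int64]; ExtensionDtype instances instead get a fresh
# NameTypeHolder subclass carrying the dtype, and plain classes pass through as-is.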
if (3, 5) <= sys.version_info < (3, 7) and __name__ != "__main__":
from typing import GenericMeta # type: ignore
old_getitem = GenericMeta.__getitem__ # type: ignore
@no_type_check
def new_getitem(self, params):
if hasattr(self, "is_series"):
return old_getitem(self, _create_type_for_series_type(params))
else:
return old_getitem(self, params)
GenericMeta.__getitem__ = new_getitem # type: ignore
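# Hedged note (assumption): the patch above intercepts subscripting such as
# ps.Series[np.float64] on Python 3.5/3.6 (which use typing.GenericMeta) so the parameter
# is converted into a SeriesType and can later drive return-type inference for
# pandas-on-Spark functions annotated this way.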
class Series(Frame, IndexOpsMixin, Generic[T]):
"""
pandas-on-Spark Series that corresponds to pandas Series logically. This holds Spark Column
internally.
:ivar _internal: an internal immutable Frame to manage metadata.
:type _internal: InternalFrame
:ivar _psdf: Parent's pandas-on-Spark DataFrame
:type _psdf: ps.DataFrame
Parameters
----------
data : array-like, dict, or scalar value, pandas Series
Contains data stored in Series
If data is a dict, argument order is maintained for Python 3.6
and later.
Note that if `data` is a pandas Series, other arguments should not be used.
index : array-like or Index (1d)
Values must be hashable and have the same length as `data`.
Non-unique index values are allowed. Will default to
RangeIndex (0, 1, 2, ..., n) if not provided. If both a dict and index
sequence are used, the index will override the keys found in the
dict.
dtype : numpy.dtype or None
If None, dtype will be inferred
copy : boolean, default False
Copy input data
"""
@no_type_check
def __init__(self, data=None, index=None, dtype=None, name=None, copy=False, fastpath=False):
assert data is not None
if isinstance(data, DataFrame):
assert dtype is None
assert name is None
assert not copy
assert not fastpath
self._anchor = data # type: DataFrame
self._col_label = index # type: Label
else:
if isinstance(data, pd.Series):
assert index is None
assert dtype is None
assert name is None
assert not copy
assert not fastpath
s = data
else:
s = pd.Series(
data=data, index=index, dtype=dtype, name=name, copy=copy, fastpath=fastpath
)
internal = InternalFrame.from_pandas(pd.DataFrame(s))
if s.name is None:
internal = internal.copy(column_labels=[None])
anchor = DataFrame(internal)
self._anchor = anchor
self._col_label = anchor._internal.column_labels[0]
object.__setattr__(anchor, "_psseries", {self._column_label: self})
@property
def _psdf(self) -> DataFrame:
return self._anchor
@property
def _internal(self) -> InternalFrame:
return self._psdf._internal.select_column(self._column_label)
@property
def _column_label(self) -> Optional[Label]:
return self._col_label
def _update_anchor(self, psdf: DataFrame) -> None:
assert psdf._internal.column_labels == [self._column_label], (
psdf._internal.column_labels,
[self._column_label],
)
self._anchor = psdf
object.__setattr__(psdf, "_psseries", {self._column_label: self})
def _with_new_scol(self, scol: Column, *, field: Optional[InternalField] = None) -> "Series":
"""
Copy pandas-on-Spark Series with the new Spark Column.
:param scol: the new Spark Column
:return: the copied Series
"""
name = name_like_string(self._column_label)
internal = self._internal.copy(
data_spark_columns=[scol.alias(name)],
data_fields=[
field if field is None or field.struct_field is None else field.copy(name=name)
],
)
return first_series(DataFrame(internal))
spark = CachedAccessor("spark", SparkSeriesMethods)
@property
def dtypes(self) -> Dtype:
"""Return the dtype object of the underlying data.
>>> s = ps.Series(list('abc'))
>>> s.dtype == s.dtypes
True
"""
return self.dtype
@property
def axes(self) -> List["Index"]:
"""
Return a list of the row axis labels.
Examples
--------
>>> psser = ps.Series([1, 2, 3])
>>> psser.axes
[Int64Index([0, 1, 2], dtype='int64')]
"""
return [self.index]
# Arithmetic Operators
def add(self, other: Any) -> "Series":
return self + other
add.__doc__ = _flex_doc_SERIES.format(
desc="Addition",
op_name="+",
equiv="series + other",
reverse="radd",
series_examples=_add_example_SERIES,
)
def radd(self, other: Any) -> "Series":
return other + self
radd.__doc__ = _flex_doc_SERIES.format(
desc="Reverse Addition",
op_name="+",
equiv="other + series",
reverse="add",
series_examples=_add_example_SERIES,
)
def div(self, other: Any) -> "Series":
return self / other
div.__doc__ = _flex_doc_SERIES.format(
desc="Floating division",
op_name="/",
equiv="series / other",
reverse="rdiv",
series_examples=_div_example_SERIES,
)
divide = div
def rdiv(self, other: Any) -> "Series":
return other / self
rdiv.__doc__ = _flex_doc_SERIES.format(
desc="Reverse Floating division",
op_name="/",
equiv="other / series",
reverse="div",
series_examples=_div_example_SERIES,
)
def truediv(self, other: Any) -> "Series":
return self / other
truediv.__doc__ = _flex_doc_SERIES.format(
desc="Floating division",
op_name="/",
equiv="series / other",
reverse="rtruediv",
series_examples=_div_example_SERIES,
)
def rtruediv(self, other: Any) -> "Series":
return other / self
rtruediv.__doc__ = _flex_doc_SERIES.format(
desc="Reverse Floating division",
op_name="/",
equiv="other / series",
reverse="truediv",
series_examples=_div_example_SERIES,
)
def mul(self, other: Any) -> "Series":
return self * other
mul.__doc__ = _flex_doc_SERIES.format(
desc="Multiplication",
op_name="*",
equiv="series * other",
reverse="rmul",
series_examples=_mul_example_SERIES,
)
multiply = mul
def rmul(self, other: Any) -> "Series":
return other * self
rmul.__doc__ = _flex_doc_SERIES.format(
desc="Reverse Multiplication",
op_name="*",
equiv="other * series",
reverse="mul",
series_examples=_mul_example_SERIES,
)
def sub(self, other: Any) -> "Series":
return self - other
sub.__doc__ = _flex_doc_SERIES.format(
desc="Subtraction",
op_name="-",
equiv="series - other",
reverse="rsub",
series_examples=_sub_example_SERIES,
)
subtract = sub
def rsub(self, other: Any) -> "Series":
return other - self
rsub.__doc__ = _flex_doc_SERIES.format(
desc="Reverse Subtraction",
op_name="-",
equiv="other - series",
reverse="sub",
series_examples=_sub_example_SERIES,
)
def mod(self, other: Any) -> "Series":
return self % other
mod.__doc__ = _flex_doc_SERIES.format(
desc="Modulo",
op_name="%",
equiv="series % other",
reverse="rmod",
series_examples=_mod_example_SERIES,
)
def rmod(self, other: Any) -> "Series":
return other % self
rmod.__doc__ = _flex_doc_SERIES.format(
desc="Reverse Modulo",
op_name="%",
equiv="other % series",
reverse="mod",
series_examples=_mod_example_SERIES,
)
def pow(self, other: Any) -> "Series":
return self ** other
pow.__doc__ = _flex_doc_SERIES.format(
desc="Exponential power of series",
op_name="**",
equiv="series ** other",
reverse="rpow",
series_examples=_pow_example_SERIES,
)
def rpow(self, other: Any) -> "Series":
return other ** self
rpow.__doc__ = _flex_doc_SERIES.format(
desc="Reverse Exponential power",
op_name="**",
equiv="other ** series",
reverse="pow",
series_examples=_pow_example_SERIES,
)
def floordiv(self, other: Any) -> "Series":
return self // other
floordiv.__doc__ = _flex_doc_SERIES.format(
desc="Integer division",
op_name="//",
equiv="series // other",
reverse="rfloordiv",
series_examples=_floordiv_example_SERIES,
)
def rfloordiv(self, other: Any) -> "Series":
return other // self
rfloordiv.__doc__ = _flex_doc_SERIES.format(
desc="Reverse Integer division",
op_name="//",
equiv="other // series",
reverse="floordiv",
series_examples=_floordiv_example_SERIES,
)
# create accessor for pandas-on-Spark specific methods.
pandas_on_spark = CachedAccessor("pandas_on_spark", PandasOnSparkSeriesMethods)
# keep the name "koalas" for backward compatibility.
koalas = CachedAccessor("koalas", PandasOnSparkSeriesMethods)
# Comparison Operators
def eq(self, other: Any) -> "Series":
"""
Compare if the current value is equal to the other.
>>> df = ps.DataFrame({'a': [1, 2, 3, 4],
... 'b': [1, np.nan, 1, np.nan]},
... index=['a', 'b', 'c', 'd'], columns=['a', 'b'])
>>> df.a == 1
a True
b False
c False
d False
Name: a, dtype: bool
>>> df.b.eq(1)
a True
b False
c True
d False
Name: b, dtype: bool
"""
return self == other
equals = eq
def gt(self, other: Any) -> "Series":
"""
Compare if the current value is greater than the other.
>>> df = ps.DataFrame({'a': [1, 2, 3, 4],
... 'b': [1, np.nan, 1, np.nan]},
... index=['a', 'b', 'c', 'd'], columns=['a', 'b'])
>>> df.a > 1
a False
b True
c True
d True
Name: a, dtype: bool
>>> df.b.gt(1)
a False
b False
c False
d False
Name: b, dtype: bool
"""
return self > other
def ge(self, other: Any) -> "Series":
"""
Compare if the current value is greater than or equal to the other.
>>> df = ps.DataFrame({'a': [1, 2, 3, 4],
... 'b': [1, np.nan, 1, np.nan]},
... index=['a', 'b', 'c', 'd'], columns=['a', 'b'])
>>> df.a >= 2
a False
b True
c True
d True
Name: a, dtype: bool
>>> df.b.ge(2)
a False
b False
c False
d False
Name: b, dtype: bool
"""
return self >= other
def lt(self, other: Any) -> "Series":
"""
Compare if the current value is less than the other.
>>> df = ps.DataFrame({'a': [1, 2, 3, 4],
... 'b': [1, np.nan, 1, np.nan]},
... index=['a', 'b', 'c', 'd'], columns=['a', 'b'])
>>> df.a < 1
a False
b False
c False
d False
Name: a, dtype: bool
>>> df.b.lt(2)
a True
b False
c True
d False
Name: b, dtype: bool
"""
return self < other
def le(self, other: Any) -> "Series":
"""
Compare if the current value is less than or equal to the other.
>>> df = ps.DataFrame({'a': [1, 2, 3, 4],
... 'b': [1, np.nan, 1, np.nan]},
... index=['a', 'b', 'c', 'd'], columns=['a', 'b'])
>>> df.a <= 2
a True
b True
c False
d False
Name: a, dtype: bool
>>> df.b.le(2)
a True
b False
c True
d False
Name: b, dtype: bool
"""
return self <= other
def ne(self, other: Any) -> "Series":
"""
Compare if the current value is not equal to the other.
>>> df = ps.DataFrame({'a': [1, 2, 3, 4],
... 'b': [1, np.nan, 1, np.nan]},
... index=['a', 'b', 'c', 'd'], columns=['a', 'b'])
>>> df.a != 1
a False
b True
c True
d True
Name: a, dtype: bool
>>> df.b.ne(1)
a False
b True
c False
d True
Name: b, dtype: bool
"""
return self != other
def divmod(self, other: Any) -> Tuple["Series", "Series"]:
"""
Return Integer division and modulo of series and other, element-wise
(binary operator `divmod`).
Parameters
----------
other : Series or scalar value
Returns
-------
2-Tuple of Series
The result of the operation.
See Also
--------
Series.rdivmod
"""
return self.floordiv(other), self.mod(other)
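# Hedged note (assumption): like Python's built-in divmod, this simply pairs the two
# existing operators, e.g. ps.Series([5, 7]).divmod(2) yields the floordiv Series
# (2, 3) and the mod Series (1, 1) as a 2-tuple.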
def rdivmod(self, other: Any) -> Tuple["Series", "Series"]:
"""
Return Integer division and modulo of series and other, element-wise
(binary operator `rdivmod`).
Parameters
----------
other : Series or scalar value
Returns
-------
2-Tuple of Series
The result of the operation.
See Also
--------
Series.divmod
"""
return self.rfloordiv(other), self.rmod(other)
def between(self, left: Any, right: Any, inclusive: bool = True) -> "Series":
"""
Return boolean Series equivalent to left <= series <= right.
This function returns a boolean vector containing `True` wherever the
corresponding Series element is between the boundary values `left` and
`right`. NA values are treated as `False`.
Parameters
----------
left : scalar or list-like
Left boundary.
right : scalar or list-like
Right boundary.
inclusive : bool, default True
Include boundaries.
Returns
-------
Series
Series representing whether each element is between left and
right (inclusive).
See Also
--------
Series.gt : Greater than of series and other.
Series.lt : Less than of series and other.
Notes
-----
This function is equivalent to ``(left <= ser) & (ser <= right)``
Examples
--------
>>> s = ps.Series([2, 0, 4, 8, np.nan])
Boundary values are included by default:
>>> s.between(1, 4)
0 True
1 False
2 True
3 False
4 False
dtype: bool
With `inclusive` set to ``False`` boundary values are excluded:
>>> s.between(1, 4, inclusive=False)
0 True
1 False
2 False
3 False
4 False
dtype: bool
`left` and `right` can be any scalar value:
>>> s = ps.Series(['Alice', 'Bob', 'Carol', 'Eve'])
>>> s.between('Anna', 'Daniel')
0 False
1 True
2 True
3 False
dtype: bool
"""
if inclusive:
lmask = self >= left
rmask = self <= right
else:
lmask = self > left
rmask = self < right
return lmask & rmask
# TODO: arg should support Series
# TODO: NaN and None
def map(self, arg: Union[Dict, Callable]) -> "Series":
"""
Map values of Series according to input correspondence.
Used for substituting each value in a Series with another value,
which may be derived from a function or a ``dict``.
.. note:: make sure the size of the dictionary is not huge because it could
degrade performance or throw an OutOfMemoryError due to a huge
expression within Spark. Consider passing a function as the input
instead in this case.
Parameters
----------
arg : function or dict
Mapping correspondence.
Returns
-------
Series
Same index as caller.
See Also
--------
Series.apply : For applying more complex functions on a Series.
DataFrame.applymap : Apply a function elementwise on a whole DataFrame.
Notes
-----
When ``arg`` is a dictionary, values in Series that are not in the
dictionary (as keys) are converted to ``None``. However, if the
dictionary is a ``dict`` subclass that defines ``__missing__`` (i.e.
provides a method for default values), then this default is used
rather than ``None``.
Examples
--------
>>> s = ps.Series(['cat', 'dog', None, 'rabbit'])
>>> s
0 cat
1 dog
2 None
3 rabbit
dtype: object
``map`` accepts a ``dict``. Values that are not found
in the ``dict`` are converted to ``None``, unless the dict has a default
value (e.g. ``defaultdict``):
>>> s.map({'cat': 'kitten', 'dog': 'puppy'})
0 kitten
1 puppy
2 None
3 None
dtype: object
It also accepts a function:
>>> def format(x) -> str:
... return 'I am a {}'.format(x)
>>> s.map(format)
0 I am a cat
1 I am a dog
2 I am a None
3 I am a rabbit
dtype: object
"""
if isinstance(arg, dict):
is_start = True
# In case dictionary is empty.
current = F.when(SF.lit(False), SF.lit(None).cast(self.spark.data_type))
for to_replace, value in arg.items():
if is_start:
current = F.when(self.spark.column == SF.lit(to_replace), value)
is_start = False
else:
current = current.when(self.spark.column == SF.lit(to_replace), value)
if hasattr(arg, "__missing__"):
tmp_val = arg[np._NoValue]
del arg[np._NoValue] # Remove in case it's set in defaultdict.
current = current.otherwise(SF.lit(tmp_val))
else:
current = current.otherwise(SF.lit(None).cast(self.spark.data_type))
return self._with_new_scol(current)
else:
return self.apply(arg)
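# Hedged illustration (assumption): for arg={'cat': 'kitten', 'dog': 'puppy'} the chained
# F.when calls above translate to roughly
#   CASE WHEN (col = 'cat') THEN 'kitten' WHEN (col = 'dog') THEN 'puppy' ELSE NULL END
# which is why a very large dictionary inflates the Spark expression tree, as the note in
# the docstring warns.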
@property
def shape(self) -> Tuple[int]:
"""Return a tuple of the shape of the underlying data."""
return (len(self),)
@property
def name(self) -> Name:
"""Return name of the Series."""
name = self._column_label
if name is not None and len(name) == 1:
return name[0]
else:
return name
@name.setter
def name(self, name: Name) -> None:
self.rename(name, inplace=True)
# TODO: Functionality and documentation should be matched. Currently, changing index labels
# by passing a dictionary or a function is not supported.
def rename(self, index: Optional[Name] = None, **kwargs: Any) -> "Series":
"""
Alter Series name.
Parameters
----------
index : scalar
Scalar will alter the ``Series.name`` attribute.
inplace : bool, default False
Whether to return a new Series. If True then value of copy is
ignored.
Returns
-------
Series
Series with name altered.
Examples
--------
>>> s = ps.Series([1, 2, 3])
>>> s
0 1
1 2
2 3
dtype: int64
>>> s.rename("my_name") # scalar, changes Series.name
0 1
1 2
2 3
Name: my_name, dtype: int64
"""
if index is None:
pass
elif not is_hashable(index):
raise TypeError("Series.name must be a hashable type")
elif not isinstance(index, tuple):
index = (index,)
name = name_like_string(index)
scol = self.spark.column.alias(name)
field = self._internal.data_fields[0].copy(name=name)
internal = self._internal.copy(
column_labels=[index],
data_spark_columns=[scol],
data_fields=[field],
column_label_names=None,
)
psdf = DataFrame(internal) # type: DataFrame
if kwargs.get("inplace", False):
self._col_label = index
self._update_anchor(psdf)
return self
else:
return first_series(psdf)
def rename_axis(
self, mapper: Optional[Any] = None, index: Optional[Any] = None, inplace: bool = False
) -> Optional["Series"]:
"""
Set the name of the axis for the index or columns.
Parameters
----------
mapper, index : scalar, list-like, dict-like or function, optional
A scalar, list-like, dict-like or functions transformations to
apply to the index values.
inplace : bool, default False
Modifies the object directly, instead of creating a new Series.
Returns
-------
Series, or None if `inplace` is True.
See Also
--------
Series.rename : Alter Series index labels or name.
DataFrame.rename : Alter DataFrame index labels or name.
Index.rename : Set new names on index.
Examples
--------
>>> s = ps.Series(["dog", "cat", "monkey"], name="animal")
>>> s # doctest: +NORMALIZE_WHITESPACE
0 dog
1 cat
2 monkey
Name: animal, dtype: object
>>> s.rename_axis("index").sort_index() # doctest: +NORMALIZE_WHITESPACE
index
0 dog
1 cat
2 monkey
Name: animal, dtype: object
**MultiIndex**
>>> index = pd.MultiIndex.from_product([['mammal'],
... ['dog', 'cat', 'monkey']],
... names=['type', 'name'])
>>> s = ps.Series([4, 4, 2], index=index, name='num_legs')
>>> s # doctest: +NORMALIZE_WHITESPACE
type name
mammal dog 4
cat 4
monkey 2
Name: num_legs, dtype: int64
>>> s.rename_axis(index={'type': 'class'}).sort_index() # doctest: +NORMALIZE_WHITESPACE
class name
mammal cat 4
dog 4
monkey 2
Name: num_legs, dtype: int64
>>> s.rename_axis(index=str.upper).sort_index() # doctest: +NORMALIZE_WHITESPACE
TYPE NAME
mammal cat 4
dog 4
monkey 2
Name: num_legs, dtype: int64
"""
psdf = self.to_frame().rename_axis(mapper=mapper, index=index, inplace=False)
if inplace:
self._update_anchor(psdf)
return None
else:
return first_series(psdf)
@property
def index(self) -> "ps.Index":
"""The index (axis labels) Column of the Series.
See Also
--------
Index
"""
return self._psdf.index
@property
def is_unique(self) -> bool:
"""
Return boolean if values in the object are unique
Returns
-------
is_unique : boolean
>>> ps.Series([1, 2, 3]).is_unique
True
>>> ps.Series([1, 2, 2]).is_unique
False
>>> ps.Series([1, 2, 3, None]).is_unique
True
"""
scol = self.spark.column
# Here we check:
# 1. that the distinct count (ignoring nulls) equals the count of non-null values,
#    i.e. the non-null values are all unique.
# 2. the number of null values, to check that null appears at most once.
#
# This workaround calculates the distinct count including nulls in a single pass.
# Note that COUNT(DISTINCT expr) in Spark is designed to ignore nulls.
return self._internal.spark_frame.select(
(F.count(scol) == F.countDistinct(scol))
& (F.count(F.when(scol.isNull(), 1).otherwise(None)) <= 1)
).collect()[0][0]
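# Hedged note (assumption): the expression above is roughly the SQL predicate
#   COUNT(col) = COUNT(DISTINCT col) AND COUNT(CASE WHEN col IS NULL THEN 1 END) <= 1
# i.e. the non-null values must all be distinct and at most one null may appear,
# evaluated in a single pass over the Spark frame.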
def reset_index(
self,
level: Optional[Union[int, Name, Sequence[Union[int, Name]]]] = None,
drop: bool = False,
name: Optional[Name] = None,
inplace: bool = False,
) -> Optional[Union["Series", DataFrame]]:
"""
Generate a new DataFrame or Series with the index reset.
This is useful when the index needs to be treated as a column,
or when the index is meaningless and needs to be reset
to the default before another operation.
Parameters
----------
level : int, str, tuple, or list, default optional
For a Series with a MultiIndex, only remove the specified levels from the index.
Removes all levels by default.
drop : bool, default False
Just reset the index, without inserting it as a column in the new DataFrame.
name : object, optional
The name to use for the column containing the original Series values.
Uses self.name by default. This argument is ignored when drop is True.
inplace : bool, default False
Modify the Series in place (do not create a new object).
Returns
-------
Series or DataFrame
When `drop` is False (the default), a DataFrame is returned.
The newly created columns will come first in the DataFrame,
followed by the original Series values.
When `drop` is True, a `Series` is returned.
In either case, if ``inplace=True``, no value is returned.
Examples
--------
>>> s = ps.Series([1, 2, 3, 4], index=pd.Index(['a', 'b', 'c', 'd'], name='idx'))
Generate a DataFrame with default index.
>>> s.reset_index()
idx 0
0 a 1
1 b 2
2 c 3
3 d 4
To specify the name of the new column use `name`.
>>> s.reset_index(name='values')
idx values
0 a 1
1 b 2
2 c 3
3 d 4
To generate a new Series with the default set `drop` to True.
>>> s.reset_index(drop=True)
0 1
1 2
2 3
3 4
dtype: int64
To update the Series in place, without generating a new one
set `inplace` to True. Note that it also requires ``drop=True``.
>>> s.reset_index(inplace=True, drop=True)
>>> s
0 1
1 2
2 3
3 4
dtype: int64
"""
inplace = validate_bool_kwarg(inplace, "inplace")
if inplace and not drop:
raise TypeError("Cannot reset_index inplace on a Series to create a DataFrame")
if drop:
psdf = self._psdf[[self.name]]
else:
psser = self
if name is not None:
psser = psser.rename(name)
psdf = psser.to_frame()
psdf = psdf.reset_index(level=level, drop=drop)
if drop:
if inplace:
self._update_anchor(psdf)
return None
else:
return first_series(psdf)
else:
return psdf
def to_frame(self, name: Optional[Name] = None) -> DataFrame:
"""
Convert Series to DataFrame.
Parameters
----------
name : object, default None
The passed name should substitute for the series name (if it has
one).
Returns
-------
DataFrame
DataFrame representation of Series.
Examples
--------
>>> s = ps.Series(["a", "b", "c"])
>>> s.to_frame()
0
0 a
1 b
2 c
>>> s = ps.Series(["a", "b", "c"], name="vals")
>>> s.to_frame()
vals
0 a
1 b
2 c
"""
if name is not None:
renamed = self.rename(name)
elif self._column_label is None:
renamed = self.rename(DEFAULT_SERIES_NAME)
else:
renamed = self
return DataFrame(renamed._internal)
to_dataframe = to_frame
def to_string(
self,
buf: Optional[IO[str]] = None,
na_rep: str = "NaN",
float_format: Optional[Callable[[float], str]] = None,
header: bool = True,
index: bool = True,
length: bool = False,
dtype: bool = False,
name: bool = False,
max_rows: Optional[int] = None,
) -> Optional[str]:
"""
Render a string representation of the Series.
.. note:: This method should only be used if the resulting pandas object is expected
to be small, as all the data is loaded into the driver's memory. If the input
is large, set max_rows parameter.
Parameters
----------
buf : StringIO-like, optional
buffer to write to
na_rep : string, optional
string representation of NAN to use, default 'NaN'
float_format : one-parameter function, optional
formatter function to apply to columns' elements if they are floats
default None
header : boolean, default True
Add the Series header (index name)
index : bool, optional
Add index (row) labels, default True
length : boolean, default False
Add the Series length
dtype : boolean, default False
Add the Series dtype
name : boolean, default False
Add the Series name if not None
max_rows : int, optional
Maximum number of rows to show before truncating. If None, show
all.
Returns
-------
formatted : string (if not buffer passed)
Examples
--------
>>> df = ps.DataFrame([(.2, .3), (.0, .6), (.6, .0), (.2, .1)], columns=['dogs', 'cats'])
>>> print(df['dogs'].to_string())
0 0.2
1 0.0
2 0.6
3 0.2
>>> print(df['dogs'].to_string(max_rows=2))
0 0.2
1 0.0
"""
# Make sure locals() call is at the top of the function so we don't capture local variables.
args = locals()
if max_rows is not None:
psseries = self.head(max_rows)
else:
psseries = self
return validate_arguments_and_invoke_function(
psseries._to_internal_pandas(), self.to_string, pd.Series.to_string, args
)
def to_clipboard(self, excel: bool = True, sep: Optional[str] = None, **kwargs: Any) -> None:
# Docstring defined below by reusing DataFrame.to_clipboard's.
args = locals()
psseries = self
return validate_arguments_and_invoke_function(
psseries._to_internal_pandas(), self.to_clipboard, pd.Series.to_clipboard, args
)
to_clipboard.__doc__ = DataFrame.to_clipboard.__doc__
def to_dict(self, into: Type = dict) -> Mapping:
"""
Convert Series to {label -> value} dict or dict-like object.
.. note:: This method should only be used if the resulting pandas DataFrame is expected
to be small, as all the data is loaded into the driver's memory.
Parameters
----------
into : class, default dict
The collections.abc.Mapping subclass to use as the return
object. Can be the actual class or an empty
instance of the mapping type you want. If you want a
collections.defaultdict, you must pass it initialized.
Returns
-------
collections.abc.Mapping
Key-value representation of Series.
Examples
--------
>>> s = ps.Series([1, 2, 3, 4])
>>> s_dict = s.to_dict()
>>> sorted(s_dict.items())
[(0, 1), (1, 2), (2, 3), (3, 4)]
>>> from collections import OrderedDict, defaultdict
>>> s.to_dict(OrderedDict)
OrderedDict([(0, 1), (1, 2), (2, 3), (3, 4)])
>>> dd = defaultdict(list)
>>> s.to_dict(dd) # doctest: +ELLIPSIS
defaultdict(<class 'list'>, {...})
"""
# Make sure locals() call is at the top of the function so we don't capture local variables.
args = locals()
psseries = self
return validate_arguments_and_invoke_function(
psseries._to_internal_pandas(), self.to_dict, pd.Series.to_dict, args
)
def to_latex(
self,
buf: Optional[IO[str]] = None,
columns: Optional[List[Name]] = None,
col_space: Optional[int] = None,
header: bool = True,
index: bool = True,
na_rep: str = "NaN",
formatters: Optional[
Union[List[Callable[[Any], str]], Dict[Name, Callable[[Any], str]]]
] = None,
float_format: Optional[Callable[[float], str]] = None,
sparsify: Optional[bool] = None,
index_names: bool = True,
bold_rows: bool = False,
column_format: Optional[str] = None,
longtable: Optional[bool] = None,
escape: Optional[bool] = None,
encoding: Optional[str] = None,
decimal: str = ".",
multicolumn: Optional[bool] = None,
multicolumn_format: Optional[str] = None,
multirow: Optional[bool] = None,
) -> Optional[str]:
args = locals()
psseries = self
return validate_arguments_and_invoke_function(
psseries._to_internal_pandas(), self.to_latex, pd.Series.to_latex, args
)
to_latex.__doc__ = DataFrame.to_latex.__doc__
def to_pandas(self) -> pd.Series:
"""
Return a pandas Series.
.. note:: This method should only be used if the resulting pandas object is expected
to be small, as all the data is loaded into the driver's memory.
Examples
--------
>>> df = ps.DataFrame([(.2, .3), (.0, .6), (.6, .0), (.2, .1)], columns=['dogs', 'cats'])
>>> df['dogs'].to_pandas()
0 0.2
1 0.0
2 0.6
3 0.2
Name: dogs, dtype: float64
"""
return self._to_internal_pandas().copy()
def to_list(self) -> List:
"""
Return a list of the values.
These are each a scalar type, which is a Python scalar
(for str, int, float) or a pandas scalar
(for Timestamp/Timedelta/Interval/Period)
.. note:: This method should only be used if the resulting list is expected
to be small, as all the data is loaded into the driver's memory.
"""
return self._to_internal_pandas().tolist()
tolist = to_list
def drop_duplicates(self, keep: str = "first", inplace: bool = False) -> Optional["Series"]:
"""
Return Series with duplicate values removed.
Parameters
----------
keep : {'first', 'last', ``False``}, default 'first'
Method to handle dropping duplicates:
- 'first' : Drop duplicates except for the first occurrence.
- 'last' : Drop duplicates except for the last occurrence.
- ``False`` : Drop all duplicates.
inplace : bool, default ``False``
If ``True``, performs operation inplace and returns None.
Returns
-------
Series
Series with duplicates dropped.
Examples
--------
Generate a Series with duplicated entries.
>>> s = ps.Series(['lama', 'cow', 'lama', 'beetle', 'lama', 'hippo'],
... name='animal')
>>> s.sort_index()
0 lama
1 cow
2 lama
3 beetle
4 lama
5 hippo
Name: animal, dtype: object
With the 'keep' parameter, the selection behaviour of duplicated values
can be changed. The value 'first' keeps the first occurrence for each
set of duplicated entries. The default value of keep is 'first'.
>>> s.drop_duplicates().sort_index()
0 lama
1 cow
3 beetle
5 hippo
Name: animal, dtype: object
The value 'last' for parameter 'keep' keeps the last occurrence for
each set of duplicated entries.
>>> s.drop_duplicates(keep='last').sort_index()
1 cow
3 beetle
4 lama
5 hippo
Name: animal, dtype: object
The value ``False`` for parameter 'keep' discards all sets of
duplicated entries. Setting the value of 'inplace' to ``True`` performs
the operation inplace and returns ``None``.
>>> s.drop_duplicates(keep=False, inplace=True)
>>> s.sort_index()
1 cow
3 beetle
5 hippo
Name: animal, dtype: object
"""
inplace = validate_bool_kwarg(inplace, "inplace")
psdf = self._psdf[[self.name]].drop_duplicates(keep=keep)
if inplace:
self._update_anchor(psdf)
return None
else:
return first_series(psdf)
def reindex(self, index: Optional[Any] = None, fill_value: Optional[Any] = None) -> "Series":
"""
Conform Series to new index with optional filling logic, placing
NA/NaN in locations having no value in the previous index. A new object
is produced.
Parameters
----------
index: array-like, optional
New labels / index to conform to, should be specified using keywords.
Preferably an Index object to avoid duplicating data
fill_value : scalar, default np.NaN
Value to use for missing values. Defaults to NaN, but can be any
"compatible" value.
Returns
-------
Series with changed index.
See Also
--------
Series.reset_index : Remove row labels or move them to new columns.
Examples
--------
Create a series with some fictional data.
>>> index = ['Firefox', 'Chrome', 'Safari', 'IE10', 'Konqueror']
>>> ser = ps.Series([200, 200, 404, 404, 301],
... index=index, name='http_status')
>>> ser
Firefox 200
Chrome 200
Safari 404
IE10 404
Konqueror 301
Name: http_status, dtype: int64
Create a new index and reindex the Series. By default
values in the new index that do not have corresponding
records in the Series are assigned ``NaN``.
>>> new_index = ['Safari', 'Iceweasel', 'Comodo Dragon', 'IE10',
... 'Chrome']
>>> ser.reindex(new_index).sort_index()
Chrome 200.0
Comodo Dragon NaN
IE10 404.0
Iceweasel NaN
Safari 404.0
Name: http_status, dtype: float64
We can fill in the missing values by passing a value to
the keyword ``fill_value``.
>>> ser.reindex(new_index, fill_value=0).sort_index()
Chrome 200
Comodo Dragon 0
IE10 404
Iceweasel 0
Safari 404
Name: http_status, dtype: int64
To further illustrate the filling functionality in
``reindex``, we will create a Series with a
monotonically increasing index (for example, a sequence
of dates).
>>> date_index = pd.date_range('1/1/2010', periods=6, freq='D')
>>> ser2 = ps.Series([100, 101, np.nan, 100, 89, 88],
... name='prices', index=date_index)
>>> ser2.sort_index()
2010-01-01 100.0
2010-01-02 101.0
2010-01-03 NaN
2010-01-04 100.0
2010-01-05 89.0
2010-01-06 88.0
Name: prices, dtype: float64
Suppose we decide to expand the series to cover a wider
date range.
>>> date_index2 = pd.date_range('12/29/2009', periods=10, freq='D')
>>> ser2.reindex(date_index2).sort_index()
2009-12-29 NaN
2009-12-30 NaN
2009-12-31 NaN
2010-01-01 100.0
2010-01-02 101.0
2010-01-03 NaN
2010-01-04 100.0
2010-01-05 89.0
2010-01-06 88.0
2010-01-07 NaN
Name: prices, dtype: float64
"""
return first_series(self.to_frame().reindex(index=index, fill_value=fill_value)).rename(
self.name
)
def reindex_like(self, other: Union["Series", "DataFrame"]) -> "Series":
"""
Return a Series with matching indices as other object.
Conform the object to the same index on all axes. Places NA/NaN in locations
having no value in the previous index.
Parameters
----------
other : Series or DataFrame
Its row and column indices are used to define the new indices
of this object.
Returns
-------
Series
Series with changed indices on each axis.
See Also
--------
DataFrame.set_index : Set row labels.
DataFrame.reset_index : Remove row labels or move them to new columns.
DataFrame.reindex : Change to new indices or expand indices.
Notes
-----
Same as calling
``.reindex(index=other.index, ...)``.
Examples
--------
>>> s1 = ps.Series([24.3, 31.0, 22.0, 35.0],
... index=pd.date_range(start='2014-02-12',
... end='2014-02-15', freq='D'),
... name="temp_celsius")
>>> s1
2014-02-12 24.3
2014-02-13 31.0
2014-02-14 22.0
2014-02-15 35.0
Name: temp_celsius, dtype: float64
>>> s2 = ps.Series(["low", "low", "medium"],
... index=pd.DatetimeIndex(['2014-02-12', '2014-02-13',
... '2014-02-15']),
... name="winspeed")
>>> s2
2014-02-12 low
2014-02-13 low
2014-02-15 medium
Name: winspeed, dtype: object
>>> s2.reindex_like(s1).sort_index()
2014-02-12 low
2014-02-13 low
2014-02-14 None
2014-02-15 medium
Name: winspeed, dtype: object
"""
if isinstance(other, (Series, DataFrame)):
return self.reindex(index=other.index)
else:
raise TypeError("other must be a pandas-on-Spark Series or DataFrame")
def fillna(
self,
value: Optional[Any] = None,
method: Optional[str] = None,
axis: Optional[Axis] = None,
inplace: bool = False,
limit: Optional[int] = None,
) -> Optional["Series"]:
"""Fill NA/NaN values.
.. note:: the current implementation of the 'method' parameter in fillna uses Spark's Window
without specifying a partition specification. This moves all data into a
single partition on a single machine and could cause serious
performance degradation. Avoid this method with very large datasets.
Parameters
----------
value : scalar, dict, Series
Value to use to fill holes. alternately a dict/Series of values
specifying which value to use for each column.
DataFrame is not supported.
method : {'backfill', 'bfill', 'pad', 'ffill', None}, default None
Method to use for filling holes in reindexed Series.
pad / ffill: propagate last valid observation forward to next valid.
backfill / bfill: use NEXT valid observation to fill gap.
axis : {0 or `index`}
1 and `columns` are not supported.
inplace : boolean, default False
Fill in place (do not create a new object)
limit : int, default None
If method is specified, this is the maximum number of consecutive NaN values to
forward/backward fill. In other words, if there is a gap with more than this number of
consecutive NaNs, it will only be partially filled. If method is not specified,
this is the maximum number of entries along the entire axis where NaNs will be filled.
Must be greater than 0 if not None
Returns
-------
Series
Series with NA entries filled.
Examples
--------
>>> s = ps.Series([np.nan, 2, 3, 4, np.nan, 6], name='x')
>>> s
0 NaN
1 2.0
2 3.0
3 4.0
4 NaN
5 6.0
Name: x, dtype: float64
Replace all NaN elements with 0s.
>>> s.fillna(0)
0 0.0
1 2.0
2 3.0
3 4.0
4 0.0
5 6.0
Name: x, dtype: float64
We can also propagate non-null values forward or backward.
>>> s.fillna(method='ffill')
0 NaN
1 2.0
2 3.0
3 4.0
4 4.0
5 6.0
Name: x, dtype: float64
>>> s = ps.Series([np.nan, 'a', 'b', 'c', np.nan], name='x')
>>> s.fillna(method='ffill')
0 None
1 a
2 b
3 c
4 c
Name: x, dtype: object
"""
psser = self._fillna(value=value, method=method, axis=axis, limit=limit)
if method is not None:
psser = DataFrame(psser._psdf._internal.resolved_copy)._psser_for(self._column_label)
inplace = validate_bool_kwarg(inplace, "inplace")
if inplace:
self._psdf._update_internal_frame(psser._psdf._internal, requires_same_anchor=False)
return None
else:
return psser._with_new_scol(psser.spark.column) # TODO: dtype?
def _fillna(
self,
value: Optional[Any] = None,
method: Optional[str] = None,
axis: Optional[Axis] = None,
limit: Optional[int] = None,
part_cols: Sequence["ColumnOrName"] = (),
) -> "Series":
axis = validate_axis(axis)
if axis != 0:
raise NotImplementedError("fillna currently only works for axis=0 or axis='index'")
if (value is None) and (method is None):
raise ValueError("Must specify a fillna 'value' or 'method' parameter.")
if (method is not None) and (method not in ["ffill", "pad", "backfill", "bfill"]):
raise ValueError("Expecting 'pad', 'ffill', 'backfill' or 'bfill'.")
scol = self.spark.column
if isinstance(self.spark.data_type, (FloatType, DoubleType)):
cond = scol.isNull() | F.isnan(scol)
else:
if not self.spark.nullable:
return self.copy()
cond = scol.isNull()
if value is not None:
if not isinstance(value, (float, int, str, bool)):
raise TypeError("Unsupported type %s" % type(value).__name__)
if limit is not None:
raise ValueError("limit parameter for value is not support now")
scol = F.when(cond, value).otherwise(scol)
else:
if method in ["ffill", "pad"]:
func = F.last
end = Window.currentRow - 1
if limit is not None:
begin = Window.currentRow - limit
else:
begin = Window.unboundedPreceding
elif method in ["bfill", "backfill"]:
func = F.first
begin = Window.currentRow + 1
if limit is not None:
end = Window.currentRow + limit
else:
end = Window.unboundedFollowing
window = (
Window.partitionBy(*part_cols)
.orderBy(NATURAL_ORDER_COLUMN_NAME)
.rowsBetween(begin, end)
)
scol = F.when(cond, func(scol, True).over(window)).otherwise(scol)
return DataFrame(
self._psdf._internal.with_new_spark_column(
self._column_label, scol.alias(name_like_string(self.name)) # TODO: dtype?
)
)._psser_for(self._column_label)
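# Hedged note (assumption): for method='ffill'/'pad' the window above spans from the
# partition start (or currentRow - limit) up to the row just before the current one and
# F.last(..., ignorenulls=True) picks the most recent non-null value; 'bfill'/'backfill'
# mirrors this with F.first over the rows that follow the current one.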
def dropna(self, axis: Axis = 0, inplace: bool = False, **kwargs: Any) -> Optional["Series"]:
"""
Return a new Series with missing values removed.
Parameters
----------
axis : {0 or 'index'}, default 0
There is only one axis to drop values from.
inplace : bool, default False
If True, do operation inplace and return None.
**kwargs
Not in use.
Returns
-------
Series
Series with NA entries dropped from it.
Examples
--------
>>> ser = ps.Series([1., 2., np.nan])
>>> ser
0 1.0
1 2.0
2 NaN
dtype: float64
Drop NA values from a Series.
>>> ser.dropna()
0 1.0
1 2.0
dtype: float64
Keep the Series with valid entries in the same variable.
>>> ser.dropna(inplace=True)
>>> ser
0 1.0
1 2.0
dtype: float64
"""
inplace = validate_bool_kwarg(inplace, "inplace")
# TODO: last two examples from pandas produce different results.
psdf = self._psdf[[self.name]].dropna(axis=axis, inplace=False)
if inplace:
self._update_anchor(psdf)
return None
else:
return first_series(psdf)
def clip(self, lower: Union[float, int] = None, upper: Union[float, int] = None) -> "Series":
"""
Trim values at input threshold(s).
Assigns values outside boundary to boundary values.
Parameters
----------
lower : float or int, default None
Minimum threshold value. All values below this threshold will be set to it.
upper : float or int, default None
Maximum threshold value. All values above this threshold will be set to it.
Returns
-------
Series
Series with the values outside the clip boundaries replaced
Examples
--------
>>> ps.Series([0, 2, 4]).clip(1, 3)
0 1
1 2
2 3
dtype: int64
Notes
-----
One difference between this implementation and pandas is that running
`pd.Series(['a', 'b']).clip(0, 1)` will crash with "TypeError: '<=' not supported between
instances of 'str' and 'int'" while `ps.Series(['a', 'b']).clip(0, 1)` will output the
original Series, simply ignoring the incompatible types.
"""
if is_list_like(lower) or is_list_like(upper):
raise TypeError(
"List-like value are not supported for 'lower' and 'upper' at the " + "moment"
)
if lower is None and upper is None:
return self
if isinstance(self.spark.data_type, NumericType):
scol = self.spark.column
if lower is not None:
scol = F.when(scol < lower, lower).otherwise(scol)
if upper is not None:
scol = F.when(scol > upper, upper).otherwise(scol)
return self._with_new_scol(
scol.alias(self._internal.data_spark_column_names[0]),
field=self._internal.data_fields[0],
)
else:
return self
def drop(
self,
labels: Optional[Union[Name, List[Name]]] = None,
index: Optional[Union[Name, List[Name]]] = None,
level: Optional[int] = None,
) -> "Series":
"""
Return Series with specified index labels removed.
Remove elements of a Series based on specifying the index labels.
When using a multi-index, labels on different levels can be removed by specifying the level.
Parameters
----------
labels : single label or list-like
Index labels to drop.
index : None
Redundant for application on Series, but index can be used instead of labels.
level : int or level name, optional
For MultiIndex, level for which the labels will be removed.
Returns
-------
Series
Series with specified index labels removed.
See Also
--------
Series.dropna
Examples
--------
>>> s = ps.Series(data=np.arange(3), index=['A', 'B', 'C'])
>>> s
A 0
B 1
C 2
dtype: int64
Drop single label A
>>> s.drop('A')
B 1
C 2
dtype: int64
Drop labels B and C
>>> s.drop(labels=['B', 'C'])
A 0
dtype: int64
With 'index' rather than 'labels' returns exactly same result.
>>> s.drop(index='A')
B 1
C 2
dtype: int64
>>> s.drop(index=['B', 'C'])
A 0
dtype: int64
Also support for MultiIndex
>>> midx = pd.MultiIndex([['lama', 'cow', 'falcon'],
... ['speed', 'weight', 'length']],
... [[0, 0, 0, 1, 1, 1, 2, 2, 2],
... [0, 1, 2, 0, 1, 2, 0, 1, 2]])
>>> s = ps.Series([45, 200, 1.2, 30, 250, 1.5, 320, 1, 0.3],
... index=midx)
>>> s
lama speed 45.0
weight 200.0
length 1.2
cow speed 30.0
weight 250.0
length 1.5
falcon speed 320.0
weight 1.0
length 0.3
dtype: float64
>>> s.drop(labels='weight', level=1)
lama speed 45.0
length 1.2
cow speed 30.0
length 1.5
falcon speed 320.0
length 0.3
dtype: float64
>>> s.drop(('lama', 'weight'))
lama speed 45.0
length 1.2
cow speed 30.0
weight 250.0
length 1.5
falcon speed 320.0
weight 1.0
length 0.3
dtype: float64
>>> s.drop([('lama', 'speed'), ('falcon', 'weight')])
lama weight 200.0
length 1.2
cow speed 30.0
weight 250.0
length 1.5
falcon speed 320.0
length 0.3
dtype: float64
"""
return first_series(self._drop(labels=labels, index=index, level=level))
def _drop(
self,
labels: Optional[Union[Name, List[Name]]] = None,
index: Optional[Union[Name, List[Name]]] = None,
level: Optional[int] = None,
) -> DataFrame:
if labels is not None:
if index is not None:
raise ValueError("Cannot specify both 'labels' and 'index'")
return self._drop(index=labels, level=level)
if index is not None:
internal = self._internal
if level is None:
level = 0
if level >= internal.index_level:
raise ValueError("'level' should be less than the number of indexes")
if is_name_like_tuple(index): # type: ignore
index_list = [cast(Label, index)]
elif is_name_like_value(index):
index_list = [(index,)]
elif all(is_name_like_value(idxes, allow_tuple=False) for idxes in index):
index_list = [(idex,) for idex in index]
elif not all(is_name_like_tuple(idxes) for idxes in index):
raise ValueError(
"If the given index is a list, it "
"should only contains names as all tuples or all non tuples "
"that contain index names"
)
else:
index_list = cast(List[Label], index)
drop_index_scols = []
for idxes in index_list:
try:
index_scols = [
internal.index_spark_columns[lvl] == idx
for lvl, idx in enumerate(idxes, level)
]
except IndexError:
raise KeyError(
"Key length ({}) exceeds index depth ({})".format(
internal.index_level, len(idxes)
)
)
drop_index_scols.append(reduce(lambda x, y: x & y, index_scols))
cond = ~reduce(lambda x, y: x | y, drop_index_scols)
return DataFrame(internal.with_filter(cond))
else:
raise ValueError("Need to specify at least one of 'labels' or 'index'")
def head(self, n: int = 5) -> "Series":
"""
Return the first n rows.
This function returns the first n rows for the object based on position.
It is useful for quickly testing if your object has the right type of data in it.
Parameters
----------
n : Integer, default = 5
Returns
-------
The first n rows of the caller object.
Examples
--------
>>> df = ps.DataFrame({'animal':['alligator', 'bee', 'falcon', 'lion']})
>>> df.animal.head(2) # doctest: +NORMALIZE_WHITESPACE
0 alligator
1 bee
Name: animal, dtype: object
"""
return first_series(self.to_frame().head(n)).rename(self.name)
def last(self, offset: Union[str, DateOffset]) -> "Series":
"""
Select final periods of time series data based on a date offset.
When having a Series with dates as index, this function can
select the last few elements based on a date offset.
Parameters
----------
offset : str or DateOffset
The offset length of the data that will be selected. For instance,
'3D' will display all the rows having their index within the last 3 days.
Returns
-------
Series
A subset of the caller.
Raises
------
TypeError
If the index is not a :class:`DatetimeIndex`
Examples
--------
>>> index = pd.date_range('2018-04-09', periods=4, freq='2D')
>>> psser = ps.Series([1, 2, 3, 4], index=index)
>>> psser
2018-04-09 1
2018-04-11 2
2018-04-13 3
2018-04-15 4
dtype: int64
Get the rows for the last 3 days:
>>> psser.last('3D')
2018-04-13 3
2018-04-15 4
dtype: int64
Notice that data for the last 3 calendar days was returned, not the last
3 observed days in the dataset, and therefore data for 2018-04-11 was
not returned.
"""
return first_series(self.to_frame().last(offset)).rename(self.name)
def first(self, offset: Union[str, DateOffset]) -> "Series":
"""
Select first periods of time series data based on a date offset.
When having a Series with dates as index, this function can
select the first few elements based on a date offset.
Parameters
----------
offset : str or DateOffset
The offset length of the data that will be selected. For instance,
'3D' will display all the rows having their index within the first 3 days.
Returns
-------
Series
A subset of the caller.
Raises
------
TypeError
If the index is not a :class:`DatetimeIndex`
Examples
--------
>>> index = pd.date_range('2018-04-09', periods=4, freq='2D')
>>> psser = ps.Series([1, 2, 3, 4], index=index)
>>> psser
2018-04-09 1
2018-04-11 2
2018-04-13 3
2018-04-15 4
dtype: int64
Get the rows for the first 3 days:
>>> psser.first('3D')
2018-04-09 1
2018-04-11 2
dtype: int64
Notice that data for the first 3 calendar days was returned, not the first
3 observed days in the dataset, and therefore data for 2018-04-13 was
not returned.
"""
return first_series(self.to_frame().first(offset)).rename(self.name)
# TODO: Categorical type isn't supported (due to PySpark's limitation) and
# some doctests related to timestamps were not added.
def unique(self) -> "Series":
"""
Return unique values of Series object.
Uniques are returned in order of appearance. Hash table-based unique,
therefore does NOT sort.
.. note:: This method returns newly created Series whereas pandas returns
the unique values as a NumPy array.
Returns
-------
Returns the unique values as a Series.
See Also
--------
Index.unique
groupby.SeriesGroupBy.unique
Examples
--------
>>> psser = ps.Series([2, 1, 3, 3], name='A')
>>> psser.unique().sort_values() # doctest: +NORMALIZE_WHITESPACE, +ELLIPSIS
<BLANKLINE>
... 1
... 2
... 3
Name: A, dtype: int64
>>> ps.Series([pd.Timestamp('2016-01-01') for _ in range(3)]).unique()
0 2016-01-01
dtype: datetime64[ns]
>>> psser.name = ('x', 'a')
>>> psser.unique().sort_values() # doctest: +NORMALIZE_WHITESPACE, +ELLIPSIS
<BLANKLINE>
... 1
... 2
... 3
Name: (x, a), dtype: int64
"""
sdf = self._internal.spark_frame.select(self.spark.column).distinct()
internal = InternalFrame(
spark_frame=sdf,
index_spark_columns=None,
column_labels=[self._column_label],
data_spark_columns=[scol_for(sdf, self._internal.data_spark_column_names[0])],
data_fields=[self._internal.data_fields[0]],
column_label_names=self._internal.column_label_names,
)
return first_series(DataFrame(internal))
def sort_values(
self, ascending: bool = True, inplace: bool = False, na_position: str = "last"
) -> Optional["Series"]:
"""
Sort by the values.
Sort a Series in ascending or descending order by some criterion.
Parameters
----------
ascending : bool or list of bool, default True
Sort ascending vs. descending. Specify list for multiple sort
orders. If this is a list of bools, must match the length of
the by.
inplace : bool, default False
if True, perform operation in-place
na_position : {'first', 'last'}, default 'last'
`first` puts NaNs at the beginning, `last` puts NaNs at the end
Returns
-------
sorted_obj : Series ordered by values.
Examples
--------
>>> s = ps.Series([np.nan, 1, 3, 10, 5])
>>> s
0 NaN
1 1.0
2 3.0
3 10.0
4 5.0
dtype: float64
Sort values ascending order (default behaviour)
>>> s.sort_values(ascending=True)
1 1.0
2 3.0
4 5.0
3 10.0
0 NaN
dtype: float64
Sort values descending order
>>> s.sort_values(ascending=False)
3 10.0
4 5.0
2 3.0
1 1.0
0 NaN
dtype: float64
Sort values inplace
>>> s.sort_values(ascending=False, inplace=True)
>>> s
3 10.0
4 5.0
2 3.0
1 1.0
0 NaN
dtype: float64
Sort values putting NAs first
>>> s.sort_values(na_position='first')
0 NaN
1 1.0
2 3.0
4 5.0
3 10.0
dtype: float64
Sort a series of strings
>>> s = ps.Series(['z', 'b', 'd', 'a', 'c'])
>>> s
0 z
1 b
2 d
3 a
4 c
dtype: object
>>> s.sort_values()
3 a
1 b
4 c
2 d
0 z
dtype: object
"""
inplace = validate_bool_kwarg(inplace, "inplace")
psdf = self._psdf[[self.name]]._sort(
by=[self.spark.column], ascending=ascending, na_position=na_position
)
if inplace:
self._update_anchor(psdf)
return None
else:
return first_series(psdf)
def sort_index(
self,
axis: Axis = 0,
level: Optional[Union[int, List[int]]] = None,
ascending: bool = True,
inplace: bool = False,
kind: str = None,
na_position: str = "last",
) -> Optional["Series"]:
"""
Sort object by labels (along an axis)
Parameters
----------
axis : {0 or 'index'}
Axis to direct sorting. Currently, only axis = 0 is supported.
level : int or level name or list of ints or list of level names
if not None, sort on values in specified index level(s)
ascending : boolean, default True
Sort ascending vs. descending
inplace : bool, default False
if True, perform operation in-place
kind : str, default None
pandas-on-Spark does not allow specifying the sorting algorithm at the moment,
default None
na_position : {'first', 'last'}, default 'last'
first puts NaNs at the beginning, last puts NaNs at the end. Not implemented for
MultiIndex.
Returns
-------
sorted_obj : Series
Examples
--------
>>> df = ps.Series([2, 1, np.nan], index=['b', 'a', np.nan])
>>> df.sort_index()
a 1.0
b 2.0
NaN NaN
dtype: float64
>>> df.sort_index(ascending=False)
b 2.0
a 1.0
NaN NaN
dtype: float64
>>> df.sort_index(na_position='first')
NaN NaN
a 1.0
b 2.0
dtype: float64
>>> df.sort_index(inplace=True)
>>> df
a 1.0
b 2.0
NaN NaN
dtype: float64
>>> df = ps.Series(range(4), index=[['b', 'b', 'a', 'a'], [1, 0, 1, 0]], name='0')
>>> df.sort_index()
a 0 3
1 2
b 0 1
1 0
Name: 0, dtype: int64
>>> df.sort_index(level=1) # doctest: +SKIP
a 0 3
b 0 1
a 1 2
b 1 0
Name: 0, dtype: int64
>>> df.sort_index(level=[1, 0])
a 0 3
b 0 1
a 1 2
b 1 0
Name: 0, dtype: int64
"""
inplace = validate_bool_kwarg(inplace, "inplace")
psdf = self._psdf[[self.name]].sort_index(
axis=axis, level=level, ascending=ascending, kind=kind, na_position=na_position
)
if inplace:
self._update_anchor(psdf)
return None
else:
return first_series(psdf)
def swaplevel(
self, i: Union[int, Name] = -2, j: Union[int, Name] = -1, copy: bool = True
) -> "Series":
"""
Swap levels i and j in a MultiIndex.
Default is to swap the two innermost levels of the index.
Parameters
----------
i, j : int, str
Level of the indices to be swapped. Can pass level name as string.
copy : bool, default True
Whether to copy underlying data. Must be True.
Returns
-------
Series
Series with levels swapped in MultiIndex.
Examples
--------
>>> midx = pd.MultiIndex.from_arrays([['a', 'b'], [1, 2]], names = ['word', 'number'])
>>> midx # doctest: +SKIP
MultiIndex([('a', 1),
('b', 2)],
names=['word', 'number'])
>>> psser = ps.Series(['x', 'y'], index=midx)
>>> psser
word number
a 1 x
b 2 y
dtype: object
>>> psser.swaplevel()
number word
1 a x
2 b y
dtype: object
>>> psser.swaplevel(0, 1)
number word
1 a x
2 b y
dtype: object
>>> psser.swaplevel('number', 'word')
number word
1 a x
2 b y
dtype: object
"""
assert copy is True
return first_series(self.to_frame().swaplevel(i, j, axis=0)).rename(self.name)
def swapaxes(self, i: Axis, j: Axis, copy: bool = True) -> "Series":
"""
Interchange axes and swap values axes appropriately.
Parameters
----------
i: {0 or 'index', 1 or 'columns'}. The axis to swap.
j: {0 or 'index', 1 or 'columns'}. The axis to swap.
copy : bool, default True.
Returns
-------
Series
Examples
--------
>>> psser = ps.Series([1, 2, 3], index=["x", "y", "z"])
>>> psser
x 1
y 2
z 3
dtype: int64
>>>
>>> psser.swapaxes(0, 0)
x 1
y 2
z 3
dtype: int64
"""
assert copy is True
i = validate_axis(i)
j = validate_axis(j)
if not i == j == 0:
raise ValueError("Axis must be 0 for Series")
return self.copy()
def add_prefix(self, prefix: str) -> "Series":
"""
Prefix labels with string `prefix`.
For Series, the row labels are prefixed.
For DataFrame, the column labels are prefixed.
Parameters
----------
prefix : str
The string to add before each label.
Returns
-------
Series
New Series with updated labels.
See Also
--------
Series.add_suffix: Suffix column labels with string `suffix`.
DataFrame.add_suffix: Suffix column labels with string `suffix`.
DataFrame.add_prefix: Prefix column labels with string `prefix`.
Examples
--------
>>> s = ps.Series([1, 2, 3, 4])
>>> s
0 1
1 2
2 3
3 4
dtype: int64
>>> s.add_prefix('item_')
item_0 1
item_1 2
item_2 3
item_3 4
dtype: int64
"""
assert isinstance(prefix, str)
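# Prepend the prefix to every index level by concatenating a string literal onto each
# index Spark column; data columns are carried over unchanged, and the index fields are
# reset so their types are re-inferred after the concatenation.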
internal = self._internal.resolved_copy
sdf = internal.spark_frame.select(
[
F.concat(SF.lit(prefix), index_spark_column).alias(index_spark_column_name)
for index_spark_column, index_spark_column_name in zip(
internal.index_spark_columns, internal.index_spark_column_names
)
]
+ internal.data_spark_columns
)
return first_series(
DataFrame(internal.with_new_sdf(sdf, index_fields=([None] * internal.index_level)))
)
def add_suffix(self, suffix: str) -> "Series":
"""
Suffix labels with string suffix.
For Series, the row labels are suffixed.
For DataFrame, the column labels are suffixed.
Parameters
----------
suffix : str
The string to add after each label.
Returns
-------
Series
New Series with updated labels.
See Also
--------
Series.add_prefix: Prefix row labels with string `prefix`.
DataFrame.add_prefix: Prefix column labels with string `prefix`.
DataFrame.add_suffix: Suffix column labels with string `suffix`.
Examples
--------
>>> s = ps.Series([1, 2, 3, 4])
>>> s
0 1
1 2
2 3
3 4
dtype: int64
>>> s.add_suffix('_item')
0_item 1
1_item 2
2_item 3
3_item 4
dtype: int64
"""
assert isinstance(suffix, str)
internal = self._internal.resolved_copy
sdf = internal.spark_frame.select(
[
F.concat(index_spark_column, SF.lit(suffix)).alias(index_spark_column_name)
for index_spark_column, index_spark_column_name in zip(
internal.index_spark_columns, internal.index_spark_column_names
)
]
+ internal.data_spark_columns
)
return first_series(
DataFrame(internal.with_new_sdf(sdf, index_fields=([None] * internal.index_level)))
)
def corr(self, other: "Series", method: str = "pearson") -> float:
"""
Compute correlation with `other` Series, excluding missing values.
Parameters
----------
other : Series
method : {'pearson', 'spearman'}
* pearson : standard correlation coefficient
* spearman : Spearman rank correlation
Returns
-------
correlation : float
Examples
--------
>>> df = ps.DataFrame({'s1': [.2, .0, .6, .2],
... 's2': [.3, .6, .0, .1]})
>>> s1 = df.s1
>>> s2 = df.s2
>>> s1.corr(s2, method='pearson') # doctest: +ELLIPSIS
-0.851064...
>>> s1.corr(s2, method='spearman') # doctest: +ELLIPSIS
-0.948683...
Notes
-----
There are behavior differences between pandas-on-Spark and pandas.
* the `method` argument only accepts 'pearson', 'spearman'
* the data should not contain NaNs. pandas-on-Spark will return an error.
* pandas-on-Spark doesn't support the following argument(s).
* `min_periods` argument is not supported
"""
# This implementation is suboptimal because it computes more than necessary,
# but it should be a start
columns = ["__corr_arg1__", "__corr_arg2__"]
psdf = self._psdf.assign(__corr_arg1__=self, __corr_arg2__=other)[columns]
psdf.columns = columns
c = corr(psdf, method=method)
return c.loc[tuple(columns)]
def nsmallest(self, n: int = 5) -> "Series":
"""
Return the smallest `n` elements.
Parameters
----------
n : int, default 5
Return this many ascending sorted values.
Returns
-------
Series
The `n` smallest values in the Series, sorted in increasing order.
See Also
--------
Series.nlargest: Get the `n` largest elements.
Series.sort_values: Sort Series by values.
Series.head: Return the first `n` rows.
Notes
-----
Faster than ``.sort_values().head(n)`` for small `n` relative to
the size of the ``Series`` object.
In pandas-on-Spark, thanks to Spark's lazy execution and query optimizer,
the two would have the same performance.
Examples
--------
>>> data = [1, 2, 3, 4, np.nan ,6, 7, 8]
>>> s = ps.Series(data)
>>> s
0 1.0
1 2.0
2 3.0
3 4.0
4 NaN
5 6.0
6 7.0
7 8.0
dtype: float64
The `n` smallest elements where ``n=5`` by default.
>>> s.nsmallest()
0 1.0
1 2.0
2 3.0
3 4.0
5 6.0
dtype: float64
>>> s.nsmallest(3)
0 1.0
1 2.0
2 3.0
dtype: float64
"""
return self.sort_values(ascending=True).head(n)
def nlargest(self, n: int = 5) -> "Series":
"""
Return the largest `n` elements.
Parameters
----------
n : int, default 5
Returns
-------
Series
The `n` largest values in the Series, sorted in decreasing order.
See Also
--------
Series.nsmallest: Get the `n` smallest elements.
Series.sort_values: Sort Series by values.
Series.head: Return the first `n` rows.
Notes
-----
Faster than ``.sort_values(ascending=False).head(n)`` for small `n`
relative to the size of the ``Series`` object.
In pandas-on-Spark, thanks to Spark's lazy execution and query optimizer,
the two would have the same performance.
Examples
--------
>>> data = [1, 2, 3, 4, np.nan ,6, 7, 8]
>>> s = ps.Series(data)
>>> s
0 1.0
1 2.0
2 3.0
3 4.0
4 NaN
5 6.0
6 7.0
7 8.0
dtype: float64
The `n` largest elements where ``n=5`` by default.
>>> s.nlargest()
7 8.0
6 7.0
5 6.0
3 4.0
2 3.0
dtype: float64
>>> s.nlargest(n=3)
7 8.0
6 7.0
5 6.0
dtype: float64
"""
return self.sort_values(ascending=False).head(n)
def append(
self, to_append: "Series", ignore_index: bool = False, verify_integrity: bool = False
) -> "Series":
"""
Concatenate two or more Series.
Parameters
----------
to_append : Series or list/tuple of Series
ignore_index : boolean, default False
If True, do not use the index labels.
verify_integrity : boolean, default False
If True, raise Exception on creating index with duplicates
Returns
-------
appended : Series
Examples
--------
>>> s1 = ps.Series([1, 2, 3])
>>> s2 = ps.Series([4, 5, 6])
>>> s3 = ps.Series([4, 5, 6], index=[3,4,5])
>>> s1.append(s2)
0 1
1 2
2 3
0 4
1 5
2 6
dtype: int64
>>> s1.append(s3)
0 1
1 2
2 3
3 4
4 5
5 6
dtype: int64
With ignore_index set to True:
>>> s1.append(s2, ignore_index=True)
0 1
1 2
2 3
3 4
4 5
5 6
dtype: int64
"""
return first_series(
self.to_frame().append(to_append.to_frame(), ignore_index, verify_integrity)
).rename(self.name)
def sample(
self,
n: Optional[int] = None,
frac: Optional[float] = None,
replace: bool = False,
random_state: Optional[int] = None,
) -> "Series":
return first_series(
self.to_frame().sample(n=n, frac=frac, replace=replace, random_state=random_state)
).rename(self.name)
sample.__doc__ = DataFrame.sample.__doc__
@no_type_check
def hist(self, bins=10, **kwds):
return self.plot.hist(bins, **kwds)
hist.__doc__ = PandasOnSparkPlotAccessor.hist.__doc__
def apply(self, func: Callable, args: Sequence[Any] = (), **kwds: Any) -> "Series":
"""
Invoke function on values of Series.
Can be a Python function that only works on the Series.
.. note:: this API executes the function once to infer the type which is
potentially expensive, for instance, when the dataset is created after
aggregations or sorting.
To avoid this, specify return type in ``func``, for instance, as below:
>>> def square(x) -> np.int32:
... return x ** 2
pandas-on-Spark uses return type hint and does not try to infer the type.
Parameters
----------
func : function
Python function to apply. Note that type hint for return type is required.
args : tuple
Positional arguments passed to func after the series value.
**kwds
Additional keyword arguments passed to func.
Returns
-------
Series
See Also
--------
Series.aggregate : Only perform aggregating type operations.
Series.transform : Only perform transforming type operations.
DataFrame.apply : The equivalent function for DataFrame.
Examples
--------
Create a Series with typical summer temperatures for each city.
>>> s = ps.Series([20, 21, 12],
... index=['London', 'New York', 'Helsinki'])
>>> s
London 20
New York 21
Helsinki 12
dtype: int64
Square the values by defining a function and passing it as an
argument to ``apply()``.
>>> def square(x) -> np.int64:
... return x ** 2
>>> s.apply(square)
London 400
New York 441
Helsinki 144
dtype: int64
Define a custom function that needs additional positional
arguments and pass these additional arguments using the
``args`` keyword
>>> def subtract_custom_value(x, custom_value) -> np.int64:
... return x - custom_value
>>> s.apply(subtract_custom_value, args=(5,))
London 15
New York 16
Helsinki 7
dtype: int64
Define a custom function that takes keyword arguments
and pass these arguments to ``apply``
>>> def add_custom_values(x, **kwargs) -> np.int64:
... for month in kwargs:
... x += kwargs[month]
... return x
>>> s.apply(add_custom_values, june=30, july=20, august=25)
London 95
New York 96
Helsinki 87
dtype: int64
Use a function from the Numpy library
>>> def numpy_log(col) -> np.float64:
... return np.log(col)
>>> s.apply(numpy_log)
London 2.995732
New York 3.044522
Helsinki 2.484907
dtype: float64
You can omit the type hint and let pandas-on-Spark infer its type.
>>> s.apply(np.log)
London 2.995732
New York 3.044522
Helsinki 2.484907
dtype: float64
"""
assert callable(func), "the first argument should be a callable function."
try:
spec = inspect.getfullargspec(func)
return_sig = spec.annotations.get("return", None)
should_infer_schema = return_sig is None
except TypeError:
# Falls back to schema inference if it fails to get signature.
should_infer_schema = True
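# Wrap `func` so it is applied to each pandas Series batch via transform_batch.
# Without a usable return-type hint the schema is inferred from a sampled execution
# (return_type=None); otherwise the hinted scalar type is validated and passed through.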
apply_each = wraps(func)(lambda s: s.apply(func, args=args, **kwds))
if should_infer_schema:
return self.pandas_on_spark._transform_batch(apply_each, None)
else:
sig_return = infer_return_type(func)
if not isinstance(sig_return, ScalarType):
raise ValueError(
"Expected the return type of this function to be of scalar type, "
"but found type {}".format(sig_return)
)
return_type = cast(ScalarType, sig_return)
return self.pandas_on_spark._transform_batch(apply_each, return_type)
# TODO: not all arguments are implemented comparing to pandas' for now.
def aggregate(self, func: Union[str, List[str]]) -> Union[Scalar, "Series"]:
"""Aggregate using one or more operations over the specified axis.
Parameters
----------
func : str or a list of str
function name(s) as string apply to series.
Returns
-------
scalar, Series
The return can be:
- scalar : when Series.agg is called with single function
- Series : when Series.agg is called with several functions
Notes
-----
`agg` is an alias for `aggregate`. Use the alias.
See Also
--------
Series.apply : Invoke function on a Series.
Series.transform : Only perform transforming type operations.
Series.groupby : Perform operations over groups.
DataFrame.aggregate : The equivalent function for DataFrame.
Examples
--------
>>> s = ps.Series([1, 2, 3, 4])
>>> s.agg('min')
1
>>> s.agg(['min', 'max']).sort_index()
max 4
min 1
dtype: int64
"""
if isinstance(func, list):
return first_series(self.to_frame().aggregate(func)).rename(self.name)
elif isinstance(func, str):
return getattr(self, func)()
else:
raise TypeError("func must be a string or list of strings")
agg = aggregate
def transpose(self, *args: Any, **kwargs: Any) -> "Series":
"""
Return the transpose, which is by definition self.
Examples
--------
It returns the same object as the transpose of the given series object, which is by
definition self.
>>> s = ps.Series([1, 2, 3])
>>> s
0 1
1 2
2 3
dtype: int64
>>> s.transpose()
0 1
1 2
2 3
dtype: int64
"""
return self.copy()
T = property(transpose)
def transform(
self, func: Union[Callable, List[Callable]], axis: Axis = 0, *args: Any, **kwargs: Any
) -> Union["Series", DataFrame]:
"""
Call ``func`` producing the same type as `self` with transformed values
and that has the same axis length as input.
.. note:: this API executes the function once to infer the type which is
potentially expensive, for instance, when the dataset is created after
aggregations or sorting.
To avoid this, specify return type in ``func``, for instance, as below:
>>> def square(x) -> np.int32:
... return x ** 2
pandas-on-Spark uses return type hint and does not try to infer the type.
Parameters
----------
func : function or list
A function or a list of functions to use for transforming the data.
axis : int, default 0 or 'index'
Can only be set to 0 at the moment.
*args
Positional arguments to pass to `func`.
**kwargs
Keyword arguments to pass to `func`.
Returns
-------
An instance of the same type as `self` that must have the same length as the input.
See Also
--------
Series.aggregate : Only perform aggregating type operations.
Series.apply : Invoke function on Series.
DataFrame.transform : The equivalent function for DataFrame.
Examples
--------
>>> s = ps.Series(range(3))
>>> s
0 0
1 1
2 2
dtype: int64
>>> def sqrt(x) -> float:
... return np.sqrt(x)
>>> s.transform(sqrt)
0 0.000000
1 1.000000
2 1.414214
dtype: float64
Even though the resulting instance must have the same length as the
input, it is possible to provide several input functions:
>>> def exp(x) -> float:
... return np.exp(x)
>>> s.transform([sqrt, exp])
sqrt exp
0 0.000000 1.000000
1 1.000000 2.718282
2 1.414214 7.389056
You can omit the type hint and let pandas-on-Spark infer its type.
>>> s.transform([np.sqrt, np.exp])
sqrt exp
0 0.000000 1.000000
1 1.000000 2.718282
2 1.414214 7.389056
"""
axis = validate_axis(axis)
if axis != 0:
raise NotImplementedError('axis should be either 0 or "index" currently.')
if isinstance(func, list):
applied = []
for f in func:
applied.append(self.apply(f, args=args, **kwargs).rename(f.__name__))
internal = self._internal.with_new_columns(applied)
return DataFrame(internal)
else:
return self.apply(func, args=args, **kwargs)
def round(self, decimals: int = 0) -> "Series":
"""
Round each value in a Series to the given number of decimals.
Parameters
----------
decimals : int
Number of decimal places to round to (default: 0).
If decimals is negative, it specifies the number of
positions to the left of the decimal point.
Returns
-------
Series object
See Also
--------
DataFrame.round
Examples
--------
>>> df = ps.Series([0.028208, 0.038683, 0.877076], name='x')
>>> df
0 0.028208
1 0.038683
2 0.877076
Name: x, dtype: float64
>>> df.round(2)
0 0.03
1 0.04
2 0.88
Name: x, dtype: float64
"""
if not isinstance(decimals, int):
raise TypeError("decimals must be an integer")
scol = F.round(self.spark.column, decimals)
return self._with_new_scol(scol) # TODO: dtype?
# TODO: add 'interpolation' parameter.
def quantile(
self, q: Union[float, Iterable[float]] = 0.5, accuracy: int = 10000
) -> Union[Scalar, "Series"]:
"""
Return value at the given quantile.
.. note:: Unlike pandas, the quantile in pandas-on-Spark is an approximated quantile
based upon approximate percentile computation because computing quantile across
a large dataset is extremely expensive.
Parameters
----------
q : float or array-like, default 0.5 (50% quantile)
0 <= q <= 1, the quantile(s) to compute.
accuracy : int, optional
Default accuracy of approximation. Larger value means better accuracy.
The relative error can be deduced by 1.0 / accuracy.
Returns
-------
float or Series
If the current object is a Series and ``q`` is an array, a Series will be
returned where the index is ``q`` and the values are the quantiles, otherwise
a float will be returned.
Examples
--------
>>> s = ps.Series([1, 2, 3, 4, 5])
>>> s.quantile(.5)
3.0
>>> (s + 1).quantile(.5)
4.0
>>> s.quantile([.25, .5, .75])
0.25 2.0
0.50 3.0
0.75 4.0
dtype: float64
>>> (s + 1).quantile([.25, .5, .75])
0.25 3.0
0.50 4.0
0.75 5.0
dtype: float64
"""
if isinstance(q, Iterable):
return first_series(
self.to_frame().quantile(q=q, axis=0, numeric_only=False, accuracy=accuracy)
).rename(self.name)
else:
if not isinstance(accuracy, int):
raise TypeError(
"accuracy must be an integer; however, got [%s]" % type(accuracy).__name__
)
if not isinstance(q, float):
raise TypeError(
"q must be a float or an array of floats; however, [%s] found." % type(q)
)
q_float = cast(float, q)
if q_float < 0.0 or q_float > 1.0:
raise ValueError("percentiles should all be in the interval [0, 1].")
def quantile(spark_column: Column, spark_type: DataType) -> Column:
if isinstance(spark_type, (BooleanType, NumericType)):
return F.percentile_approx(spark_column.cast(DoubleType()), q_float, accuracy)
else:
raise TypeError(
"Could not convert {} ({}) to numeric".format(
spark_type_to_pandas_dtype(spark_type), spark_type.simpleString()
)
)
return self._reduce_for_stat_function(quantile, name="quantile")
# TODO: add axis, numeric_only, pct, na_option parameter
def rank(self, method: str = "average", ascending: bool = True) -> "Series":
"""
Compute numerical data ranks (1 through n) along axis. Equal values are
assigned a rank that is the average of the ranks of those values.
.. note:: the current implementation of rank uses Spark's Window without
specifying partition specification. This moves all data into a
single partition on a single machine and could cause serious
performance degradation. Avoid this method with very large datasets.
Parameters
----------
method : {'average', 'min', 'max', 'first', 'dense'}
* average: average rank of group
* min: lowest rank in group
* max: highest rank in group
* first: ranks assigned in order they appear in the array
* dense: like 'min', but rank always increases by 1 between groups
ascending : boolean, default True
False for ranks by high (1) to low (N)
Returns
-------
ranks : same type as caller
Examples
--------
>>> s = ps.Series([1, 2, 2, 3], name='A')
>>> s
0 1
1 2
2 2
3 3
Name: A, dtype: int64
>>> s.rank()
0 1.0
1 2.5
2 2.5
3 4.0
Name: A, dtype: float64
If method is set to 'min', it uses the lowest rank in the group.
>>> s.rank(method='min')
0 1.0
1 2.0
2 2.0
3 4.0
Name: A, dtype: float64
If method is set to 'max', it uses the highest rank in the group.
>>> s.rank(method='max')
0 1.0
1 3.0
2 3.0
3 4.0
Name: A, dtype: float64
If method is set to 'first', ranks are assigned in the order the values appear, without grouping ties.
>>> s.rank(method='first')
0 1.0
1 2.0
2 3.0
3 4.0
Name: A, dtype: float64
If method is set to 'dense', it leaves no gaps between ranks.
>>> s.rank(method='dense')
0 1.0
1 2.0
2 2.0
3 3.0
Name: A, dtype: float64
"""
return self._rank(method, ascending).spark.analyzed
def _rank(
self,
method: str = "average",
ascending: bool = True,
*,
part_cols: Sequence["ColumnOrName"] = ()
) -> "Series":
if method not in ["average", "min", "max", "first", "dense"]:
msg = "method must be one of 'average', 'min', 'max', 'first', 'dense'"
raise ValueError(msg)
if self._internal.index_level > 1:
raise ValueError("rank do not support index now")
if ascending:
asc_func = lambda scol: scol.asc()
else:
asc_func = lambda scol: scol.desc()
if method == "first":
window = (
Window.orderBy(
asc_func(self.spark.column),
asc_func(F.col(NATURAL_ORDER_COLUMN_NAME)),
)
.partitionBy(*part_cols)
.rowsBetween(Window.unboundedPreceding, Window.currentRow)
)
scol = F.row_number().over(window)
elif method == "dense":
window = (
Window.orderBy(asc_func(self.spark.column))
.partitionBy(*part_cols)
.rowsBetween(Window.unboundedPreceding, Window.currentRow)
)
scol = F.dense_rank().over(window)
else:
if method == "average":
stat_func = F.mean
elif method == "min":
stat_func = F.min
elif method == "max":
stat_func = F.max
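# Tie handling for 'average', 'min' and 'max': window1 assigns sequential row numbers
# in value order, window2 spans all rows sharing the same value, and applying
# stat_func (mean/min/max) to those row numbers over window2 yields the rank of each tie group.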
window1 = (
Window.orderBy(asc_func(self.spark.column))
.partitionBy(*part_cols)
.rowsBetween(Window.unboundedPreceding, Window.currentRow)
)
window2 = Window.partitionBy([self.spark.column] + list(part_cols)).rowsBetween(
Window.unboundedPreceding, Window.unboundedFollowing
)
scol = stat_func(F.row_number().over(window1)).over(window2)
psser = self._with_new_scol(scol)
return psser.astype(np.float64)
def filter(
self,
items: Optional[Sequence[Any]] = None,
like: Optional[str] = None,
regex: Optional[str] = None,
axis: Optional[Axis] = None,
) -> "Series":
axis = validate_axis(axis)
if axis == 1:
raise ValueError("Series does not support columns axis.")
return first_series(
self.to_frame().filter(items=items, like=like, regex=regex, axis=axis)
).rename(self.name)
filter.__doc__ = DataFrame.filter.__doc__
def describe(self, percentiles: Optional[List[float]] = None) -> "Series":
return first_series(self.to_frame().describe(percentiles)).rename(self.name)
describe.__doc__ = DataFrame.describe.__doc__
def diff(self, periods: int = 1) -> "Series":
"""
First discrete difference of element.
Calculates the difference of a Series element compared with another element in the
Series (default is the element in the previous row).
.. note:: the current implementation of diff uses Spark's Window without
specifying partition specification. This moves all data into a
single partition on a single machine and could cause serious
performance degradation. Avoid this method with very large datasets.
Parameters
----------
periods : int, default 1
Periods to shift for calculating difference, accepts negative values.
Returns
-------
diffed : Series
Examples
--------
>>> df = ps.DataFrame({'a': [1, 2, 3, 4, 5, 6],
... 'b': [1, 1, 2, 3, 5, 8],
... 'c': [1, 4, 9, 16, 25, 36]}, columns=['a', 'b', 'c'])
>>> df
a b c
0 1 1 1
1 2 1 4
2 3 2 9
3 4 3 16
4 5 5 25
5 6 8 36
>>> df.b.diff()
0 NaN
1 0.0
2 1.0
3 1.0
4 2.0
5 3.0
Name: b, dtype: float64
Difference with 3rd previous value
>>> df.c.diff(periods=3)
0 NaN
1 NaN
2 NaN
3 15.0
4 21.0
5 27.0
Name: c, dtype: float64
Difference with following value
>>> df.c.diff(periods=-1)
0 -3.0
1 -5.0
2 -7.0
3 -9.0
4 -11.0
5 NaN
Name: c, dtype: float64
"""
return self._diff(periods).spark.analyzed
def _diff(self, periods: int, *, part_cols: Sequence["ColumnOrName"] = ()) -> "Series":
if not isinstance(periods, int):
raise TypeError("periods should be an int; however, got [%s]" % type(periods).__name__)
window = (
Window.partitionBy(*part_cols)
.orderBy(NATURAL_ORDER_COLUMN_NAME)
.rowsBetween(-periods, -periods)
)
scol = self.spark.column - F.lag(self.spark.column, periods).over(window)
return self._with_new_scol(scol, field=self._internal.data_fields[0].copy(nullable=True))
def idxmax(self, skipna: bool = True) -> Union[Tuple, Any]:
"""
Return the row label of the maximum value.
If multiple values equal the maximum, the first row label with that
value is returned.
Parameters
----------
skipna : bool, default True
Exclude NA/null values. If the entire Series is NA, the result
will be NA.
Returns
-------
Index
Label of the maximum value.
Raises
------
ValueError
If the Series is empty.
See Also
--------
Series.idxmin : Return index *label* of the first occurrence
of minimum of values.
Examples
--------
>>> s = ps.Series(data=[1, None, 4, 3, 5],
... index=['A', 'B', 'C', 'D', 'E'])
>>> s
A 1.0
B NaN
C 4.0
D 3.0
E 5.0
dtype: float64
>>> s.idxmax()
'E'
If `skipna` is False and there is an NA value in the data,
the function returns ``nan``.
>>> s.idxmax(skipna=False)
nan
In case of multi-index, you get a tuple:
>>> index = pd.MultiIndex.from_arrays([
... ['a', 'a', 'b', 'b'], ['c', 'd', 'e', 'f']], names=('first', 'second'))
>>> s = ps.Series(data=[1, None, 4, 5], index=index)
>>> s
first second
a c 1.0
d NaN
b e 4.0
f 5.0
dtype: float64
>>> s.idxmax()
('b', 'f')
If multiple values equal the maximum, the first row label with that
value is returned.
>>> s = ps.Series([1, 100, 1, 100, 1, 100], index=[10, 3, 5, 2, 1, 8])
>>> s
10 1
3 100
5 1
2 100
1 1
8 100
dtype: int64
>>> s.idxmax()
3
"""
sdf = self._internal.spark_frame
scol = self.spark.column
index_scols = self._internal.index_spark_columns
# desc_nulls_(last|first) is used via Py4J directly because
# it's not supported in Spark 2.3.
if skipna:
sdf = sdf.orderBy(Column(scol._jc.desc_nulls_last()), NATURAL_ORDER_COLUMN_NAME)
else:
sdf = sdf.orderBy(Column(scol._jc.desc_nulls_first()), NATURAL_ORDER_COLUMN_NAME)
results = sdf.select([scol] + index_scols).take(1)
if len(results) == 0:
raise ValueError("attempt to get idxmin of an empty sequence")
if results[0][0] is None:
# This will only happen when skipna is False because we will
# place nulls first.
return np.nan
values = list(results[0][1:])
if len(values) == 1:
return values[0]
else:
return tuple(values)
def idxmin(self, skipna: bool = True) -> Union[Tuple, Any]:
"""
Return the row label of the minimum value.
If multiple values equal the minimum, the first row label with that
value is returned.
Parameters
----------
skipna : bool, default True
Exclude NA/null values. If the entire Series is NA, the result
will be NA.
Returns
-------
Index
Label of the minimum value.
Raises
------
ValueError
If the Series is empty.
See Also
--------
Series.idxmax : Return index *label* of the first occurrence
of maximum of values.
Notes
-----
This method is the Series version of ``ndarray.argmin``. This method
returns the label of the minimum, while ``ndarray.argmin`` returns
the position. To get the position, use ``series.values.argmin()``.
Examples
--------
>>> s = ps.Series(data=[1, None, 4, 0],
... index=['A', 'B', 'C', 'D'])
>>> s
A 1.0
B NaN
C 4.0
D 0.0
dtype: float64
>>> s.idxmin()
'D'
If `skipna` is False and there is an NA value in the data,
the function returns ``nan``.
>>> s.idxmin(skipna=False)
nan
In case of multi-index, you get a tuple:
>>> index = pd.MultiIndex.from_arrays([
... ['a', 'a', 'b', 'b'], ['c', 'd', 'e', 'f']], names=('first', 'second'))
>>> s = ps.Series(data=[1, None, 4, 0], index=index)
>>> s
first second
a c 1.0
d NaN
b e 4.0
f 0.0
dtype: float64
>>> s.idxmin()
('b', 'f')
If multiple values equal the minimum, the first row label with that
value is returned.
>>> s = ps.Series([1, 100, 1, 100, 1, 100], index=[10, 3, 5, 2, 1, 8])
>>> s
10 1
3 100
5 1
2 100
1 1
8 100
dtype: int64
>>> s.idxmin()
10
"""
sdf = self._internal.spark_frame
scol = self.spark.column
index_scols = self._internal.index_spark_columns
# asc_nulls_(last|first) is used via Py4J directly because
# it's not supported in Spark 2.3.
if skipna:
sdf = sdf.orderBy(Column(scol._jc.asc_nulls_last()), NATURAL_ORDER_COLUMN_NAME)
else:
sdf = sdf.orderBy(Column(scol._jc.asc_nulls_first()), NATURAL_ORDER_COLUMN_NAME)
results = sdf.select([scol] + index_scols).take(1)
if len(results) == 0:
raise ValueError("attempt to get idxmin of an empty sequence")
if results[0][0] is None:
# This will only happen when skipna is False because we will
# place nulls first.
return np.nan
values = list(results[0][1:])
if len(values) == 1:
return values[0]
else:
return tuple(values)
def pop(self, item: Name) -> Union["Series", Scalar]:
"""
Return item and drop from series.
Parameters
----------
item : label
Label of index to be popped.
Returns
-------
Value that is popped from series.
Examples
--------
>>> s = ps.Series(data=np.arange(3), index=['A', 'B', 'C'])
>>> s
A 0
B 1
C 2
dtype: int64
>>> s.pop('A')
0
>>> s
B 1
C 2
dtype: int64
>>> s = ps.Series(data=np.arange(3), index=['A', 'A', 'C'])
>>> s
A 0
A 1
C 2
dtype: int64
>>> s.pop('A')
A 0
A 1
dtype: int64
>>> s
C 2
dtype: int64
MultiIndex is also supported
>>> midx = pd.MultiIndex([['lama', 'cow', 'falcon'],
... ['speed', 'weight', 'length']],
... [[0, 0, 0, 1, 1, 1, 2, 2, 2],
... [0, 1, 2, 0, 1, 2, 0, 1, 2]])
>>> s = ps.Series([45, 200, 1.2, 30, 250, 1.5, 320, 1, 0.3],
... index=midx)
>>> s
lama speed 45.0
weight 200.0
length 1.2
cow speed 30.0
weight 250.0
length 1.5
falcon speed 320.0
weight 1.0
length 0.3
dtype: float64
>>> s.pop('lama')
speed 45.0
weight 200.0
length 1.2
dtype: float64
>>> s
cow speed 30.0
weight 250.0
length 1.5
falcon speed 320.0
weight 1.0
length 0.3
dtype: float64
MultiIndex with several levels is also supported.
>>> midx = pd.MultiIndex([['a', 'b', 'c'],
... ['lama', 'cow', 'falcon'],
... ['speed', 'weight', 'length']],
... [[0, 0, 0, 0, 0, 0, 1, 1, 1],
... [0, 0, 0, 1, 1, 1, 2, 2, 2],
... [0, 1, 2, 0, 1, 2, 0, 0, 2]]
... )
>>> s = ps.Series([45, 200, 1.2, 30, 250, 1.5, 320, 1, 0.3],
... index=midx)
>>> s
a lama speed 45.0
weight 200.0
length 1.2
cow speed 30.0
weight 250.0
length 1.5
b falcon speed 320.0
speed 1.0
length 0.3
dtype: float64
>>> s.pop(('a', 'lama'))
speed 45.0
weight 200.0
length 1.2
dtype: float64
>>> s
a cow speed 30.0
weight 250.0
length 1.5
b falcon speed 320.0
speed 1.0
length 0.3
dtype: float64
>>> s.pop(('b', 'falcon', 'speed'))
(b, falcon, speed) 320.0
(b, falcon, speed) 1.0
dtype: float64
"""
if not is_name_like_value(item):
raise TypeError("'key' should be string or tuple that contains strings")
if not is_name_like_tuple(item):
item = (item,)
if self._internal.index_level < len(item):
raise KeyError(
"Key length ({}) exceeds index depth ({})".format(
len(item), self._internal.index_level
)
)
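# Build the popped result first: keep the remaining index levels plus the data column,
# filtering to rows whose leading index levels equal `item`; then drop those rows from
# `self` in place before returning the selected subset (or a scalar if only one row remains).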
internal = self._internal
scols = internal.index_spark_columns[len(item) :] + [self.spark.column]
rows = [internal.spark_columns[level] == index for level, index in enumerate(item)]
sdf = internal.spark_frame.filter(reduce(lambda x, y: x & y, rows)).select(scols)
psdf = self._drop(item)
self._update_anchor(psdf)
if self._internal.index_level == len(item):
# if only a single row remains, return the scalar value instead of a frame
pdf = sdf.limit(2).toPandas()
length = len(pdf)
if length == 1:
return pdf[internal.data_spark_column_names[0]].iloc[0]
item_string = name_like_string(item)
sdf = sdf.withColumn(SPARK_DEFAULT_INDEX_NAME, SF.lit(str(item_string)))
internal = InternalFrame(
spark_frame=sdf,
index_spark_columns=[scol_for(sdf, SPARK_DEFAULT_INDEX_NAME)],
column_labels=[self._column_label],
data_fields=[self._internal.data_fields[0]],
)
return first_series(DataFrame(internal))
else:
internal = internal.copy(
spark_frame=sdf,
index_spark_columns=[
scol_for(sdf, col) for col in internal.index_spark_column_names[len(item) :]
],
index_fields=internal.index_fields[len(item) :],
index_names=self._internal.index_names[len(item) :],
data_spark_columns=[scol_for(sdf, internal.data_spark_column_names[0])],
)
return first_series(DataFrame(internal))
def copy(self, deep: bool = True) -> "Series":
"""
Make a copy of this object's indices and data.
Parameters
----------
deep : bool, default True
this parameter is not supported; it is just a dummy parameter to match pandas.
Returns
-------
copy : Series
Examples
--------
>>> s = ps.Series([1, 2], index=["a", "b"])
>>> s
a 1
b 2
dtype: int64
>>> s_copy = s.copy()
>>> s_copy
a 1
b 2
dtype: int64
"""
return self._psdf.copy(deep=deep)._psser_for(self._column_label)
def mode(self, dropna: bool = True) -> "Series":
"""
Return the mode(s) of the dataset.
Always returns Series even if only one value is returned.
Parameters
----------
dropna : bool, default True
Don't consider counts of NaN/NaT.
Returns
-------
Series
Modes of the Series.
Examples
--------
>>> s = ps.Series([0, 0, 1, 1, 1, np.nan, np.nan, np.nan])
>>> s
0 0.0
1 0.0
2 1.0
3 1.0
4 1.0
5 NaN
6 NaN
7 NaN
dtype: float64
>>> s.mode()
0 1.0
dtype: float64
If several values tie for the highest count, all of them are shown
>>> s = ps.Series([0, 0, 1, 1, 1, 2, 2, 2, 3, 3, 3,
... np.nan, np.nan, np.nan])
>>> s
0 0.0
1 0.0
2 1.0
3 1.0
4 1.0
5 2.0
6 2.0
7 2.0
8 3.0
9 3.0
10 3.0
11 NaN
12 NaN
13 NaN
dtype: float64
>>> s.mode().sort_values() # doctest: +NORMALIZE_WHITESPACE, +ELLIPSIS
<BLANKLINE>
... 1.0
... 2.0
... 3.0
dtype: float64
With 'dropna' set to 'False', we can also see NaN in the result
>>> s.mode(False).sort_values() # doctest: +NORMALIZE_WHITESPACE, +ELLIPSIS
<BLANKLINE>
... 1.0
... 2.0
... 3.0
... NaN
dtype: float64
"""
ser_count = self.value_counts(dropna=dropna, sort=False)
sdf_count = ser_count._internal.spark_frame
most_value = ser_count.max()
sdf_most_value = sdf_count.filter("count == {}".format(most_value))
sdf = sdf_most_value.select(
F.col(SPARK_DEFAULT_INDEX_NAME).alias(SPARK_DEFAULT_SERIES_NAME)
)
internal = InternalFrame(spark_frame=sdf, index_spark_columns=None, column_labels=[None])
return first_series(DataFrame(internal))
def keys(self) -> "ps.Index":
"""
Return alias for index.
Returns
-------
Index
Index of the Series.
Examples
--------
>>> midx = pd.MultiIndex([['lama', 'cow', 'falcon'],
... ['speed', 'weight', 'length']],
... [[0, 0, 0, 1, 1, 1, 2, 2, 2],
... [0, 1, 2, 0, 1, 2, 0, 1, 2]])
>>> psser = ps.Series([45, 200, 1.2, 30, 250, 1.5, 320, 1, 0.3], index=midx)
>>> psser.keys() # doctest: +SKIP
MultiIndex([( 'lama', 'speed'),
( 'lama', 'weight'),
( 'lama', 'length'),
( 'cow', 'speed'),
( 'cow', 'weight'),
( 'cow', 'length'),
('falcon', 'speed'),
('falcon', 'weight'),
('falcon', 'length')],
)
"""
return self.index
# TODO: 'regex', 'method' parameter
def replace(
self,
to_replace: Optional[Union[Any, List, Tuple, Dict]] = None,
value: Optional[Union[List, Tuple]] = None,
regex: bool = False,
) -> "Series":
"""
Replace values given in to_replace with value.
Values of the Series are replaced with other values dynamically.
Parameters
----------
to_replace : str, list, tuple, dict, Series, int, float, or None
How to find the values that will be replaced.
* numeric, str:
- numeric: numeric values equal to to_replace will be replaced with value
- str: string exactly matching to_replace will be replaced with value
* list of str or numeric:
- if to_replace and value are both lists or tuples, they must be the same length.
- str and numeric rules apply as above.
* dict:
- Dicts can be used to specify different replacement values for different
existing values.
For example, {'a': 'b', 'y': 'z'} replaces the value ‘a’ with ‘b’ and ‘y’
with ‘z’. To use a dict in this way the value parameter should be None.
- For a DataFrame a dict can specify that different values should be replaced
in different columns. For example, {'a': 1, 'b': 'z'} looks for the value 1
in column ‘a’ and the value ‘z’ in column ‘b’ and replaces these values with
whatever is specified in value.
The value parameter should not be None in this case.
You can treat this as a special case of passing two lists except that you are
specifying the column to search in.
See the examples section for examples of each of these.
value : scalar, dict, list, tuple, str, default None
Value to replace any values matching to_replace with.
For a DataFrame a dict of values can be used to specify which value to use
for each column (columns not in the dict will not be filled).
Regular expressions, strings and lists or dicts of such objects are also allowed.
Returns
-------
Series
Object after replacement.
Examples
--------
Scalar `to_replace` and `value`
>>> s = ps.Series([0, 1, 2, 3, 4])
>>> s
0 0
1 1
2 2
3 3
4 4
dtype: int64
>>> s.replace(0, 5)
0 5
1 1
2 2
3 3
4 4
dtype: int64
List-like `to_replace`
>>> s.replace([0, 4], 5000)
0 5000
1 1
2 2
3 3
4 5000
dtype: int64
>>> s.replace([1, 2, 3], [10, 20, 30])
0 0
1 10
2 20
3 30
4 4
dtype: int64
Dict-like `to_replace`
>>> s.replace({1: 1000, 2: 2000, 3: 3000, 4: 4000})
0 0
1 1000
2 2000
3 3000
4 4000
dtype: int64
MultiIndex is also supported
>>> midx = pd.MultiIndex([['lama', 'cow', 'falcon'],
... ['speed', 'weight', 'length']],
... [[0, 0, 0, 1, 1, 1, 2, 2, 2],
... [0, 1, 2, 0, 1, 2, 0, 1, 2]])
>>> s = ps.Series([45, 200, 1.2, 30, 250, 1.5, 320, 1, 0.3],
... index=midx)
>>> s
lama speed 45.0
weight 200.0
length 1.2
cow speed 30.0
weight 250.0
length 1.5
falcon speed 320.0
weight 1.0
length 0.3
dtype: float64
>>> s.replace(45, 450)
lama speed 450.0
weight 200.0
length 1.2
cow speed 30.0
weight 250.0
length 1.5
falcon speed 320.0
weight 1.0
length 0.3
dtype: float64
>>> s.replace([45, 30, 320], 500)
lama speed 500.0
weight 200.0
length 1.2
cow speed 500.0
weight 250.0
length 1.5
falcon speed 500.0
weight 1.0
length 0.3
dtype: float64
>>> s.replace({45: 450, 30: 300})
lama speed 450.0
weight 200.0
length 1.2
cow speed 300.0
weight 250.0
length 1.5
falcon speed 320.0
weight 1.0
length 0.3
dtype: float64
"""
if to_replace is None:
return self.fillna(method="ffill")
if not isinstance(to_replace, (str, list, tuple, dict, int, float)):
raise TypeError("'to_replace' should be one of str, list, tuple, dict, int, float")
if regex:
raise NotImplementedError("replace currently not support for regex")
to_replace = list(to_replace) if isinstance(to_replace, tuple) else to_replace
value = list(value) if isinstance(value, tuple) else value
if isinstance(to_replace, list) and isinstance(value, list):
if not len(to_replace) == len(value):
raise ValueError(
"Replacement lists must match in length. Expecting {} got {}".format(
len(to_replace), len(value)
)
)
to_replace = {k: v for k, v in zip(to_replace, value)}
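# Dict-like replacements (including list pairs zipped into a dict above) are translated
# into one chained CASE WHEN expression: a WHEN per pair, with NaN/None keys matched via
# isnan()/isNull(), and the original column as the OTHERWISE fallback.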
if isinstance(to_replace, dict):
is_start = True
if len(to_replace) == 0:
current = self.spark.column
else:
for to_replace_, value in to_replace.items():
cond = (
(F.isnan(self.spark.column) | self.spark.column.isNull())
if pd.isna(to_replace_)
else (self.spark.column == SF.lit(to_replace_))
)
if is_start:
current = F.when(cond, value)
is_start = False
else:
current = current.when(cond, value)
current = current.otherwise(self.spark.column)
else:
cond = self.spark.column.isin(to_replace)
# to_replace may be a scalar
if np.array(pd.isna(to_replace)).any():
cond = cond | F.isnan(self.spark.column) | self.spark.column.isNull()
current = F.when(cond, value).otherwise(self.spark.column)
return self._with_new_scol(current) # TODO: dtype?
def update(self, other: "Series") -> None:
"""
Modify Series in place using non-NA values from passed Series. Aligns on index.
Parameters
----------
other : Series
Examples
--------
>>> from pyspark.pandas.config import set_option, reset_option
>>> set_option("compute.ops_on_diff_frames", True)
>>> s = ps.Series([1, 2, 3])
>>> s.update(ps.Series([4, 5, 6]))
>>> s.sort_index()
0 4
1 5
2 6
dtype: int64
>>> s = ps.Series(['a', 'b', 'c'])
>>> s.update(ps.Series(['d', 'e'], index=[0, 2]))
>>> s.sort_index()
0 d
1 b
2 e
dtype: object
>>> s = ps.Series([1, 2, 3])
>>> s.update(ps.Series([4, 5, 6, 7, 8]))
>>> s.sort_index()
0 4
1 5
2 6
dtype: int64
>>> s = ps.Series([1, 2, 3], index=[10, 11, 12])
>>> s
10 1
11 2
12 3
dtype: int64
>>> s.update(ps.Series([4, 5, 6]))
>>> s.sort_index()
10 1
11 2
12 3
dtype: int64
>>> s.update(ps.Series([4, 5, 6], index=[11, 12, 13]))
>>> s.sort_index()
10 1
11 4
12 5
dtype: int64
If ``other`` contains NaNs the corresponding values are not updated
in the original Series.
>>> s = ps.Series([1, 2, 3])
>>> s.update(ps.Series([4, np.nan, 6]))
>>> s.sort_index()
0 4.0
1 2.0
2 6.0
dtype: float64
>>> reset_option("compute.ops_on_diff_frames")
"""
if not isinstance(other, Series):
raise TypeError("'other' must be a Series")
combined = combine_frames(self._psdf, other._psdf, how="leftouter")
this_scol = combined["this"]._internal.spark_column_for(self._column_label)
that_scol = combined["that"]._internal.spark_column_for(other._column_label)
scol = (
F.when(that_scol.isNotNull(), that_scol)
.otherwise(this_scol)
.alias(self._psdf._internal.spark_column_name_for(self._column_label))
)
internal = combined["this"]._internal.with_new_spark_column(
self._column_label, scol # TODO: dtype?
)
self._psdf._update_internal_frame(internal.resolved_copy, requires_same_anchor=False)
def where(self, cond: "Series", other: Any = np.nan) -> "Series":
"""
Replace values where the condition is False.
Parameters
----------
cond : boolean Series
Where cond is True, keep the original value. Where False,
replace with corresponding value from other.
other : scalar, Series
Entries where cond is False are replaced with corresponding value from other.
Returns
-------
Series
Examples
--------
>>> from pyspark.pandas.config import set_option, reset_option
>>> set_option("compute.ops_on_diff_frames", True)
>>> s1 = ps.Series([0, 1, 2, 3, 4])
>>> s2 = ps.Series([100, 200, 300, 400, 500])
>>> s1.where(s1 > 0).sort_index()
0 NaN
1 1.0
2 2.0
3 3.0
4 4.0
dtype: float64
>>> s1.where(s1 > 1, 10).sort_index()
0 10
1 10
2 2
3 3
4 4
dtype: int64
>>> s1.where(s1 > 1, s1 + 100).sort_index()
0 100
1 101
2 2
3 3
4 4
dtype: int64
>>> s1.where(s1 > 1, s2).sort_index()
0 100
1 200
2 2
3 3
4 4
dtype: int64
>>> reset_option("compute.ops_on_diff_frames")
"""
assert isinstance(cond, Series)
# We should check the DataFrame from both `cond` and `other`.
should_try_ops_on_diff_frame = not same_anchor(cond, self) or (
isinstance(other, Series) and not same_anchor(other, self)
)
if should_try_ops_on_diff_frame:
# Try to perform it with 'compute.ops_on_diff_frame' option.
psdf = self.to_frame()
tmp_cond_col = verify_temp_column_name(psdf, "__tmp_cond_col__")
tmp_other_col = verify_temp_column_name(psdf, "__tmp_other_col__")
psdf[tmp_cond_col] = cond
psdf[tmp_other_col] = other
# The logic above makes the Spark DataFrame look like below:
# +-----------------+---+----------------+-----------------+
# |__index_level_0__| 0|__tmp_cond_col__|__tmp_other_col__|
# +-----------------+---+----------------+-----------------+
# | 0| 0| false| 100|
# | 1| 1| false| 200|
# | 3| 3| true| 400|
# | 2| 2| true| 300|
# | 4| 4| true| 500|
# +-----------------+---+----------------+-----------------+
condition = (
F.when(
psdf[tmp_cond_col].spark.column,
psdf._psser_for(psdf._internal.column_labels[0]).spark.column,
)
.otherwise(psdf[tmp_other_col].spark.column)
.alias(psdf._internal.data_spark_column_names[0])
)
internal = psdf._internal.with_new_columns(
[condition], column_labels=self._internal.column_labels
)
return first_series(DataFrame(internal))
else:
if isinstance(other, Series):
other = other.spark.column
condition = (
F.when(cond.spark.column, self.spark.column)
.otherwise(other)
.alias(self._internal.data_spark_column_names[0])
)
return self._with_new_scol(condition)
def mask(self, cond: "Series", other: Any = np.nan) -> "Series":
"""
Replace values where the condition is True.
Parameters
----------
cond : boolean Series
Where cond is False, keep the original value. Where True,
replace with corresponding value from other.
other : scalar, Series
Entries where cond is True are replaced with corresponding value from other.
Returns
-------
Series
Examples
--------
>>> from pyspark.pandas.config import set_option, reset_option
>>> set_option("compute.ops_on_diff_frames", True)
>>> s1 = ps.Series([0, 1, 2, 3, 4])
>>> s2 = ps.Series([100, 200, 300, 400, 500])
>>> s1.mask(s1 > 0).sort_index()
0 0.0
1 NaN
2 NaN
3 NaN
4 NaN
dtype: float64
>>> s1.mask(s1 > 1, 10).sort_index()
0 0
1 1
2 10
3 10
4 10
dtype: int64
>>> s1.mask(s1 > 1, s1 + 100).sort_index()
0 0
1 1
2 102
3 103
4 104
dtype: int64
>>> s1.mask(s1 > 1, s2).sort_index()
0 0
1 1
2 300
3 400
4 500
dtype: int64
>>> reset_option("compute.ops_on_diff_frames")
"""
return self.where(cast(Series, ~cond), other)
def xs(self, key: Name, level: Optional[int] = None) -> "Series":
"""
Return cross-section from the Series.
This method takes a `key` argument to select data at a particular
level of a MultiIndex.
Parameters
----------
key : label or tuple of label
Label contained in the index, or partially in a MultiIndex.
level : object, defaults to first n levels (n=1 or len(key))
In case of a key partially contained in a MultiIndex, indicate
which levels are used. Levels can be referred by label or position.
Returns
-------
Series
Cross-section from the original Series
corresponding to the selected index levels.
Examples
--------
>>> midx = pd.MultiIndex([['a', 'b', 'c'],
... ['lama', 'cow', 'falcon'],
... ['speed', 'weight', 'length']],
... [[0, 0, 0, 1, 1, 1, 2, 2, 2],
... [0, 0, 0, 1, 1, 1, 2, 2, 2],
... [0, 1, 2, 0, 1, 2, 0, 1, 2]])
>>> s = ps.Series([45, 200, 1.2, 30, 250, 1.5, 320, 1, 0.3],
... index=midx)
>>> s
a lama speed 45.0
weight 200.0
length 1.2
b cow speed 30.0
weight 250.0
length 1.5
c falcon speed 320.0
weight 1.0
length 0.3
dtype: float64
Get values at specified index
>>> s.xs('a')
lama speed 45.0
weight 200.0
length 1.2
dtype: float64
Get values at several indexes
>>> s.xs(('a', 'lama'))
speed 45.0
weight 200.0
length 1.2
dtype: float64
Get values at specified index and level
>>> s.xs('lama', level=1)
a speed 45.0
weight 200.0
length 1.2
dtype: float64
"""
if not isinstance(key, tuple):
key = (key,)
if level is None:
level = 0
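# Keep the index columns outside the levels consumed by `key`, filter rows whose selected
# levels equal the key values, and rebuild the frame; if the key consumes every index
# level, the single remaining value is returned as a scalar.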
internal = self._internal
scols = (
internal.index_spark_columns[:level]
+ internal.index_spark_columns[level + len(key) :]
+ [self.spark.column]
)
rows = [internal.spark_columns[lvl] == index for lvl, index in enumerate(key, level)]
sdf = internal.spark_frame.filter(reduce(lambda x, y: x & y, rows)).select(scols)
if internal.index_level == len(key):
# if only a single row remains, return the scalar value instead of a frame
pdf = sdf.limit(2).toPandas()
length = len(pdf)
if length == 1:
return pdf[self._internal.data_spark_column_names[0]].iloc[0]
index_spark_column_names = (
internal.index_spark_column_names[:level]
+ internal.index_spark_column_names[level + len(key) :]
)
index_names = internal.index_names[:level] + internal.index_names[level + len(key) :]
index_fields = internal.index_fields[:level] + internal.index_fields[level + len(key) :]
internal = internal.copy(
spark_frame=sdf,
index_spark_columns=[scol_for(sdf, col) for col in index_spark_column_names],
index_names=index_names,
index_fields=index_fields,
data_spark_columns=[scol_for(sdf, internal.data_spark_column_names[0])],
)
return first_series(DataFrame(internal))
def pct_change(self, periods: int = 1) -> "Series":
"""
Percentage change between the current and a prior element.
.. note:: the current implementation of this API uses Spark's Window without
specifying partition specification. This moves all data into a
single partition on a single machine and could cause serious
performance degradation. Avoid this method with very large datasets.
Parameters
----------
periods : int, default 1
Periods to shift for forming percent change.
Returns
-------
Series
Examples
--------
>>> psser = ps.Series([90, 91, 85], index=[2, 4, 1])
>>> psser
2 90
4 91
1 85
dtype: int64
>>> psser.pct_change()
2 NaN
4 0.011111
1 -0.065934
dtype: float64
>>> psser.sort_index().pct_change()
1 NaN
2 0.058824
4 0.011111
dtype: float64
>>> psser.pct_change(periods=2)
2 NaN
4 NaN
1 -0.055556
dtype: float64
"""
scol = self.spark.column
window = Window.orderBy(NATURAL_ORDER_COLUMN_NAME).rowsBetween(-periods, -periods)
prev_row = F.lag(scol, periods).over(window)
return self._with_new_scol((scol - prev_row) / prev_row).spark.analyzed
def combine_first(self, other: "Series") -> "Series":
"""
Combine Series values, choosing the calling Series's values first.
Parameters
----------
other : Series
The value(s) to be combined with the `Series`.
Returns
-------
Series
The result of combining the Series with the other object.
See Also
--------
Series.combine : Perform elementwise operation on two Series
using a given function.
Notes
-----
Result index will be the union of the two indexes.
Examples
--------
>>> s1 = ps.Series([1, np.nan])
>>> s2 = ps.Series([3, 4])
>>> with ps.option_context("compute.ops_on_diff_frames", True):
... s1.combine_first(s2)
0 1.0
1 4.0
dtype: float64
"""
if not isinstance(other, ps.Series):
raise TypeError("`combine_first` only allows `Series` for parameter `other`")
if same_anchor(self, other):
this = self.spark.column
that = other.spark.column
combined = self._psdf
else:
combined = combine_frames(self._psdf, other._psdf)
this = combined["this"]._internal.spark_column_for(self._column_label)
that = combined["that"]._internal.spark_column_for(other._column_label)
# If `self` has missing value, use value of `other`
cond = F.when(this.isNull(), that).otherwise(this)
# If `self` and `other` come from same frame, the anchor should be kept
if same_anchor(self, other):
return self._with_new_scol(cond) # TODO: dtype?
index_scols = combined._internal.index_spark_columns
sdf = combined._internal.spark_frame.select(
*index_scols, cond.alias(self._internal.data_spark_column_names[0])
).distinct()
internal = self._internal.with_new_sdf(
sdf, index_fields=combined._internal.index_fields, data_fields=[None] # TODO: dtype?
)
return first_series(DataFrame(internal))
def dot(self, other: Union["Series", DataFrame]) -> Union[Scalar, "Series"]:
"""
Compute the dot product between the Series and the columns of other.
This method computes the dot product between the Series and another
one, or the Series and each columns of a DataFrame.
It can also be called using `self @ other` in Python >= 3.5.
.. note:: This API is slightly different from pandas when indexes from both Series
are not aligned. Matching pandas' behavior would require reading the whole data,
for example, for counting. pandas raises an exception; however, pandas-on-Spark
just proceeds and performs the computation, permissively treating index mismatches as NaN.
>>> pdf1 = pd.Series([1, 2, 3], index=[0, 1, 2])
>>> pdf2 = pd.Series([1, 2, 3], index=[0, 1, 3])
>>> pdf1.dot(pdf2) # doctest: +SKIP
...
ValueError: matrices are not aligned
>>> psdf1 = ps.Series([1, 2, 3], index=[0, 1, 2])
>>> psdf2 = ps.Series([1, 2, 3], index=[0, 1, 3])
>>> psdf1.dot(psdf2) # doctest: +SKIP
5
Parameters
----------
other : Series, DataFrame.
The other object to compute the dot product with its columns.
Returns
-------
scalar, Series
Return the dot product of the Series and other if other is a
Series, or the Series of the dot products of the Series and each column of
other if other is a DataFrame.
Notes
-----
The Series and other have to share the same index if other is a Series
or a DataFrame.
Examples
--------
>>> s = ps.Series([0, 1, 2, 3])
>>> s.dot(s)
14
>>> s @ s
14
>>> psdf = ps.DataFrame({'x': [0, 1, 2, 3], 'y': [0, -1, -2, -3]})
>>> psdf
x y
0 0 0
1 1 -1
2 2 -2
3 3 -3
>>> with ps.option_context("compute.ops_on_diff_frames", True):
... s.dot(psdf)
...
x 14
y -14
dtype: int64
"""
if isinstance(other, DataFrame):
if not same_anchor(self, other):
if not self.index.sort_values().equals(other.index.sort_values()):
raise ValueError("matrices are not aligned")
other_copy = other.copy() # type: DataFrame
column_labels = other_copy._internal.column_labels
self_column_label = verify_temp_column_name(other_copy, "__self_column__")
other_copy[self_column_label] = self
self_psser = other_copy._psser_for(self_column_label)
product_pssers = [
cast(Series, other_copy._psser_for(label) * self_psser) for label in column_labels
]
dot_product_psser = DataFrame(
other_copy._internal.with_new_columns(product_pssers, column_labels=column_labels)
).sum()
return cast(Series, dot_product_psser).rename(self.name)
else:
assert isinstance(other, Series)
if not same_anchor(self, other):
if len(self.index) != len(other.index):
raise ValueError("matrices are not aligned")
return (self * other).sum()
def __matmul__(self, other: Union["Series", DataFrame]) -> Union[Scalar, "Series"]:
"""
Matrix multiplication using binary `@` operator in Python>=3.5.
"""
return self.dot(other)
def repeat(self, repeats: Union[int, "Series"]) -> "Series":
"""
Repeat elements of a Series.
Returns a new Series where each element of the current Series
is repeated consecutively a given number of times.
Parameters
----------
repeats : int or Series
The number of repetitions for each element. This should be a
non-negative integer. Repeating 0 times will return an empty
Series.
Returns
-------
Series
Newly created Series with repeated elements.
See Also
--------
Index.repeat : Equivalent function for Index.
Examples
--------
>>> s = ps.Series(['a', 'b', 'c'])
>>> s
0 a
1 b
2 c
dtype: object
>>> s.repeat(2)
0 a
1 b
2 c
0 a
1 b
2 c
dtype: object
>>> ps.Series([1, 2, 3]).repeat(0)
Series([], dtype: int64)
"""
if not isinstance(repeats, (int, Series)):
raise TypeError(
"`repeats` argument must be integer or Series, but got {}".format(type(repeats))
)
if isinstance(repeats, Series):
if not same_anchor(self, repeats):
psdf = self.to_frame()
temp_repeats = verify_temp_column_name(psdf, "__temp_repeats__")
psdf[temp_repeats] = repeats
return (
psdf._psser_for(psdf._internal.column_labels[0])
.repeat(psdf[temp_repeats])
.rename(self.name)
)
else:
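# When `repeats` shares the same anchor, each value is expanded with array_repeat and
# exploded back into rows, preserving the original index columns.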
scol = F.explode(
F.array_repeat(self.spark.column, repeats.astype("int32").spark.column)
).alias(name_like_string(self.name))
sdf = self._internal.spark_frame.select(self._internal.index_spark_columns + [scol])
internal = self._internal.copy(
spark_frame=sdf,
index_spark_columns=[
scol_for(sdf, col) for col in self._internal.index_spark_column_names
],
data_spark_columns=[scol_for(sdf, name_like_string(self.name))],
)
return first_series(DataFrame(internal))
else:
if repeats < 0:
raise ValueError("negative dimensions are not allowed")
psdf = self._psdf[[self.name]]
if repeats == 0:
return first_series(DataFrame(psdf._internal.with_filter(SF.lit(False))))
else:
return first_series(ps.concat([psdf] * repeats))
def asof(self, where: Union[Any, List]) -> Union[Scalar, "Series"]:
"""
Return the last row(s) without any NaNs before `where`.
The last row (for each element in `where`, if list) without any
NaN is taken.
If there is no good value, NaN is returned.
.. note:: This API is dependent on :meth:`Index.is_monotonic_increasing`
which can be expensive.
Parameters
----------
where : index or array-like of indices
Returns
-------
scalar or Series
The return can be:
* scalar : when `self` is a Series and `where` is a scalar
* Series: when `self` is a Series and `where` is an array-like
Return scalar or Series
Notes
-----
Indices are assumed to be sorted. Raises if this is not the case.
Examples
--------
>>> s = ps.Series([1, 2, np.nan, 4], index=[10, 20, 30, 40])
>>> s
10 1.0
20 2.0
30 NaN
40 4.0
dtype: float64
A scalar `where`.
>>> s.asof(20)
2.0
For a sequence `where`, a Series is returned. The first value is
NaN, because the first element of `where` is before the first
index value.
>>> s.asof([5, 20]).sort_index()
5 NaN
20 2.0
dtype: float64
Missing values are not considered. The following is ``2.0``, not
NaN, even though NaN is at the index location for ``30``.
>>> s.asof(30)
2.0
"""
should_return_series = True
if isinstance(self.index, ps.MultiIndex):
raise ValueError("asof is not supported for a MultiIndex")
if isinstance(where, (ps.Index, ps.Series, DataFrame)):
raise ValueError("where cannot be an Index, Series or a DataFrame")
if not self.index.is_monotonic_increasing:
raise ValueError("asof requires a sorted index")
if not is_list_like(where):
should_return_series = False
where = [where]
index_scol = self._internal.index_spark_columns[0]
index_type = self._internal.spark_type_for(index_scol)
cond = [
F.max(F.when(index_scol <= SF.lit(index).cast(index_type), self.spark.column))
for index in where
]
sdf = self._internal.spark_frame.select(cond)
if not should_return_series:
with sql_conf({SPARK_CONF_ARROW_ENABLED: False}):
# Disable Arrow to keep row ordering.
result = cast(pd.DataFrame, sdf.limit(1).toPandas()).iloc[0, 0]
return result if result is not None else np.nan
# The data is expected to be small so it's fine to transpose/use default index.
with ps.option_context("compute.default_index_type", "distributed", "compute.max_rows", 1):
psdf = ps.DataFrame(sdf) # type: DataFrame
psdf.columns = pd.Index(where)
return first_series(psdf.transpose()).rename(self.name)
def mad(self) -> float:
"""
Return the mean absolute deviation of values.
Examples
--------
>>> s = ps.Series([1, 2, 3, 4])
>>> s
0 1
1 2
2 3
3 4
dtype: int64
>>> s.mad()
1.0
"""
sdf = self._internal.spark_frame
spark_column = self.spark.column
avg = unpack_scalar(sdf.select(F.avg(spark_column)))
mad = unpack_scalar(sdf.select(F.avg(F.abs(spark_column - avg))))
return mad
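# A worked sketch of the two Spark aggregations above, using the docstring example
# [1, 2, 3, 4]: avg = 2.5, absolute deviations = [1.5, 0.5, 0.5, 1.5], and their
# average is 1.0, which is the value returned by `mad()`.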
def unstack(self, level: int = -1) -> DataFrame:
"""
Unstack, a.k.a. pivot, Series with MultiIndex to produce DataFrame.
The level involved will automatically get sorted.
Notes
-----
Unlike pandas, pandas-on-Spark doesn't check whether the index is duplicated,
because checking for duplicated indexes requires scanning the whole data, which
can be quite expensive.
Parameters
----------
level : int, str, or list of these, default last level
Level(s) to unstack, can pass level name.
Returns
-------
DataFrame
Unstacked Series.
Examples
--------
>>> s = ps.Series([1, 2, 3, 4],
... index=pd.MultiIndex.from_product([['one', 'two'],
... ['a', 'b']]))
>>> s
one a 1
b 2
two a 3
b 4
dtype: int64
>>> s.unstack(level=-1).sort_index()
a b
one 1 2
two 3 4
>>> s.unstack(level=0).sort_index()
one two
a 1 3
b 2 4
"""
if not isinstance(self.index, ps.MultiIndex):
raise ValueError("Series.unstack only support for a MultiIndex")
index_nlevels = self.index.nlevels
if level > 0 and (level > index_nlevels - 1):
raise IndexError(
"Too many levels: Index has only {} levels, not {}".format(index_nlevels, level + 1)
)
elif level < 0 and (level < -index_nlevels):
raise IndexError(
"Too many levels: Index has only {} levels, {} is not a valid level number".format(
index_nlevels, level
)
)
internal = self._internal.resolved_copy
index_map = list(zip(internal.index_spark_column_names, internal.index_names))
pivot_col, column_label_names = index_map.pop(level)
index_scol_names, index_names = zip(*index_map)
col = internal.data_spark_column_names[0]
sdf = internal.spark_frame
sdf = sdf.groupby(list(index_scol_names)).pivot(pivot_col).agg(F.first(scol_for(sdf, col)))
internal = InternalFrame( # TODO: dtypes?
spark_frame=sdf,
index_spark_columns=[scol_for(sdf, col) for col in index_scol_names],
index_names=list(index_names),
column_label_names=[column_label_names],
)
return DataFrame(internal)
def item(self) -> Scalar:
"""
Return the first element of the underlying data as a Python scalar.
Returns
-------
scalar
The first element of Series.
Raises
------
ValueError
If the data is not length-1.
Examples
--------
>>> psser = ps.Series([10])
>>> psser.item()
10
"""
return self.head(2)._to_internal_pandas().item()
def iteritems(self) -> Iterable[Tuple[Name, Any]]:
"""
Lazily iterate over (index, value) tuples.
This method returns an iterable tuple (index, value). This is
convenient if you want to create a lazy iterator.
.. note:: Unlike pandas, ``iteritems`` in pandas-on-Spark returns a generator
rather than a zip object.
Returns
-------
iterable
Iterable of tuples containing the (index, value) pairs from a
Series.
See Also
--------
DataFrame.items : Iterate over (column name, Series) pairs.
DataFrame.iterrows : Iterate over DataFrame rows as (index, Series) pairs.
Examples
--------
>>> s = ps.Series(['A', 'B', 'C'])
>>> for index, value in s.items():
... print("Index : {}, Value : {}".format(index, value))
Index : 0, Value : A
Index : 1, Value : B
Index : 2, Value : C
"""
internal_index_columns = self._internal.index_spark_column_names
internal_data_column = self._internal.data_spark_column_names[0]
def extract_kv_from_spark_row(row: Row) -> Tuple[Name, Any]:
k = (
row[internal_index_columns[0]]
if len(internal_index_columns) == 1
else tuple(row[c] for c in internal_index_columns)
)
v = row[internal_data_column]
return k, v
for k, v in map(
extract_kv_from_spark_row, self._internal.resolved_copy.spark_frame.toLocalIterator()
):
yield k, v
def items(self) -> Iterable[Tuple[Name, Any]]:
"""This is an alias of ``iteritems``."""
return self.iteritems()
def droplevel(self, level: Union[int, Name, List[Union[int, Name]]]) -> "Series":
"""
Return Series with requested index level(s) removed.
Parameters
----------
level : int, str, or list-like
If a string is given, must be the name of a level
If list-like, elements must be names or positional indexes
of levels.
Returns
-------
Series
Series with requested index level(s) removed.
Examples
--------
>>> psser = ps.Series(
... [1, 2, 3],
... index=pd.MultiIndex.from_tuples(
... [("x", "a"), ("x", "b"), ("y", "c")], names=["level_1", "level_2"]
... ),
... )
>>> psser
level_1 level_2
x a 1
b 2
y c 3
dtype: int64
Removing specific index level by level
>>> psser.droplevel(0)
level_2
a 1
b 2
c 3
dtype: int64
Removing specific index level by name
>>> psser.droplevel("level_2")
level_1
x 1
x 2
y 3
dtype: int64
"""
return first_series(self.to_frame().droplevel(level=level, axis=0)).rename(self.name)
def tail(self, n: int = 5) -> "Series":
"""
Return the last `n` rows.
This function returns last `n` rows from the object based on
position. It is useful for quickly verifying data, for example,
after sorting or appending rows.
For negative values of `n`, this function returns all rows except
the first `n` rows, equivalent to ``df[n:]``.
Parameters
----------
n : int, default 5
Number of rows to select.
Returns
-------
type of caller
The last `n` rows of the caller object.
See Also
--------
DataFrame.head : The first `n` rows of the caller object.
Examples
--------
>>> psser = ps.Series([1, 2, 3, 4, 5])
>>> psser
0 1
1 2
2 3
3 4
4 5
dtype: int64
>>> psser.tail(3) # doctest: +SKIP
2 3
3 4
4 5
dtype: int64
"""
return first_series(self.to_frame().tail(n=n)).rename(self.name)
def explode(self) -> "Series":
"""
Transform each element of a list-like to a row.
Returns
-------
Series
Exploded lists to rows; index will be duplicated for these rows.
See Also
--------
Series.str.split : Split string values on specified separator.
Series.unstack : Unstack, a.k.a. pivot, Series with MultiIndex
to produce DataFrame.
DataFrame.melt : Unpivot a DataFrame from wide format to long format.
DataFrame.explode : Explode a DataFrame from list-like
columns to long format.
Examples
--------
>>> psser = ps.Series([[1, 2, 3], [], [3, 4]])
>>> psser
0 [1, 2, 3]
1 []
2 [3, 4]
dtype: object
>>> psser.explode() # doctest: +SKIP
0 1.0
0 2.0
0 3.0
1 NaN
2 3.0
2 4.0
dtype: float64
"""
if not isinstance(self.spark.data_type, ArrayType):
return self.copy()
scol = F.explode_outer(self.spark.column).alias(name_like_string(self._column_label))
internal = self._internal.with_new_columns([scol], keep_order=False)
return first_series(DataFrame(internal))
def argsort(self) -> "Series":
"""
Return the integer indices that would sort the Series values.
Unlike pandas, the index order is not preserved in the result.
Returns
-------
Series
Positions of values within the sort order with -1 indicating
nan values.
Examples
--------
>>> psser = ps.Series([3, 3, 4, 1, 6, 2, 3, 7, 8, 7, 10])
>>> psser
0 3
1 3
2 4
3 1
4 6
5 2
6 3
7 7
8 8
9 7
10 10
dtype: int64
>>> psser.argsort().sort_index()
0 3
1 5
2 0
3 1
4 6
5 2
6 4
7 7
8 9
9 8
10 10
dtype: int64
"""
notnull = self.loc[self.notnull()]
sdf_for_index = notnull._internal.spark_frame.select(notnull._internal.index_spark_columns)
tmp_join_key = verify_temp_column_name(sdf_for_index, "__tmp_join_key__")
sdf_for_index, _ = InternalFrame.attach_distributed_sequence_column(
sdf_for_index, tmp_join_key
)
# sdf_for_index:
# +----------------+-----------------+
# |__tmp_join_key__|__index_level_0__|
# +----------------+-----------------+
# | 0| 0|
# | 1| 1|
# | 2| 2|
# | 3| 3|
# | 4| 4|
# +----------------+-----------------+
sdf_for_data = notnull._internal.spark_frame.select(
notnull.spark.column.alias("values"), NATURAL_ORDER_COLUMN_NAME
)
sdf_for_data, _ = InternalFrame.attach_distributed_sequence_column(
sdf_for_data, SPARK_DEFAULT_SERIES_NAME
)
# sdf_for_data:
# +---+------+-----------------+
# | 0|values|__natural_order__|
# +---+------+-----------------+
# | 0| 3| 25769803776|
# | 1| 3| 51539607552|
# | 2| 4| 77309411328|
# | 3| 1| 103079215104|
# | 4| 2| 128849018880|
# +---+------+-----------------+
sdf_for_data = sdf_for_data.sort(
scol_for(sdf_for_data, "values"), NATURAL_ORDER_COLUMN_NAME
).drop("values", NATURAL_ORDER_COLUMN_NAME)
tmp_join_key = verify_temp_column_name(sdf_for_data, "__tmp_join_key__")
sdf_for_data, _ = InternalFrame.attach_distributed_sequence_column(
sdf_for_data, tmp_join_key
)
# sdf_for_index: sdf_for_data:
# +----------------+-----------------+ +----------------+---+
# |__tmp_join_key__|__index_level_0__| |__tmp_join_key__| 0|
# +----------------+-----------------+ +----------------+---+
# | 0| 0| | 0| 3|
# | 1| 1| | 1| 4|
# | 2| 2| | 2| 0|
# | 3| 3| | 3| 1|
# | 4| 4| | 4| 2|
# +----------------+-----------------+ +----------------+---+
sdf = sdf_for_index.join(sdf_for_data, on=tmp_join_key).drop(tmp_join_key)
internal = self._internal.with_new_sdf(
spark_frame=sdf,
data_columns=[SPARK_DEFAULT_SERIES_NAME],
index_fields=[
InternalField(dtype=field.dtype) for field in self._internal.index_fields
],
data_fields=[None],
)
psser = first_series(DataFrame(internal))
return cast(
Series,
ps.concat([psser, self.loc[self.isnull()].spark.transform(lambda _: SF.lit(-1))]),
)
def argmax(self) -> int:
"""
Return int position of the largest value in the Series.
If the maximum is achieved in multiple locations,
the first row position is returned.
Returns
-------
int
Row position of the maximum value.
Examples
--------
Consider dataset containing cereal calories
>>> s = ps.Series({'Corn Flakes': 100.0, 'Almond Delight': 110.0,
... 'Cinnamon Toast Crunch': 120.0, 'Cocoa Puff': 110.0})
>>> s # doctest: +SKIP
Corn Flakes 100.0
Almond Delight 110.0
Cinnamon Toast Crunch 120.0
Cocoa Puff 110.0
dtype: float64
>>> s.argmax() # doctest: +SKIP
2
"""
sdf = self._internal.spark_frame.select(self.spark.column, NATURAL_ORDER_COLUMN_NAME)
max_value = sdf.select(
F.max(scol_for(sdf, self._internal.data_spark_column_names[0])),
F.first(NATURAL_ORDER_COLUMN_NAME),
).head()
if max_value[1] is None:
raise ValueError("attempt to get argmax of an empty sequence")
elif max_value[0] is None:
return -1
# The attached distributed sequence column starts from 0, giving 0-based positions.
seq_col_name = verify_temp_column_name(sdf, "__distributed_sequence_column__")
sdf, _ = InternalFrame.attach_distributed_sequence_column(
sdf.drop(NATURAL_ORDER_COLUMN_NAME), seq_col_name
)
# If the maximum is achieved in multiple locations, the first row position is returned.
return sdf.filter(
scol_for(sdf, self._internal.data_spark_column_names[0]) == max_value[0]
).head()[0]
def argmin(self) -> int:
"""
Return int position of the smallest value in the Series.
If the minimum is achieved in multiple locations,
the first row position is returned.
Returns
-------
int
Row position of the minimum value.
Examples
--------
Consider dataset containing cereal calories
>>> s = ps.Series({'Corn Flakes': 100.0, 'Almond Delight': 110.0,
... 'Cinnamon Toast Crunch': 120.0, 'Cocoa Puff': 110.0})
>>> s # doctest: +SKIP
Corn Flakes 100.0
Almond Delight 110.0
Cinnamon Toast Crunch 120.0
Cocoa Puff 110.0
dtype: float64
>>> s.argmin() # doctest: +SKIP
0
"""
sdf = self._internal.spark_frame.select(self.spark.column, NATURAL_ORDER_COLUMN_NAME)
min_value = sdf.select(
F.min(scol_for(sdf, self._internal.data_spark_column_names[0])),
F.first(NATURAL_ORDER_COLUMN_NAME),
).head()
if min_value[1] is None:
raise ValueError("attempt to get argmin of an empty sequence")
elif min_value[0] is None:
return -1
# The attached distributed sequence column starts from 0, giving 0-based positions.
seq_col_name = verify_temp_column_name(sdf, "__distributed_sequence_column__")
sdf, _ = InternalFrame.attach_distributed_sequence_column(
sdf.drop(NATURAL_ORDER_COLUMN_NAME), seq_col_name
)
# If the minimum is achieved in multiple locations, the first row position is returned.
return sdf.filter(
scol_for(sdf, self._internal.data_spark_column_names[0]) == min_value[0]
).head()[0]
def compare(
self, other: "Series", keep_shape: bool = False, keep_equal: bool = False
) -> DataFrame:
"""
Compare to another Series and show the differences.
Parameters
----------
other : Series
Object to compare with.
keep_shape : bool, default False
If true, all rows and columns are kept.
Otherwise, only the ones with different values are kept.
keep_equal : bool, default False
If true, the result keeps values that are equal.
Otherwise, equal values are shown as NaNs.
Returns
-------
DataFrame
Notes
-----
Matching NaNs will not appear as a difference.
Examples
--------
>>> from pyspark.pandas.config import set_option, reset_option
>>> set_option("compute.ops_on_diff_frames", True)
>>> s1 = ps.Series(["a", "b", "c", "d", "e"])
>>> s2 = ps.Series(["a", "a", "c", "b", "e"])
Align the differences on columns
>>> s1.compare(s2).sort_index()
self other
1 b a
3 d b
Keep all original rows
>>> s1.compare(s2, keep_shape=True).sort_index()
self other
0 None None
1 b a
2 None None
3 d b
4 None None
Keep all original rows and also all original values
>>> s1.compare(s2, keep_shape=True, keep_equal=True).sort_index()
self other
0 a a
1 b a
2 c c
3 d b
4 e e
>>> reset_option("compute.ops_on_diff_frames")
"""
if same_anchor(self, other):
self_column_label = verify_temp_column_name(other.to_frame(), "__self_column__")
other_column_label = verify_temp_column_name(self.to_frame(), "__other_column__")
combined = DataFrame(
self._internal.with_new_columns(
[self.rename(self_column_label), other.rename(other_column_label)]
)
) # type: DataFrame
else:
if not self.index.equals(other.index):
raise ValueError("Can only compare identically-labeled Series objects")
combined = combine_frames(self.to_frame(), other.to_frame())
this_column_label = "self"
that_column_label = "other"
if keep_equal and keep_shape:
combined.columns = pd.Index([this_column_label, that_column_label])
return combined
this_data_scol = combined._internal.data_spark_columns[0]
that_data_scol = combined._internal.data_spark_columns[1]
index_scols = combined._internal.index_spark_columns
sdf = combined._internal.spark_frame
if keep_shape:
this_scol = (
F.when(this_data_scol == that_data_scol, None)
.otherwise(this_data_scol)
.alias(this_column_label)
)
this_field = combined._internal.data_fields[0].copy(
name=this_column_label, nullable=True
)
that_scol = (
F.when(this_data_scol == that_data_scol, None)
.otherwise(that_data_scol)
.alias(that_column_label)
)
that_field = combined._internal.data_fields[1].copy(
name=that_column_label, nullable=True
)
else:
sdf = sdf.filter(~this_data_scol.eqNullSafe(that_data_scol))
this_scol = this_data_scol.alias(this_column_label)
this_field = combined._internal.data_fields[0].copy(name=this_column_label)
that_scol = that_data_scol.alias(that_column_label)
that_field = combined._internal.data_fields[1].copy(name=that_column_label)
sdf = sdf.select(*index_scols, this_scol, that_scol, NATURAL_ORDER_COLUMN_NAME)
internal = InternalFrame(
spark_frame=sdf,
index_spark_columns=[
scol_for(sdf, col) for col in self._internal.index_spark_column_names
],
index_names=self._internal.index_names,
index_fields=combined._internal.index_fields,
column_labels=[(this_column_label,), (that_column_label,)],
data_spark_columns=[scol_for(sdf, this_column_label), scol_for(sdf, that_column_label)],
data_fields=[this_field, that_field],
column_label_names=[None],
)
return DataFrame(internal)
def align(
self,
other: Union[DataFrame, "Series"],
join: str = "outer",
axis: Optional[Axis] = None,
copy: bool = True,
) -> Tuple["Series", Union[DataFrame, "Series"]]:
"""
Align two objects on their axes with the specified join method.
Join method is specified for each axis Index.
Parameters
----------
other : DataFrame or Series
join : {{'outer', 'inner', 'left', 'right'}}, default 'outer'
axis : allowed axis of the other object, default None
Align on index (0), columns (1), or both (None).
copy : bool, default True
Always returns new objects. If copy=False and no reindexing is
required then original objects are returned.
Returns
-------
(left, right) : (Series, type of other)
Aligned objects.
Examples
--------
>>> ps.set_option("compute.ops_on_diff_frames", True)
>>> s1 = ps.Series([7, 8, 9], index=[10, 11, 12])
>>> s2 = ps.Series(["g", "h", "i"], index=[10, 20, 30])
>>> aligned_l, aligned_r = s1.align(s2)
>>> aligned_l.sort_index()
10 7.0
11 8.0
12 9.0
20 NaN
30 NaN
dtype: float64
>>> aligned_r.sort_index()
10 g
11 None
12 None
20 h
30 i
dtype: object
Align with the join type "inner":
>>> aligned_l, aligned_r = s1.align(s2, join="inner")
>>> aligned_l.sort_index()
10 7
dtype: int64
>>> aligned_r.sort_index()
10 g
dtype: object
Align with a DataFrame:
>>> df = ps.DataFrame({"a": [1, 2, 3], "b": ["a", "b", "c"]}, index=[10, 20, 30])
>>> aligned_l, aligned_r = s1.align(df)
>>> aligned_l.sort_index()
10 7.0
11 8.0
12 9.0
20 NaN
30 NaN
dtype: float64
>>> aligned_r.sort_index()
a b
10 1.0 a
11 NaN None
12 NaN None
20 2.0 b
30 3.0 c
>>> ps.reset_option("compute.ops_on_diff_frames")
"""
axis = validate_axis(axis)
if axis == 1:
raise ValueError("Series does not support columns axis.")
self_df = self.to_frame()
left, right = self_df.align(other, join=join, axis=axis, copy=False)
if left is self_df:
left_ser = self
else:
left_ser = first_series(left).rename(self.name)
return (left_ser.copy(), right.copy()) if copy else (left_ser, right)
def between_time(
self,
start_time: Union[datetime.time, str],
end_time: Union[datetime.time, str],
include_start: bool = True,
include_end: bool = True,
axis: Axis = 0,
) -> "Series":
"""
Select values between particular times of the day (example: 9:00-9:30 AM).
By setting ``start_time`` to be later than ``end_time``,
you can get the times that are *not* between the two times.
Parameters
----------
start_time : datetime.time or str
Initial time as a time filter limit.
end_time : datetime.time or str
End time as a time filter limit.
include_start : bool, default True
Whether the start time needs to be included in the result.
include_end : bool, default True
Whether the end time needs to be included in the result.
axis : {0 or 'index', 1 or 'columns'}, default 0
Determine range time on index or columns value.
Returns
-------
Series
Data from the original object filtered to the specified dates range.
Raises
------
TypeError
If the index is not a :class:`DatetimeIndex`
See Also
--------
at_time : Select values at a particular time of the day.
last : Select final periods of time series based on a date offset.
DatetimeIndex.indexer_between_time : Get just the index locations for
values between particular times of the day.
Examples
--------
>>> idx = pd.date_range('2018-04-09', periods=4, freq='1D20min')
>>> psser = ps.Series([1, 2, 3, 4], index=idx)
>>> psser
2018-04-09 00:00:00 1
2018-04-10 00:20:00 2
2018-04-11 00:40:00 3
2018-04-12 01:00:00 4
dtype: int64
>>> psser.between_time('0:15', '0:45')
2018-04-10 00:20:00 2
2018-04-11 00:40:00 3
dtype: int64
"""
return first_series(
self.to_frame().between_time(start_time, end_time, include_start, include_end, axis)
).rename(self.name)
def at_time(
self, time: Union[datetime.time, str], asof: bool = False, axis: Axis = 0
) -> "Series":
"""
Select values at particular time of day (example: 9:30AM).
Parameters
----------
time : datetime.time or str
axis : {0 or 'index', 1 or 'columns'}, default 0
Returns
-------
Series
Raises
------
TypeError
If the index is not a :class:`DatetimeIndex`
See Also
--------
between_time : Select values between particular times of the day.
DatetimeIndex.indexer_at_time : Get just the index locations for
values at particular time of the day.
Examples
--------
>>> idx = pd.date_range('2018-04-09', periods=4, freq='12H')
>>> psser = ps.Series([1, 2, 3, 4], index=idx)
>>> psser
2018-04-09 00:00:00 1
2018-04-09 12:00:00 2
2018-04-10 00:00:00 3
2018-04-10 12:00:00 4
dtype: int64
>>> psser.at_time('12:00')
2018-04-09 12:00:00 2
2018-04-10 12:00:00 4
dtype: int64
"""
return first_series(self.to_frame().at_time(time, asof, axis)).rename(self.name)
def _cum(
self,
func: Callable[[Column], Column],
skipna: bool,
part_cols: Sequence["ColumnOrName"] = (),
ascending: bool = True,
) -> "Series":
# This is used for cummin, cummax, cumsum, etc.
if ascending:
window = (
Window.orderBy(F.asc(NATURAL_ORDER_COLUMN_NAME))
.partitionBy(*part_cols)
.rowsBetween(Window.unboundedPreceding, Window.currentRow)
)
else:
window = (
Window.orderBy(F.desc(NATURAL_ORDER_COLUMN_NAME))
.partitionBy(*part_cols)
.rowsBetween(Window.unboundedPreceding, Window.currentRow)
)
if skipna:
# There is a behavior difference between pandas and PySpark. In case of cummax,
#
# Input:
# A B
# 0 2.0 1.0
# 1 5.0 NaN
# 2 1.0 0.0
# 3 2.0 4.0
# 4 4.0 9.0
#
# pandas:
# A B
# 0 2.0 1.0
# 1 5.0 NaN
# 2 5.0 1.0
# 3 5.0 4.0
# 4 5.0 9.0
#
# PySpark:
# A B
# 0 2.0 1.0
# 1 5.0 1.0
# 2 5.0 1.0
# 3 5.0 4.0
# 4 5.0 9.0
scol = F.when(
# Manually sets nulls given the column defined above.
self.spark.column.isNull(),
SF.lit(None),
).otherwise(func(self.spark.column).over(window))
else:
# Here, we use two Windows.
# One for real data.
# The other one for setting nulls after the first null it meets.
#
# There is a behavior difference between pandas and PySpark. In case of cummax,
#
# Input:
# A B
# 0 2.0 1.0
# 1 5.0 NaN
# 2 1.0 0.0
# 3 2.0 4.0
# 4 4.0 9.0
#
# pandas:
# A B
# 0 2.0 1.0
# 1 5.0 NaN
# 2 5.0 NaN
# 3 5.0 NaN
# 4 5.0 NaN
#
# PySpark:
# A B
# 0 2.0 1.0
# 1 5.0 1.0
# 2 5.0 1.0
# 3 5.0 4.0
# 4 5.0 9.0
scol = F.when(
# Taking the running max over the null flags yields True for every row at or after the first null.
F.max(self.spark.column.isNull()).over(window),
# Manually sets nulls given the column defined above.
SF.lit(None),
).otherwise(func(self.spark.column).over(window))
return self._with_new_scol(scol)
def _cumsum(self, skipna: bool, part_cols: Sequence["ColumnOrName"] = ()) -> "Series":
psser = self
if isinstance(psser.spark.data_type, BooleanType):
psser = psser.spark.transform(lambda scol: scol.cast(LongType()))
elif not isinstance(psser.spark.data_type, NumericType):
raise TypeError(
"Could not convert {} ({}) to numeric".format(
spark_type_to_pandas_dtype(psser.spark.data_type),
psser.spark.data_type.simpleString(),
)
)
return psser._cum(F.sum, skipna, part_cols)
def _cumprod(self, skipna: bool, part_cols: Sequence["ColumnOrName"] = ()) -> "Series":
if isinstance(self.spark.data_type, BooleanType):
scol = self._cum(
lambda scol: F.min(F.coalesce(scol, SF.lit(True))), skipna, part_cols
).spark.column.cast(LongType())
elif isinstance(self.spark.data_type, NumericType):
num_zeros = self._cum(
lambda scol: F.sum(F.when(scol == 0, 1).otherwise(0)), skipna, part_cols
).spark.column
num_negatives = self._cum(
lambda scol: F.sum(F.when(scol < 0, 1).otherwise(0)), skipna, part_cols
).spark.column
sign = F.when(num_negatives % 2 == 0, 1).otherwise(-1)
abs_prod = F.exp(
self._cum(lambda scol: F.sum(F.log(F.abs(scol))), skipna, part_cols).spark.column
)
scol = F.when(num_zeros > 0, 0).otherwise(sign * abs_prod)
if isinstance(self.spark.data_type, IntegralType):
scol = F.round(scol).cast(LongType())
else:
raise TypeError(
"Could not convert {} ({}) to numeric".format(
spark_type_to_pandas_dtype(self.spark.data_type),
self.spark.data_type.simpleString(),
)
)
return self._with_new_scol(scol)
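# Illustrative sketch (assumed values) of the log/exp trick used in _cumprod above:
#   values:                   [2.0, -3.0, 0.0, 4.0]
#   exp(cumsum(log|x|)):      running product of the absolute values
#   sign:                     -1 where the running count of negatives is odd, else 1
#   zeros:                    the result is forced to 0 from the first zero onward
# yielding the cumulative products [2.0, -6.0, 0.0, 0.0].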
# ----------------------------------------------------------------------
# Accessor Methods
# ----------------------------------------------------------------------
dt = CachedAccessor("dt", DatetimeMethods)
str = CachedAccessor("str", StringMethods)
cat = CachedAccessor("cat", CategoricalAccessor)
plot = CachedAccessor("plot", PandasOnSparkPlotAccessor)
# ----------------------------------------------------------------------
def _apply_series_op(
self, op: Callable[["Series"], Union["Series", Column]], should_resolve: bool = False
) -> "Series":
psser_or_scol = op(self)
if isinstance(psser_or_scol, Series):
psser = psser_or_scol
else:
psser = self._with_new_scol(cast(Column, psser_or_scol))
if should_resolve:
internal = psser._internal.resolved_copy
return first_series(DataFrame(internal))
else:
return psser
def _reduce_for_stat_function(
self,
sfun: Union[Callable[[Column], Column], Callable[[Column, DataType], Column]],
name: str_type,
axis: Optional[Axis] = None,
numeric_only: bool = True,
**kwargs: Any
) -> Scalar:
"""
Applies sfun to the column and returns a scalar
Parameters
----------
sfun : the stats function to be used for aggregation
name : original pandas API name.
axis : used only for a sanity check because Series only supports the index axis.
numeric_only : not used by this implementation, but passed down by stats functions
"""
from inspect import signature
axis = validate_axis(axis)
if axis == 1:
raise ValueError("Series does not support columns axis.")
num_args = len(signature(sfun).parameters)
spark_column = self.spark.column
spark_type = self.spark.data_type
if num_args == 1:
# Only pass in the column if sfun accepts only one arg
scol = cast(Callable[[Column], Column], sfun)(spark_column)
else: # must be 2
assert num_args == 2
# Pass in both the column and its data type if sfun accepts two args
scol = cast(Callable[[Column, DataType], Column], sfun)(spark_column, spark_type)
min_count = kwargs.get("min_count", 0)
if min_count > 0:
scol = F.when(Frame._count_expr(spark_column, spark_type) >= min_count, scol)
result = unpack_scalar(self._internal.spark_frame.select(scol))
return result if result is not None else np.nan
# Override the `groupby` to specify the actual return type annotation.
def groupby(
self,
by: Union[Name, "Series", List[Union[Name, "Series"]]],
axis: Axis = 0,
as_index: bool = True,
dropna: bool = True,
) -> "SeriesGroupBy":
return cast(
"SeriesGroupBy", super().groupby(by=by, axis=axis, as_index=as_index, dropna=dropna)
)
groupby.__doc__ = Frame.groupby.__doc__
def _build_groupby(
self, by: List[Union["Series", Label]], as_index: bool, dropna: bool
) -> "SeriesGroupBy":
from pyspark.pandas.groupby import SeriesGroupBy
return SeriesGroupBy._build(self, by, as_index=as_index, dropna=dropna)
def __getitem__(self, key: Any) -> Any:
try:
if (isinstance(key, slice) and any(type(n) == int for n in [key.start, key.stop])) or (
type(key) == int
and not isinstance(self.index.spark.data_type, (IntegerType, LongType))
):
# pandas Series uses positional indexing when slicing with integer bounds,
# but a single integer key is treated as positional only when the index itself
# is not integer-typed; otherwise it is looked up by index value.
return self.iloc[key]
return self.loc[key]
except SparkPandasIndexingError:
raise KeyError(
"Key length ({}) exceeds index depth ({})".format(
len(key), self._internal.index_level
)
)
def __getattr__(self, item: str_type) -> Any:
if item.startswith("__"):
raise AttributeError(item)
if hasattr(MissingPandasLikeSeries, item):
property_or_func = getattr(MissingPandasLikeSeries, item)
if isinstance(property_or_func, property):
return property_or_func.fget(self) # type: ignore
else:
return partial(property_or_func, self)
raise AttributeError("'Series' object has no attribute '{}'".format(item))
def _to_internal_pandas(self) -> pd.Series:
"""
Return a pandas Series directly from _internal to avoid the overhead of copying.
This method is for internal use only.
"""
return self._psdf._internal.to_pandas_frame[self.name]
def __repr__(self) -> str_type:
max_display_count = get_option("display.max_rows")
if max_display_count is None:
return self._to_internal_pandas().to_string(name=self.name, dtype=self.dtype)
pser = self._psdf._get_or_create_repr_pandas_cache(max_display_count)[self.name]
pser_length = len(pser)
pser = pser.iloc[:max_display_count]
if pser_length > max_display_count:
repr_string = pser.to_string(length=True)
rest, prev_footer = repr_string.rsplit("\n", 1)
match = REPR_PATTERN.search(prev_footer)
if match is not None:
length = match.group("length")
dtype_name = str(self.dtype.name)
if self.name is None:
footer = "\ndtype: {dtype}\nShowing only the first {length}".format(
length=length, dtype=pprint_thing(dtype_name)
)
else:
footer = (
"\nName: {name}, dtype: {dtype}"
"\nShowing only the first {length}".format(
length=length, name=self.name, dtype=pprint_thing(dtype_name)
)
)
return rest + footer
return pser.to_string(name=self.name, dtype=self.dtype)
def __dir__(self) -> Iterable[str_type]:
if not isinstance(self.spark.data_type, StructType):
fields = []
else:
fields = [f for f in self.spark.data_type.fieldNames() if " " not in f]
return list(super().__dir__()) + fields
def __iter__(self) -> None:
return MissingPandasLikeSeries.__iter__(self)
if sys.version_info >= (3, 7):
# In order to support the type hints such as Series[...]. See DataFrame.__class_getitem__.
def __class_getitem__(cls, params: Any) -> Type[SeriesType]:
return _create_type_for_series_type(params)
elif (3, 5) <= sys.version_info < (3, 7):
# The implementation is in its metaclass so this flag is needed to distinguish
# pandas-on-Spark Series.
is_series = None
def unpack_scalar(sdf: SparkDataFrame) -> Any:
"""
Takes a dataframe that is supposed to contain a single row with a single scalar value,
and returns this value.
"""
pdf = cast(pd.DataFrame, sdf.limit(2).toPandas())
assert len(pdf) == 1, (sdf, pdf)
row = pdf.iloc[0]
values = list(row)
assert len(values) == 1, (row, values)
return values[0]
@overload
def first_series(df: DataFrame) -> Series:
...
@overload
def first_series(df: pd.DataFrame) -> pd.Series:
...
def first_series(df: Union[DataFrame, pd.DataFrame]) -> Union[Series, pd.Series]:
"""
Takes a DataFrame and returns the first column of the DataFrame as a Series
"""
assert isinstance(df, (DataFrame, pd.DataFrame)), type(df)
if isinstance(df, DataFrame):
return df._psser_for(df._internal.column_labels[0])
else:
return df[df.columns[0]]
def _test() -> None:
import os
import doctest
import sys
from pyspark.sql import SparkSession
import pyspark.pandas.series
os.chdir(os.environ["SPARK_HOME"])
globs = pyspark.pandas.series.__dict__.copy()
globs["ps"] = pyspark.pandas
spark = (
SparkSession.builder.master("local[4]").appName("pyspark.pandas.series tests").getOrCreate()
)
(failure_count, test_count) = doctest.testmod(
pyspark.pandas.series,
globs=globs,
optionflags=doctest.ELLIPSIS | doctest.NORMALIZE_WHITESPACE,
)
spark.stop()
if failure_count:
sys.exit(-1)
if __name__ == "__main__":
_test()
|
apache-2.0
|
xwolf12/scikit-learn
|
sklearn/feature_extraction/text.py
|
110
|
50157
|
# -*- coding: utf-8 -*-
# Authors: Olivier Grisel <olivier.grisel@ensta.org>
# Mathieu Blondel <mathieu@mblondel.org>
# Lars Buitinck <L.J.Buitinck@uva.nl>
# Robert Layton <robertlayton@gmail.com>
# Jochen Wersdörfer <jochen@wersdoerfer.de>
# Roman Sinayev <roman.sinayev@gmail.com>
#
# License: BSD 3 clause
"""
The :mod:`sklearn.feature_extraction.text` submodule gathers utilities to
build feature vectors from text documents.
"""
from __future__ import unicode_literals
import array
from collections import Mapping, defaultdict
import numbers
from operator import itemgetter
import re
import unicodedata
import numpy as np
import scipy.sparse as sp
from ..base import BaseEstimator, TransformerMixin
from ..externals import six
from ..externals.six.moves import xrange
from ..preprocessing import normalize
from .hashing import FeatureHasher
from .stop_words import ENGLISH_STOP_WORDS
from ..utils import deprecated
from ..utils.fixes import frombuffer_empty, bincount
from ..utils.validation import check_is_fitted
__all__ = ['CountVectorizer',
'ENGLISH_STOP_WORDS',
'TfidfTransformer',
'TfidfVectorizer',
'strip_accents_ascii',
'strip_accents_unicode',
'strip_tags']
def strip_accents_unicode(s):
"""Transform accentuated unicode symbols into their simple counterpart
Warning: the python-level loop and join operations make this
implementation 20 times slower than the strip_accents_ascii basic
normalization.
See also
--------
strip_accents_ascii
Remove accentuated char for any unicode symbol that has a direct
ASCII equivalent.
"""
return ''.join([c for c in unicodedata.normalize('NFKD', s)
if not unicodedata.combining(c)])
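# Illustrative behaviour (assumed input): strip_accents_unicode(u'\u00e9l\u00e8ve')
# decomposes the accented characters with NFKD and drops the combining marks,
# returning u'eleve'.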
def strip_accents_ascii(s):
"""Transform accentuated unicode symbols into ascii or nothing
Warning: this solution is only suited for languages that have a direct
transliteration to ASCII symbols.
See also
--------
strip_accents_unicode
Remove accentuated char for any unicode symbol.
"""
nkfd_form = unicodedata.normalize('NFKD', s)
return nkfd_form.encode('ASCII', 'ignore').decode('ASCII')
def strip_tags(s):
"""Basic regexp based HTML / XML tag stripper function
For serious HTML/XML preprocessing you should rather use an external
library such as lxml or BeautifulSoup.
"""
return re.compile(r"<([^>]+)>", flags=re.UNICODE).sub(" ", s)
def _check_stop_list(stop):
if stop == "english":
return ENGLISH_STOP_WORDS
elif isinstance(stop, six.string_types):
raise ValueError("not a built-in stop list: %s" % stop)
elif stop is None:
return None
else: # assume it's a collection
return frozenset(stop)
class VectorizerMixin(object):
"""Provides common code for text vectorizers (tokenization logic)."""
_white_spaces = re.compile(r"\s\s+")
def decode(self, doc):
"""Decode the input into a string of unicode symbols
The decoding strategy depends on the vectorizer parameters.
"""
if self.input == 'filename':
with open(doc, 'rb') as fh:
doc = fh.read()
elif self.input == 'file':
doc = doc.read()
if isinstance(doc, bytes):
doc = doc.decode(self.encoding, self.decode_error)
if doc is np.nan:
raise ValueError("np.nan is an invalid document, expected byte or "
"unicode string.")
return doc
def _word_ngrams(self, tokens, stop_words=None):
"""Turn tokens into a sequence of n-grams after stop words filtering"""
# handle stop words
if stop_words is not None:
tokens = [w for w in tokens if w not in stop_words]
# handle token n-grams
min_n, max_n = self.ngram_range
if max_n != 1:
original_tokens = tokens
tokens = []
n_original_tokens = len(original_tokens)
for n in xrange(min_n,
min(max_n + 1, n_original_tokens + 1)):
for i in xrange(n_original_tokens - n + 1):
tokens.append(" ".join(original_tokens[i: i + n]))
return tokens
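# Illustrative sketch (assumed inputs): with ngram_range=(1, 2) and tokens
# ['please', 'call', 'me'], the loop above yields
# ['please', 'call', 'me', 'please call', 'call me'].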
def _char_ngrams(self, text_document):
"""Tokenize text_document into a sequence of character n-grams"""
# normalize white spaces
text_document = self._white_spaces.sub(" ", text_document)
text_len = len(text_document)
ngrams = []
min_n, max_n = self.ngram_range
for n in xrange(min_n, min(max_n + 1, text_len + 1)):
for i in xrange(text_len - n + 1):
ngrams.append(text_document[i: i + n])
return ngrams
def _char_wb_ngrams(self, text_document):
"""Whitespace sensitive char-n-gram tokenization.
Tokenize text_document into a sequence of character n-grams
excluding any whitespace (operating only inside word boundaries)"""
# normalize white spaces
text_document = self._white_spaces.sub(" ", text_document)
min_n, max_n = self.ngram_range
ngrams = []
for w in text_document.split():
w = ' ' + w + ' '
w_len = len(w)
for n in xrange(min_n, max_n + 1):
offset = 0
ngrams.append(w[offset:offset + n])
while offset + n < w_len:
offset += 1
ngrams.append(w[offset:offset + n])
if offset == 0: # count a short word (w_len < n) only once
break
return ngrams
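# Illustrative sketch (assumed inputs): with ngram_range=(5, 5), the word 'jumpy'
# is padded to ' jumpy ' and yields the character n-grams
# [' jump', 'jumpy', 'umpy '].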
def build_preprocessor(self):
"""Return a function to preprocess the text before tokenization"""
if self.preprocessor is not None:
return self.preprocessor
# unfortunately python functools package does not have an efficient
# `compose` function that would have allowed us to chain a dynamic
# number of functions. However the cost of a lambda call is a few
# hundred nanoseconds, which is negligible when compared to the
# cost of tokenizing a string of 1000 chars for instance.
noop = lambda x: x
# accent stripping
if not self.strip_accents:
strip_accents = noop
elif callable(self.strip_accents):
strip_accents = self.strip_accents
elif self.strip_accents == 'ascii':
strip_accents = strip_accents_ascii
elif self.strip_accents == 'unicode':
strip_accents = strip_accents_unicode
else:
raise ValueError('Invalid value for "strip_accents": %s' %
self.strip_accents)
if self.lowercase:
return lambda x: strip_accents(x.lower())
else:
return strip_accents
def build_tokenizer(self):
"""Return a function that splits a string into a sequence of tokens"""
if self.tokenizer is not None:
return self.tokenizer
token_pattern = re.compile(self.token_pattern)
return lambda doc: token_pattern.findall(doc)
def get_stop_words(self):
"""Build or fetch the effective stop words list"""
return _check_stop_list(self.stop_words)
def build_analyzer(self):
"""Return a callable that handles preprocessing and tokenization"""
if callable(self.analyzer):
return self.analyzer
preprocess = self.build_preprocessor()
if self.analyzer == 'char':
return lambda doc: self._char_ngrams(preprocess(self.decode(doc)))
elif self.analyzer == 'char_wb':
return lambda doc: self._char_wb_ngrams(
preprocess(self.decode(doc)))
elif self.analyzer == 'word':
stop_words = self.get_stop_words()
tokenize = self.build_tokenizer()
return lambda doc: self._word_ngrams(
tokenize(preprocess(self.decode(doc))), stop_words)
else:
raise ValueError('%s is not a valid tokenization scheme/analyzer' %
self.analyzer)
def _validate_vocabulary(self):
vocabulary = self.vocabulary
if vocabulary is not None:
if not isinstance(vocabulary, Mapping):
vocab = {}
for i, t in enumerate(vocabulary):
if vocab.setdefault(t, i) != i:
msg = "Duplicate term in vocabulary: %r" % t
raise ValueError(msg)
vocabulary = vocab
else:
indices = set(six.itervalues(vocabulary))
if len(indices) != len(vocabulary):
raise ValueError("Vocabulary contains repeated indices.")
for i in xrange(len(vocabulary)):
if i not in indices:
msg = ("Vocabulary of size %d doesn't contain index "
"%d." % (len(vocabulary), i))
raise ValueError(msg)
if not vocabulary:
raise ValueError("empty vocabulary passed to fit")
self.fixed_vocabulary_ = True
self.vocabulary_ = dict(vocabulary)
else:
self.fixed_vocabulary_ = False
def _check_vocabulary(self):
"""Check if vocabulary is empty or missing (not fit-ed)"""
msg = "%(name)s - Vocabulary wasn't fitted."
check_is_fitted(self, 'vocabulary_', msg=msg)
if len(self.vocabulary_) == 0:
raise ValueError("Vocabulary is empty")
@property
@deprecated("The `fixed_vocabulary` attribute is deprecated and will be "
"removed in 0.18. Please use `fixed_vocabulary_` instead.")
def fixed_vocabulary(self):
return self.fixed_vocabulary_
class HashingVectorizer(BaseEstimator, VectorizerMixin):
"""Convert a collection of text documents to a matrix of token occurrences
It turns a collection of text documents into a scipy.sparse matrix holding
token occurrence counts (or binary occurrence information), possibly
normalized as token frequencies if norm='l1' or projected on the euclidean
unit sphere if norm='l2'.
This text vectorizer implementation uses the hashing trick to find the
token string name to feature integer index mapping.
This strategy has several advantages:
- it is very low memory scalable to large datasets as there is no need to
store a vocabulary dictionary in memory
- it is fast to pickle and un-pickle as it holds no state besides the
constructor parameters
- it can be used in a streaming (partial fit) or parallel pipeline as there
is no state computed during fit.
There are also a couple of cons (vs using a CountVectorizer with an
in-memory vocabulary):
- there is no way to compute the inverse transform (from feature indices to
string feature names) which can be a problem when trying to introspect
which features are most important to a model.
- there can be collisions: distinct tokens can be mapped to the same
feature index. However in practice this is rarely an issue if n_features
is large enough (e.g. 2 ** 18 for text classification problems).
- no IDF weighting as this would render the transformer stateful.
The hash function employed is the signed 32-bit version of Murmurhash3.
Read more in the :ref:`User Guide <text_feature_extraction>`.
Parameters
----------
input : string {'filename', 'file', 'content'}
If 'filename', the sequence passed as an argument to fit is
expected to be a list of filenames that need reading to fetch
the raw content to analyze.
If 'file', the sequence items must have a 'read' method (file-like
object) that is called to fetch the bytes in memory.
Otherwise the input is expected to be a sequence of items that
can be of type string or bytes, which are analyzed directly.
encoding : string, default='utf-8'
If bytes or files are given to analyze, this encoding is used to
decode.
decode_error : {'strict', 'ignore', 'replace'}
Instruction on what to do if a byte sequence is given to analyze that
contains characters not of the given `encoding`. By default, it is
'strict', meaning that a UnicodeDecodeError will be raised. Other
values are 'ignore' and 'replace'.
strip_accents : {'ascii', 'unicode', None}
Remove accents during the preprocessing step.
'ascii' is a fast method that only works on characters that have
a direct ASCII mapping.
'unicode' is a slightly slower method that works on any characters.
None (default) does nothing.
analyzer : string, {'word', 'char', 'char_wb'} or callable
Whether the feature should be made of word or character n-grams.
Option 'char_wb' creates character n-grams only from text inside
word boundaries.
If a callable is passed it is used to extract the sequence of features
out of the raw, unprocessed input.
preprocessor : callable or None (default)
Override the preprocessing (string transformation) stage while
preserving the tokenizing and n-grams generation steps.
tokenizer : callable or None (default)
Override the string tokenization step while preserving the
preprocessing and n-grams generation steps.
Only applies if ``analyzer == 'word'``.
ngram_range : tuple (min_n, max_n), default=(1, 1)
The lower and upper boundary of the range of n-values for different
n-grams to be extracted. All values of n such that min_n <= n <= max_n
will be used.
stop_words : string {'english'}, list, or None (default)
If 'english', a built-in stop word list for English is used.
If a list, that list is assumed to contain stop words, all of which
will be removed from the resulting tokens.
Only applies if ``analyzer == 'word'``.
lowercase : boolean, default=True
Convert all characters to lowercase before tokenizing.
token_pattern : string
Regular expression denoting what constitutes a "token", only used
if ``analyzer == 'word'``. The default regexp selects tokens of 2
or more alphanumeric characters (punctuation is completely ignored
and always treated as a token separator).
n_features : integer, default=(2 ** 20)
The number of features (columns) in the output matrices. Small numbers
of features are likely to cause hash collisions, but large numbers
will cause larger coefficient dimensions in linear learners.
norm : 'l1', 'l2' or None, optional
Norm used to normalize term vectors. None for no normalization.
binary : boolean, default=False
If True, all non zero counts are set to 1. This is useful for discrete
probabilistic models that model binary events rather than integer
counts.
dtype : type, optional
Type of the matrix returned by fit_transform() or transform().
non_negative : boolean, default=False
Whether output matrices should contain non-negative values only;
effectively calls abs on the matrix prior to returning it.
When True, output values can be interpreted as frequencies.
When False, output values will have expected value zero.
See also
--------
CountVectorizer, TfidfVectorizer
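Examples
--------
An illustrative usage sketch (the corpus and ``n_features`` below are assumed):

>>> from sklearn.feature_extraction.text import HashingVectorizer
>>> corpus = ['This is the first document.', 'This is the second one.']
>>> vectorizer = HashingVectorizer(n_features=2 ** 10)
>>> X = vectorizer.transform(corpus)
>>> X.shape  # doctest: +SKIP
(2, 1024)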
"""
def __init__(self, input='content', encoding='utf-8',
decode_error='strict', strip_accents=None,
lowercase=True, preprocessor=None, tokenizer=None,
stop_words=None, token_pattern=r"(?u)\b\w\w+\b",
ngram_range=(1, 1), analyzer='word', n_features=(2 ** 20),
binary=False, norm='l2', non_negative=False,
dtype=np.float64):
self.input = input
self.encoding = encoding
self.decode_error = decode_error
self.strip_accents = strip_accents
self.preprocessor = preprocessor
self.tokenizer = tokenizer
self.analyzer = analyzer
self.lowercase = lowercase
self.token_pattern = token_pattern
self.stop_words = stop_words
self.n_features = n_features
self.ngram_range = ngram_range
self.binary = binary
self.norm = norm
self.non_negative = non_negative
self.dtype = dtype
def partial_fit(self, X, y=None):
"""Does nothing: this transformer is stateless.
This method is just there to mark the fact that this transformer
can work in a streaming setup.
"""
return self
def fit(self, X, y=None):
"""Does nothing: this transformer is stateless."""
# triggers a parameter validation
self._get_hasher().fit(X, y=y)
return self
def transform(self, X, y=None):
"""Transform a sequence of documents to a document-term matrix.
Parameters
----------
X : iterable over raw text documents, length = n_samples
Samples. Each sample must be a text document (either bytes or
unicode strings, file name or file object depending on the
constructor argument) which will be tokenized and hashed.
y : (ignored)
Returns
-------
X : scipy.sparse matrix, shape = (n_samples, self.n_features)
Document-term matrix.
"""
analyzer = self.build_analyzer()
X = self._get_hasher().transform(analyzer(doc) for doc in X)
if self.binary:
X.data.fill(1)
if self.norm is not None:
X = normalize(X, norm=self.norm, copy=False)
return X
# Alias transform to fit_transform for convenience
fit_transform = transform
def _get_hasher(self):
return FeatureHasher(n_features=self.n_features,
input_type='string', dtype=self.dtype,
non_negative=self.non_negative)
def _document_frequency(X):
"""Count the number of non-zero values for each feature in sparse X."""
if sp.isspmatrix_csr(X):
return bincount(X.indices, minlength=X.shape[1])
else:
return np.diff(sp.csc_matrix(X, copy=False).indptr)
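# Illustrative sketch (assumed matrix): for a document-term matrix
#   [[1, 0, 2],
#    [0, 0, 1]]
# _document_frequency returns array([1, 0, 2]) -- the number of documents in which
# each term occurs at least once.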
class CountVectorizer(BaseEstimator, VectorizerMixin):
"""Convert a collection of text documents to a matrix of token counts
This implementation produces a sparse representation of the counts using
scipy.sparse.coo_matrix.
If you do not provide an a-priori dictionary and you do not use an analyzer
that does some kind of feature selection then the number of features will
be equal to the vocabulary size found by analyzing the data.
Read more in the :ref:`User Guide <text_feature_extraction>`.
Parameters
----------
input : string {'filename', 'file', 'content'}
If 'filename', the sequence passed as an argument to fit is
expected to be a list of filenames that need reading to fetch
the raw content to analyze.
If 'file', the sequence items must have a 'read' method (file-like
object) that is called to fetch the bytes in memory.
Otherwise the input is expected to be a sequence of items that
can be of type string or bytes, which are analyzed directly.
encoding : string, 'utf-8' by default.
If bytes or files are given to analyze, this encoding is used to
decode.
decode_error : {'strict', 'ignore', 'replace'}
Instruction on what to do if a byte sequence is given to analyze that
contains characters not of the given `encoding`. By default, it is
'strict', meaning that a UnicodeDecodeError will be raised. Other
values are 'ignore' and 'replace'.
strip_accents : {'ascii', 'unicode', None}
Remove accents during the preprocessing step.
'ascii' is a fast method that only works on characters that have
a direct ASCII mapping.
'unicode' is a slightly slower method that works on any characters.
None (default) does nothing.
analyzer : string, {'word', 'char', 'char_wb'} or callable
Whether the feature should be made of word or character n-grams.
Option 'char_wb' creates character n-grams only from text inside
word boundaries.
If a callable is passed it is used to extract the sequence of features
out of the raw, unprocessed input.
Only applies if ``analyzer == 'word'``.
preprocessor : callable or None (default)
Override the preprocessing (string transformation) stage while
preserving the tokenizing and n-grams generation steps.
tokenizer : callable or None (default)
Override the string tokenization step while preserving the
preprocessing and n-grams generation steps.
Only applies if ``analyzer == 'word'``.
ngram_range : tuple (min_n, max_n)
The lower and upper boundary of the range of n-values for different
n-grams to be extracted. All values of n such that min_n <= n <= max_n
will be used.
stop_words : string {'english'}, list, or None (default)
If 'english', a built-in stop word list for English is used.
If a list, that list is assumed to contain stop words, all of which
will be removed from the resulting tokens.
Only applies if ``analyzer == 'word'``.
If None, no stop words will be used. max_df can be set to a value
in the range [0.7, 1.0) to automatically detect and filter stop
words based on intra corpus document frequency of terms.
lowercase : boolean, True by default
Convert all characters to lowercase before tokenizing.
token_pattern : string
Regular expression denoting what constitutes a "token", only used
if ``analyzer == 'word'``. The default regexp selects tokens of 2
or more alphanumeric characters (punctuation is completely ignored
and always treated as a token separator).
max_df : float in range [0.0, 1.0] or int, default=1.0
When building the vocabulary ignore terms that have a document
frequency strictly higher than the given threshold (corpus-specific
stop words).
If float, the parameter represents a proportion of documents, integer
absolute counts.
This parameter is ignored if vocabulary is not None.
min_df : float in range [0.0, 1.0] or int, default=1
When building the vocabulary ignore terms that have a document
frequency strictly lower than the given threshold. This value is also
called cut-off in the literature.
If float, the parameter represents a proportion of documents, integer
absolute counts.
This parameter is ignored if vocabulary is not None.
max_features : int or None, default=None
If not None, build a vocabulary that only consider the top
max_features ordered by term frequency across the corpus.
This parameter is ignored if vocabulary is not None.
vocabulary : Mapping or iterable, optional
Either a Mapping (e.g., a dict) where keys are terms and values are
indices in the feature matrix, or an iterable over terms. If not
given, a vocabulary is determined from the input documents. Indices
in the mapping should not be repeated and should not have any gap
between 0 and the largest index.
binary : boolean, default=False
If True, all non zero counts are set to 1. This is useful for discrete
probabilistic models that model binary events rather than integer
counts.
dtype : type, optional
Type of the matrix returned by fit_transform() or transform().
Attributes
----------
vocabulary_ : dict
A mapping of terms to feature indices.
stop_words_ : set
Terms that were ignored because they either:
- occurred in too many documents (`max_df`)
- occurred in too few documents (`min_df`)
- were cut off by feature selection (`max_features`).
This is only available if no vocabulary was given.
See also
--------
HashingVectorizer, TfidfVectorizer
Notes
-----
The ``stop_words_`` attribute can get large and increase the model size
when pickling. This attribute is provided only for introspection and can
be safely removed using delattr or set to None before pickling.
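Examples
--------
An illustrative usage sketch (the corpus below is assumed):

>>> from sklearn.feature_extraction.text import CountVectorizer
>>> corpus = ['the cat sat', 'the cat sat on the mat']
>>> vectorizer = CountVectorizer()
>>> X = vectorizer.fit_transform(corpus)
>>> sorted(vectorizer.vocabulary_.keys())  # doctest: +SKIP
['cat', 'mat', 'on', 'sat', 'the']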
"""
def __init__(self, input='content', encoding='utf-8',
decode_error='strict', strip_accents=None,
lowercase=True, preprocessor=None, tokenizer=None,
stop_words=None, token_pattern=r"(?u)\b\w\w+\b",
ngram_range=(1, 1), analyzer='word',
max_df=1.0, min_df=1, max_features=None,
vocabulary=None, binary=False, dtype=np.int64):
self.input = input
self.encoding = encoding
self.decode_error = decode_error
self.strip_accents = strip_accents
self.preprocessor = preprocessor
self.tokenizer = tokenizer
self.analyzer = analyzer
self.lowercase = lowercase
self.token_pattern = token_pattern
self.stop_words = stop_words
self.max_df = max_df
self.min_df = min_df
if max_df < 0 or min_df < 0:
raise ValueError("negative value for max_df of min_df")
self.max_features = max_features
if max_features is not None:
if (not isinstance(max_features, numbers.Integral) or
max_features <= 0):
raise ValueError(
"max_features=%r, neither a positive integer nor None"
% max_features)
self.ngram_range = ngram_range
self.vocabulary = vocabulary
self.binary = binary
self.dtype = dtype
def _sort_features(self, X, vocabulary):
"""Sort features by name
Returns a reordered matrix and modifies the vocabulary in place
"""
sorted_features = sorted(six.iteritems(vocabulary))
map_index = np.empty(len(sorted_features), dtype=np.int32)
for new_val, (term, old_val) in enumerate(sorted_features):
map_index[new_val] = old_val
vocabulary[term] = new_val
return X[:, map_index]
def _limit_features(self, X, vocabulary, high=None, low=None,
limit=None):
"""Remove too rare or too common features.
Prune features that are non-zero in more documents than ``high`` or in fewer
documents than ``low``, modifying the vocabulary, and restricting it to
at most the ``limit`` most frequent features.
This does not prune samples with zero features.
"""
if high is None and low is None and limit is None:
return X, set()
# Calculate a mask based on document frequencies
dfs = _document_frequency(X)
tfs = np.asarray(X.sum(axis=0)).ravel()
mask = np.ones(len(dfs), dtype=bool)
if high is not None:
mask &= dfs <= high
if low is not None:
mask &= dfs >= low
if limit is not None and mask.sum() > limit:
mask_inds = (-tfs[mask]).argsort()[:limit]
new_mask = np.zeros(len(dfs), dtype=bool)
new_mask[np.where(mask)[0][mask_inds]] = True
mask = new_mask
new_indices = np.cumsum(mask) - 1 # maps old indices to new
removed_terms = set()
for term, old_index in list(six.iteritems(vocabulary)):
if mask[old_index]:
vocabulary[term] = new_indices[old_index]
else:
del vocabulary[term]
removed_terms.add(term)
kept_indices = np.where(mask)[0]
if len(kept_indices) == 0:
raise ValueError("After pruning, no terms remain. Try a lower"
" min_df or a higher max_df.")
return X[:, kept_indices], removed_terms
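# Illustrative sketch (assumed values): with document frequencies dfs = [1, 3, 2]
# over 3 documents, high=2 and low=2 keep only the feature whose document frequency
# equals 2; its column is retained and the other two terms are placed in the
# returned ``removed_terms`` set.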
def _count_vocab(self, raw_documents, fixed_vocab):
"""Create sparse feature matrix, and vocabulary where fixed_vocab=False
"""
if fixed_vocab:
vocabulary = self.vocabulary_
else:
# Add a new value when a new vocabulary item is seen
vocabulary = defaultdict()
vocabulary.default_factory = vocabulary.__len__
analyze = self.build_analyzer()
j_indices = _make_int_array()
indptr = _make_int_array()
indptr.append(0)
for doc in raw_documents:
for feature in analyze(doc):
try:
j_indices.append(vocabulary[feature])
except KeyError:
# Ignore out-of-vocabulary items for fixed_vocab=True
continue
indptr.append(len(j_indices))
if not fixed_vocab:
# disable defaultdict behaviour
vocabulary = dict(vocabulary)
if not vocabulary:
raise ValueError("empty vocabulary; perhaps the documents only"
" contain stop words")
j_indices = frombuffer_empty(j_indices, dtype=np.intc)
indptr = np.frombuffer(indptr, dtype=np.intc)
values = np.ones(len(j_indices))
X = sp.csr_matrix((values, j_indices, indptr),
shape=(len(indptr) - 1, len(vocabulary)),
dtype=self.dtype)
X.sum_duplicates()
return vocabulary, X
def fit(self, raw_documents, y=None):
"""Learn a vocabulary dictionary of all tokens in the raw documents.
Parameters
----------
raw_documents : iterable
An iterable which yields either str, unicode or file objects.
Returns
-------
self
"""
self.fit_transform(raw_documents)
return self
def fit_transform(self, raw_documents, y=None):
"""Learn the vocabulary dictionary and return term-document matrix.
This is equivalent to fit followed by transform, but more efficiently
implemented.
Parameters
----------
raw_documents : iterable
An iterable which yields either str, unicode or file objects.
Returns
-------
X : array, [n_samples, n_features]
Document-term matrix.
"""
# We intentionally don't call the transform method to make
# fit_transform overridable without unwanted side effects in
# TfidfVectorizer.
self._validate_vocabulary()
max_df = self.max_df
min_df = self.min_df
max_features = self.max_features
vocabulary, X = self._count_vocab(raw_documents,
self.fixed_vocabulary_)
if self.binary:
X.data.fill(1)
if not self.fixed_vocabulary_:
X = self._sort_features(X, vocabulary)
n_doc = X.shape[0]
max_doc_count = (max_df
if isinstance(max_df, numbers.Integral)
else max_df * n_doc)
min_doc_count = (min_df
if isinstance(min_df, numbers.Integral)
else min_df * n_doc)
if max_doc_count < min_doc_count:
raise ValueError(
"max_df corresponds to < documents than min_df")
X, self.stop_words_ = self._limit_features(X, vocabulary,
max_doc_count,
min_doc_count,
max_features)
self.vocabulary_ = vocabulary
return X
def transform(self, raw_documents):
"""Transform documents to document-term matrix.
Extract token counts out of raw text documents using the vocabulary
fitted with fit or the one provided to the constructor.
Parameters
----------
raw_documents : iterable
An iterable which yields either str, unicode or file objects.
Returns
-------
X : sparse matrix, [n_samples, n_features]
Document-term matrix.
"""
if not hasattr(self, 'vocabulary_'):
self._validate_vocabulary()
self._check_vocabulary()
# use the same matrix-building strategy as fit_transform
_, X = self._count_vocab(raw_documents, fixed_vocab=True)
if self.binary:
X.data.fill(1)
return X
def inverse_transform(self, X):
"""Return terms per document with nonzero entries in X.
Parameters
----------
X : {array, sparse matrix}, shape = [n_samples, n_features]
Returns
-------
X_inv : list of arrays, len = n_samples
List of arrays of terms.
"""
self._check_vocabulary()
if sp.issparse(X):
# We need CSR format for fast row manipulations.
X = X.tocsr()
else:
# We need to convert X to a matrix, so that the indexing
# returns 2D objects
X = np.asmatrix(X)
n_samples = X.shape[0]
terms = np.array(list(self.vocabulary_.keys()))
indices = np.array(list(self.vocabulary_.values()))
inverse_vocabulary = terms[np.argsort(indices)]
return [inverse_vocabulary[X[i, :].nonzero()[1]].ravel()
for i in range(n_samples)]
def get_feature_names(self):
"""Array mapping from feature integer indices to feature name"""
self._check_vocabulary()
return [t for t, i in sorted(six.iteritems(self.vocabulary_),
key=itemgetter(1))]
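# Illustrative sketch (not part of the original module, never called at import time):
# _count_vocab above fills a document-term matrix straight from the CSR triplet
# (values, column indices, indptr). The hypothetical helper below shows the same
# construction on a tiny hand-made corpus.
def _example_csr_from_counts():
    import numpy as np
    import scipy.sparse as sp
    # Vocabulary: {"cat": 0, "dog": 1, "sat": 2}; doc 0 = "cat sat", doc 1 = "dog sat sat".
    j_indices = np.array([0, 2, 1, 2, 2], dtype=np.intc)  # one entry per token occurrence
    indptr = np.array([0, 2, 5], dtype=np.intc)  # doc i owns j_indices[indptr[i]:indptr[i+1]]
    values = np.ones(len(j_indices))
    X = sp.csr_matrix((values, j_indices, indptr), shape=(2, 3))
    X.sum_duplicates()  # merge repeated (doc, term) pairs into counts
    return X.toarray()  # [[1., 0., 1.], [0., 1., 2.]]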
def _make_int_array():
"""Construct an array.array of a type suitable for scipy.sparse indices."""
return array.array(str("i"))
class TfidfTransformer(BaseEstimator, TransformerMixin):
"""Transform a count matrix to a normalized tf or tf-idf representation
Tf means term-frequency while tf-idf means term-frequency times inverse
document-frequency. This is a common term weighting scheme in information
retrieval, that has also found good use in document classification.
The goal of using tf-idf instead of the raw frequencies of occurrence of a
token in a given document is to scale down the impact of tokens that occur
very frequently in a given corpus and that are hence empirically less
informative than features that occur in a small fraction of the training
corpus.
The actual formula used for tf-idf is tf * (idf + 1) = tf + tf * idf,
instead of tf * idf. The effect of this is that terms with zero idf, i.e.
that occur in all documents of a training set, will not be entirely
ignored. The formulas used to compute tf and idf depend on parameter
settings that correspond to the SMART notation used in IR, as follows:
Tf is "n" (natural) by default, "l" (logarithmic) when sublinear_tf=True.
Idf is "t" when use_idf is given, "n" (none) otherwise.
Normalization is "c" (cosine) when norm='l2', "n" (none) when norm=None.
Read more in the :ref:`User Guide <text_feature_extraction>`.
Parameters
----------
norm : 'l1', 'l2' or None, optional
Norm used to normalize term vectors. None for no normalization.
use_idf : boolean, default=True
Enable inverse-document-frequency reweighting.
smooth_idf : boolean, default=True
Smooth idf weights by adding one to document frequencies, as if an
extra document was seen containing every term in the collection
exactly once. Prevents zero divisions.
sublinear_tf : boolean, default=False
Apply sublinear tf scaling, i.e. replace tf with 1 + log(tf).
References
----------
.. [Yates2011] `R. Baeza-Yates and B. Ribeiro-Neto (2011). Modern
Information Retrieval. Addison Wesley, pp. 68-74.`
.. [MRS2008] `C.D. Manning, P. Raghavan and H. Schuetze (2008).
Introduction to Information Retrieval. Cambridge University
Press, pp. 118-120.`
"""
def __init__(self, norm='l2', use_idf=True, smooth_idf=True,
sublinear_tf=False):
self.norm = norm
self.use_idf = use_idf
self.smooth_idf = smooth_idf
self.sublinear_tf = sublinear_tf
def fit(self, X, y=None):
"""Learn the idf vector (global term weights)
Parameters
----------
X : sparse matrix, [n_samples, n_features]
a matrix of term/token counts
"""
if not sp.issparse(X):
X = sp.csc_matrix(X)
if self.use_idf:
n_samples, n_features = X.shape
df = _document_frequency(X)
# perform idf smoothing if required
df += int(self.smooth_idf)
n_samples += int(self.smooth_idf)
# log+1 instead of log makes sure terms with zero idf don't get
# suppressed entirely.
idf = np.log(float(n_samples) / df) + 1.0
self._idf_diag = sp.spdiags(idf,
diags=0, m=n_features, n=n_features)
return self
def transform(self, X, copy=True):
"""Transform a count matrix to a tf or tf-idf representation
Parameters
----------
X : sparse matrix, [n_samples, n_features]
a matrix of term/token counts
copy : boolean, default True
Whether to copy X and operate on the copy or perform in-place
operations.
Returns
-------
vectors : sparse matrix, [n_samples, n_features]
"""
if hasattr(X, 'dtype') and np.issubdtype(X.dtype, np.float):
# preserve float family dtype
X = sp.csr_matrix(X, copy=copy)
else:
# convert counts or binary occurrences to floats
X = sp.csr_matrix(X, dtype=np.float64, copy=copy)
n_samples, n_features = X.shape
if self.sublinear_tf:
np.log(X.data, X.data)
X.data += 1
if self.use_idf:
check_is_fitted(self, '_idf_diag', 'idf vector is not fitted')
expected_n_features = self._idf_diag.shape[0]
if n_features != expected_n_features:
raise ValueError("Input has n_features=%d while the model"
" has been trained with n_features=%d" % (
n_features, expected_n_features))
# *= doesn't work
X = X * self._idf_diag
if self.norm:
X = normalize(X, norm=self.norm, copy=False)
return X
@property
def idf_(self):
if hasattr(self, "_idf_diag"):
return np.ravel(self._idf_diag.sum(axis=0))
else:
return None
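# Illustrative sketch (not part of the original module, never called at import time):
# the smoothed idf computed in TfidfTransformer.fit above is
# log((1 + n_samples) / (1 + df)) + 1. The hypothetical helper below recomputes it
# with plain numpy and checks it against the fitted idf_ vector.
def _example_idf_weighting():
    import numpy as np
    from sklearn.feature_extraction.text import TfidfTransformer
    counts = np.array([[3, 0, 1],
                       [2, 0, 0],
                       [3, 0, 2],
                       [4, 0, 0]], dtype=np.float64)
    n_samples = counts.shape[0]
    df = (counts > 0).sum(axis=0)  # document frequency of each term
    idf = np.log((1.0 + n_samples) / (1.0 + df)) + 1.0  # smooth_idf=True branch of fit()
    fitted = TfidfTransformer(smooth_idf=True).fit(counts).idf_
    return np.allclose(idf, fitted)  # expected: True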
class TfidfVectorizer(CountVectorizer):
"""Convert a collection of raw documents to a matrix of TF-IDF features.
Equivalent to CountVectorizer followed by TfidfTransformer.
Read more in the :ref:`User Guide <text_feature_extraction>`.
Parameters
----------
input : string {'filename', 'file', 'content'}
If 'filename', the sequence passed as an argument to fit is
expected to be a list of filenames that need reading to fetch
the raw content to analyze.
If 'file', the sequence items must have a 'read' method (file-like
object) that is called to fetch the bytes in memory.
        Otherwise the input is expected to be a sequence of items of type
        string or bytes, which are analyzed directly.
encoding : string, 'utf-8' by default.
If bytes or files are given to analyze, this encoding is used to
decode.
decode_error : {'strict', 'ignore', 'replace'}
Instruction on what to do if a byte sequence is given to analyze that
contains characters not of the given `encoding`. By default, it is
'strict', meaning that a UnicodeDecodeError will be raised. Other
values are 'ignore' and 'replace'.
strip_accents : {'ascii', 'unicode', None}
Remove accents during the preprocessing step.
'ascii' is a fast method that only works on characters that have
        a direct ASCII mapping.
'unicode' is a slightly slower method that works on any characters.
None (default) does nothing.
analyzer : string, {'word', 'char'} or callable
Whether the feature should be made of word or character n-grams.
If a callable is passed it is used to extract the sequence of features
out of the raw, unprocessed input.
preprocessor : callable or None (default)
Override the preprocessing (string transformation) stage while
preserving the tokenizing and n-grams generation steps.
tokenizer : callable or None (default)
Override the string tokenization step while preserving the
preprocessing and n-grams generation steps.
Only applies if ``analyzer == 'word'``.
ngram_range : tuple (min_n, max_n)
The lower and upper boundary of the range of n-values for different
n-grams to be extracted. All values of n such that min_n <= n <= max_n
will be used.
stop_words : string {'english'}, list, or None (default)
If a string, it is passed to _check_stop_list and the appropriate stop
list is returned. 'english' is currently the only supported string
value.
If a list, that list is assumed to contain stop words, all of which
will be removed from the resulting tokens.
Only applies if ``analyzer == 'word'``.
If None, no stop words will be used. max_df can be set to a value
in the range [0.7, 1.0) to automatically detect and filter stop
words based on intra corpus document frequency of terms.
lowercase : boolean, default True
Convert all characters to lowercase before tokenizing.
token_pattern : string
Regular expression denoting what constitutes a "token", only used
if ``analyzer == 'word'``. The default regexp selects tokens of 2
or more alphanumeric characters (punctuation is completely ignored
and always treated as a token separator).
max_df : float in range [0.0, 1.0] or int, default=1.0
When building the vocabulary ignore terms that have a document
frequency strictly higher than the given threshold (corpus-specific
stop words).
        If float, the parameter represents a proportion of documents; if
        integer, absolute counts.
This parameter is ignored if vocabulary is not None.
min_df : float in range [0.0, 1.0] or int, default=1
When building the vocabulary ignore terms that have a document
frequency strictly lower than the given threshold. This value is also
called cut-off in the literature.
        If float, the parameter represents a proportion of documents; if
        integer, absolute counts.
This parameter is ignored if vocabulary is not None.
max_features : int or None, default=None
If not None, build a vocabulary that only consider the top
max_features ordered by term frequency across the corpus.
This parameter is ignored if vocabulary is not None.
vocabulary : Mapping or iterable, optional
Either a Mapping (e.g., a dict) where keys are terms and values are
indices in the feature matrix, or an iterable over terms. If not
given, a vocabulary is determined from the input documents.
binary : boolean, default=False
If True, all non-zero term counts are set to 1. This does not mean
outputs will have only 0/1 values, only that the tf term in tf-idf
is binary. (Set idf and normalization to False to get 0/1 outputs.)
dtype : type, optional
Type of the matrix returned by fit_transform() or transform().
norm : 'l1', 'l2' or None, optional
Norm used to normalize term vectors. None for no normalization.
use_idf : boolean, default=True
Enable inverse-document-frequency reweighting.
smooth_idf : boolean, default=True
Smooth idf weights by adding one to document frequencies, as if an
extra document was seen containing every term in the collection
exactly once. Prevents zero divisions.
sublinear_tf : boolean, default=False
Apply sublinear tf scaling, i.e. replace tf with 1 + log(tf).
Attributes
----------
idf_ : array, shape = [n_features], or None
The learned idf vector (global term weights)
when ``use_idf`` is set to True, None otherwise.
stop_words_ : set
Terms that were ignored because they either:
- occurred in too many documents (`max_df`)
- occurred in too few documents (`min_df`)
- were cut off by feature selection (`max_features`).
This is only available if no vocabulary was given.
See also
--------
CountVectorizer
        Tokenize the documents, count the occurrences of tokens and return
        them as a sparse matrix.
TfidfTransformer
Apply Term Frequency Inverse Document Frequency normalization to a
sparse matrix of occurrence counts.
Notes
-----
The ``stop_words_`` attribute can get large and increase the model size
when pickling. This attribute is provided only for introspection and can
be safely removed using delattr or set to None before pickling.
"""
def __init__(self, input='content', encoding='utf-8',
decode_error='strict', strip_accents=None, lowercase=True,
preprocessor=None, tokenizer=None, analyzer='word',
stop_words=None, token_pattern=r"(?u)\b\w\w+\b",
ngram_range=(1, 1), max_df=1.0, min_df=1,
max_features=None, vocabulary=None, binary=False,
dtype=np.int64, norm='l2', use_idf=True, smooth_idf=True,
sublinear_tf=False):
super(TfidfVectorizer, self).__init__(
input=input, encoding=encoding, decode_error=decode_error,
strip_accents=strip_accents, lowercase=lowercase,
preprocessor=preprocessor, tokenizer=tokenizer, analyzer=analyzer,
stop_words=stop_words, token_pattern=token_pattern,
ngram_range=ngram_range, max_df=max_df, min_df=min_df,
max_features=max_features, vocabulary=vocabulary, binary=binary,
dtype=dtype)
self._tfidf = TfidfTransformer(norm=norm, use_idf=use_idf,
smooth_idf=smooth_idf,
sublinear_tf=sublinear_tf)
# Broadcast the TF-IDF parameters to the underlying transformer instance
# for easy grid search and repr
@property
def norm(self):
return self._tfidf.norm
@norm.setter
def norm(self, value):
self._tfidf.norm = value
@property
def use_idf(self):
return self._tfidf.use_idf
@use_idf.setter
def use_idf(self, value):
self._tfidf.use_idf = value
@property
def smooth_idf(self):
return self._tfidf.smooth_idf
@smooth_idf.setter
def smooth_idf(self, value):
self._tfidf.smooth_idf = value
@property
def sublinear_tf(self):
return self._tfidf.sublinear_tf
@sublinear_tf.setter
def sublinear_tf(self, value):
self._tfidf.sublinear_tf = value
@property
def idf_(self):
return self._tfidf.idf_
def fit(self, raw_documents, y=None):
"""Learn vocabulary and idf from training set.
Parameters
----------
raw_documents : iterable
an iterable which yields either str, unicode or file objects
Returns
-------
self : TfidfVectorizer
"""
X = super(TfidfVectorizer, self).fit_transform(raw_documents)
self._tfidf.fit(X)
return self
def fit_transform(self, raw_documents, y=None):
"""Learn vocabulary and idf, return term-document matrix.
This is equivalent to fit followed by transform, but more efficiently
implemented.
Parameters
----------
raw_documents : iterable
an iterable which yields either str, unicode or file objects
Returns
-------
X : sparse matrix, [n_samples, n_features]
Tf-idf-weighted document-term matrix.
"""
X = super(TfidfVectorizer, self).fit_transform(raw_documents)
self._tfidf.fit(X)
# X is already a transformed view of raw_documents so
# we set copy to False
return self._tfidf.transform(X, copy=False)
def transform(self, raw_documents, copy=True):
"""Transform documents to document-term matrix.
Uses the vocabulary and document frequencies (df) learned by fit (or
fit_transform).
Parameters
----------
raw_documents : iterable
an iterable which yields either str, unicode or file objects
copy : boolean, default True
Whether to copy X and operate on the copy or perform in-place
operations.
Returns
-------
X : sparse matrix, [n_samples, n_features]
Tf-idf-weighted document-term matrix.
"""
check_is_fitted(self, '_tfidf', 'The tfidf vector is not fitted')
X = super(TfidfVectorizer, self).transform(raw_documents)
return self._tfidf.transform(X, copy=False)
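# Illustrative sketch (not part of the original module, never called at import time):
# end-to-end use of the TfidfVectorizer defined above on a toy corpus.
# get_feature_names() matches the API in this file; newer scikit-learn releases
# rename it to get_feature_names_out().
def _example_tfidf_vectorizer_usage():
    from sklearn.feature_extraction.text import TfidfVectorizer
    docs = ["the cat sat on the mat",
            "the dog sat on the log",
            "cats and dogs can be friends"]
    vec = TfidfVectorizer(stop_words='english', sublinear_tf=True)
    X = vec.fit_transform(docs)  # sparse (n_docs, n_terms) tf-idf matrix with L2-normalized rows
    return vec.get_feature_names(), X.shape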
|
bsd-3-clause
|
xhqu1981/pymatgen
|
pymatgen/analysis/diffraction/xrd.py
|
2
|
14977
|
# coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
from __future__ import division, unicode_literals
from math import sin, cos, asin, pi, degrees, radians
import os
import collections
import numpy as np
import json
from pymatgen.symmetry.analyzer import SpacegroupAnalyzer
"""
This module implements an XRD pattern calculator.
"""
__author__ = "Shyue Ping Ong"
__copyright__ = "Copyright 2012, The Materials Project"
__version__ = "0.1"
__maintainer__ = "Shyue Ping Ong"
__email__ = "ongsp@ucsd.edu"
__date__ = "5/22/14"
# XRD wavelengths in angstroms
WAVELENGTHS = {
"CuKa": 1.54184,
"CuKa2": 1.54439,
"CuKa1": 1.54056,
"CuKb1": 1.39222,
"MoKa": 0.71073,
"MoKa2": 0.71359,
"MoKa1": 0.70930,
"MoKb1": 0.63229,
"CrKa": 2.29100,
"CrKa2": 2.29361,
"CrKa1": 2.28970,
"CrKb1": 2.08487,
"FeKa": 1.93735,
"FeKa2": 1.93998,
"FeKa1": 1.93604,
"FeKb1": 1.75661,
"CoKa": 1.79026,
"CoKa2": 1.79285,
"CoKa1": 1.78896,
"CoKb1": 1.63079,
"AgKa": 0.560885,
"AgKa2": 0.563813,
"AgKa1": 0.559421,
"AgKb1": 0.497082,
}
with open(os.path.join(os.path.dirname(__file__),
"atomic_scattering_params.json")) as f:
ATOMIC_SCATTERING_PARAMS = json.load(f)
class XRDCalculator(object):
"""
Computes the XRD pattern of a crystal structure.
This code is implemented by Shyue Ping Ong as part of UCSD's NANO106 -
Crystallography of Materials. The formalism for this code is based on
that given in Chapters 11 and 12 of Structure of Materials by Marc De
Graef and Michael E. McHenry. This takes into account the atomic
scattering factors and the Lorentz polarization factor, but not
the Debye-Waller (temperature) factor (for which data is typically not
available). Note that the multiplicity correction is not needed since
this code simply goes through all reciprocal points within the limiting
sphere, which includes all symmetrically equivalent facets. The algorithm
is as follows
1. Calculate reciprocal lattice of structure. Find all reciprocal points
within the limiting sphere given by :math:`\\frac{2}{\\lambda}`.
2. For each reciprocal point :math:`\\mathbf{g_{hkl}}` corresponding to
lattice plane :math:`(hkl)`, compute the Bragg condition
:math:`\\sin(\\theta) = \\frac{\\lambda}{2d_{hkl}}`
3. Compute the structure factor as the sum of the atomic scattering
factors. The atomic scattering factors are given by
.. math::
f(s) = Z - 41.78214 \\times s^2 \\times \\sum\\limits_{i=1}^n a_i \
\\exp(-b_is^2)
where :math:`s = \\frac{\\sin(\\theta)}{\\lambda}` and :math:`a_i`
and :math:`b_i` are the fitted parameters for each element. The
structure factor is then given by
.. math::
F_{hkl} = \\sum\\limits_{j=1}^N f_j \\exp(2\\pi i \\mathbf{g_{hkl}}
\\cdot \\mathbf{r})
4. The intensity is then given by the modulus square of the structure
factor.
.. math::
I_{hkl} = F_{hkl}F_{hkl}^*
5. Finally, the Lorentz polarization correction factor is applied. This
factor is given by:
.. math::
P(\\theta) = \\frac{1 + \\cos^2(2\\theta)}
{\\sin^2(\\theta)\\cos(\\theta)}
"""
# Tuple of available radiation keywords.
AVAILABLE_RADIATION = tuple(WAVELENGTHS.keys())
# Tolerance in which to treat two peaks as having the same two theta.
TWO_THETA_TOL = 1e-5
# Tolerance in which to treat a peak as effectively 0 if the scaled
# intensity is less than this number. Since the max intensity is 100,
# this means the peak must be less than 1e-5 of the peak intensity to be
# considered as zero. This deals with numerical issues where systematic
# absences do not cancel exactly to zero.
SCALED_INTENSITY_TOL = 1e-3
def __init__(self, wavelength="CuKa", symprec=0, debye_waller_factors=None):
"""
Initializes the XRD calculator with a given radiation.
Args:
wavelength (str/float): The wavelength can be specified as either a
float or a string. If it is a string, it must be one of the
supported definitions in the AVAILABLE_RADIATION class
variable, which provides useful commonly used wavelengths.
If it is a float, it is interpreted as a wavelength in
angstroms. Defaults to "CuKa", i.e, Cu K_alpha radiation.
symprec (float): Symmetry precision for structure refinement. If
set to 0, no refinement is done. Otherwise, refinement is
performed using spglib with provided precision.
debye_waller_factors ({element symbol: float}): Allows the
specification of Debye-Waller factors. Note that these
factors are temperature dependent.
"""
if isinstance(wavelength, float):
self.wavelength = wavelength
else:
self.radiation = wavelength
self.wavelength = WAVELENGTHS[wavelength]
self.symprec = symprec
self.debye_waller_factors = debye_waller_factors or {}
def get_xrd_data(self, structure, scaled=True, two_theta_range=(0, 90)):
"""
Calculates the XRD data for a structure.
Args:
structure (Structure): Input structure
scaled (bool): Whether to return scaled intensities. The maximum
peak is set to a value of 100. Defaults to True. Use False if
you need the absolute values to combine XRD plots.
two_theta_range ([float of length 2]): Tuple for range of
two_thetas to calculate in degrees. Defaults to (0, 90). Set to
None if you want all diffracted beams within the limiting
sphere of radius 2 / wavelength.
Returns:
(XRD pattern) in the form of
[[two_theta, intensity, {(h, k, l): mult}, d_hkl], ...]
Two_theta is in degrees. Intensity is in arbitrary units and if
scaled (the default), has a maximum value of 100 for the highest
peak. {(h, k, l): mult} is a dict of Miller indices for all
diffracted lattice facets contributing to that intensity and
their multiplicities. d_hkl is the interplanar spacing.
"""
if self.symprec:
finder = SpacegroupAnalyzer(structure, symprec=self.symprec)
structure = finder.get_refined_structure()
wavelength = self.wavelength
latt = structure.lattice
is_hex = latt.is_hexagonal()
# Obtained from Bragg condition. Note that reciprocal lattice
# vector length is 1 / d_hkl.
min_r, max_r = (0, 2 / wavelength) if two_theta_range is None else \
[2 * sin(radians(t / 2)) / wavelength for t in two_theta_range]
# Obtain crystallographic reciprocal lattice points within range
recip_latt = latt.reciprocal_lattice_crystallographic
recip_pts = recip_latt.get_points_in_sphere(
[[0, 0, 0]], [0, 0, 0], max_r)
if min_r:
recip_pts = [pt for pt in recip_pts if pt[1] >= min_r]
# Create a flattened array of zs, coeffs, fcoords and occus. This is
# used to perform vectorized computation of atomic scattering factors
# later. Note that these are not necessarily the same size as the
# structure as each partially occupied specie occupies its own
# position in the flattened array.
zs = []
coeffs = []
fcoords = []
occus = []
dwfactors = []
for site in structure:
for sp, occu in site.species_and_occu.items():
zs.append(sp.Z)
try:
c = ATOMIC_SCATTERING_PARAMS[sp.symbol]
except KeyError:
raise ValueError("Unable to calculate XRD pattern as "
"there is no scattering coefficients for"
" %s." % sp.symbol)
coeffs.append(c)
dwfactors.append(self.debye_waller_factors.get(sp.symbol, 0))
fcoords.append(site.frac_coords)
occus.append(occu)
zs = np.array(zs)
coeffs = np.array(coeffs)
fcoords = np.array(fcoords)
occus = np.array(occus)
dwfactors = np.array(dwfactors)
peaks = {}
two_thetas = []
for hkl, g_hkl, ind in sorted(
recip_pts, key=lambda i: (i[1], -i[0][0], -i[0][1], -i[0][2])):
# Force miller indices to be integers.
hkl = [int(round(i)) for i in hkl]
if g_hkl != 0:
d_hkl = 1 / g_hkl
# Bragg condition
theta = asin(wavelength * g_hkl / 2)
# s = sin(theta) / wavelength = 1 / 2d = |ghkl| / 2 (d =
# 1/|ghkl|)
s = g_hkl / 2
# Store s^2 since we are using it a few times.
s2 = s ** 2
# Vectorized computation of g.r for all fractional coords and
# hkl.
g_dot_r = np.dot(fcoords, np.transpose([hkl])).T[0]
# Highly vectorized computation of atomic scattering factors.
# Equivalent non-vectorized code is::
#
# for site in structure:
# el = site.specie
# coeff = ATOMIC_SCATTERING_PARAMS[el.symbol]
# fs = el.Z - 41.78214 * s2 * sum(
# [d[0] * exp(-d[1] * s2) for d in coeff])
fs = zs - 41.78214 * s2 * np.sum(
coeffs[:, :, 0] * np.exp(-coeffs[:, :, 1] * s2), axis=1)
dw_correction = np.exp(-dwfactors * s2)
# Structure factor = sum of atomic scattering factors (with
                # position factor exp(2j * pi * g.r) and occupancies).
# Vectorized computation.
f_hkl = np.sum(fs * occus * np.exp(2j * pi * g_dot_r)
* dw_correction)
# Lorentz polarization correction for hkl
lorentz_factor = (1 + cos(2 * theta) ** 2) / \
(sin(theta) ** 2 * cos(theta))
# Intensity for hkl is modulus square of structure factor.
i_hkl = (f_hkl * f_hkl.conjugate()).real
two_theta = degrees(2 * theta)
if is_hex:
# Use Miller-Bravais indices for hexagonal lattices.
hkl = (hkl[0], hkl[1], - hkl[0] - hkl[1], hkl[2])
# Deal with floating point precision issues.
ind = np.where(np.abs(np.subtract(two_thetas, two_theta)) <
XRDCalculator.TWO_THETA_TOL)
if len(ind[0]) > 0:
peaks[two_thetas[ind[0][0]]][0] += i_hkl * lorentz_factor
peaks[two_thetas[ind[0][0]]][1].append(tuple(hkl))
else:
peaks[two_theta] = [i_hkl * lorentz_factor, [tuple(hkl)],
d_hkl]
two_thetas.append(two_theta)
# Scale intensities so that the max intensity is 100.
max_intensity = max([v[0] for v in peaks.values()])
data = []
for k in sorted(peaks.keys()):
v = peaks[k]
scaled_intensity = v[0] / max_intensity * 100 if scaled else v[0]
fam = get_unique_families(v[1])
if scaled_intensity > XRDCalculator.SCALED_INTENSITY_TOL:
data.append([k, scaled_intensity, fam, v[2]])
return data
def get_xrd_plot(self, structure, two_theta_range=(0, 90),
annotate_peaks=True):
"""
Returns the XRD plot as a matplotlib.pyplot.
Args:
structure: Input structure
two_theta_range ([float of length 2]): Tuple for range of
two_thetas to calculate in degrees. Defaults to (0, 90). Set to
None if you want all diffracted beams within the limiting
sphere of radius 2 / wavelength.
annotate_peaks: Whether to annotate the peaks with plane
information.
Returns:
(matplotlib.pyplot)
"""
from pymatgen.util.plotting import pretty_plot
plt = pretty_plot(16, 10)
for two_theta, i, hkls, d_hkl in self.get_xrd_data(
structure, two_theta_range=two_theta_range):
if two_theta_range[0] <= two_theta <= two_theta_range[1]:
label = ", ".join([str(hkl) for hkl in hkls.keys()])
plt.plot([two_theta, two_theta], [0, i], color='k',
linewidth=3, label=label)
if annotate_peaks:
plt.annotate(label, xy=[two_theta, i],
xytext=[two_theta, i], fontsize=16)
plt.xlabel(r"$2\theta$ ($^\circ$)")
plt.ylabel("Intensities (scaled)")
plt.tight_layout()
return plt
def show_xrd_plot(self, structure, two_theta_range=(0, 90),
annotate_peaks=True):
"""
Shows the XRD plot.
Args:
structure (Structure): Input structure
two_theta_range ([float of length 2]): Tuple for range of
two_thetas to calculate in degrees. Defaults to (0, 90). Set to
None if you want all diffracted beams within the limiting
sphere of radius 2 / wavelength.
annotate_peaks (bool): Whether to annotate the peaks with plane
information.
"""
self.get_xrd_plot(structure, two_theta_range=two_theta_range,
annotate_peaks=annotate_peaks).show()
def get_unique_families(hkls):
"""
Returns unique families of Miller indices. Families must be permutations
of each other.
Args:
hkls ([h, k, l]): List of Miller indices.
Returns:
{hkl: multiplicity}: A dict with unique hkl and multiplicity.
"""
# TODO: Definitely can be sped up.
def is_perm(hkl1, hkl2):
h1 = np.abs(hkl1)
h2 = np.abs(hkl2)
return all([i == j for i, j in zip(sorted(h1), sorted(h2))])
unique = collections.defaultdict(list)
for hkl1 in hkls:
found = False
for hkl2 in unique.keys():
if is_perm(hkl1, hkl2):
found = True
unique[hkl2].append(hkl1)
break
if not found:
unique[hkl1].append(hkl1)
pretty_unique = {}
for k, v in unique.items():
pretty_unique[sorted(v)[-1]] = len(v)
return pretty_unique
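# Illustrative sketch (not part of the original module, never called at import time):
# computing a powder pattern for a simple CsCl-type structure with the calculator
# above. The import path below is an assumption; older pymatgen releases expose
# Lattice and Structure at the package top level instead of pymatgen.core.
def _example_xrd_pattern():
    from pymatgen.core import Lattice, Structure
    structure = Structure(Lattice.cubic(4.209), ["Cs", "Cl"],
                          [[0, 0, 0], [0.5, 0.5, 0.5]])
    calc = XRDCalculator(wavelength="CuKa")
    data = calc.get_xrd_data(structure, two_theta_range=(0, 90))
    # Each entry is [two_theta, scaled_intensity, {(h, k, l): multiplicity}, d_hkl].
    return data[:3]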
|
mit
|
adamgreenhall/scikit-learn
|
examples/classification/plot_classification_probability.py
|
242
|
2624
|
"""
===============================
Plot classification probability
===============================
Plot the classification probability for different classifiers. We use a 3
class dataset, and we classify it with a Support Vector classifier, L1
and L2 penalized logistic regression with either a One-Vs-Rest or multinomial
setting.
The logistic regression is not a multiclass classifier out of the box. As
a result it can identify only the first class.
"""
print(__doc__)
# Author: Alexandre Gramfort <alexandre.gramfort@inria.fr>
# License: BSD 3 clause
import matplotlib.pyplot as plt
import numpy as np
from sklearn.linear_model import LogisticRegression
from sklearn.svm import SVC
from sklearn import datasets
iris = datasets.load_iris()
X = iris.data[:, 0:2] # we only take the first two features for visualization
y = iris.target
n_features = X.shape[1]
C = 1.0
# Create different classifiers. The logistic regression cannot do
# multiclass out of the box.
classifiers = {'L1 logistic': LogisticRegression(C=C, penalty='l1'),
'L2 logistic (OvR)': LogisticRegression(C=C, penalty='l2'),
'Linear SVC': SVC(kernel='linear', C=C, probability=True,
random_state=0),
'L2 logistic (Multinomial)': LogisticRegression(
C=C, solver='lbfgs', multi_class='multinomial'
)}
n_classifiers = len(classifiers)
plt.figure(figsize=(3 * 2, n_classifiers * 2))
plt.subplots_adjust(bottom=.2, top=.95)
xx = np.linspace(3, 9, 100)
yy = np.linspace(1, 5, 100).T
xx, yy = np.meshgrid(xx, yy)
Xfull = np.c_[xx.ravel(), yy.ravel()]
for index, (name, classifier) in enumerate(classifiers.items()):
classifier.fit(X, y)
y_pred = classifier.predict(X)
classif_rate = np.mean(y_pred.ravel() == y.ravel()) * 100
print("classif_rate for %s : %f " % (name, classif_rate))
    # View probabilities:
probas = classifier.predict_proba(Xfull)
n_classes = np.unique(y_pred).size
for k in range(n_classes):
plt.subplot(n_classifiers, n_classes, index * n_classes + k + 1)
plt.title("Class %d" % k)
if k == 0:
plt.ylabel(name)
imshow_handle = plt.imshow(probas[:, k].reshape((100, 100)),
extent=(3, 9, 1, 5), origin='lower')
plt.xticks(())
plt.yticks(())
idx = (y_pred == k)
if idx.any():
plt.scatter(X[idx, 0], X[idx, 1], marker='o', c='k')
ax = plt.axes([0.15, 0.04, 0.7, 0.05])
plt.title("Probability")
plt.colorbar(imshow_handle, cax=ax, orientation='horizontal')
plt.show()
|
bsd-3-clause
|
iver56/cross-adaptive-audio
|
plot.py
|
1
|
6325
|
import argparse
import json
import numpy as np
import matplotlib
from matplotlib.font_manager import FontProperties
import os
import settings
from itertools import cycle
class Plot(object):
"""
Plot cumulative max similarity for all experiments
"""
def __init__(self):
# Python 2 and 3 compatibility hack
try:
input = raw_input
except NameError:
            from builtins import input  # Python 3: keep input() below bound
arg_parser = argparse.ArgumentParser()
arg_parser.add_argument(
'--small-font',
nargs='?',
dest='small_font',
help='Use a small font in the legend',
const=True,
required=False,
default=False
)
arg_parser.add_argument(
'--max',
nargs='?',
dest='max',
help='Show max series',
const=True,
required=False,
default=False
)
arg_parser.add_argument(
'--avg',
nargs='?',
dest='avg',
help='Show avg series',
const=True,
required=False,
default=False
)
arg_parser.add_argument(
'--label',
dest='label',
help='Which arg to use as label',
required=False,
default=None
)
arg_parser.add_argument(
'--output',
dest='output',
help='Output image file (PNG). If specified, interactive window will not appear.',
required=False,
default=None
)
arg_parser.add_argument(
'--agg',
nargs='?',
dest='use_agg',
help='Use this argument if you encounter errors like "_tkinter.TclError: no display'
' name and no $DISPLAY environment variable" in a Linux environment',
const=True,
required=False,
default=False
)
args = arg_parser.parse_args()
if args.use_agg:
matplotlib.use('Agg')
import matplotlib.pyplot as plt
experiments = {}
for root, dirs, files in os.walk(settings.STATS_DATA_DIRECTORY):
if dirs:
for experiment_dir in dirs:
if 'test' in experiment_dir:
continue
stats_file_path = os.path.join(
settings.STATS_DATA_DIRECTORY,
experiment_dir,
'stats.json'
)
if os.path.exists(stats_file_path):
experiment_id = experiment_dir.split('__')[1]
if experiment_id not in experiments:
experiments[experiment_id] = []
experiments[experiment_id].append(stats_file_path)
all_series = []
all_series_labels = []
for experiment_id in experiments:
stats_data_objects = []
for stats_file_path in experiments[experiment_id]:
with open(stats_file_path) as stats_file:
stats = json.load(stats_file)
stats_data_objects.append(stats)
experiment_series = [] # series for this experiment
for stats_data_object in stats_data_objects:
# compute cumulative maximum similarity
max_similarity = stats_data_object['generations'][0]['similarity_max']
similarity_series = []
for generation in stats_data_object['generations']:
if generation['similarity_max'] > max_similarity:
max_similarity = generation['similarity_max']
similarity_series.append(max_similarity)
experiment_series.append(similarity_series)
# take the average of experiment series
avg_similarity_values = []
for k in range(len(experiment_series[0])):
max_similarity = max(series[k] for series in experiment_series)
avg_similarity = sum(series[k] for series in experiment_series) / len(experiment_series)
avg_similarity_values.append({
'max': max_similarity,
'avg': avg_similarity
})
all_series.append(avg_similarity_values)
if args.label is None:
print(stats_data_objects[0]['args'])
label = input('label name? ')
all_series_labels.append(label)
else:
all_series_labels.append(stats_data_objects[0]['args'][args.label])
fig = plt.figure()
ax = fig.add_subplot(111)
ax.set_title('Best individual')
ax.set_ylabel('similarity measure')
colors = [
'#1f77b4',
'#ff7f0e',
'#2ca02c',
'#d62728',
'#9467bd',
'#8c564b',
'#e377c2',
'#7f7f7f',
'#bcbd22',
'#17becf'
]
        color_cycle = cycle(colors)  # advanced with next() below so it works on Python 2 and 3
handles = []
for i, series in enumerate(all_series):
            color = next(color_cycle)
x = np.array(range(len(series)))
if args.max:
max_series_plot, = plt.plot(
x,
np.array([y['max'] for y in series]),
label=str(all_series_labels[i]) + ' (max)',
color=color,
linestyle='-.'
)
handles.append(max_series_plot)
if args.avg:
avg_series_plot, = plt.plot(
x,
np.array([y['avg'] for y in series]),
label=str(all_series_labels[i]) + ' (avg)',
color=color
)
handles.append(avg_series_plot)
font_p = FontProperties()
if args.small_font:
font_p.set_size('small')
plt.legend(handles=handles, prop=font_p, loc='best')
ax.set_xlabel('# generations')
if args.output is None:
plt.show()
else:
plt.savefig(args.output, dpi=300)
if __name__ == "__main__":
Plot()
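# Illustrative sketch (hypothetical helper, not used by the class above): the running
# best-similarity series built in the per-generation loop is a cumulative maximum,
# which numpy can compute directly.
def _example_cumulative_max():
    import numpy as np
    similarity_max_per_generation = [0.42, 0.40, 0.55, 0.53, 0.61]
    # -> [0.42, 0.42, 0.55, 0.55, 0.61]
    return list(np.maximum.accumulate(similarity_max_per_generation))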
|
gpl-3.0
|
zak-k/cartopy
|
lib/cartopy/examples/wmts.py
|
3
|
1039
|
__tags__ = ['Web services']
"""
Interactive WMTS (Web Map Tile Service)
---------------------------------------
This example demonstrates the interactive pan and zoom capability
supported by an OGC web services Web Map Tile Service (WMTS) aware axes.
The example WMTS layer is a single composite of data sampled over nine days
in April 2012 and thirteen days in October 2012 showing the Earth at night.
It does not vary over time.
The imagery was collected by the Suomi National Polar-orbiting Partnership
(Suomi NPP) weather satellite operated by the United States National Oceanic
and Atmospheric Administration (NOAA).
"""
import cartopy.crs as ccrs
import matplotlib.pyplot as plt
def main():
url = 'https://map1c.vis.earthdata.nasa.gov/wmts-geo/wmts.cgi'
layer = 'VIIRS_CityLights_2012'
ax = plt.axes(projection=ccrs.PlateCarree())
ax.add_wmts(url, layer)
ax.set_extent((-15, 25, 35, 60))
plt.title('Suomi NPP Earth at night April/October 2012')
plt.show()
if __name__ == '__main__':
main()
|
lgpl-3.0
|
quantopian/research_public
|
research/ml_algo.py
|
2
|
17465
|
# https://www.quantopian.com/posts/machine-learning-alpha-with-risk-constraints
# https://www.quantopian.com/posts/machine-learning-on-quantopian-part-3-building-an-algorithm?utm_campaign=machine-learning-on-quantopian-part-3-building-an-algorithm&utm_medium=email&utm_source=forums
from collections import OrderedDict
from time import time
import pandas as pd
import numpy as np
from sklearn import ensemble, preprocessing, metrics, linear_model
from quantopian.algorithm import (
attach_pipeline,
date_rules,
order_optimal_portfolio,
pipeline_output,
record,
schedule_function,
set_commission,
set_slippage,
time_rules,
)
import quantopian.optimize as opt
from quantopian.pipeline import Pipeline
from quantopian.pipeline.classifiers.fundamentals import Sector as _Sector
from quantopian.pipeline.data import Fundamentals
from quantopian.pipeline.data.builtin import USEquityPricing
from quantopian.pipeline.factors import (
CustomFactor,
Returns,
MACDSignal,
)
from quantopian.pipeline.filters import QTradableStocksUS
from quantopian.pipeline.experimental import risk_loading_pipeline
from zipline.utils.numpy_utils import (
repeat_first_axis,
repeat_last_axis,
)
# If you have eventvestor, it's a good idea to screen out acquisition targets
# Comment out & ~IsAnnouncedAcqTarget() as well. You can also run this over
# the free period.
# from quantopian.pipeline.filters.eventvestor import IsAnnouncedAcqTarget
# Will be split 50% long and 50% short
N_STOCKS_TO_TRADE = 500
# Number of days to train the classifier on, easy to run out of memory here
ML_TRAINING_WINDOW = 252
# train on returns over N days into the future
PRED_N_FORWARD_DAYS = 5
# How often to trade, for daily, set to date_rules.every_day()
TRADE_FREQ = date_rules.week_start(days_offset=1) #date_rules.every_day()
class Sector(_Sector):
window_safe = True
class MeanReversion1M(CustomFactor):
inputs = (Returns(window_length=21),)
window_length = 252
def compute(self, today, assets, out, monthly_rets):
np.divide(
monthly_rets[-1] - np.nanmean(monthly_rets, axis=0),
np.nanstd(monthly_rets, axis=0),
out=out,
)
class MoneyflowVolume5d(CustomFactor):
inputs = (USEquityPricing.close, USEquityPricing.volume)
# we need one more day to get the direction of the price on the first
# day of our desired window of 5 days
window_length = 6
def compute(self, today, assets, out, close_extra, volume_extra):
# slice off the extra row used to get the direction of the close
# on the first day
close = close_extra[1:]
volume = volume_extra[1:]
dollar_volume = close * volume
denominator = dollar_volume.sum(axis=0)
difference = np.diff(close_extra, axis=0)
direction = np.where(difference > 0, 1, -1)
numerator = (direction * dollar_volume).sum(axis=0)
np.divide(numerator, denominator, out=out)
class PriceOscillator(CustomFactor):
inputs = (USEquityPricing.close,)
window_length = 252
def compute(self, today, assets, out, close):
four_week_period = close[-20:]
np.divide(
np.nanmean(four_week_period, axis=0),
np.nanmean(close, axis=0),
out=out,
)
out -= 1
class Trendline(CustomFactor):
inputs = [USEquityPricing.close]
window_length = 252
_x = np.arange(window_length)
_x_var = np.var(_x)
def compute(self, today, assets, out, close):
x_matrix = repeat_last_axis(
(self.window_length - 1) / 2 - self._x,
len(assets),
)
y_bar = np.nanmean(close, axis=0)
y_bars = repeat_first_axis(y_bar, self.window_length)
y_matrix = close - y_bars
np.divide(
(x_matrix * y_matrix).sum(axis=0) / self._x_var,
self.window_length,
out=out,
)
class Volatility3M(CustomFactor):
inputs = [Returns(window_length=2)]
window_length = 63
def compute(self, today, assets, out, rets):
np.nanstd(rets, axis=0, out=out)
class AdvancedMomentum(CustomFactor):
inputs = (USEquityPricing.close, Returns(window_length=126))
window_length = 252
def compute(self, today, assets, out, prices, returns):
np.divide(
(
(prices[-21] - prices[-252]) / prices[-252] -
prices[-1] - prices[-21]
) / prices[-21],
np.nanstd(returns, axis=0),
out=out,
)
asset_growth_3m = Returns(
inputs=[Fundamentals.total_assets],
window_length=63,
)
asset_to_equity_ratio = (
Fundamentals.total_assets.latest /
Fundamentals.common_stock_equity.latest
)
capex_to_cashflows = (
Fundamentals.capital_expenditure.latest /
Fundamentals.free_cash_flow.latest
)
ebitda_yield = (
(Fundamentals.ebitda.latest * 4) /
USEquityPricing.close.latest
)
ebita_to_assets = (
(Fundamentals.ebit.latest * 4) /
Fundamentals.total_assets.latest
)
return_on_total_invest_capital = Fundamentals.roic.latest
mean_reversion_1m = MeanReversion1M()
macd_signal_10d = MACDSignal(
fast_period=12,
slow_period=26,
signal_period=10,
)
moneyflow_volume_5d = MoneyflowVolume5d()
net_income_margin = Fundamentals.net_margin.latest
operating_cashflows_to_assets = (
(Fundamentals.operating_cash_flow.latest * 4) /
Fundamentals.total_assets.latest
)
price_momentum_3m = Returns(window_length=63)
price_oscillator = PriceOscillator()
trendline = Trendline()
returns_39w = Returns(window_length=215)
volatility_3m = Volatility3M()
advanced_momentum = AdvancedMomentum()
features = {
'Asset Growth 3M': asset_growth_3m,
'Asset to Equity Ratio': asset_to_equity_ratio,
'Capex to Cashflows': capex_to_cashflows,
'EBIT to Assets': ebita_to_assets,
'EBITDA Yield': ebitda_yield,
'MACD Signal Line': macd_signal_10d,
'Mean Reversion 1M': mean_reversion_1m,
'Moneyflow Volume 5D': moneyflow_volume_5d,
'Net Income Margin': net_income_margin,
'Operating Cashflows to Assets': operating_cashflows_to_assets,
'Price Momentum 3M': price_momentum_3m,
'Price Oscillator': price_oscillator,
'Return on Invest Capital': return_on_total_invest_capital,
'39 Week Returns': returns_39w,
'Trendline': trendline,
'Volatility 3m': volatility_3m,
'Advanced Momentum': advanced_momentum,
}
def shift_mask_data(features,
labels,
n_forward_days,
lower_percentile,
upper_percentile):
"""Align features to the labels ``n_forward_days`` into the future and
return the discrete, flattened features and masked labels.
Parameters
----------
features : np.ndarray
A 3d array of (days, assets, feature).
labels : np.ndarray
The labels to predict.
n_forward_days : int
How many days into the future are we predicting?
lower_percentile : float
The lower percentile in the range [0, 100].
upper_percentile : float
The upper percentile in the range [0, 100].
Returns
-------
selected_features : np.ndarray
The flattened features that are not masked out.
selected_labels : np.ndarray
The labels that are not masked out.
"""
# Slice off rolled elements
shift_by = n_forward_days + 1
aligned_features = features[:-shift_by]
aligned_labels = labels[shift_by:]
cutoffs = np.nanpercentile(
aligned_labels,
[lower_percentile, upper_percentile],
axis=1,
)
discrete_labels = np.select(
[
aligned_labels <= cutoffs[0, :, np.newaxis],
aligned_labels >= cutoffs[1, :, np.newaxis],
],
[-1, 1],
)
# flatten the features per day
flattened_features = aligned_features.reshape(
-1,
aligned_features.shape[-1],
)
# Drop stocks that did not move much, meaning they are in between
# ``lower_percentile`` and ``upper_percentile``.
mask = discrete_labels != 0
selected_features = flattened_features[mask.ravel()]
selected_labels = discrete_labels[mask]
return selected_features, selected_labels
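# Illustrative sketch (hypothetical helper, not used by the algorithm): a toy call to
# shift_mask_data showing how features are aligned against returns observed
# n_forward_days later and how labels in the middle of the distribution are dropped.
def _example_shift_mask_data():
    import numpy as np
    days, n_assets, n_feat = 6, 5, 2
    features = np.random.randn(days, n_assets, n_feat)
    labels = np.random.randn(days, n_assets)  # e.g. realized forward returns
    X, y = shift_mask_data(features, labels,
                           n_forward_days=1,
                           lower_percentile=30,
                           upper_percentile=70)
    # y holds only -1/+1; each row of X lines up with one surviving label.
    return X.shape, set(np.unique(y))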
class ML(CustomFactor):
"""
"""
train_on_weekday = 1
def __init__(self, *args, **kwargs):
CustomFactor.__init__(self, *args, **kwargs)
self._imputer = preprocessing.Imputer()
self._scaler = preprocessing.MinMaxScaler()
self._classifier = linear_model.SGDClassifier(penalty='elasticnet')
self.trained = False
#ensemble.AdaBoostClassifier(
# random_state=1337,
# n_estimators=50,
#)
def _compute(self, *args, **kwargs):
ret = CustomFactor._compute(self, *args, **kwargs)
# reset the day counter so that we will begin training at the start of
# the next _compute call
self._day_counter = -1
return ret
def _train_model(self, today, returns, inputs):
log.info('training model for window starting on: {}', today)
imputer = self._imputer
scaler = self._scaler
classifier = self._classifier
features, labels = shift_mask_data(
np.dstack(inputs),
returns,
n_forward_days=PRED_N_FORWARD_DAYS,
lower_percentile=30,
upper_percentile=70,
)
features = scaler.fit_transform(imputer.fit_transform(features))
start = time()
classifier.fit(features, labels)
log.info('training took {} secs', time() - start)
self.trained = True
def _maybe_train_model(self, today, returns, inputs):
if (today.weekday() == self.train_on_weekday) or not self.trained:
self._train_model(today, returns, inputs)
def compute(self, today, assets, out, returns, *inputs):
# inputs is a list of factors, for example, assume we have 2 alpha
# signals, 3 stocks, and a lookback of 2 days. Each element in the
# inputs list will be data of one signal, so len(inputs) == 2. Then
# each element will contain a 2-D array of shape [time x stocks]. For
# example:
# inputs[0]:
# [[1, 3, 2], # factor 1 rankings of day t-1 for 3 stocks
# [3, 2, 1]] # factor 1 rankings of day t for 3 stocks
# inputs[1]:
# [[2, 3, 1], # factor 2 rankings of day t-1 for 3 stocks
# [1, 2, 3]] # factor 2 rankings of day t for 3 stocks
self._maybe_train_model(today, returns, inputs)
# Predict
# Get most recent factor values (inputs always has the full history)
last_factor_values = np.vstack([input_[-1] for input_ in inputs]).T
last_factor_values = self._imputer.transform(last_factor_values)
last_factor_values = self._scaler.transform(last_factor_values)
# Predict the probability for each stock going up
# (column 2 of the output of .predict_proba()) and
# return it via assignment to out.
#out[:] = self._classifier.predict_proba(last_factor_values)[:, 1]
out[:] = self._classifier.predict(last_factor_values)
def make_ml_pipeline(universe, window_length=21, n_forward_days=5):
pipeline_columns = OrderedDict()
# ensure that returns is the first input
pipeline_columns['Returns'] = Returns(
inputs=(USEquityPricing.open,),
mask=universe, window_length=n_forward_days + 1,
)
# rank all the factors and put them after returns
pipeline_columns.update({
k: v.rank(mask=universe) for k, v in features.items()
})
# Create our ML pipeline factor. The window_length will control how much
# lookback the passed in data will have.
pipeline_columns['ML'] = ML(
inputs=pipeline_columns.values(),
window_length=window_length + 1,
mask=universe,
)
pipeline_columns['Sector'] = Sector()
return Pipeline(screen=universe, columns=pipeline_columns)
def initialize(context):
"""
Called once at the start of the algorithm.
"""
set_slippage(slippage.FixedSlippage(spread=0.00))
set_commission(commission.PerShare(cost=0, min_trade_cost=0))
schedule_function(
rebalance,
TRADE_FREQ,
time_rules.market_open(minutes=1),
)
# Record tracking variables at the end of each day.
schedule_function(
record_vars,
date_rules.every_day(),
time_rules.market_close(),
)
    # Set up universe, alphas and ML pipeline
context.universe = QTradableStocksUS()
# if you are using IsAnnouncedAcqTarget, uncomment the next line
# context.universe &= IsAnnouncedAcqTarget()
ml_pipeline = make_ml_pipeline(
context.universe,
n_forward_days=PRED_N_FORWARD_DAYS,
window_length=ML_TRAINING_WINDOW,
)
# Create our dynamic stock selector.
attach_pipeline(ml_pipeline, 'alpha_model')
# Add the risk pipeline
attach_pipeline(risk_loading_pipeline(), 'risk_factors')
context.past_predictions = {}
context.hold_out_accuracy = 0
context.hold_out_log_loss = 0
context.hold_out_returns_spread_bps = 0
def evaluate_and_shift_hold_out(output, context):
# Look at past predictions to evaluate classifier accuracy on hold-out data
# A day has passed, shift days and drop old ones
context.past_predictions = {
k - 1: v
for k, v in context.past_predictions.iteritems()
if k > 0
}
if 0 in context.past_predictions:
        # Past predictions for the current day exist, so we can use today's
# n-back returns to evaluate them
raw_returns = output['Returns']
raw_predictions = context.past_predictions[0]
# Join to match up equities
returns, predictions = raw_returns.align(raw_predictions, join='inner')
# Binarize returns
returns_binary = returns > returns.median()
predictions_binary = predictions > 0.5
# Compute performance metrics
context.hold_out_accuracy = metrics.accuracy_score(
returns_binary.values,
predictions_binary.values,
)
context.hold_out_log_loss = metrics.log_loss(
returns_binary.values,
predictions.values,
)
long_rets = returns[predictions_binary == 1].mean()
short_rets = returns[predictions_binary == 0].mean()
context.hold_out_returns_spread_bps = (long_rets - short_rets) * 10000
# Store current predictions
context.past_predictions[PRED_N_FORWARD_DAYS] = context.predicted_probs
def before_trading_start(context, data):
"""
Called every day before market open.
"""
output = pipeline_output('alpha_model')
context.predicted_probs = output['ML']
context.predicted_probs.index.rename(['date', 'equity'], inplace=True)
context.risk_loadings = pipeline_output('risk_factors')
evaluate_and_shift_hold_out(output, context)
# These are the securities that we are interested in trading each day.
context.security_list = context.predicted_probs.index
def rebalance(context, data):
"""
Execute orders according to our schedule_function() timing.
"""
predictions = context.predicted_probs
# Filter out stocks that can not be traded
predictions = predictions.loc[data.can_trade(predictions.index)]
# Select top and bottom N stocks
n_long_short = min(N_STOCKS_TO_TRADE // 2, len(predictions) // 2)
predictions_top_bottom = pd.concat([
predictions.nlargest(n_long_short),
predictions.nsmallest(n_long_short),
])
# If classifier predicts many identical values, the top might contain
# duplicate stocks
predictions_top_bottom = predictions_top_bottom.iloc[
~predictions_top_bottom.index.duplicated()
]
# predictions are probabilities ranging from 0 to 1
predictions_top_bottom = (predictions_top_bottom - 0.5) * 2
# pull in the risk factor loadings
risk_loadings = context.risk_loadings
# Setup Optimization Objective
# Factor-weighted portfolio
objective = opt.TargetWeights(predictions_top_bottom)
# Setup Optimization Constraints
constrain_gross_leverage = opt.MaxGrossExposure(1.0)
constrain_pos_size = opt.PositionConcentration.with_equal_bounds(
-0.02,
+0.02,
)
market_neutral = opt.DollarNeutral()
if predictions_top_bottom.index.duplicated().any():
log.debug(predictions_top_bottom.head())
risk_neutral = opt.experimental.RiskModelExposure(
risk_model_loadings=risk_loadings
)
# Run the optimization. This will calculate new portfolio weights and
# manage moving our portfolio toward the target.
order_optimal_portfolio(
objective=objective,
constraints=[
constrain_gross_leverage,
constrain_pos_size,
market_neutral,
risk_neutral
],
)
def record_vars(context, data):
"""
Plot variables at the end of each day.
"""
record(
leverage=context.account.leverage,
hold_out_accuracy=context.hold_out_accuracy,
hold_out_log_loss=context.hold_out_log_loss,
hold_out_returns_spread_bps=context.hold_out_returns_spread_bps,
)
def handle_data(context, data):
pass
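# Illustrative sketch (hypothetical, runs outside Quantopian): the hold-out scoring in
# evaluate_and_shift_hold_out above binarizes realized returns at their median and
# scores the stored predictions with scikit-learn metrics.
def _example_hold_out_scoring():
    import pandas as pd
    from sklearn import metrics
    returns = pd.Series([0.02, -0.01, 0.03, -0.02], index=list('ABCD'))
    predictions = pd.Series([0.8, 0.4, 0.7, 0.3], index=list('ABCD'))
    returns, predictions = returns.align(predictions, join='inner')
    returns_binary = returns > returns.median()
    predictions_binary = predictions > 0.5
    accuracy = metrics.accuracy_score(returns_binary.values, predictions_binary.values)
    log_loss = metrics.log_loss(returns_binary.values, predictions.values)
    spread_bps = (returns[predictions_binary].mean()
                  - returns[~predictions_binary].mean()) * 10000
    return accuracy, log_loss, spread_bps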
|
apache-2.0
|
philrosenfield/TPAGB-calib
|
tpagb_calibration/plotting/interactive_match_cmdlimits.py
|
1
|
7262
|
#!/usr/bin/env python
import argparse
import matplotlib.pylab as plt
import numpy as np
import os
import sys
import time
from astropy.io import fits
from ResolvedStellarPops.tpagb_path_config import tpagb_path
def move_on(ok, msg='0 to move on: '):
ok = int(raw_input(msg))
time.sleep(1)
return ok
def find_match_limits(phot, phot_ext, comp1=99., comp2=None, color_only=False,
xlim=None, ylim=None):
"""
click color limits on a cmd and mag1 mag2 limits on a plot of mag1 vs mag2
"""
mag1 = phot['mag1_%s' % phot_ext]
mag2 = phot['mag2_%s' % phot_ext]
col = mag1 - mag2
fig, ax = plt.subplots()
ax.plot(col, mag2, 'o', color='k', ms=3, alpha=0.3, mec='none')
if xlim is not None:
ax.set_xlim(xlim)
if ylim is not None:
ax.set_ylim(ylim)
else:
ax.set_ylim(ax.get_ylim()[::-1])
if comp2 is not None:
ax.hlines(comp2, *ax.get_xlim())
else:
comp2 = 99.
ok = 1
while ok == 1:
print 'click on color min then color max'
pts = plt.ginput(2, timeout=-1)
colmin, colmax = [pts[i][0] for i in range(2)]
ax.vlines(colmin, *ax.get_ylim())
ax.vlines(colmax, *ax.get_ylim())
plt.draw()
ok = move_on(0)
plt.close()
inds, = np.nonzero((col < colmax) & (col > colmin))
data = (colmin, colmax)
if not color_only:
fig, ax = plt.subplots()
ax.plot(mag1, mag2, '.', color='k')
ok = 1
while ok == 1:
print 'click the bright mag value of mag1 and mag2, click a second time to finish'
pts = plt.ginput(2, timeout=-1)
mag1max, mag2max = pts[0]
ax.plot(mag1max, mag2max, 'o', color='r')
plt.draw()
ok = move_on(ok)
plt.close()
inds, = np.nonzero((mag1 < comp1) & (mag1 > mag1max) &
(mag2 < comp2) & (mag2 > mag2max) &
(col < colmax) & (col > colmin))
fig, ax = plt.subplots()
ax.plot(col, mag2, '.', color='k')
ax.plot(col[inds], mag2[inds], '.', color='r')
ax.set_ylim(ax.get_ylim()[::-1])
ax.hlines(comp2, *ax.get_xlim(), lw=2)
ax.vlines(colmin, *ax.get_ylim(), lw=2)
ax.vlines(colmax, *ax.get_ylim(), lw=2)
if not color_only:
ax.hlines(mag2max, *ax.get_xlim(), lw=2)
data = (colmin, colmax, mag1max, mag2max)
plt.draw()
return data
def find_gates(target):
import glob
here = os.getcwd()
os.chdir(target)
# read match file for mag1, mag2
phot = glob.glob1('.', '*match')
params = glob.glob1('.', '*param')
for i, param in enumerate(params):
# read param file
mag1, mag2 = np.genfromtxt(phot[i], unpack=True)
col = mag1 - mag2
lines = open(param, 'r').readlines()
colmin, colmax = map(float, lines[4].split()[3:-1])
mag1min, mag1max = map(float, lines[5].split()[:-1])
#mag2min, mag2max = map(float, lines[5].split()[:-1])
# click around
fig, ax = plt.subplots()
ax.plot(col, mag2, ',', color='k', alpha=0.2)
ax.set_ylim(mag1max, mag1min)
ax.set_xlim(colmin, colmax)
ok = 1
while ok == 1:
print 'click '
pts = np.asarray(plt.ginput(n=4, timeout=-1))
exclude_gate = '1 {} 0 \n'.format(' '.join(['%.4f' % p for p in pts.flatten()]))
pts = np.append(pts, pts[0]).reshape(5,2)
ax.plot(pts[:,0], pts[:,1], color='r', lw=3, alpha=0.3)
plt.draw()
ok = move_on(0)
lines[7] = exclude_gate
# not so simple ... need them to be parallelograms.
# PASS!
# write new param file with exclude/include gate
os.system('mv {0} {0}_bkup'.format(param))
with open(param, 'w') as outp:
[outp.write(l) for l in lines]
print('wrote %s' % param)
os.chdir(here)
def match_limits(color_only=False, data_file='snap_galaxies.dat',
target=None):
plt.ion()
new_lines = '# target comp_nir1 comp_nir2 comp_opt1 comp_opt2 Av mTRGB mTRGBerr dmod colmin colmax mag1max mag2max opt1 opt2 opt_phot opt_fake\n'
data_loc = os.path.join(tpagb_path, 'SNAP/data/angst_no_trim')
lines = open(os.path.join(tpagb_path, 'SNAP/tables/{}'.format(data_file)), 'r').readlines()
if target is not None:
lines = [l for l in lines if target in l]
for line in lines:
if line.startswith('#'):
continue
target, comp_nir1, comp_nir2, comp_opt1, comp_opt2, Av, mTRGB, mTRGBerr, dmod, cmin, cmax, opt1, opt2, filter1, filter2, opt_phot, opt_fake = line.split()
opt_phot = os.path.join(data_loc, opt_phot)
opt_fake = os.path.join(data_loc, opt_fake)
assert os.path.isfile(opt_phot), 'no opt phot'
assert os.path.isfile(opt_fake), 'no opt fake'
print target
phot = fits.getdata(opt_phot)
phot_ext = 'acs'
if 'wfpc2' in opt_phot:
phot_ext = 'wfpc2'
ok = 1
while ok == 1:
data = find_match_limits(phot, phot_ext, comp1=float(comp_opt1),
comp2=float(comp_opt2),
xlim=(float(cmin), float(cmax)),
color_only=color_only)
ok = move_on(0)
plt.close()
partial_line = ' '.join([target, comp_nir1, comp_nir2, comp_opt1,
comp_opt2, Av, mTRGB, mTRGBerr, dmod])
end_line = ' '.join([opt1, opt2, opt_phot, opt_fake])
if color_only:
colmin, colmax = data
data_str = '%.2f %.2f' % (colmin, colmax)
else:
colmin, colmax, mag1max, mag2max = data
data_str = '%.2f %.2f %.2f %.2f' % (colmin, colmax, mag1max, mag2max)
new_line = '%s %s %s \n' % (partial_line, data_str, end_line)
print new_line
new_lines += new_line
print new_lines
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="Find color mag limits of CMDs interactively")
parser.add_argument('-c', '--color_only', action='store_true',
help='skip the magnitude finding')
parser.add_argument('-d', '--data_file', type=str, default='snap_galaxies.dat',
help='data table in [tpagb_path]/SNAP/tables to use')
parser.add_argument('-v', '--pdb', action='store_true',
help='toggle debugging')
parser.add_argument('-t', '--target', type=str, default=None,
help='name of one target in the data_file')
parser.add_argument('-e', '--exgates', action='store_true',
help='Do exclude gates instead of match_limits')
args = parser.parse_args(sys.argv[1:])
color_only = args.color_only
if args.pdb:
import pdb
pdb.set_trace()
if args.exgates:
assert args.target is not None, \
'Must supply target if finding exclude gates'
find_gates(args.target)
else:
match_limits(color_only=color_only, data_file=args.data_file, target=args.target)
|
bsd-3-clause
|
modflowpy/flopydoc
|
docs/pysrc/ex_lake.py
|
1
|
3491
|
# ExampleLake - Flopy example of multi-layer steady model with one fixed head cell
# This is an example of steady flow in a multi-layer model.
# Heads are fixed along all outer boundaries to 100 meters.
# In the center cell in the top layer, the head is fixed to 90 meters.
# Import the Python packages that we need
import mf
import numpy as np
import mfreadbinaries as mfrdbin
import matplotlib.pyplot as mtpl
import os
import tempfile
# name and path of the MODFLOW run
name = 'ExLake'
# Define the folder workspace (optional, if omitted the output files
# will be created in the folder where this script is located).
model_ws = os.path.join(tempfile.gettempdir(), '00MF_ws')
# Define the MODFLOW exe path, name and version
# As specified below, it requires that the user copy the MODFLOW executable into the workspace folder.
# Alternatively, indicate the full path where the MODFLOW-exe-file is located.
mfexepath = 'mf2k.exe'
version = 'mf2k' # mf2k or mf2005
# Define the problem.
# A grid of N by N cells.
# The model size is L by L, the aquifer thickness is H, and the hydraulic conductivity is k.
Nlay = 10
N = 101
L = 400.0
H = 50.0
k = 1.0
T = k*H / Nlay
# Create the modflow model
ml = mf.modflow(modelname=name, exe_name=mfexepath, version = version, model_ws = model_ws)
# Add the Discretization package
Hlay = H / Nlay
bot = []
for i in range(1,Nlay):
bot.extend( (-float(i)/Nlay * H, -float(i)/Nlay * H) )
bot.append(-H)
delrow = delcol = L/(N-1)
discret = mf.mfdis(ml,N,N,Nlay,delr=delrow,delc=delcol,top=0.0,botm=bot)
discret.write_file()
# Add the Basic package
Nhalf = (N-1)/2
ibound = np.ones((N,N,Nlay),'int')
ibound[0,:,:] = -1; ibound[-1,:,:] = -1; ibound[:,0,:] = -1; ibound[:,-1,:] = -1
ibound[Nhalf,Nhalf,0]=-1
start=100.0*np.ones((N,N))
start[Nhalf,Nhalf] = 90
bas = mf.mfbas(ml,ibound=ibound,strt=start)
bas.write_file()
# Add the BlockCenterFlow package, the Output Control package, and the PCG solver package
vc = k / (H/Nlay)
bcf = mf.mfbcf(ml,laycon=0,tran=T,vcont=vc)
bcf.write_file()
oc = mf.mfoc(ml)
oc.write_file()
pcg = mf.mfpcg(ml)
pcg.write_file()
print '\nMODFLOW packages created.\nRunning MODFLOW...\n'
# Write all MODFLOW files, call MODFLOW, and read the heads back into Python
ml.write_name_file()
ml.run_model(pause = False)
h_MF_fn = os.path.join(ml.model_ws, name + '.hds')
h = mfrdbin.mfhdsread(ml, 'LF95').read_all(h_MF_fn)
h1 = h[1]
del h
print '\nExporting plots'
# Store some arrays for plotting
x = np.linspace(-L/2,L/2,N)
xg,yg = np.meshgrid(x,x)
#Contour the heads in the first layer
mtpl.figure()
valg = h1[0][:,:,:]
mtpl.contour(xg,yg,valg[:,:,0],np.linspace(90,100,51))
mtpl.axis('scaled')
mtpl.title('Heads contour in the first layer')
plt_export_fn = os.path.join(ml.model_ws, '_plt_0_headscontours.png')
mtpl.savefig(plt_export_fn,dpi=150)
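# Optional sanity check (illustrative addition, not part of the original
# example): the constant-head cell at the centre of the top layer was fixed
# to 90 m, so the computed head there should still be close to 90 m. This
# assumes valg is ordered (row, column, layer), as in the contour plot above.
print 'Head at the fixed cell in layer 1: %.2f m (expected 90 m)' % valg[Nhalf, Nhalf, 0]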
#Make a cross-sectional figure of layers 1, 2, and 10
mtpl.figure()
mtpl.plot(xg[0,:],valg[:,50,0],label='Top layer')
mtpl.plot(xg[0,:],valg[:,50,1],label='Second layer')
mtpl.plot(xg[0,:],valg[:,50,9],label='Bottom layer')
mtpl.title('Cross-section view of column #50')
mtpl.legend(loc='best')
plt_export_fn = os.path.join(ml.model_ws, '_plt_1_crosssection.png')
mtpl.savefig(plt_export_fn,dpi=150)
mtpl.close('all')
print '\nSuccessful FloPy run!\nOutput written in %s' % ml.model_ws
|
bsd-3-clause
|
seanli9jan/tensorflow
|
tensorflow/contrib/learn/python/learn/estimators/multioutput_test.py
|
136
|
1696
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Multi-output tests."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import random
import numpy as np
from tensorflow.contrib.learn.python import learn
from tensorflow.contrib.learn.python.learn.estimators._sklearn import mean_squared_error
from tensorflow.python.platform import test
class MultiOutputTest(test.TestCase):
"""Multi-output tests."""
def testMultiRegression(self):
random.seed(42)
rng = np.random.RandomState(1)
x = np.sort(200 * rng.rand(100, 1) - 100, axis=0)
y = np.array([np.pi * np.sin(x).ravel(), np.pi * np.cos(x).ravel()]).T
regressor = learn.LinearRegressor(
feature_columns=learn.infer_real_valued_columns_from_input(x),
label_dimension=2)
regressor.fit(x, y, steps=100)
score = mean_squared_error(np.array(list(regressor.predict_scores(x))), y)
self.assertLess(score, 10, "Failed with score = {0}".format(score))
if __name__ == "__main__":
test.main()
|
apache-2.0
|
Curly-Mo/sample-recognition
|
ann.py
|
1
|
4427
|
import logging
import numpy as np
logger = logging.getLogger(__name__)
FLANN_ALGS = ['kdtree', 'kmeans', 'composite', 'lsh', 'autotuned']
CV_ALGS = ['kdtree', 'kmeans', 'composite', 'lsh', 'autotuned']
SKLEARN_ALGS = ['kd_tree', 'ball_tree', 'brute', 'auto']
def train_matcher(data, algorithm='kdtree'):
    # Initialize so an unknown algorithm raises the intended ValueError below
    # rather than a NameError.
    matcher = None
# if algorithm in FLANN_ALGS:
# matcher = fit_flann(data, algorithm)
# el
if algorithm in CV_ALGS:
matcher = fit_cv2(data, algorithm)
elif algorithm in SKLEARN_ALGS:
matcher = fit_sklearn(data, algorithm)
elif algorithm == 'lshf':
matcher = fit_lshf(data)
elif algorithm == 'annoy':
matcher = fit_annoy(data)
if not matcher:
raise ValueError('Invalid matching algorithm: {}'.format(algorithm))
return matcher
def find_neighbors(matcher, data, algorithm='lshf', k=2):
logger.info('Finding (approximate) nearest neighbors...')
    # The pyflann branch is disabled here to mirror train_matcher, which
    # builds a cv2 FlannBasedMatcher for these algorithm names.
    # if algorithm in FLANN_ALGS:
    #     distances, indices = matcher.nn_index(np.float32(data), k=k)
    if algorithm in CV_ALGS:
        matches = matcher.knnMatch(np.float32(data), k=k)
distances, indices = zip(*(((n1.distance, n2.distance), (n1.trainIdx, n2.trainIdx)) for n1, n2 in matches))
elif algorithm in SKLEARN_ALGS:
distances, indices = matcher.kneighbors(data, n_neighbors=k)
elif algorithm == 'lshf':
distances, indices = matcher.kneighbors(data, n_neighbors=k)
elif algorithm == 'annoy':
indices = []
distances = []
for d in data:
index, distance = matcher.get_nns_by_vector(d, k, include_distances=True)
indices.append(index)
distances.append(distance)
return distances, indices
def nearest_neighbors(test, train, algorithm='lshf', k=2):
matcher = train_matcher(train, algorithm)
distances, indices = find_neighbors(matcher, test, algorithm, k=k)
return distances, indices
def fit_cv2(data, algorithm):
logger.info('Fitting cv2 FLANN...')
from cv2 import FlannBasedMatcher
KDTREE = 0
index_params = {
'algorithm': KDTREE,
'trees': 5,
#'target_precision': 0.9,
#'build_weight': 0.01,
#'memory_weight': 0,
#'sample_fraction': 0.1,
}
search_params = {'checks': 5}
flann = FlannBasedMatcher(index_params, search_params)
flann.add(np.float32(data))
flann.train()
return flann
def fit_flann(data, algorithm):
logger.info('Fitting FLANN...')
from pyflann import FLANN
matcher = FLANN(
algorithm=algorithm,
checks=32,
eps=0.0,
cb_index=0.5,
trees=1,
leaf_max_size=4,
branching=32,
iterations=5,
centers_init='random',
target_precision=0.9,
build_weight=0.01,
memory_weight=0.0,
sample_fraction=0.1,
log_level="warning",
random_seed=-1,
)
matcher.build_index(data)
return matcher
def fit_sklearn(data, algorithm):
logger.info('Fitting Sklearn Matcher: {}...'.format(algorithm))
from sklearn.neighbors import NearestNeighbors
matcher = NearestNeighbors(
algorithm=algorithm,
n_neighbors=2,
radius=1.0,
leaf_size=30,
metric='minkowski',
p=2,
metric_params=None,
n_jobs=-1,
)
matcher.fit(data)
return matcher
def fit_annoy(data, n_trees=-1):
logger.info('Fitting Annoy Matcher...')
from annoy import AnnoyIndex
logger.info('Building Annoy index...')
matcher = AnnoyIndex(data.shape[1], metric='euclidean')
for i, d in enumerate(data):
matcher.add_item(i, d)
logger.info('Building Annoy Matcher...')
matcher.build(n_trees)
return matcher
def load_annoy(path, n_features=128):
logger.info('Loading Annoy Index {}...'.format(path))
from annoy import AnnoyIndex
matcher = AnnoyIndex(n_features, metric='euclidean')
matcher.load(path)
return matcher
def fit_lshf(data):
logger.info('Fitting LSHForest...')
from sklearn.neighbors import LSHForest
lshf = LSHForest(
n_estimators=20,
min_hash_match=4,
n_candidates=200,
n_neighbors=2,
radius=1.0,
radius_cutoff_ratio=0.9,
random_state=None,
)
lshf.fit(data)
return lshf
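# --- Usage sketch (illustrative addition, not part of the original module) ---
# Shows one way to call nearest_neighbors() with the scikit-learn backend on
# random descriptors; the array shapes and the 'kd_tree' choice are arbitrary
# assumptions made only for this demonstration.
if __name__ == '__main__':
    logging.basicConfig(level=logging.INFO)
    rng = np.random.RandomState(0)
    train_descriptors = rng.rand(1000, 64).astype(np.float32)
    query_descriptors = rng.rand(5, 64).astype(np.float32)
    # k=2 nearest training descriptors for every query descriptor
    dists, idxs = nearest_neighbors(query_descriptors, train_descriptors,
                                    algorithm='kd_tree', k=2)
    logger.info('first query -> indices %s, distances %s', idxs[0], dists[0])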
|
apache-2.0
|
jlegendary/scikit-learn
|
sklearn/feature_selection/tests/test_feature_select.py
|
143
|
22295
|
"""
Todo: cross-check the F-value with stats model
"""
from __future__ import division
import itertools
import warnings
import numpy as np
from scipy import stats, sparse
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_not_in
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import assert_warns
from sklearn.utils.testing import ignore_warnings
from sklearn.utils.testing import assert_warns_message
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_greater_equal
from sklearn.utils import safe_mask
from sklearn.datasets.samples_generator import (make_classification,
make_regression)
from sklearn.feature_selection import (chi2, f_classif, f_oneway, f_regression,
SelectPercentile, SelectKBest,
SelectFpr, SelectFdr, SelectFwe,
GenericUnivariateSelect)
##############################################################################
# Test the score functions
def test_f_oneway_vs_scipy_stats():
# Test that our f_oneway gives the same result as scipy.stats
rng = np.random.RandomState(0)
X1 = rng.randn(10, 3)
X2 = 1 + rng.randn(10, 3)
f, pv = stats.f_oneway(X1, X2)
f2, pv2 = f_oneway(X1, X2)
assert_true(np.allclose(f, f2))
assert_true(np.allclose(pv, pv2))
def test_f_oneway_ints():
    # Smoke test f_oneway on integers: that it does not raise casting errors
# with recent numpys
rng = np.random.RandomState(0)
X = rng.randint(10, size=(10, 10))
y = np.arange(10)
fint, pint = f_oneway(X, y)
    # test that it gives the same result as with float
f, p = f_oneway(X.astype(np.float), y)
assert_array_almost_equal(f, fint, decimal=4)
assert_array_almost_equal(p, pint, decimal=4)
def test_f_classif():
# Test whether the F test yields meaningful results
# on a simple simulated classification problem
X, y = make_classification(n_samples=200, n_features=20,
n_informative=3, n_redundant=2,
n_repeated=0, n_classes=8,
n_clusters_per_class=1, flip_y=0.0,
class_sep=10, shuffle=False, random_state=0)
F, pv = f_classif(X, y)
F_sparse, pv_sparse = f_classif(sparse.csr_matrix(X), y)
assert_true((F > 0).all())
assert_true((pv > 0).all())
assert_true((pv < 1).all())
assert_true((pv[:5] < 0.05).all())
assert_true((pv[5:] > 1.e-4).all())
assert_array_almost_equal(F_sparse, F)
assert_array_almost_equal(pv_sparse, pv)
def test_f_regression():
# Test whether the F test yields meaningful results
# on a simple simulated regression problem
X, y = make_regression(n_samples=200, n_features=20, n_informative=5,
shuffle=False, random_state=0)
F, pv = f_regression(X, y)
assert_true((F > 0).all())
assert_true((pv > 0).all())
assert_true((pv < 1).all())
assert_true((pv[:5] < 0.05).all())
assert_true((pv[5:] > 1.e-4).all())
# again without centering, compare with sparse
F, pv = f_regression(X, y, center=False)
F_sparse, pv_sparse = f_regression(sparse.csr_matrix(X), y, center=False)
assert_array_almost_equal(F_sparse, F)
assert_array_almost_equal(pv_sparse, pv)
def test_f_regression_input_dtype():
# Test whether f_regression returns the same value
# for any numeric data_type
rng = np.random.RandomState(0)
X = rng.rand(10, 20)
y = np.arange(10).astype(np.int)
F1, pv1 = f_regression(X, y)
F2, pv2 = f_regression(X, y.astype(np.float))
assert_array_almost_equal(F1, F2, 5)
assert_array_almost_equal(pv1, pv2, 5)
def test_f_regression_center():
# Test whether f_regression preserves dof according to 'center' argument
# We use two centered variates so we have a simple relationship between
# F-score with variates centering and F-score without variates centering.
# Create toy example
X = np.arange(-5, 6).reshape(-1, 1) # X has zero mean
n_samples = X.size
Y = np.ones(n_samples)
Y[::2] *= -1.
Y[0] = 0. # have Y mean being null
F1, _ = f_regression(X, Y, center=True)
F2, _ = f_regression(X, Y, center=False)
assert_array_almost_equal(F1 * (n_samples - 1.) / (n_samples - 2.), F2)
assert_almost_equal(F2[0], 0.232558139) # value from statsmodels OLS
def test_f_classif_multi_class():
# Test whether the F test yields meaningful results
# on a simple simulated classification problem
X, y = make_classification(n_samples=200, n_features=20,
n_informative=3, n_redundant=2,
n_repeated=0, n_classes=8,
n_clusters_per_class=1, flip_y=0.0,
class_sep=10, shuffle=False, random_state=0)
F, pv = f_classif(X, y)
assert_true((F > 0).all())
assert_true((pv > 0).all())
assert_true((pv < 1).all())
assert_true((pv[:5] < 0.05).all())
assert_true((pv[5:] > 1.e-4).all())
def test_select_percentile_classif():
# Test whether the relative univariate feature selection
# gets the correct items in a simple classification problem
# with the percentile heuristic
X, y = make_classification(n_samples=200, n_features=20,
n_informative=3, n_redundant=2,
n_repeated=0, n_classes=8,
n_clusters_per_class=1, flip_y=0.0,
class_sep=10, shuffle=False, random_state=0)
univariate_filter = SelectPercentile(f_classif, percentile=25)
X_r = univariate_filter.fit(X, y).transform(X)
X_r2 = GenericUnivariateSelect(f_classif, mode='percentile',
param=25).fit(X, y).transform(X)
assert_array_equal(X_r, X_r2)
support = univariate_filter.get_support()
gtruth = np.zeros(20)
gtruth[:5] = 1
assert_array_equal(support, gtruth)
def test_select_percentile_classif_sparse():
# Test whether the relative univariate feature selection
# gets the correct items in a simple classification problem
# with the percentile heuristic
X, y = make_classification(n_samples=200, n_features=20,
n_informative=3, n_redundant=2,
n_repeated=0, n_classes=8,
n_clusters_per_class=1, flip_y=0.0,
class_sep=10, shuffle=False, random_state=0)
X = sparse.csr_matrix(X)
univariate_filter = SelectPercentile(f_classif, percentile=25)
X_r = univariate_filter.fit(X, y).transform(X)
X_r2 = GenericUnivariateSelect(f_classif, mode='percentile',
param=25).fit(X, y).transform(X)
assert_array_equal(X_r.toarray(), X_r2.toarray())
support = univariate_filter.get_support()
gtruth = np.zeros(20)
gtruth[:5] = 1
assert_array_equal(support, gtruth)
X_r2inv = univariate_filter.inverse_transform(X_r2)
assert_true(sparse.issparse(X_r2inv))
support_mask = safe_mask(X_r2inv, support)
assert_equal(X_r2inv.shape, X.shape)
assert_array_equal(X_r2inv[:, support_mask].toarray(), X_r.toarray())
# Check other columns are empty
assert_equal(X_r2inv.getnnz(), X_r.getnnz())
##############################################################################
# Test univariate selection in classification settings
def test_select_kbest_classif():
# Test whether the relative univariate feature selection
# gets the correct items in a simple classification problem
# with the k best heuristic
X, y = make_classification(n_samples=200, n_features=20,
n_informative=3, n_redundant=2,
n_repeated=0, n_classes=8,
n_clusters_per_class=1, flip_y=0.0,
class_sep=10, shuffle=False, random_state=0)
univariate_filter = SelectKBest(f_classif, k=5)
X_r = univariate_filter.fit(X, y).transform(X)
X_r2 = GenericUnivariateSelect(
f_classif, mode='k_best', param=5).fit(X, y).transform(X)
assert_array_equal(X_r, X_r2)
support = univariate_filter.get_support()
gtruth = np.zeros(20)
gtruth[:5] = 1
assert_array_equal(support, gtruth)
def test_select_kbest_all():
# Test whether k="all" correctly returns all features.
X, y = make_classification(n_samples=20, n_features=10,
shuffle=False, random_state=0)
univariate_filter = SelectKBest(f_classif, k='all')
X_r = univariate_filter.fit(X, y).transform(X)
assert_array_equal(X, X_r)
def test_select_kbest_zero():
# Test whether k=0 correctly returns no features.
X, y = make_classification(n_samples=20, n_features=10,
shuffle=False, random_state=0)
univariate_filter = SelectKBest(f_classif, k=0)
univariate_filter.fit(X, y)
support = univariate_filter.get_support()
gtruth = np.zeros(10, dtype=bool)
assert_array_equal(support, gtruth)
X_selected = assert_warns_message(UserWarning, 'No features were selected',
univariate_filter.transform, X)
assert_equal(X_selected.shape, (20, 0))
def test_select_heuristics_classif():
# Test whether the relative univariate feature selection
# gets the correct items in a simple classification problem
# with the fdr, fwe and fpr heuristics
X, y = make_classification(n_samples=200, n_features=20,
n_informative=3, n_redundant=2,
n_repeated=0, n_classes=8,
n_clusters_per_class=1, flip_y=0.0,
class_sep=10, shuffle=False, random_state=0)
univariate_filter = SelectFwe(f_classif, alpha=0.01)
X_r = univariate_filter.fit(X, y).transform(X)
gtruth = np.zeros(20)
gtruth[:5] = 1
for mode in ['fdr', 'fpr', 'fwe']:
X_r2 = GenericUnivariateSelect(
f_classif, mode=mode, param=0.01).fit(X, y).transform(X)
assert_array_equal(X_r, X_r2)
support = univariate_filter.get_support()
assert_array_almost_equal(support, gtruth)
##############################################################################
# Test univariate selection in regression settings
def assert_best_scores_kept(score_filter):
scores = score_filter.scores_
support = score_filter.get_support()
assert_array_equal(np.sort(scores[support]),
np.sort(scores)[-support.sum():])
def test_select_percentile_regression():
# Test whether the relative univariate feature selection
# gets the correct items in a simple regression problem
# with the percentile heuristic
X, y = make_regression(n_samples=200, n_features=20,
n_informative=5, shuffle=False, random_state=0)
univariate_filter = SelectPercentile(f_regression, percentile=25)
X_r = univariate_filter.fit(X, y).transform(X)
assert_best_scores_kept(univariate_filter)
X_r2 = GenericUnivariateSelect(
f_regression, mode='percentile', param=25).fit(X, y).transform(X)
assert_array_equal(X_r, X_r2)
support = univariate_filter.get_support()
gtruth = np.zeros(20)
gtruth[:5] = 1
assert_array_equal(support, gtruth)
X_2 = X.copy()
X_2[:, np.logical_not(support)] = 0
assert_array_equal(X_2, univariate_filter.inverse_transform(X_r))
# Check inverse_transform respects dtype
assert_array_equal(X_2.astype(bool),
univariate_filter.inverse_transform(X_r.astype(bool)))
def test_select_percentile_regression_full():
# Test whether the relative univariate feature selection
# selects all features when '100%' is asked.
X, y = make_regression(n_samples=200, n_features=20,
n_informative=5, shuffle=False, random_state=0)
univariate_filter = SelectPercentile(f_regression, percentile=100)
X_r = univariate_filter.fit(X, y).transform(X)
assert_best_scores_kept(univariate_filter)
X_r2 = GenericUnivariateSelect(
f_regression, mode='percentile', param=100).fit(X, y).transform(X)
assert_array_equal(X_r, X_r2)
support = univariate_filter.get_support()
gtruth = np.ones(20)
assert_array_equal(support, gtruth)
def test_invalid_percentile():
X, y = make_regression(n_samples=10, n_features=20,
n_informative=2, shuffle=False, random_state=0)
assert_raises(ValueError, SelectPercentile(percentile=-1).fit, X, y)
assert_raises(ValueError, SelectPercentile(percentile=101).fit, X, y)
assert_raises(ValueError, GenericUnivariateSelect(mode='percentile',
param=-1).fit, X, y)
assert_raises(ValueError, GenericUnivariateSelect(mode='percentile',
param=101).fit, X, y)
def test_select_kbest_regression():
# Test whether the relative univariate feature selection
# gets the correct items in a simple regression problem
# with the k best heuristic
X, y = make_regression(n_samples=200, n_features=20, n_informative=5,
shuffle=False, random_state=0, noise=10)
univariate_filter = SelectKBest(f_regression, k=5)
X_r = univariate_filter.fit(X, y).transform(X)
assert_best_scores_kept(univariate_filter)
X_r2 = GenericUnivariateSelect(
f_regression, mode='k_best', param=5).fit(X, y).transform(X)
assert_array_equal(X_r, X_r2)
support = univariate_filter.get_support()
gtruth = np.zeros(20)
gtruth[:5] = 1
assert_array_equal(support, gtruth)
def test_select_heuristics_regression():
# Test whether the relative univariate feature selection
# gets the correct items in a simple regression problem
# with the fpr, fdr or fwe heuristics
X, y = make_regression(n_samples=200, n_features=20, n_informative=5,
shuffle=False, random_state=0, noise=10)
univariate_filter = SelectFpr(f_regression, alpha=0.01)
X_r = univariate_filter.fit(X, y).transform(X)
gtruth = np.zeros(20)
gtruth[:5] = 1
for mode in ['fdr', 'fpr', 'fwe']:
X_r2 = GenericUnivariateSelect(
f_regression, mode=mode, param=0.01).fit(X, y).transform(X)
assert_array_equal(X_r, X_r2)
support = univariate_filter.get_support()
assert_array_equal(support[:5], np.ones((5, ), dtype=np.bool))
assert_less(np.sum(support[5:] == 1), 3)
def test_select_fdr_regression():
# Test that fdr heuristic actually has low FDR.
def single_fdr(alpha, n_informative, random_state):
X, y = make_regression(n_samples=150, n_features=20,
n_informative=n_informative, shuffle=False,
random_state=random_state, noise=10)
with warnings.catch_warnings(record=True):
# Warnings can be raised when no features are selected
# (low alpha or very noisy data)
univariate_filter = SelectFdr(f_regression, alpha=alpha)
X_r = univariate_filter.fit(X, y).transform(X)
X_r2 = GenericUnivariateSelect(
f_regression, mode='fdr', param=alpha).fit(X, y).transform(X)
assert_array_equal(X_r, X_r2)
support = univariate_filter.get_support()
num_false_positives = np.sum(support[n_informative:] == 1)
num_true_positives = np.sum(support[:n_informative] == 1)
if num_false_positives == 0:
return 0.
false_discovery_rate = (num_false_positives /
(num_true_positives + num_false_positives))
return false_discovery_rate
for alpha in [0.001, 0.01, 0.1]:
for n_informative in [1, 5, 10]:
# As per Benjamini-Hochberg, the expected false discovery rate
# should be lower than alpha:
# FDR = E(FP / (TP + FP)) <= alpha
false_discovery_rate = np.mean([single_fdr(alpha, n_informative,
random_state) for
random_state in range(30)])
assert_greater_equal(alpha, false_discovery_rate)
# Make sure that the empirical false discovery rate increases
# with alpha:
if false_discovery_rate != 0:
assert_greater(false_discovery_rate, alpha / 10)
def test_select_fwe_regression():
# Test whether the relative univariate feature selection
# gets the correct items in a simple regression problem
# with the fwe heuristic
X, y = make_regression(n_samples=200, n_features=20,
n_informative=5, shuffle=False, random_state=0)
univariate_filter = SelectFwe(f_regression, alpha=0.01)
X_r = univariate_filter.fit(X, y).transform(X)
X_r2 = GenericUnivariateSelect(
f_regression, mode='fwe', param=0.01).fit(X, y).transform(X)
assert_array_equal(X_r, X_r2)
support = univariate_filter.get_support()
gtruth = np.zeros(20)
gtruth[:5] = 1
assert_array_equal(support[:5], np.ones((5, ), dtype=np.bool))
assert_less(np.sum(support[5:] == 1), 2)
def test_selectkbest_tiebreaking():
# Test whether SelectKBest actually selects k features in case of ties.
# Prior to 0.11, SelectKBest would return more features than requested.
Xs = [[0, 1, 1], [0, 0, 1], [1, 0, 0], [1, 1, 0]]
y = [1]
dummy_score = lambda X, y: (X[0], X[0])
for X in Xs:
sel = SelectKBest(dummy_score, k=1)
X1 = ignore_warnings(sel.fit_transform)([X], y)
assert_equal(X1.shape[1], 1)
assert_best_scores_kept(sel)
sel = SelectKBest(dummy_score, k=2)
X2 = ignore_warnings(sel.fit_transform)([X], y)
assert_equal(X2.shape[1], 2)
assert_best_scores_kept(sel)
def test_selectpercentile_tiebreaking():
# Test if SelectPercentile selects the right n_features in case of ties.
Xs = [[0, 1, 1], [0, 0, 1], [1, 0, 0], [1, 1, 0]]
y = [1]
dummy_score = lambda X, y: (X[0], X[0])
for X in Xs:
sel = SelectPercentile(dummy_score, percentile=34)
X1 = ignore_warnings(sel.fit_transform)([X], y)
assert_equal(X1.shape[1], 1)
assert_best_scores_kept(sel)
sel = SelectPercentile(dummy_score, percentile=67)
X2 = ignore_warnings(sel.fit_transform)([X], y)
assert_equal(X2.shape[1], 2)
assert_best_scores_kept(sel)
def test_tied_pvalues():
# Test whether k-best and percentiles work with tied pvalues from chi2.
# chi2 will return the same p-values for the following features, but it
# will return different scores.
X0 = np.array([[10000, 9999, 9998], [1, 1, 1]])
y = [0, 1]
for perm in itertools.permutations((0, 1, 2)):
X = X0[:, perm]
Xt = SelectKBest(chi2, k=2).fit_transform(X, y)
assert_equal(Xt.shape, (2, 2))
assert_not_in(9998, Xt)
Xt = SelectPercentile(chi2, percentile=67).fit_transform(X, y)
assert_equal(Xt.shape, (2, 2))
assert_not_in(9998, Xt)
def test_tied_scores():
# Test for stable sorting in k-best with tied scores.
X_train = np.array([[0, 0, 0], [1, 1, 1]])
y_train = [0, 1]
for n_features in [1, 2, 3]:
sel = SelectKBest(chi2, k=n_features).fit(X_train, y_train)
X_test = sel.transform([0, 1, 2])
assert_array_equal(X_test[0], np.arange(3)[-n_features:])
def test_nans():
# Assert that SelectKBest and SelectPercentile can handle NaNs.
# First feature has zero variance to confuse f_classif (ANOVA) and
# make it return a NaN.
X = [[0, 1, 0], [0, -1, -1], [0, .5, .5]]
y = [1, 0, 1]
for select in (SelectKBest(f_classif, 2),
SelectPercentile(f_classif, percentile=67)):
ignore_warnings(select.fit)(X, y)
assert_array_equal(select.get_support(indices=True), np.array([1, 2]))
def test_score_func_error():
X = [[0, 1, 0], [0, -1, -1], [0, .5, .5]]
y = [1, 0, 1]
for SelectFeatures in [SelectKBest, SelectPercentile, SelectFwe,
SelectFdr, SelectFpr, GenericUnivariateSelect]:
assert_raises(TypeError, SelectFeatures(score_func=10).fit, X, y)
def test_invalid_k():
X = [[0, 1, 0], [0, -1, -1], [0, .5, .5]]
y = [1, 0, 1]
assert_raises(ValueError, SelectKBest(k=-1).fit, X, y)
assert_raises(ValueError, SelectKBest(k=4).fit, X, y)
assert_raises(ValueError,
GenericUnivariateSelect(mode='k_best', param=-1).fit, X, y)
assert_raises(ValueError,
GenericUnivariateSelect(mode='k_best', param=4).fit, X, y)
def test_f_classif_constant_feature():
# Test that f_classif warns if a feature is constant throughout.
X, y = make_classification(n_samples=10, n_features=5)
X[:, 0] = 2.0
assert_warns(UserWarning, f_classif, X, y)
def test_no_feature_selected():
rng = np.random.RandomState(0)
# Generate random uncorrelated data: a strict univariate test should
    # reject all the features
X = rng.rand(40, 10)
y = rng.randint(0, 4, size=40)
strict_selectors = [
SelectFwe(alpha=0.01).fit(X, y),
SelectFdr(alpha=0.01).fit(X, y),
SelectFpr(alpha=0.01).fit(X, y),
SelectPercentile(percentile=0).fit(X, y),
SelectKBest(k=0).fit(X, y),
]
for selector in strict_selectors:
assert_array_equal(selector.get_support(), np.zeros(10))
X_selected = assert_warns_message(
UserWarning, 'No features were selected', selector.transform, X)
assert_equal(X_selected.shape, (40, 0))
|
bsd-3-clause
|
AlexRobson/scikit-learn
|
sklearn/utils/tests/test_murmurhash.py
|
261
|
2836
|
# Author: Olivier Grisel <olivier.grisel@ensta.org>
#
# License: BSD 3 clause
import numpy as np
from sklearn.externals.six import b, u
from sklearn.utils.murmurhash import murmurhash3_32
from numpy.testing import assert_array_almost_equal
from numpy.testing import assert_array_equal
from nose.tools import assert_equal, assert_true
def test_mmhash3_int():
assert_equal(murmurhash3_32(3), 847579505)
assert_equal(murmurhash3_32(3, seed=0), 847579505)
assert_equal(murmurhash3_32(3, seed=42), -1823081949)
assert_equal(murmurhash3_32(3, positive=False), 847579505)
assert_equal(murmurhash3_32(3, seed=0, positive=False), 847579505)
assert_equal(murmurhash3_32(3, seed=42, positive=False), -1823081949)
assert_equal(murmurhash3_32(3, positive=True), 847579505)
assert_equal(murmurhash3_32(3, seed=0, positive=True), 847579505)
assert_equal(murmurhash3_32(3, seed=42, positive=True), 2471885347)
def test_mmhash3_int_array():
rng = np.random.RandomState(42)
keys = rng.randint(-5342534, 345345, size=3 * 2 * 1).astype(np.int32)
keys = keys.reshape((3, 2, 1))
for seed in [0, 42]:
expected = np.array([murmurhash3_32(int(k), seed)
for k in keys.flat])
expected = expected.reshape(keys.shape)
assert_array_equal(murmurhash3_32(keys, seed), expected)
for seed in [0, 42]:
expected = np.array([murmurhash3_32(k, seed, positive=True)
for k in keys.flat])
expected = expected.reshape(keys.shape)
assert_array_equal(murmurhash3_32(keys, seed, positive=True),
expected)
def test_mmhash3_bytes():
assert_equal(murmurhash3_32(b('foo'), 0), -156908512)
assert_equal(murmurhash3_32(b('foo'), 42), -1322301282)
assert_equal(murmurhash3_32(b('foo'), 0, positive=True), 4138058784)
assert_equal(murmurhash3_32(b('foo'), 42, positive=True), 2972666014)
def test_mmhash3_unicode():
assert_equal(murmurhash3_32(u('foo'), 0), -156908512)
assert_equal(murmurhash3_32(u('foo'), 42), -1322301282)
assert_equal(murmurhash3_32(u('foo'), 0, positive=True), 4138058784)
assert_equal(murmurhash3_32(u('foo'), 42, positive=True), 2972666014)
def test_no_collision_on_byte_range():
previous_hashes = set()
for i in range(100):
h = murmurhash3_32(' ' * i, 0)
        assert_true(h not in previous_hashes,
                    "Found collision on growing string of spaces")
        previous_hashes.add(h)
def test_uniform_distribution():
n_bins, n_samples = 10, 100000
bins = np.zeros(n_bins, dtype=np.float)
for i in range(n_samples):
bins[murmurhash3_32(i, positive=True) % n_bins] += 1
means = bins / n_samples
expected = np.ones(n_bins) / n_bins
assert_array_almost_equal(means / expected, np.ones(n_bins), 2)
|
bsd-3-clause
|
choldgraf/ecogtools
|
ecogtools/tfr.py
|
1
|
2902
|
import mne
from glob import glob
import pandas as pd
import numpy as np
import sys
from tqdm import tqdm
__all__ = ['extract_amplitude']
def extract_amplitude(inst, freqs, n_cycles=7, normalize=False, n_hilbert=None,
picks=None, n_jobs=1, ):
"""Extract the time-varying amplitude for a frequency band.
If multiple freqs are given, the amplitude is calculated at each frequency
and then averaged across frequencies.
Parameters
----------
inst : instance of Raw
The data to have amplitude extracted
freqs : array of ints/floats, shape (n_bands, 2)
The frequencies to use. If multiple frequency bands given, amplitude
will be extracted at each and then averaged between frequencies. The
structure of each band is fmin, fmax.
n_cycles : int
The number of cycles to include in the filter length for the hilbert
transform.
normalize : bool
Whether to normalize each frequency amplitude by its mean before
averaging. This can be helpful if some frequencies have a much higher
base amplitude than others.
n_hilbert : int | 'auto' | None
The length of data to use in the Hilbert transform. The data will be
cut to last dimension of this size. If 'auto', the length equal to the
next highest power of two will be used.
picks : array | None
The channels to use for extraction
Returns
-------
inst : mne instance, same type as input 'inst'
The MNE instance with channels replaced with their time-varying
amplitude for the supplied frequency range.
"""
# Data checks
n_hilbert = inst.n_times if n_hilbert is None else n_hilbert
if n_hilbert == 'auto':
n_hilbert = int(2 ** np.ceil(np.log2(inst.n_times)))
n_hilbert = int(n_hilbert)
freqs = np.atleast_2d(freqs)
if freqs.shape[-1] != 2:
raise ValueError('freqs must be shape (n_fbands, 2)')
picks = range(len(inst.ch_names)) if picks is None else picks
# Filter for HFB and extract amplitude
bands = np.zeros([freqs.shape[0], len(inst.ch_names), inst.n_times])
for i, (fmin, fmax) in enumerate(freqs):
        sfreq = inst.info['sfreq']
        length_filt = int(np.floor(n_cycles * sfreq / fmin))
# Extract a range of frequency bands for averaging later
inst_band = inst.copy()
inst_band.filter(fmin, fmax, filter_length=length_filt, n_jobs=n_jobs)
inst_band.apply_hilbert(picks, envelope=True,
n_jobs=n_jobs, n_fft=n_hilbert)
if normalize is True:
# Scale frequency band so that the ratios of all are the same
inst_band_mn = inst_band._data.mean(1)[:, np.newaxis]
inst_band._data /= inst_band_mn
bands[i] = inst_band._data.copy()
# Average across fbands
inst._data[picks, :] = bands.mean(0)
return inst
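# --- Usage sketch (illustrative addition, not part of the original module) ---
# Builds a small synthetic MNE Raw object and extracts the averaged amplitude
# of two high-gamma sub-bands. The channel names, sampling rate and frequency
# bands are arbitrary assumptions chosen only for this demonstration.
if __name__ == '__main__':
    sfreq = 500.
    info = mne.create_info(['ch1', 'ch2'], sfreq, 'eeg')
    data = np.random.RandomState(0).randn(2, int(10 * sfreq))
    raw = mne.io.RawArray(data, info)
    hg = extract_amplitude(raw, freqs=[(70, 110), (110, 150)], normalize=True)
    print(hg._data.shape)  # (n_channels, n_times), now holding amplitude envelopes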
|
bsd-2-clause
|
jszymon/pacal
|
pacal/depvars/copulas.py
|
1
|
34667
|
"""Set of copulas different types"""
from __future__ import print_function
from pacal.integration import *
from pacal.interpolation import *
from matplotlib.collections import PolyCollection
import pacal.distr
#from pacal import *
from pacal.segments import PiecewiseDistribution, MInfSegment, PInfSegment, Segment, _segint
from pacal.segments import PiecewiseFunction
from pacal.distr import Distr
from pacal.standard_distr import *
#from pacal.nddistr import NDDistr, NDInterpolatedDistr, NDFun
from pacal.utils import epsunique, bisect, fmin2
from pacal.indeparith import _findSegList, convdiracs
from pacal.integration import integrate_fejer2, integrate_iter
from pacal.depvars.nddistr import NDDistr, NDFun
import pylab as plt
import sympy
import numpy as np
from sympy import Symbol, diff, pprint, simplify
from pylab import meshgrid, contour, xlabel, ylabel, gca, figure, axis
import mpl_toolkits.mplot3d.axes3d as p3
try:
from scipy.optimize.optimize import fminbound
have_Scipy_optimize = True
except ImportError:
have_Scipy_optimize = False
class Copula(NDDistr):
def __init__(self, marginals=None):
self.marginals = marginals
super(Copula, self).__init__(len(self.marginals), Vars=self.marginals)
self.a, self.b = self.ranges()
def ranges(self):
vars = self.marginals
a = zeros_like(vars)
b = zeros_like(vars)
for i in range(len(vars)):
a[i], b[i] = vars[i].range()
return a, b
def setMarginals(self, *marginals):
if len(marginals) > 0 and isinstance(marginals[0], pacal.distr.Distr):
self.marginals = marginals
def pdf(self, *X):
"""joint probability density function with marginals *X"""
if self.marginals is None or len(self.marginals) == 0:
U = UniformDistr()
F = [U.get_piecewise_cdf_interp()(X[i]) for i in range(len(X))]
return self.cpdf(*F)
else:
#assert len(self.marginals) >= len(X)
mi = ones_like(X[0])
for i in range(len(X)):
mi = mi * self.marginals[i].get_piecewise_pdf()(X[i])
F = [self.marginals[i].get_piecewise_cdf_interp()(X[i]) for i in range(len(X))]
return np.nan_to_num(self.cpdf(*F) * mi)
#return self.cpdf(*F) * mi
def cdf(self, *X):
"""joint cumulative distribution function with given marginals at point (x,y)"""
if self.marginals is None or len(self.marginals) == 0:
return self.ccdf(*X)
else:
F = [self.marginals[i].get_piecewise_cdf_interp()(X[i]) for i in range(len(X))]
return self.ccdf(*F)
def dualcdf(self, *X):
si = zeros_like(X[0])
for i in range(len(X)):
si += self.marginals[i].get_piecewise_cdf_interp()(X[i])
return si - self.ccdf(*X)
def jpdf_(self, f, g, x, y):
"""joint probability density function with marginals *X"""
if isinstance(f, Distr):
return self.cpdf(f.get_piecewise_cdf_interp()(x), g.get_piecewise_cdf_interp()(y)) * f.get_piecewise_pdf()(x) * g.get_piecewise_pdf()(y)
else:
return self.cpdf(f.cumint()(x), g.cumint()(y)) * f(x) * g(y)
def jcdf_(self, f, g, x, y):
"""joint cumulative distribution function with marginals f, g at point (x,y)"""
#return self.ccdf(f.get_piecewise_cdf()(X), g.get_piecewise_cdf()(Y))
return self.ccdf(f.get_piecewise_cdf()(x), g.get_piecewise_cdf()(y))
def cpdf(self, *X):
"""Copula density, joint probability density function with uniform U[0,1] marginals"""
#pass
pass #return zeros_like(X[0])
def ccdf(self, *X):
"""Copula, joint cumulative distribution function with uniform U[0,1] marginals"""
pass
def debug_plot(self, n=40, show_pdf=False, azim=210, elev=30):
#Z = self.cdf(f.get_piecewise_cdf()(X), g.get_piecewise_cdf()(Y))
#Z = self.jcdf(f, g, X, Y)
if self.marginals is not None and len(self.marginals) > 1:
f, g = self.marginals[:2]
self.setMarginals((f, g))
else:
f, g = UniformDistr(), UniformDistr()
Lf, Uf = f.ci(0.01)
Lg, Ug = g.ci(0.01)
deltaf = (Uf - Lf) / n
deltag = (Ug - Lg) / n
X, Y = meshgrid(arange(Lf, Uf, deltaf), arange(Lg, Ug, deltag))
if not show_pdf:
Z = self.cdf(X, Y)
fig = figure(figsize=plt.figaspect(1))
ax = fig.add_subplot(111, projection='3d', azim=azim, elev=elev)
#ax = p3.Axes3D(fig)
xf = arange(Lf, Uf, deltaf)
xg = arange(Lg, Ug, deltag)
cf = f.cdf(xf)
cg = g.cdf(xg)
ax.plot(xf, cf, zs=Ug, zdir='y', linewidth=3.0, color="k")
ax.plot(xg, cg, zs=Uf, zdir='x', linewidth=3.0, color="k")
ax.plot_wireframe(X, Y, Z, rstride=1, cstride=1, color='k', antialiased=True)#cmap=cm.jet
cset = ax.contour(X, Y, Z, zdir='z', color='k', offset=0)
ax.set_xlabel('$X$')
ax.set_xlim3d(Lf, Uf)
ax.set_ylabel('$Y$')
ax.set_ylim3d(Lg, Ug)
ax.set_zlabel('$Z$')
ax.set_zlim3d(0, 1)
else:
fig = figure(figsize=plt.figaspect(1))
ax2 = fig.add_subplot(111, projection='3d', azim=azim, elev=elev)
Z2 = self.pdf(X, Y)
xf = arange(Lf, Uf, deltaf)
xg = arange(Lg, Ug, deltag)
cf = f.pdf(xf)
cg = g.pdf(xg)
ax2.plot(xf, cf, zs=Ug, zdir='y', linewidth=3.0, color="k")
ax2.plot(xg, cg, zs=Uf, zdir='x', linewidth=3.0, color="k")
ax2.plot_wireframe(X, Y, Z2, rstride=1, cstride=1, color='k', antialiased=True)
cset = ax2.contour(X, Y, Z2, color='k', zdir='z', offset=0)
ax2.set_xlabel('$X$')
ax2.set_xlim3d(Lf, Uf)
ax2.set_ylabel('$Y$')
ax2.set_ylim3d(Lg, Ug)
ax2.set_zlabel('$Z$')
zlim = 1.01*np.max(array([np.max(Z2), max(cf), max(cg)]))
ax2.set_zlim3d(0,zlim)
def _segint(self, fun, L, U, force_minf = False, force_pinf = False, force_poleL = False, force_poleU = False,
debug_info = False, debug_plot = False):
#print params.integration_infinite.exponent
if L > U:
if params.segments.debug_info:
print("Warning: reversed integration interval, returning 0")
return 0, 0
if L == U:
return 0, 0
if force_minf:
#i, e = integrate_fejer2_minf(fun, U, a = L, debug_info = debug_info, debug_plot = True)
i, e = integrate_wide_interval(fun, L, U, debug_info = debug_info, debug_plot = debug_plot)
elif force_pinf:
#i, e = integrate_fejer2_pinf(fun, L, b = U, debug_info = debug_info, debug_plot = debug_plot)
i, e = integrate_wide_interval(fun, L, U, debug_info = debug_info, debug_plot = debug_plot)
elif not isinf(L) and not isinf(U):
if force_poleL and force_poleU:
i1, e1 = integrate_fejer2_Xn_transformP(fun, L, (L+U)*0.5, debug_info = debug_info, debug_plot = debug_plot)
i2, e2 = integrate_fejer2_Xn_transformN(fun, (L+U)*0.5, U, debug_info = debug_info, debug_plot = debug_plot)
i, e = i1+i2, e1+e2
elif force_poleL:
i, e = integrate_fejer2_Xn_transformP(fun, L, U, debug_info = debug_info, debug_plot = debug_plot)
elif force_poleU:
i, e = integrate_fejer2_Xn_transformN(fun, L, U, debug_info = debug_info, debug_plot = debug_plot)
else:
#i, e = integrate_fejer2(fun, L, U, debug_info = debug_info, debug_plot = debug_plot)
i, e = integrate_wide_interval(fun, L, U, debug_info = debug_info, debug_plot = debug_plot)
elif isinf(L) and isfinite(U) :
#i, e = integrate_wide_interval(fun, L, U, debug_info = debug_info, debug_plot = debug_plot)
i, e = integrate_fejer2_minf(fun, U, debug_info = debug_info, debug_plot = debug_plot, exponent = params.integration_infinite.exponent,)
elif isfinite(L) and isinf(U) :
#i, e = integrate_wide_interval(fun, L, U, debug_info = debug_info, debug_plot = debug_plot)
i, e = integrate_fejer2_pinf(fun, L, debug_info = debug_info, debug_plot = debug_plot, exponent = params.integration_infinite.exponent,)
elif L<U:
i, e = integrate_fejer2_pminf(fun, debug_info = debug_info, debug_plot = debug_plot, exponent = params.integration_infinite.exponent,)
else:
print("errors in _conv_div: x, segi, segj, L, U =", L, U)
return i,e
def cov(self, i=None, j=None):
if i is not None and j is not None:
var, c_var = self.prepare_var([i, j])
dij = self.eliminate(c_var)
f, g = dij.marginals[0], self.marginals[1]
fmean = f.mean()
gmean = g.mean()
f0, f1 = f.get_piecewise_pdf().range()
g0, g1 = g.get_piecewise_pdf().range()
print(fmean, gmean, var, c_var, f0, f1, g0, g1)
if i == j:
                c, e = integrate_fejer2(lambda x: (x - fmean) ** 2 * f.pdf(x), f0, f1)
else:
c, e = integrate_iter(lambda x, y: (x - fmean) * (y - gmean) * dij.pdf(x, y), f0, f1, g0, g1)
return c
else:
c = zeros((self.d, self.d))
for i in range(self.d):
for j in range(self.d):
c[i, j] = self.cov(i, j)
return c
def corrcoef(self, i=None, j=None):
if i is not None and j is not None:
var, c_var = self.prepare_var([i, j])
dij = self.eliminate(c_var)
f, g = dij.marginals[0], self.marginals[1]
return self.cov(i, j)/f.std()/g.std()
else:
c = zeros((self.d, self.d))
for i in range(self.d):
for j in range(self.d):
c[i, j] = self.corrcoef(i, j)
return c
def tau(self, i=None, j=None):
"""Kendall's tau: 4*\int C(x,y) dC(x,y)-1
"""
if i is not None and j is not None:
var, c_var = self.prepare_var([i, j])
dij = self.eliminate(c_var)
f, g = dij.marginals[0], self.marginals[1]
f0, f1 = f.get_piecewise_pdf().range()
g0, g1 = g.get_piecewise_pdf().range()
if i == j:
c, e = 1, 0
else:
c, e = integrate_iter(lambda x, y: dij.cdf(x, y) * dij.pdf(x, y), f0, f1, g0, g1)
c = 4 * c - 1
return c
else:
c = zeros((self.d, self.d))
for i in range(self.d):
for j in range(self.d):
c[i, j] = self.ctau(i, j)
return c
def beta(self, i=None, j=None):
"""Blomqvist's beta: 4 * C(0.5, 0.5) - 1
"""
return 4*self.ccdf(0.5,0.5)-1
def rho_s(self, i=None, j=None):
"""Spearmans rho: 12*\int x*y dC(x,y)-3 = 12 \int C(d,y)dxdy - 3
"""
if i is not None and j is not None:
var, c_var = self.prepare_var([i, j])
dij = self.eliminate(c_var)
if i == j:
c, e = 1, 0
else:
#c, e = integrate_iter(lambda x, y: x * y * dij.cpdf(x, y), 0.0, 1.0, 0.0, 1.0)
c, e = integrate_iter(lambda x, y: dij.ccdf(x, y), 0.0, 1.0, 0.0, 1.0)
c = 12 * c - 3
return c
else:
c = zeros((self.d, self.d))
for i in range(self.d):
for j in range(self.d):
c[i, j] = self.rho_s(i, j)
return c
def ctau(self, i=None, j=None):
"""Kendall's tau: 4*\int C(x,y) dC(x,y)-1
"""
if i is not None and j is not None:
var, c_var = self.prepare_var([i, j])
dij = self.eliminate(c_var)
if i == j:
c, e = 1, 0
else:
#c, e = integrate_iter(lambda x, y: x * y * dij.cpdf(x, y), 0.0, 1.0, 0.0, 1.0)
c, e = integrate_iter(lambda x, y: dij.ccdf(x, y) * dij.cpdf(x, y), 0.0, 1.0, 0.0, 1.0)
c = 4 * c - 1
return c
else:
c = zeros((self.d, self.d))
for i in range(self.d):
for j in range(self.d):
c[i, j] = self.ctau(i, j)
return c
class PiCopula(Copula):
def __init__(self, marginals=None):
super(PiCopula, self).__init__(marginals=marginals)
def cpdf(self, *X):
return ones_like(X[0])
def ccdf(self, *X):
pi = ones_like(X[0])
for xi in X:
pi *= xi
return pi
class MCopula(Copula):
def __init__(self, marginals=None):
super(MCopula, self).__init__(marginals)
self._segint = self._segmin
def cpdf(self, *X):
return zeros_like(X[0])#self.ccdf(*X)
def ccdf(self, *X):
mi = zeros_like(X[0])+1
for xi in X[0:]:
xia = array(xi)
ind = xia < mi
if isscalar(mi) | size(mi)==1:
mi = xia
else:
mi[ind] = xia[ind]
return mi
def _segmin(self, fun, L, U, force_minf = False, force_pinf = False, force_poleL = False, force_poleU = False,
debug_info = False, debug_plot = False):
xopt = fmin2(lambda x: fun(float(x)), L, U, xtol = 1e-16)
return xopt, 0#fun(xopt), 0
def debug_plot(self, n=40, show_pdf=False, azim=210, elev=30):
#Z = self.cdf(f.get_piecewise_cdf()(X), g.get_piecewise_cdf()(Y))
#Z = self.jcdf(f, g, X, Y)
if self.marginals is not None and len(self.marginals) > 1:
f, g = self.marginals[:2]
self.setMarginals((f, g))
else:
f, g = UniformDistr(), UniformDistr()
Lf, Uf = f.ci(0.01)
Lg, Ug = g.ci(0.01)
deltaf = (Uf - Lf) / n
deltag = (Ug - Lg) / n
X, Y = meshgrid(arange(Lf, Uf, deltaf), arange(Lg, Ug, deltag))
if not show_pdf:
Z = self.cdf(X, Y)
Z2 = self.pdf(X, Y)
fig = figure(figsize=plt.figaspect(1))
ax = fig.add_subplot(111, projection='3d', azim=azim, elev=elev)
#ax = p3.Axes3D(fig)
xf = arange(Lf, Uf, deltaf)
xg = arange(Lg, Ug, deltag)
cf = f.cdf(xf)
cg = g.cdf(xg)
ax.plot(xf, cf, zs=Ug, zdir='y', linewidth=3.0, color="k")
ax.plot(xg, cg, zs=Uf, zdir='x', linewidth=3.0, color="k")
cset = ax.contour(X, Y, Z, zdir='z', offset=0)
ax.plot_wireframe(X, Y, Z, rstride=1, cstride=1, color='k', antialiased=True)#cmap=cm.jet
ax.set_xlabel('$X$')
ax.set_xlim3d(Lf, Uf)
ax.set_ylabel('$Y$')
ax.set_ylim3d(Lg, Ug)
ax.set_zlabel('$Z$')
ax.set_zlim3d(0, 1)
# wykres F(x)=G(Y)
else:
fig = figure(figsize=plt.figaspect(1))
ax = fig.add_subplot(111, projection='3d')
#ax = fig.add_subplot(122, projection='3d')
t = linspace(0.01, 0.99,40)
X = f.quantile(t)
Y = g.quantile(t)
Z = f(X)*g(Y)
cf = f.pdf(xf)
cg = g.pdf(xg)
ax.plot(xf, cf, zs=Ug, zdir='y', linewidth=3.0, color="k")
ax.plot(xg, cg, zs=Uf, zdir='x', linewidth=3.0, color="k")
ax.plot_surface(np.vstack([X,X]), np.vstack([Y,Y]), np.vstack([np.zeros_like(Z),Z]),
cstride = 1, rstride = 1,# cmap=cm.jet,
linewidth = -1, edgecolor="k", color = "c", alpha=0.7, antialiased = True)
ax.axis((Lf, Uf, Lg, Ug))
zlim = 1.01*np.max(array([max(Z), max(cf), max(cg)]))
ax.set_zlim3d(0,zlim)
class WCopula(MCopula):
def __init__(self, marginals=None):
super(WCopula, self).__init__(marginals)
self._segint = self._segmax
def cpdf(self, *X):
return zeros_like(X[0])#self.ccdf(*X)
def ccdf(self, *X):
si = zeros_like(X[0])
for xi in X[0:]:
si += array(xi)
si = si - 1
ind = (si < 0)
if isscalar(si) | size(si)==1:
if ind:
si = 0.0
else:
si[ind] = 0
return si
def _segmax(self, fun, L, U, force_minf = False, force_pinf = False, force_poleL = False, force_poleU = False,
debug_info = False, debug_plot = False):
#xopt = fminbound(fun, L, U, xtol = 1e-16)
#xopt = fminbound(lambda x: 100-fun(float(x)), L, U, xtol = 1e-16)
xopt = fmin2(lambda x: 1-fun(float(x)), L, U, xtol = 1e-16)
return xopt, 0
def _segmin(self, fun, L, U, force_minf = False, force_pinf = False, force_poleL = False, force_poleU = False,
debug_info = False, debug_plot = False):
#xopt = fminbound(fun, L, U, xtol = 1e-16)
#xopt = fminbound(lambda x: 100-fun(float(x)), L, U, xtol = 1e-16)
xopt = fmin2(lambda x: fun(float(x)), L, U, xtol = 1e-16)
return xopt, 0
class ArchimedeanCopula(Copula):
# TODO
def __init__(self, fi=log, fi_deriv=lambda s: 1 / s,
fi_inv=exp, fi_inv_nth_deriv=exp,
marginals=None):
super(ArchimedeanCopula, self).__init__(marginals)
#self.theta = Symbol('theta')
self.fi = fi
self.fi_deriv = fi_deriv
self.fi_inv = fi_inv
self.fi_inv_nth_deriv = fi_inv_nth_deriv
#self.debug_info_()
def debug_info_(self):
vars = self.symVars
#for i in range(self.d):
# vars.append(sympy.Symbol("u{0}".format(i + 1)))
si = 0
for i in range(self.d):
si += self.fi(vars[i])
pi = 1;
for i in range(self.d):
pi *= self.fi_deriv(vars[i])
print("si=\n", pprint(si))
print("pi=\n", pprint(pi))
print("C=\n", pprint(self.fi_inv(si)))
#print "C=\n", pprint(self.ccdf(*tuple(vars)))
#print "c=\n", pprint(sympy.simplify(self.fi_inv_nth_deriv(si) * pi))
print("c=\n", pprint(self.fi_inv_nth_deriv(si) * pi))
#print "c=\n", pprint(self.cpdf(*tuple(vars)))
def tau_c(self):
return 1 + 4 * integrate_fejer2(lambda t : self.fi(t) / self.fi_deriv(t), 0, 1)[0]
def cpdf(self, *X):
assert len(X) == len(self.marginals), "incorrect copula dimension"
si = zeros_like(X[0])
for xi in X:
si = si + self.fi(xi)
si = self.fi_inv_nth_deriv(si)
pi = ones_like(X[0])
for xi in X:
pi = pi * self.fi_deriv(xi)
return si * pi
def ccdf(self, *X):
assert len(X) == len(self.marginals), "incorrect copula dimension"
si = zeros_like(X[0])
for xi in X:
si += self.fi(xi)
ind = (si < 0) # or isnan(si)
#if len(ind)>0:
si[ind] = 0.0
si = self.fi_inv(si)
return si
class ArchimedeanSymbolicCopula(ArchimedeanCopula):
# TODO
def __init__(self,
fi=lambda t, theta: log(t),
fi_inv=None, #lambda t, theta:(-sympy.log(t)) ** theta,
theta=2,
marginals=None):
self.theta = float(theta)#Symbol('theta')
self.t = Symbol('t')
self.s = Symbol('s')
self.d = len(marginals)
self.fi_ = fi
self.fi_inv_ = fi_inv
self.sym_fi = fi(self.t, self.theta)
self.sym_fi_deriv = sympy.diff(self.sym_fi, self.t)
if fi_inv is None:
self.sym_fi_inv = sympy.solve(self.sym_fi - self.s, self.t)[0]
else:
self.sym_fi_inv = fi_inv(self.s, self.theta)
self.sym_fi_inv_nth_deriv = sympy.diff(self.sym_fi_inv, self.s, self.d)
#self.debug_info()
super(ArchimedeanSymbolicCopula, self).__init__(fi=sympy.lambdify(self.t, self.sym_fi, "numpy"),
fi_deriv=sympy.lambdify(self.t, self.sym_fi_deriv, "numpy"),
fi_inv=sympy.lambdify(self.s, self.sym_fi_inv, "numpy"),
fi_inv_nth_deriv=sympy.lambdify(self.s, self.sym_fi_inv_nth_deriv, "numpy"),
marginals=marginals)
vars = self.symVars
si = 0
for i in range(self.d):
si += self.fi_(vars[i], self.theta)
self.sym_C = self.fi_inv_(si, self.theta)
def eliminate(self, var):
var, c_var = self.prepare_var(var)
c_marginals = [self.marginals[i] for i in c_var]
if len(var) == 0:
return self
return ArchimedeanSymbolicCopula(fi=self.fi_,
fi_inv=self.fi_inv_,
theta=self.theta,
marginals=c_marginals)
def ccond(self, var):
"""It returns conditional copula f([var, c_vars]) = C(c_var | var)
"""
var, c_var = self.prepare_var(var)
symvars = [self.symVars[i] for i in var]
DC = self.sym_C
for i in range(len(self.Vars)):
if i in set(var):
DC = sympy.diff(DC, self.symVars[i])
else:
pass
dC = sympy.lambdify(self.symVars, DC, "numpy")
return NDFun(self.d, self.Vars, sympy.lambdify(self.symVars, DC, "numpy"))
def condition(self, var, *X):
"""It returns conditional pdf for given copula
f(c_var) = Pr(c_var | var=X)
"""
var, c_var = self.prepare_var(var)
num = self.pdf
den = self.eliminate(c_var)
def fun_(*Y_):
j, k = 0, 0
Y, Yvar = [], []
#dF = ones_like(X[0])
for i in range(len(self.Vars)):
if i in set(var):
Y.append(X[j])
Yvar.append(X[j])
j += 1
else:
Y.append(Y_[k])
k += 1
return num(*Y) / den.pdf(*X)
return NDFun(len(c_var), [self.Vars[i] for i in c_var], fun_)
def conditionCDF(self, var, *X):
"""It returns conditional cdf for given copula
f(c_var) = Pr(Y<c_var | var=X)
"""
funcond = self.ccond(var)
var, c_var = self.prepare_var(var)
new_cond = var
def fun_(*Y_):
j, k = 0, 0
Y = []
dF = ones_like(X[0])
for i in range(len(self.Vars)):
if i in set(var):
Y.append(self.marginals[i].get_piecewise_cdf()(X[j]))
j += 1
else:
Y.append(self.marginals[i].get_piecewise_cdf()(Y_[k]))
dF *= self.marginals[i].get_piecewise_pdf()(Y_[k])
k += 1
return funcond(*Y)
return NDFun(len(new_cond), [self.Vars[i] for i in c_var], fun_)
def condfun(self, var):
"""It returns conditional cdf function f([var, c_vars]) = Pr(Y<c_var | var)
"""
funcond = self.ccond(var)
var, c_var = self.prepare_var(var)
new_cond = var
def fun_(*X):
j, k = 0, 0
Y = []
dF = ones_like(X[0])
for i in range(len(self.Vars)):
Y.append(self.marginals[i].get_piecewise_cdf()(X[i]))
if i in set(var):
pass
else:
dF *= self.marginals[i].get_piecewise_pdf()(X[i])
return funcond(*Y)
return NDFun(self.d, self.Vars, fun_)
def debug_info(self):
#self.fi_inv_defiv = simplify(sympy.diff(self.sym_fi_inv(self.s, self.theta), self.s))
print("theta=", self.theta)
print("fi(theta)=", self.fi_(self.t, sympy.Symbol("theta")))
print("fi=\n", pprint(self.sym_fi))
print("fi_deriv=\n", pprint(self.sym_fi_deriv))
print("fi_inv=\n", self.sym_fi_inv, ",\n", pprint(self.sym_fi_inv))
print("fi_inv_nth_deriv=\n", pprint(self.sym_fi_inv_nth_deriv))
print("fi=\n", sympy.latex(self.sym_fi))
print("fi_deriv=\n", sympy.latex(self.sym_fi_deriv))
print("fi_inv=\n", self.sym_fi_inv, ",\n", sympy.latex(self.sym_fi_inv))
print("fi_inv_nth_deriv=\n", sympy.latex(self.sym_fi_inv_nth_deriv))
def rand2d_invcdf(self, n):
u = self.marginals[0].rand_invcdf(n)
t = UniformDistr().rand(n)
v = zeros_like(t)
for i in range(len(u)):
#Cd = self.condition([0],u[i])
#print i
v[i] = self.conditionCDF([0], u[i]).distr_pdf.inverse(t[i])
#v[i] = bisect(lambda x : condition(x,u[i])-t[i], 1e-50,1)
return u, v
class GumbelCopula2d(Copula):
def __init__(self, theta=3.1, marginals=None):
super(GumbelCopula2d, self).__init__(marginals)
self.theta = theta
self.one_over_theta = 1.0 / theta
self.theta_square = theta ** 2
def fi(self, t):
return pow(-np.log(t), self.theta)# ** self.theta
def fi_inv(self, s):
return exp(-s ** self.one_over_theta)
def cpdf(self, *X):
si = zeros_like(X[0])
for xi in X:
si += self.fi(xi)
si = self.fi_inv(si) * (si ** (self.one_over_theta - 2.0) * (-1.0 + self.theta + si ** self.one_over_theta)) / self.theta_square
for xi in X:
si *= self.theta * self.fi(xi) ** (1 - self.one_over_theta) / xi
return si
def ccdf(self, *X):
si = zeros_like(X[0])
for xi in X:
si += self.fi(xi)
si = self.fi_inv(si)
return si
class GumbelCopula(ArchimedeanSymbolicCopula):
"""Clayton copula, C(theta=-1) = W, C(theta=0) = Pi, C(theta=+Inf) = M"""
def __init__(self, theta=3.1, marginals=None):
super(GumbelCopula, self).__init__(fi=self.fi_, fi_inv=self.fi_inv_,
theta=theta, marginals=marginals)
def fi_(self, t, theta):
return (-sympy.log(t)) ** theta
def fi_inv_(self, s, theta):
return sympy.exp(-(s ** (1 / theta)))
class ClaytonCopula(ArchimedeanSymbolicCopula):
"""Clayton copula, C(theta=-1) = W, C(theta=0) = Pi, C(theta=+Inf) = M"""
def __init__(self, theta=3.1, marginals=None):
super(ClaytonCopula, self).__init__(fi=self.fi_, fi_inv=self.fi_inv_,
theta=theta, marginals=marginals)
# theta = float(theta)
# self.theta = theta
# self.one_over_theta = 1.0 / theta
# self.theta_square = theta ** 2
def fi_(self, t, theta):
return 1 / theta * (t ** (-theta) - 1)
# #return self.one_over_theta * (pow(t, -self.theta) - 1.0)
def fi_inv_(self, s, theta):
return (1 + s * theta) ** (-1 / theta)
# def cpdf(self, *X):
# si = zeros_like(X[0])
# for xi in X:
# si += xi ** -self.theta
# si = si - 1
# ind = (si < 0) # or isnan(si)
# si[ind] = 0
# si = (1 + self.theta) / self.theta_square * si ** (-(self.one_over_theta + 2))
# for xi in X:
# si *= -self.theta * xi ** (-self.theta - 1)
# return si
# def ccdf(self, *X):
# si = zeros_like(X[0])
# for xi in X:
# si += self.fi(xi)
# ind = si < 0
# si[ind] = 0
# si = self.fi_inv(si)
# return si
class FrankCopula(ArchimedeanSymbolicCopula):
"""Clayton copula, C(theta=-1) = W, C(theta=0) = Pi, C(theta=+Inf) = M"""
def __init__(self, theta=3.1, marginals=None):
self.const2 = exp(-theta) - 1.0
super(FrankCopula, self).__init__(fi=self.fi_, fi_inv=self.fi_inv_,
theta=theta, marginals=marginals)
def fi_(self, t, theta):
return -sympy.log((sympy.exp(-t * theta) - 1) / (exp(-self.theta) - 1.0))
# def fi_(self, t, theta):
# return - log((exp(-t * theta) - 1) / (exp(-self.theta) - 1.0))
def fi_inv_(self, s, theta):
return -sympy.log(sympy.exp(-s - theta) - sympy.exp(-s) + 1) / theta
class FrankCopula2d(Copula):
"""Frank copula, C(theta=-Inf) = W, C(theta=0)~Pi, C(theta=+Inf)=M
B3 in H. Joe pp. 139-
"""
def __init__(self, theta=1.0, marginals=None):
self.theta = theta # delta
self.eta = -expm1(-self.theta)
self.one_over_theta = 1.0 / theta
self.theta_square = theta ** 2
super(FrankCopula2d, self).__init__(marginals)
def fi(self, t):
return logexp_m1(t * self.theta) - logexp_m1(self.theta)
def fi_inv(self, s):
if expm1(-self.theta) > 0:
return -1.0 / self.theta * logexp_p1(-s, expm1(-self.theta))
elif expm1(-self.theta) < 0:
return -1.0 / self.theta * log_1m_exp(-s, expm1(-self.theta))
else:
return -1.0 / self.theta * logexp_p1(-s, 0)
def cpdf(self, *X):
si = zeros_like(X[0])
pi = ones_like(X[0])
n = len(X)
for xi in X:
si += xi
pi *= -np.expm1(-self.theta * xi)
yi = self.theta * self.eta * np.exp(-self.theta * si) / (self.eta - pi) ** n
return yi
def ccdf(self, *X):
pi = ones_like(X[0])
for xi in X:
pi *= -expm1(-self.theta * xi)
yi = -self.one_over_theta * np.log1p(-pi / self.eta)
return yi
def logexp_p1(x, a=1.0):
"""return log(a*exp(x) + 1)"""
x = x + log(abs(a))
yy = log1p(exp(x))
ind = exp(x) > 1e16
yy[ind] = x[ind]
ind = exp(x) < 1e-16
yy[ind] = exp(x[ind])
return yy
def logexp_m1(x, a=1.0):
"""return -log(a*exp(-x) - 1)"""
x = x + log(abs(a))
yy = -log(abs(expm1(-x)))
if isscalar(x):
if exp(-x) > 1e16:
yy = x
if exp(-x) < 1e-16:
yy = exp(-x)
else:
ind = exp(-x) > 1e16
yy[ind] = x[ind]
ind = exp(-x) < 1e-16
yy[ind] = exp(-x[ind])
return yy
def log_1m_exp(x, a=1.0):
"""return -log(1-a*exp(-x))"""
x = x + log(abs(a))
yy = log(abs(expm1(x)))
ind = exp(x) > 1e16
yy[ind] = x[ind]
ind = exp(x) < 1e-16
yy[ind] = -exp(x[ind])
return yy
def convmean(F, G, p=0.5, q=0.5, theta=1.0):
"""Probabilistic weighted mean of f and g
"""
f = F.get_piecewise_pdf()
g = G.get_piecewise_pdf()
if p + q != 1.0 :
p1 = abs(p) / (abs(p) + abs(q))
q = abs(q) / (abs(p) + abs(q))
p = p1;
if q == 0:
return f;
bf = f.getBreaks()
bg = g.getBreaks()
b = add.outer(bf * p, bg * q)
fun = lambda x : convmeanx(F, G, segList, x, p, q, theta=theta)
ub = epsunique(b)
fg = PiecewiseDistribution([]);
op = lambda x, y : p * x + q * y;
if isinf(ub[0]):
segList = _findSegList(f, g, ub[1] - 1, op)
seg = MInfSegment(ub[1], fun)
segint = seg.toInterpolatedSegment()
fg.addSegment(segint)
ub = ub[1:]
if isinf(ub[-1]):
segList = _findSegList(f, g, ub[-2] + 1, op)
seg = PInfSegment(ub[-2], fun)
segint = seg.toInterpolatedSegment()
fg.addSegment(segint)
ub = ub[0:-1]
for i in range(len(ub) - 1) :
segList = _findSegList(f, g, (ub[i] + ub[i + 1]) / 2, op)
seg = Segment(ub[i], ub[i + 1], fun)
segint = seg.toInterpolatedSegment()
fg.addSegment(segint)
# Discrete parts of distributions
fg_discr = convdiracs(f, g, fun=lambda x, y : x * p + y * q)
for seg in fg_discr.getDiracs():
fg.addSegment(seg)
return fg
def convmeanx(F, G, segList, xx, p=0.5, q=0.5, theta=2):
"""Probabilistic weighted mean of f and g, integral at points xx
"""
if size(xx) == 1:
xx = asfarray([xx])
wyn = zeros_like(xx)
#P = PiCopula()
#P = GumbelCopula(theta)
P = FrankCopula2d(theta)
#P.corrcoef()
#P = ClaytonCopula(theta)
#fun = lambda t : P.fun(segi( t / p)/q, segj((x - t)/q)/q)
#W = PiCopula()
#fun = lambda t : P.ccdf(segi(t / p) / p / q, segj((x - t) / q) / p / q)
fun = lambda t : P.jpdf(F, G, (t / p), (x - t) / q) / p / q
for j in range(len(xx)) :
x = xx[j]
I = 0
err = 0
for segi, segj in segList:
if segi.isSegment() and segj.isSegment():
L = max(segi.a * p, (x - segj.b * q))
U = min(segi.b * p, (x - segj.a * q))
i, e = _segint(fun, L, U)
#elif segi.isDirac() and segj.isSegment():
# i = segi.f*segj((x-segi.a)/q)/q # TODO
# e=0;
#elif segi.isSegment() and segj.isDirac():
# i = segj.f*segi((x-segj.a)/p)/p # TODO
# e=0;
#elif segi.isDirac() and segj.isDirac():
# pass
# #i = segi(x-segj.a)/p/q # TODO
# #e=0;
I += i
err += e
wyn[j] = I
return wyn
if __name__ == "__main__":
from pylab import *
from .nddistr import plot_2d_distr
# # ========= ArchimedeanCopulas tests ============================
# A = ArchimedeanSymbolicCopula(fi=lambda t, theta : 1 / theta * (t ** (-theta) - 1),
# #fi_inv=lambda s, theta : (1+ theta*s) ** (-1/theta),
# theta=1.0,
# marginals=[BetaDistr(4, 4, sym="X"), BetaDistr(2, 4, sym="Y"), BetaDistr(5, 3, sym="Z")])
# #BetaDistr(2, 3).summary()
from pacal.depvars.nddistr import *
c = ClaytonCopula(theta = 0.2, marginals=[UniformDistr(), UniformDistr()])
c.plot()
d = IJthOrderStatsNDDistr(UniformDistr(), 10, 1, 10)
plot_2d_distr(d)
show()
    0/0  # deliberate ZeroDivisionError: stop the script here, so the FrankCopula demo below is not run
marginals = [BetaDistr(5, 2, sym="X"), BetaDistr(3, 6, sym="Y")]
C = FrankCopula(10, marginals)
C.plot()
plot_2d_distr(C)
C_condition_y_05 = C.condition([1], 0.5)
figure()
C_condition_y_05.distr_pdf.plot()
#print C_condition_y_05.distr_pdf.summary()
show()
    0 / 0  # deliberate error used as a hard stop
|
gpl-3.0
|
BhallaLab/benchmarks
|
neuro_morpho/buildCA1Pyr.py
|
1
|
5403
|
import moogli
import numpy
import pylab
import moose
from moose import neuroml
from PyQt4 import Qt, QtCore, QtGui
import matplotlib.pyplot as plt
import sys
import os
from moose.neuroml.ChannelML import ChannelML
PI = 3.14159265359
frameRunTime = 0.001
runtime = 1.0
inject = 25e-10
simdt = 5e-5
FaradayConst = 96485.33  # C / mol
def makePlot( cell ):
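    # Plot, for each channel in `chans`, the conductance density Gbar / area
    # (area taken as length * diameter * PI) against the geometrical distance
    # of each compartment from the soma.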
fig = plt.figure( figsize = ( 10, 12 ) )
chans = ['hd', 'kdr', 'na3', 'nax', 'kap', 'kad']
compts = cell.compartments
epos = cell.electrotonicDistanceFromSoma
gpos = cell.geometricalDistanceFromSoma
combo = list(zip( gpos, compts ))
#combo.sort( key=lambda c:c[1].x)
combo.sort( key= lambda c:c[0] )
for i in chans:
x = []
y = []
for j in combo:
area = j[1].length * j[1].diameter * PI
#x.append( j[1].x )
x.append( j[0] )
if moose.exists( j[1].path + '/' + i ):
elm = moose.element( j[1].path + '/' + i )
y.append( elm.Gbar / area )
else:
y.append( 0.0 )
pylab.plot( x, y, '-bo', label = i )
pylab.legend()
pylab.show()
def main():
cm = ChannelML( {'temperature': 32 })
cm.readChannelMLFromFile( 'CA1_migliore_reference/hd.xml' )
cm.readChannelMLFromFile( 'CA1_migliore_reference/kap.xml' )
cm.readChannelMLFromFile( 'CA1_migliore_reference/kad.xml' )
cm.readChannelMLFromFile( 'CA1_migliore_reference/kdr.xml' )
cm.readChannelMLFromFile( 'CA1_migliore_reference/na3.xml' )
cm.readChannelMLFromFile( 'CA1_migliore_reference/nax.xml' )
if ( len( sys.argv ) < 2 ):
print("Usage: ", sys.argv[0], " filename")
return
# filename = "./Bhavika_swcplusnmlfiles/preliminarily corrected nmlfiles/ascoli+buzsaki/valid/" + sys.argv[1]
filename = sys.argv[1]
moose.Neutral( '/model' )
# Load in the swc file.
cell = moose.loadModel( filename, '/model/ca1' )
for i in moose.wildcardFind( '/library/##' ):
i.tick = -1
chanDistrib = [ \
"EM", "#", "-58e-3", \
"initVm", "#", "-65e-3", \
"RM", "#", "2.8", \
"CM", "#", "0.01", \
"RA", "#", "1.5", \
"RA", "#axon#", "0.5", \
"hd", "#dend#,#apical#", "5e-2*(1+(r*3e4))", \
"kdr", "#", "100", \
"na3", "#soma#,#dend#,#apical#", "250", \
"nax", "#axon#", "1250", \
"kap", "#axon#,#soma#", "300", \
"kap", "#dend#,#apical#", "150*(1+sign(100-r*1e6)) * (1+(r*1e4))", \
"kad", "#dend#,#apical#", "150*(1+sign(r*1e6-100))*(1+r*1e4)", \
]
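    # chanDistrib above is read three strings at a time: (field or channel
    # name, compartment path wildcard, value or expression).  The expressions
    # appear to use r, the distance from the soma in metres (hence the r*1e6
    # conversions to microns); see parseChanDistrib() below.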
moose.showfields( cell[0] )
cell[0].channelDistribution = chanDistrib
cell[0].parseChanDistrib()
for i in range( 8 ):
moose.setClock( i, simdt )
hsolve = moose.HSolve( '/model/ca1/hsolve' )
hsolve.dt = simdt
hsolve.target = '/model/ca1/soma'
'''
'''
moose.reinit()
makePlot( cell[0] )
# Now we set up the display
moose.le( '/model/ca1/soma' )
soma = moose.element( '/model/ca1/soma' )
kap = moose.element( '/model/ca1/soma/kap' )
graphs = moose.Neutral( '/graphs' )
vtab = moose.Table( '/graphs/vtab' )
moose.connect( vtab, 'requestOut', soma, 'getVm' )
kaptab = moose.Table( '/graphs/kaptab' )
moose.connect( kaptab, 'requestOut', kap, 'getGk' )
compts = moose.wildcardFind( "/model/ca1/#[ISA=CompartmentBase]" )
'''
for i in compts:
if moose.exists( i.path + '/Na' ):
print i.path, moose.element( i.path + '/Na' ).Gbar, \
moose.element( i.path + '/K_DR' ).Gbar, \
i.Rm, i.Ra, i.Cm
'''
'''
Na = moose.wildcardFind( '/model/ca1/#/Na#' )
print Na
Na2 = []
for i in compts:
if ( moose.exists( i.path + '/NaF2' ) ):
Na2.append( moose.element( i.path + '/NaF2' ) )
if ( moose.exists( i.path + '/NaPF_SS' ) ):
Na2.append( moose.element( i.path + '/NaPF_SS' ) )
ecomptPath = map( lambda x : x.path, compts )
print "Na placed in ", len( Na ), len( Na2 ), " out of ", len( compts ), " compts."
'''
compts[0].inject = inject
ecomptPath = [x.path for x in compts]
# Graphics stuff here.
app = QtGui.QApplication(sys.argv)
morphology = moogli.read_morphology_from_moose(name = "", path = "/model/ca1")
morphology.create_group( "group_all", ecomptPath, -0.08, 0.02, \
[0.0, 0.0, 1.0, 1.0], [1.0, 0.0, 0.0, 0.1] )
viewer = moogli.DynamicMorphologyViewerWidget(morphology)
def callback( morphology, viewer ):
moose.start( frameRunTime )
Vm = [moose.element( x ).Vm for x in compts]
morphology.set_color( "group_all", Vm )
currTime = moose.element( '/clock' ).currentTime
#print currTime, compts[0].Vm
if ( currTime < runtime ):
return True
return False
viewer.set_callback( callback, idletime = 0 )
viewer.showMaximized()
viewer.show()
app.exec_()
t = numpy.arange( 0, runtime, vtab.dt )
fig = plt.figure()
p1 = fig.add_subplot(311)
p2 = fig.add_subplot(312)
p2.plot( t, vtab.vector, label = 'Vm Soma' )
p2.legend()
p3 = fig.add_subplot(313)
p3.plot( t, kaptab.vector, label = 'kap Soma' )
p3.legend()
plt.show()
if __name__ == '__main__':
main()
|
gpl-2.0
|
bthirion/scikit-learn
|
sklearn/utils/estimator_checks.py
|
16
|
64623
|
from __future__ import print_function
import types
import warnings
import sys
import traceback
import pickle
from copy import deepcopy
import numpy as np
from scipy import sparse
from scipy.stats import rankdata
import struct
from sklearn.externals.six.moves import zip
from sklearn.externals.joblib import hash, Memory
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_raises_regex
from sklearn.utils.testing import assert_raise_message
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_not_equal
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_false
from sklearn.utils.testing import assert_in
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_warns_message
from sklearn.utils.testing import META_ESTIMATORS
from sklearn.utils.testing import set_random_state
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_greater_equal
from sklearn.utils.testing import SkipTest
from sklearn.utils.testing import ignore_warnings
from sklearn.utils.testing import assert_dict_equal
from sklearn.base import (clone, ClassifierMixin, RegressorMixin,
TransformerMixin, ClusterMixin, BaseEstimator)
from sklearn.metrics import accuracy_score, adjusted_rand_score, f1_score
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
from sklearn.random_projection import BaseRandomProjection
from sklearn.feature_selection import SelectKBest
from sklearn.svm.base import BaseLibSVM
from sklearn.pipeline import make_pipeline
from sklearn.exceptions import ConvergenceWarning
from sklearn.exceptions import DataConversionWarning
from sklearn.exceptions import SkipTestWarning
from sklearn.model_selection import train_test_split
from sklearn.utils import shuffle
from sklearn.utils.fixes import signature
from sklearn.utils.validation import has_fit_parameter
from sklearn.preprocessing import StandardScaler
from sklearn.datasets import load_iris, load_boston, make_blobs
BOSTON = None
CROSS_DECOMPOSITION = ['PLSCanonical', 'PLSRegression', 'CCA', 'PLSSVD']
MULTI_OUTPUT = ['CCA', 'DecisionTreeRegressor', 'ElasticNet',
'ExtraTreeRegressor', 'ExtraTreesRegressor', 'GaussianProcess',
'GaussianProcessRegressor',
'KNeighborsRegressor', 'KernelRidge', 'Lars', 'Lasso',
'LassoLars', 'LinearRegression', 'MultiTaskElasticNet',
'MultiTaskElasticNetCV', 'MultiTaskLasso', 'MultiTaskLassoCV',
'OrthogonalMatchingPursuit', 'PLSCanonical', 'PLSRegression',
'RANSACRegressor', 'RadiusNeighborsRegressor',
'RandomForestRegressor', 'Ridge', 'RidgeCV']
def _yield_non_meta_checks(name, Estimator):
yield check_estimators_dtypes
yield check_fit_score_takes_y
yield check_dtype_object
yield check_sample_weights_pandas_series
yield check_sample_weights_list
yield check_estimators_fit_returns_self
    # Check that all estimators yield informative messages when
# trained on empty datasets
yield check_estimators_empty_data_messages
if name not in CROSS_DECOMPOSITION + ['SpectralEmbedding']:
# SpectralEmbedding is non-deterministic,
# see issue #4236
# cross-decomposition's "transform" returns X and Y
yield check_pipeline_consistency
if name not in ['Imputer']:
        # Test that all estimators check their input for NaNs and infs
yield check_estimators_nan_inf
if name not in ['GaussianProcess']:
# FIXME!
# in particular GaussianProcess!
yield check_estimators_overwrite_params
if hasattr(Estimator, 'sparsify'):
yield check_sparsify_coefficients
yield check_estimator_sparse_data
# Test that estimators can be pickled, and once pickled
# give the same answer as before.
yield check_estimators_pickle
def _yield_classifier_checks(name, Classifier):
# test classifiers can handle non-array data
yield check_classifier_data_not_an_array
# test classifiers trained on a single label always return this label
yield check_classifiers_one_label
yield check_classifiers_classes
yield check_estimators_partial_fit_n_features
# basic consistency testing
yield check_classifiers_train
yield check_classifiers_regression_target
if (name not in
["MultinomialNB", "LabelPropagation", "LabelSpreading"] and
# TODO some complication with -1 label
name not in ["DecisionTreeClassifier", "ExtraTreeClassifier"]):
# We don't raise a warning in these classifiers, as
# the column y interface is used by the forests.
yield check_supervised_y_2d
# test if NotFittedError is raised
yield check_estimators_unfitted
if 'class_weight' in Classifier().get_params().keys():
yield check_class_weight_classifiers
yield check_non_transformer_estimators_n_iter
# test if predict_proba is a monotonic transformation of decision_function
yield check_decision_proba_consistency
@ignore_warnings(category=DeprecationWarning)
def check_supervised_y_no_nan(name, Estimator):
    # Check that the estimator raises an error when the targets y contain
    # non-finite values (inf/NaN).
rng = np.random.RandomState(888)
X = rng.randn(10, 5)
y = np.ones(10) * np.inf
y = multioutput_estimator_convert_y_2d(name, y)
errmsg = "Input contains NaN, infinity or a value too large for " \
"dtype('float64')."
try:
Estimator().fit(X, y)
except ValueError as e:
if str(e) != errmsg:
raise ValueError("Estimator {0} raised warning as expected, but "
"does not match expected error message"
.format(name))
else:
raise ValueError("Estimator {0} should have raised error on fitting "
"array y with NaN value.".format(name))
def _yield_regressor_checks(name, Regressor):
# TODO: test with intercept
# TODO: test with multiple responses
# basic testing
yield check_regressors_train
yield check_regressor_data_not_an_array
yield check_estimators_partial_fit_n_features
yield check_regressors_no_decision_function
yield check_supervised_y_2d
yield check_supervised_y_no_nan
if name != 'CCA':
# check that the regressor handles int input
yield check_regressors_int
if name != "GaussianProcessRegressor":
# Test if NotFittedError is raised
yield check_estimators_unfitted
yield check_non_transformer_estimators_n_iter
def _yield_transformer_checks(name, Transformer):
# All transformers should either deal with sparse data or raise an
# exception with type TypeError and an intelligible error message
if name not in ['AdditiveChi2Sampler', 'Binarizer', 'Normalizer',
'PLSCanonical', 'PLSRegression', 'CCA', 'PLSSVD']:
yield check_transformer_data_not_an_array
# these don't actually fit the data, so don't raise errors
if name not in ['AdditiveChi2Sampler', 'Binarizer',
'FunctionTransformer', 'Normalizer']:
# basic tests
yield check_transformer_general
yield check_transformers_unfitted
# Dependent on external solvers and hence accessing the iter
# param is non-trivial.
external_solver = ['Isomap', 'KernelPCA', 'LocallyLinearEmbedding',
'RandomizedLasso', 'LogisticRegressionCV']
if name not in external_solver:
yield check_transformer_n_iter
def _yield_clustering_checks(name, Clusterer):
yield check_clusterer_compute_labels_predict
if name not in ('WardAgglomeration', "FeatureAgglomeration"):
# this is clustering on the features
# let's not test that here.
yield check_clustering
yield check_estimators_partial_fit_n_features
yield check_non_transformer_estimators_n_iter
def _yield_all_checks(name, Estimator):
for check in _yield_non_meta_checks(name, Estimator):
yield check
if issubclass(Estimator, ClassifierMixin):
for check in _yield_classifier_checks(name, Estimator):
yield check
if issubclass(Estimator, RegressorMixin):
for check in _yield_regressor_checks(name, Estimator):
yield check
if issubclass(Estimator, TransformerMixin):
for check in _yield_transformer_checks(name, Estimator):
yield check
if issubclass(Estimator, ClusterMixin):
for check in _yield_clustering_checks(name, Estimator):
yield check
yield check_fit2d_predict1d
yield check_fit2d_1sample
yield check_fit2d_1feature
yield check_fit1d_1feature
yield check_fit1d_1sample
yield check_get_params_invariance
yield check_dict_unchanged
yield check_no_fit_attributes_set_in_init
yield check_dont_overwrite_parameters
def check_estimator(Estimator):
"""Check if estimator adheres to scikit-learn conventions.
    This function will run an extensive test-suite for input validation,
shapes, etc.
Additional tests for classifiers, regressors, clustering or transformers
will be run if the Estimator class inherits from the corresponding mixin
from sklearn.base.
Parameters
----------
Estimator : class
Class to check. Estimator is a class object (not an instance).
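    Examples
    --------
    A typical call looks like this (illustrative; assuming a scikit-learn
    estimator class such as LogisticRegression)::

        from sklearn.linear_model import LogisticRegression
        check_estimator(LogisticRegression)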
"""
name = Estimator.__name__
check_parameters_default_constructible(name, Estimator)
for check in _yield_all_checks(name, Estimator):
try:
check(name, Estimator)
except SkipTest as message:
# the only SkipTest thrown currently results from not
# being able to import pandas.
warnings.warn(message, SkipTestWarning)
def _boston_subset(n_samples=200):
global BOSTON
if BOSTON is None:
boston = load_boston()
X, y = boston.data, boston.target
X, y = shuffle(X, y, random_state=0)
X, y = X[:n_samples], y[:n_samples]
X = StandardScaler().fit_transform(X)
BOSTON = X, y
return BOSTON
def set_testing_parameters(estimator):
# set parameters to speed up some estimators and
# avoid deprecated behaviour
params = estimator.get_params()
if ("n_iter" in params and estimator.__class__.__name__ != "TSNE"):
estimator.set_params(n_iter=5)
if "max_iter" in params:
warnings.simplefilter("ignore", ConvergenceWarning)
if estimator.max_iter is not None:
estimator.set_params(max_iter=min(5, estimator.max_iter))
# LinearSVR
if estimator.__class__.__name__ == 'LinearSVR':
estimator.set_params(max_iter=20)
# NMF
if estimator.__class__.__name__ == 'NMF':
estimator.set_params(max_iter=100)
# MLP
if estimator.__class__.__name__ in ['MLPClassifier', 'MLPRegressor']:
estimator.set_params(max_iter=100)
if "n_resampling" in params:
# randomized lasso
estimator.set_params(n_resampling=5)
if "n_estimators" in params:
# especially gradient boosting with default 100
estimator.set_params(n_estimators=min(5, estimator.n_estimators))
if "max_trials" in params:
# RANSAC
estimator.set_params(max_trials=10)
if "n_init" in params:
# K-Means
estimator.set_params(n_init=2)
if "decision_function_shape" in params:
# SVC
estimator.set_params(decision_function_shape='ovo')
if estimator.__class__.__name__ == "SelectFdr":
# be tolerant of noisy datasets (not actually speed)
estimator.set_params(alpha=.5)
if estimator.__class__.__name__ == "TheilSenRegressor":
estimator.max_subpopulation = 100
if isinstance(estimator, BaseRandomProjection):
# Due to the jl lemma and often very few samples, the number
# of components of the random matrix projection will be probably
# greater than the number of features.
# So we impose a smaller number (avoid "auto" mode)
estimator.set_params(n_components=1)
if isinstance(estimator, SelectKBest):
# SelectKBest has a default of k=10
        # which is more features than we have in most cases.
estimator.set_params(k=1)
class NotAnArray(object):
" An object that is convertable to an array"
def __init__(self, data):
self.data = data
def __array__(self, dtype=None):
return self.data
def _is_32bit():
"""Detect if process is 32bit Python."""
return struct.calcsize('P') * 8 == 32
def check_estimator_sparse_data(name, Estimator):
rng = np.random.RandomState(0)
X = rng.rand(40, 10)
X[X < .8] = 0
X_csr = sparse.csr_matrix(X)
y = (4 * rng.rand(40)).astype(np.int)
for sparse_format in ['csr', 'csc', 'dok', 'lil', 'coo', 'dia', 'bsr']:
X = X_csr.asformat(sparse_format)
# catch deprecation warnings
with ignore_warnings(category=DeprecationWarning):
if name in ['Scaler', 'StandardScaler']:
estimator = Estimator(with_mean=False)
else:
estimator = Estimator()
set_testing_parameters(estimator)
# fit and predict
try:
with ignore_warnings(category=DeprecationWarning):
estimator.fit(X, y)
if hasattr(estimator, "predict"):
pred = estimator.predict(X)
assert_equal(pred.shape, (X.shape[0],))
if hasattr(estimator, 'predict_proba'):
probs = estimator.predict_proba(X)
assert_equal(probs.shape, (X.shape[0], 4))
except TypeError as e:
if 'sparse' not in repr(e):
print("Estimator %s doesn't seem to fail gracefully on "
"sparse data: error message state explicitly that "
"sparse input is not supported if this is not the case."
% name)
raise
except Exception:
print("Estimator %s doesn't seem to fail gracefully on "
"sparse data: it should raise a TypeError if sparse input "
"is explicitly not supported." % name)
raise
@ignore_warnings(category=DeprecationWarning)
def check_sample_weights_pandas_series(name, Estimator):
# check that estimators will accept a 'sample_weight' parameter of
# type pandas.Series in the 'fit' function.
estimator = Estimator()
if has_fit_parameter(estimator, "sample_weight"):
try:
import pandas as pd
X = pd.DataFrame([[1, 1], [1, 2], [1, 3], [2, 1], [2, 2], [2, 3]])
y = pd.Series([1, 1, 1, 2, 2, 2])
weights = pd.Series([1] * 6)
try:
estimator.fit(X, y, sample_weight=weights)
except ValueError:
raise ValueError("Estimator {0} raises error if "
"'sample_weight' parameter is of "
"type pandas.Series".format(name))
except ImportError:
raise SkipTest("pandas is not installed: not testing for "
"input of type pandas.Series to class weight.")
@ignore_warnings(category=DeprecationWarning)
def check_sample_weights_list(name, Estimator):
# check that estimators will accept a 'sample_weight' parameter of
# type list in the 'fit' function.
estimator = Estimator()
if has_fit_parameter(estimator, "sample_weight"):
rnd = np.random.RandomState(0)
X = rnd.uniform(size=(10, 3))
y = np.arange(10) % 3
y = multioutput_estimator_convert_y_2d(name, y)
sample_weight = [3] * 10
# Test that estimators don't raise any exception
estimator.fit(X, y, sample_weight=sample_weight)
@ignore_warnings(category=(DeprecationWarning, UserWarning))
def check_dtype_object(name, Estimator):
# check that estimators treat dtype object as numeric if possible
rng = np.random.RandomState(0)
X = rng.rand(40, 10).astype(object)
y = (X[:, 0] * 4).astype(np.int)
y = multioutput_estimator_convert_y_2d(name, y)
estimator = Estimator()
set_testing_parameters(estimator)
estimator.fit(X, y)
if hasattr(estimator, "predict"):
estimator.predict(X)
if hasattr(estimator, "transform"):
estimator.transform(X)
try:
estimator.fit(X, y.astype(object))
except Exception as e:
if "Unknown label type" not in str(e):
raise
X[0, 0] = {'foo': 'bar'}
msg = "argument must be a string or a number"
assert_raises_regex(TypeError, msg, estimator.fit, X, y)
@ignore_warnings
def check_dict_unchanged(name, Estimator):
    # SpectralCoclustering raises
    # "ValueError: Found array with 0 feature(s) (shape=(23, 0)) while a
    # minimum of 1 is required." so it is skipped here.
if name in ['SpectralCoclustering']:
return
rnd = np.random.RandomState(0)
if name in ['RANSACRegressor']:
X = 3 * rnd.uniform(size=(20, 3))
else:
X = 2 * rnd.uniform(size=(20, 3))
y = X[:, 0].astype(np.int)
y = multioutput_estimator_convert_y_2d(name, y)
estimator = Estimator()
set_testing_parameters(estimator)
if hasattr(estimator, "n_components"):
estimator.n_components = 1
if hasattr(estimator, "n_clusters"):
estimator.n_clusters = 1
if hasattr(estimator, "n_best"):
estimator.n_best = 1
set_random_state(estimator, 1)
# should be just `estimator.fit(X, y)`
# after merging #6141
if name in ['SpectralBiclustering']:
estimator.fit(X)
else:
estimator.fit(X, y)
for method in ["predict", "transform", "decision_function",
"predict_proba"]:
if hasattr(estimator, method):
dict_before = estimator.__dict__.copy()
getattr(estimator, method)(X)
assert_dict_equal(estimator.__dict__, dict_before,
'Estimator changes __dict__ during %s' % method)
def is_public_parameter(attr):
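    # e.g. is_public_parameter('tol') -> True, is_public_parameter('coef_') -> False:
    # anything starting or ending with an underscore is treated as non-public.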
return not (attr.startswith('_') or attr.endswith('_'))
def check_dont_overwrite_parameters(name, Estimator):
# check that fit method only changes or sets private attributes
if hasattr(Estimator.__init__, "deprecated_original"):
# to not check deprecated classes
return
rnd = np.random.RandomState(0)
X = 3 * rnd.uniform(size=(20, 3))
y = X[:, 0].astype(np.int)
y = multioutput_estimator_convert_y_2d(name, y)
estimator = Estimator()
set_testing_parameters(estimator)
if hasattr(estimator, "n_components"):
estimator.n_components = 1
if hasattr(estimator, "n_clusters"):
estimator.n_clusters = 1
set_random_state(estimator, 1)
dict_before_fit = estimator.__dict__.copy()
estimator.fit(X, y)
dict_after_fit = estimator.__dict__
public_keys_after_fit = [key for key in dict_after_fit.keys()
if is_public_parameter(key)]
attrs_added_by_fit = [key for key in public_keys_after_fit
if key not in dict_before_fit.keys()]
# check that fit doesn't add any public attribute
assert_true(not attrs_added_by_fit,
('Estimator adds public attribute(s) during'
' the fit method.'
' Estimators are only allowed to add private attributes'
' either started with _ or ended'
' with _ but %s added' % ', '.join(attrs_added_by_fit)))
# check that fit doesn't change any public attribute
attrs_changed_by_fit = [key for key in public_keys_after_fit
if (dict_before_fit[key]
is not dict_after_fit[key])]
assert_true(not attrs_changed_by_fit,
('Estimator changes public attribute(s) during'
' the fit method. Estimators are only allowed'
' to change attributes started'
' or ended with _, but'
' %s changed' % ', '.join(attrs_changed_by_fit)))
def check_fit2d_predict1d(name, Estimator):
# check by fitting a 2d array and predicting with a 1d array
rnd = np.random.RandomState(0)
X = 3 * rnd.uniform(size=(20, 3))
y = X[:, 0].astype(np.int)
y = multioutput_estimator_convert_y_2d(name, y)
estimator = Estimator()
set_testing_parameters(estimator)
if hasattr(estimator, "n_components"):
estimator.n_components = 1
if hasattr(estimator, "n_clusters"):
estimator.n_clusters = 1
set_random_state(estimator, 1)
estimator.fit(X, y)
for method in ["predict", "transform", "decision_function",
"predict_proba"]:
if hasattr(estimator, method):
assert_raise_message(ValueError, "Reshape your data",
getattr(estimator, method), X[0])
@ignore_warnings
def check_fit2d_1sample(name, Estimator):
    # check fitting a 2d array with only 1 sample
rnd = np.random.RandomState(0)
X = 3 * rnd.uniform(size=(1, 10))
y = X[:, 0].astype(np.int)
y = multioutput_estimator_convert_y_2d(name, y)
estimator = Estimator()
set_testing_parameters(estimator)
if hasattr(estimator, "n_components"):
estimator.n_components = 1
if hasattr(estimator, "n_clusters"):
estimator.n_clusters = 1
set_random_state(estimator, 1)
try:
estimator.fit(X, y)
except ValueError:
pass
@ignore_warnings
def check_fit2d_1feature(name, Estimator):
    # check fitting a 2d array with only 1 feature
rnd = np.random.RandomState(0)
X = 3 * rnd.uniform(size=(10, 1))
y = X[:, 0].astype(np.int)
y = multioutput_estimator_convert_y_2d(name, y)
estimator = Estimator()
set_testing_parameters(estimator)
if hasattr(estimator, "n_components"):
estimator.n_components = 1
if hasattr(estimator, "n_clusters"):
estimator.n_clusters = 1
set_random_state(estimator, 1)
try:
estimator.fit(X, y)
except ValueError:
pass
@ignore_warnings
def check_fit1d_1feature(name, Estimator):
# check fitting 1d array with 1 feature
rnd = np.random.RandomState(0)
X = 3 * rnd.uniform(size=(20))
y = X.astype(np.int)
y = multioutput_estimator_convert_y_2d(name, y)
estimator = Estimator()
set_testing_parameters(estimator)
if hasattr(estimator, "n_components"):
estimator.n_components = 1
if hasattr(estimator, "n_clusters"):
estimator.n_clusters = 1
set_random_state(estimator, 1)
try:
estimator.fit(X, y)
except ValueError:
pass
@ignore_warnings
def check_fit1d_1sample(name, Estimator):
    # check fitting a 1d array with only 1 sample
rnd = np.random.RandomState(0)
X = 3 * rnd.uniform(size=(20))
y = np.array([1])
y = multioutput_estimator_convert_y_2d(name, y)
estimator = Estimator()
set_testing_parameters(estimator)
if hasattr(estimator, "n_components"):
estimator.n_components = 1
if hasattr(estimator, "n_clusters"):
estimator.n_clusters = 1
set_random_state(estimator, 1)
try:
estimator.fit(X, y)
except ValueError:
pass
@ignore_warnings(category=DeprecationWarning)
def check_transformer_general(name, Transformer):
X, y = make_blobs(n_samples=30, centers=[[0, 0, 0], [1, 1, 1]],
random_state=0, n_features=2, cluster_std=0.1)
X = StandardScaler().fit_transform(X)
X -= X.min()
_check_transformer(name, Transformer, X, y)
_check_transformer(name, Transformer, X.tolist(), y.tolist())
@ignore_warnings(category=DeprecationWarning)
def check_transformer_data_not_an_array(name, Transformer):
X, y = make_blobs(n_samples=30, centers=[[0, 0, 0], [1, 1, 1]],
random_state=0, n_features=2, cluster_std=0.1)
X = StandardScaler().fit_transform(X)
# We need to make sure that we have non negative data, for things
# like NMF
X -= X.min() - .1
this_X = NotAnArray(X)
this_y = NotAnArray(np.asarray(y))
_check_transformer(name, Transformer, this_X, this_y)
def check_transformers_unfitted(name, Transformer):
X, y = _boston_subset()
with ignore_warnings(category=DeprecationWarning):
transformer = Transformer()
assert_raises((AttributeError, ValueError), transformer.transform, X)
def _check_transformer(name, Transformer, X, y):
if name in ('CCA', 'LocallyLinearEmbedding', 'KernelPCA') and _is_32bit():
# Those transformers yield non-deterministic output when executed on
# a 32bit Python. The same transformers are stable on 64bit Python.
# FIXME: try to isolate a minimalistic reproduction case only depending
# on numpy & scipy and/or maybe generate a test dataset that does not
# cause such unstable behaviors.
msg = name + ' is non deterministic on 32bit Python'
raise SkipTest(msg)
n_samples, n_features = np.asarray(X).shape
# catch deprecation warnings
transformer = Transformer()
set_random_state(transformer)
set_testing_parameters(transformer)
# fit
if name in CROSS_DECOMPOSITION:
y_ = np.c_[y, y]
y_[::2, 1] *= 2
else:
y_ = y
transformer.fit(X, y_)
# fit_transform method should work on non fitted estimator
transformer_clone = clone(transformer)
X_pred = transformer_clone.fit_transform(X, y=y_)
if isinstance(X_pred, tuple):
for x_pred in X_pred:
assert_equal(x_pred.shape[0], n_samples)
else:
# check for consistent n_samples
assert_equal(X_pred.shape[0], n_samples)
if hasattr(transformer, 'transform'):
if name in CROSS_DECOMPOSITION:
X_pred2 = transformer.transform(X, y_)
X_pred3 = transformer.fit_transform(X, y=y_)
else:
X_pred2 = transformer.transform(X)
X_pred3 = transformer.fit_transform(X, y=y_)
if isinstance(X_pred, tuple) and isinstance(X_pred2, tuple):
for x_pred, x_pred2, x_pred3 in zip(X_pred, X_pred2, X_pred3):
assert_array_almost_equal(
x_pred, x_pred2, 2,
"fit_transform and transform outcomes not consistent in %s"
% Transformer)
assert_array_almost_equal(
x_pred, x_pred3, 2,
"consecutive fit_transform outcomes not consistent in %s"
% Transformer)
else:
assert_array_almost_equal(
X_pred, X_pred2, 2,
"fit_transform and transform outcomes not consistent in %s"
% Transformer)
assert_array_almost_equal(
X_pred, X_pred3, 2,
"consecutive fit_transform outcomes not consistent in %s"
% Transformer)
assert_equal(len(X_pred2), n_samples)
assert_equal(len(X_pred3), n_samples)
# raises error on malformed input for transform
if hasattr(X, 'T'):
# If it's not an array, it does not have a 'T' property
assert_raises(ValueError, transformer.transform, X.T)
@ignore_warnings
def check_pipeline_consistency(name, Estimator):
if name in ('CCA', 'LocallyLinearEmbedding', 'KernelPCA') and _is_32bit():
# Those transformers yield non-deterministic output when executed on
# a 32bit Python. The same transformers are stable on 64bit Python.
# FIXME: try to isolate a minimalistic reproduction case only depending
        # on numpy & scipy and/or maybe generate a test dataset that does not
# cause such unstable behaviors.
msg = name + ' is non deterministic on 32bit Python'
raise SkipTest(msg)
# check that make_pipeline(est) gives same score as est
X, y = make_blobs(n_samples=30, centers=[[0, 0, 0], [1, 1, 1]],
random_state=0, n_features=2, cluster_std=0.1)
X -= X.min()
y = multioutput_estimator_convert_y_2d(name, y)
estimator = Estimator()
set_testing_parameters(estimator)
set_random_state(estimator)
pipeline = make_pipeline(estimator)
estimator.fit(X, y)
pipeline.fit(X, y)
funcs = ["score", "fit_transform"]
for func_name in funcs:
func = getattr(estimator, func_name, None)
if func is not None:
func_pipeline = getattr(pipeline, func_name)
result = func(X, y)
result_pipe = func_pipeline(X, y)
assert_array_almost_equal(result, result_pipe)
@ignore_warnings
def check_fit_score_takes_y(name, Estimator):
# check that all estimators accept an optional y
# in fit and score so they can be used in pipelines
rnd = np.random.RandomState(0)
X = rnd.uniform(size=(10, 3))
y = np.arange(10) % 3
y = multioutput_estimator_convert_y_2d(name, y)
estimator = Estimator()
set_testing_parameters(estimator)
set_random_state(estimator)
funcs = ["fit", "score", "partial_fit", "fit_predict", "fit_transform"]
for func_name in funcs:
func = getattr(estimator, func_name, None)
if func is not None:
func(X, y)
args = [p.name for p in signature(func).parameters.values()]
assert_true(args[1] in ["y", "Y"],
"Expected y or Y as second argument for method "
"%s of %s. Got arguments: %r."
% (func_name, Estimator.__name__, args))
@ignore_warnings
def check_estimators_dtypes(name, Estimator):
rnd = np.random.RandomState(0)
X_train_32 = 3 * rnd.uniform(size=(20, 5)).astype(np.float32)
X_train_64 = X_train_32.astype(np.float64)
X_train_int_64 = X_train_32.astype(np.int64)
X_train_int_32 = X_train_32.astype(np.int32)
y = X_train_int_64[:, 0]
y = multioutput_estimator_convert_y_2d(name, y)
methods = ["predict", "transform", "decision_function", "predict_proba"]
for X_train in [X_train_32, X_train_64, X_train_int_64, X_train_int_32]:
estimator = Estimator()
set_testing_parameters(estimator)
set_random_state(estimator, 1)
estimator.fit(X_train, y)
for method in methods:
if hasattr(estimator, method):
getattr(estimator, method)(X_train)
@ignore_warnings(category=DeprecationWarning)
def check_estimators_empty_data_messages(name, Estimator):
e = Estimator()
set_testing_parameters(e)
set_random_state(e, 1)
X_zero_samples = np.empty(0).reshape(0, 3)
# The precise message can change depending on whether X or y is
# validated first. Let us test the type of exception only:
assert_raises(ValueError, e.fit, X_zero_samples, [])
X_zero_features = np.empty(0).reshape(3, 0)
# the following y should be accepted by both classifiers and regressors
# and ignored by unsupervised models
y = multioutput_estimator_convert_y_2d(name, np.array([1, 0, 1]))
msg = ("0 feature\(s\) \(shape=\(3, 0\)\) while a minimum of \d* "
"is required.")
assert_raises_regex(ValueError, msg, e.fit, X_zero_features, y)
def check_estimators_nan_inf(name, Estimator):
    # Check that the estimator raises an error when X contains NaN or inf.
rnd = np.random.RandomState(0)
X_train_finite = rnd.uniform(size=(10, 3))
X_train_nan = rnd.uniform(size=(10, 3))
X_train_nan[0, 0] = np.nan
X_train_inf = rnd.uniform(size=(10, 3))
X_train_inf[0, 0] = np.inf
y = np.ones(10)
y[:5] = 0
y = multioutput_estimator_convert_y_2d(name, y)
error_string_fit = "Estimator doesn't check for NaN and inf in fit."
error_string_predict = ("Estimator doesn't check for NaN and inf in"
" predict.")
error_string_transform = ("Estimator doesn't check for NaN and inf in"
" transform.")
for X_train in [X_train_nan, X_train_inf]:
# catch deprecation warnings
with ignore_warnings(category=DeprecationWarning):
estimator = Estimator()
set_testing_parameters(estimator)
set_random_state(estimator, 1)
# try to fit
try:
estimator.fit(X_train, y)
except ValueError as e:
if 'inf' not in repr(e) and 'NaN' not in repr(e):
print(error_string_fit, Estimator, e)
traceback.print_exc(file=sys.stdout)
raise e
except Exception as exc:
print(error_string_fit, Estimator, exc)
traceback.print_exc(file=sys.stdout)
raise exc
else:
raise AssertionError(error_string_fit, Estimator)
# actually fit
estimator.fit(X_train_finite, y)
# predict
if hasattr(estimator, "predict"):
try:
estimator.predict(X_train)
except ValueError as e:
if 'inf' not in repr(e) and 'NaN' not in repr(e):
print(error_string_predict, Estimator, e)
traceback.print_exc(file=sys.stdout)
raise e
except Exception as exc:
print(error_string_predict, Estimator, exc)
traceback.print_exc(file=sys.stdout)
else:
raise AssertionError(error_string_predict, Estimator)
# transform
if hasattr(estimator, "transform"):
try:
estimator.transform(X_train)
except ValueError as e:
if 'inf' not in repr(e) and 'NaN' not in repr(e):
print(error_string_transform, Estimator, e)
traceback.print_exc(file=sys.stdout)
raise e
except Exception as exc:
print(error_string_transform, Estimator, exc)
traceback.print_exc(file=sys.stdout)
else:
raise AssertionError(error_string_transform, Estimator)
@ignore_warnings
def check_estimators_pickle(name, Estimator):
"""Test that we can pickle all estimators"""
check_methods = ["predict", "transform", "decision_function",
"predict_proba"]
X, y = make_blobs(n_samples=30, centers=[[0, 0, 0], [1, 1, 1]],
random_state=0, n_features=2, cluster_std=0.1)
# some estimators can't do features less than 0
X -= X.min()
# some estimators only take multioutputs
y = multioutput_estimator_convert_y_2d(name, y)
estimator = Estimator()
set_random_state(estimator)
set_testing_parameters(estimator)
estimator.fit(X, y)
result = dict()
for method in check_methods:
if hasattr(estimator, method):
result[method] = getattr(estimator, method)(X)
# pickle and unpickle!
pickled_estimator = pickle.dumps(estimator)
if Estimator.__module__.startswith('sklearn.'):
assert_true(b"version" in pickled_estimator)
unpickled_estimator = pickle.loads(pickled_estimator)
for method in result:
unpickled_result = getattr(unpickled_estimator, method)(X)
assert_array_almost_equal(result[method], unpickled_result)
def check_estimators_partial_fit_n_features(name, Alg):
    # check that the estimator raises an error if the number of features
    # changes between calls to partial_fit.
if not hasattr(Alg, 'partial_fit'):
return
X, y = make_blobs(n_samples=50, random_state=1)
X -= X.min()
with ignore_warnings(category=DeprecationWarning):
alg = Alg()
if not hasattr(alg, 'partial_fit'):
# check again as for mlp this depends on algorithm
return
set_testing_parameters(alg)
try:
if isinstance(alg, ClassifierMixin):
classes = np.unique(y)
alg.partial_fit(X, y, classes=classes)
else:
alg.partial_fit(X, y)
except NotImplementedError:
return
assert_raises(ValueError, alg.partial_fit, X[:, :-1], y)
def check_clustering(name, Alg):
X, y = make_blobs(n_samples=50, random_state=1)
X, y = shuffle(X, y, random_state=7)
X = StandardScaler().fit_transform(X)
n_samples, n_features = X.shape
# catch deprecation and neighbors warnings
with ignore_warnings(category=DeprecationWarning):
alg = Alg()
set_testing_parameters(alg)
if hasattr(alg, "n_clusters"):
alg.set_params(n_clusters=3)
set_random_state(alg)
if name == 'AffinityPropagation':
alg.set_params(preference=-100)
alg.set_params(max_iter=100)
# fit
alg.fit(X)
# with lists
alg.fit(X.tolist())
assert_equal(alg.labels_.shape, (n_samples,))
pred = alg.labels_
assert_greater(adjusted_rand_score(pred, y), 0.4)
# fit another time with ``fit_predict`` and compare results
if name == 'SpectralClustering':
# there is no way to make Spectral clustering deterministic :(
return
set_random_state(alg)
with warnings.catch_warnings(record=True):
pred2 = alg.fit_predict(X)
assert_array_equal(pred, pred2)
def check_clusterer_compute_labels_predict(name, Clusterer):
"""Check that predict is invariant of compute_labels"""
X, y = make_blobs(n_samples=20, random_state=0)
clusterer = Clusterer()
if hasattr(clusterer, "compute_labels"):
# MiniBatchKMeans
if hasattr(clusterer, "random_state"):
clusterer.set_params(random_state=0)
X_pred1 = clusterer.fit(X).predict(X)
clusterer.set_params(compute_labels=False)
X_pred2 = clusterer.fit(X).predict(X)
assert_array_equal(X_pred1, X_pred2)
def check_classifiers_one_label(name, Classifier):
error_string_fit = "Classifier can't train when only one class is present."
error_string_predict = ("Classifier can't predict when only one class is "
"present.")
rnd = np.random.RandomState(0)
X_train = rnd.uniform(size=(10, 3))
X_test = rnd.uniform(size=(10, 3))
y = np.ones(10)
# catch deprecation warnings
with ignore_warnings(category=DeprecationWarning):
classifier = Classifier()
set_testing_parameters(classifier)
# try to fit
try:
classifier.fit(X_train, y)
except ValueError as e:
if 'class' not in repr(e):
print(error_string_fit, Classifier, e)
traceback.print_exc(file=sys.stdout)
raise e
else:
return
except Exception as exc:
print(error_string_fit, Classifier, exc)
traceback.print_exc(file=sys.stdout)
raise exc
# predict
try:
assert_array_equal(classifier.predict(X_test), y)
except Exception as exc:
print(error_string_predict, Classifier, exc)
raise exc
@ignore_warnings # Warnings are raised by decision function
def check_classifiers_train(name, Classifier):
X_m, y_m = make_blobs(n_samples=300, random_state=0)
X_m, y_m = shuffle(X_m, y_m, random_state=7)
X_m = StandardScaler().fit_transform(X_m)
# generate binary problem from multi-class one
y_b = y_m[y_m != 2]
X_b = X_m[y_m != 2]
for (X, y) in [(X_m, y_m), (X_b, y_b)]:
classes = np.unique(y)
n_classes = len(classes)
n_samples, n_features = X.shape
classifier = Classifier()
if name in ['BernoulliNB', 'MultinomialNB']:
X -= X.min()
set_testing_parameters(classifier)
set_random_state(classifier)
# raises error on malformed input for fit
assert_raises(ValueError, classifier.fit, X, y[:-1])
# fit
classifier.fit(X, y)
# with lists
classifier.fit(X.tolist(), y.tolist())
assert_true(hasattr(classifier, "classes_"))
y_pred = classifier.predict(X)
assert_equal(y_pred.shape, (n_samples,))
# training set performance
if name not in ['BernoulliNB', 'MultinomialNB']:
assert_greater(accuracy_score(y, y_pred), 0.83)
# raises error on malformed input for predict
assert_raises(ValueError, classifier.predict, X.T)
if hasattr(classifier, "decision_function"):
try:
# decision_function agrees with predict
decision = classifier.decision_function(X)
                if n_classes == 2:
assert_equal(decision.shape, (n_samples,))
dec_pred = (decision.ravel() > 0).astype(np.int)
assert_array_equal(dec_pred, y_pred)
                if (n_classes == 3 and not isinstance(classifier, BaseLibSVM)):
                    # the one-vs-one decision_function of LibSVM works differently
assert_equal(decision.shape, (n_samples, n_classes))
assert_array_equal(np.argmax(decision, axis=1), y_pred)
# raises error on malformed input
assert_raises(ValueError,
classifier.decision_function, X.T)
# raises error on malformed input for decision_function
assert_raises(ValueError,
classifier.decision_function, X.T)
except NotImplementedError:
pass
if hasattr(classifier, "predict_proba"):
# predict_proba agrees with predict
y_prob = classifier.predict_proba(X)
assert_equal(y_prob.shape, (n_samples, n_classes))
assert_array_equal(np.argmax(y_prob, axis=1), y_pred)
# check that probas for all classes sum to one
assert_array_almost_equal(np.sum(y_prob, axis=1),
np.ones(n_samples))
# raises error on malformed input
assert_raises(ValueError, classifier.predict_proba, X.T)
# raises error on malformed input for predict_proba
assert_raises(ValueError, classifier.predict_proba, X.T)
if hasattr(classifier, "predict_log_proba"):
# predict_log_proba is a transformation of predict_proba
y_log_prob = classifier.predict_log_proba(X)
assert_array_almost_equal(y_log_prob, np.log(y_prob), 8)
assert_array_equal(np.argsort(y_log_prob), np.argsort(y_prob))
@ignore_warnings(category=DeprecationWarning)
def check_estimators_fit_returns_self(name, Estimator):
"""Check if self is returned when calling fit"""
X, y = make_blobs(random_state=0, n_samples=9, n_features=4)
y = multioutput_estimator_convert_y_2d(name, y)
# some want non-negative input
X -= X.min()
estimator = Estimator()
set_testing_parameters(estimator)
set_random_state(estimator)
assert_true(estimator.fit(X, y) is estimator)
@ignore_warnings
def check_estimators_unfitted(name, Estimator):
"""Check that predict raises an exception in an unfitted estimator.
Unfitted estimators should raise either AttributeError or ValueError.
The specific exception type NotFittedError inherits from both and can
therefore be adequately raised for that purpose.
"""
# Common test for Regressors as well as Classifiers
X, y = _boston_subset()
est = Estimator()
msg = "fit"
if hasattr(est, 'predict'):
assert_raise_message((AttributeError, ValueError), msg,
est.predict, X)
if hasattr(est, 'decision_function'):
assert_raise_message((AttributeError, ValueError), msg,
est.decision_function, X)
if hasattr(est, 'predict_proba'):
assert_raise_message((AttributeError, ValueError), msg,
est.predict_proba, X)
if hasattr(est, 'predict_log_proba'):
assert_raise_message((AttributeError, ValueError), msg,
est.predict_log_proba, X)
@ignore_warnings(category=DeprecationWarning)
def check_supervised_y_2d(name, Estimator):
if "MultiTask" in name:
# These only work on 2d, so this test makes no sense
return
rnd = np.random.RandomState(0)
X = rnd.uniform(size=(10, 3))
y = np.arange(10) % 3
estimator = Estimator()
set_testing_parameters(estimator)
set_random_state(estimator)
# fit
estimator.fit(X, y)
y_pred = estimator.predict(X)
set_random_state(estimator)
# Check that when a 2D y is given, a DataConversionWarning is
# raised
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always", DataConversionWarning)
warnings.simplefilter("ignore", RuntimeWarning)
estimator.fit(X, y[:, np.newaxis])
y_pred_2d = estimator.predict(X)
msg = "expected 1 DataConversionWarning, got: %s" % (
", ".join([str(w_x) for w_x in w]))
if name not in MULTI_OUTPUT:
# check that we warned if we don't support multi-output
assert_greater(len(w), 0, msg)
assert_true("DataConversionWarning('A column-vector y"
" was passed when a 1d array was expected" in msg)
assert_array_almost_equal(y_pred.ravel(), y_pred_2d.ravel())
def check_classifiers_classes(name, Classifier):
X, y = make_blobs(n_samples=30, random_state=0, cluster_std=0.1)
X, y = shuffle(X, y, random_state=7)
X = StandardScaler().fit_transform(X)
# We need to make sure that we have non negative data, for things
# like NMF
X -= X.min() - .1
y_names = np.array(["one", "two", "three"])[y]
for y_names in [y_names, y_names.astype('O')]:
if name in ["LabelPropagation", "LabelSpreading"]:
# TODO some complication with -1 label
y_ = y
else:
y_ = y_names
classes = np.unique(y_)
with ignore_warnings(category=DeprecationWarning):
classifier = Classifier()
if name == 'BernoulliNB':
classifier.set_params(binarize=X.mean())
set_testing_parameters(classifier)
set_random_state(classifier)
# fit
classifier.fit(X, y_)
y_pred = classifier.predict(X)
# training set performance
assert_array_equal(np.unique(y_), np.unique(y_pred))
if np.any(classifier.classes_ != classes):
print("Unexpected classes_ attribute for %r: "
"expected %s, got %s" %
(classifier, classes, classifier.classes_))
@ignore_warnings(category=DeprecationWarning)
def check_regressors_int(name, Regressor):
X, _ = _boston_subset()
X = X[:50]
rnd = np.random.RandomState(0)
y = rnd.randint(3, size=X.shape[0])
y = multioutput_estimator_convert_y_2d(name, y)
rnd = np.random.RandomState(0)
# separate estimators to control random seeds
regressor_1 = Regressor()
regressor_2 = Regressor()
set_testing_parameters(regressor_1)
set_testing_parameters(regressor_2)
set_random_state(regressor_1)
set_random_state(regressor_2)
if name in CROSS_DECOMPOSITION:
y_ = np.vstack([y, 2 * y + rnd.randint(2, size=len(y))])
y_ = y_.T
else:
y_ = y
# fit
regressor_1.fit(X, y_)
pred1 = regressor_1.predict(X)
regressor_2.fit(X, y_.astype(np.float))
pred2 = regressor_2.predict(X)
assert_array_almost_equal(pred1, pred2, 2, name)
@ignore_warnings(category=DeprecationWarning)
def check_regressors_train(name, Regressor):
X, y = _boston_subset()
y = StandardScaler().fit_transform(y.reshape(-1, 1)) # X is already scaled
y = y.ravel()
y = multioutput_estimator_convert_y_2d(name, y)
rnd = np.random.RandomState(0)
# catch deprecation warnings
regressor = Regressor()
set_testing_parameters(regressor)
if not hasattr(regressor, 'alphas') and hasattr(regressor, 'alpha'):
# linear regressors need to set alpha, but not generalized CV ones
regressor.alpha = 0.01
if name == 'PassiveAggressiveRegressor':
regressor.C = 0.01
# raises error on malformed input for fit
assert_raises(ValueError, regressor.fit, X, y[:-1])
# fit
if name in CROSS_DECOMPOSITION:
y_ = np.vstack([y, 2 * y + rnd.randint(2, size=len(y))])
y_ = y_.T
else:
y_ = y
set_random_state(regressor)
regressor.fit(X, y_)
regressor.fit(X.tolist(), y_.tolist())
y_pred = regressor.predict(X)
assert_equal(y_pred.shape, y_.shape)
# TODO: find out why PLS and CCA fail. RANSAC is random
# and furthermore assumes the presence of outliers, hence
# skipped
if name not in ('PLSCanonical', 'CCA', 'RANSACRegressor'):
assert_greater(regressor.score(X, y_), 0.5)
@ignore_warnings
def check_regressors_no_decision_function(name, Regressor):
    # check that regressors raise a DeprecationWarning if they still expose
    # decision_function, predict_proba or predict_log_proba
rng = np.random.RandomState(0)
X = rng.normal(size=(10, 4))
y = multioutput_estimator_convert_y_2d(name, X[:, 0])
regressor = Regressor()
set_testing_parameters(regressor)
if hasattr(regressor, "n_components"):
# FIXME CCA, PLS is not robust to rank 1 effects
regressor.n_components = 1
regressor.fit(X, y)
funcs = ["decision_function", "predict_proba", "predict_log_proba"]
for func_name in funcs:
func = getattr(regressor, func_name, None)
if func is None:
# doesn't have function
continue
# has function. Should raise deprecation warning
msg = func_name
assert_warns_message(DeprecationWarning, msg, func, X)
def check_class_weight_classifiers(name, Classifier):
if name == "NuSVC":
# the sparse version has a parameter that doesn't do anything
raise SkipTest
if name.endswith("NB"):
# NaiveBayes classifiers have a somewhat different interface.
# FIXME SOON!
raise SkipTest
for n_centers in [2, 3]:
# create a very noisy dataset
X, y = make_blobs(centers=n_centers, random_state=0, cluster_std=20)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=.5,
random_state=0)
n_centers = len(np.unique(y_train))
if n_centers == 2:
class_weight = {0: 1000, 1: 0.0001}
else:
class_weight = {0: 1000, 1: 0.0001, 2: 0.0001}
with ignore_warnings(category=DeprecationWarning):
classifier = Classifier(class_weight=class_weight)
if hasattr(classifier, "n_iter"):
classifier.set_params(n_iter=100)
if hasattr(classifier, "min_weight_fraction_leaf"):
classifier.set_params(min_weight_fraction_leaf=0.01)
set_random_state(classifier)
classifier.fit(X_train, y_train)
y_pred = classifier.predict(X_test)
assert_greater(np.mean(y_pred == 0), 0.89)
def check_class_weight_balanced_classifiers(name, Classifier, X_train, y_train,
X_test, y_test, weights):
with ignore_warnings(category=DeprecationWarning):
classifier = Classifier()
if hasattr(classifier, "n_iter"):
classifier.set_params(n_iter=100)
set_random_state(classifier)
classifier.fit(X_train, y_train)
y_pred = classifier.predict(X_test)
classifier.set_params(class_weight='balanced')
classifier.fit(X_train, y_train)
y_pred_balanced = classifier.predict(X_test)
assert_greater(f1_score(y_test, y_pred_balanced, average='weighted'),
f1_score(y_test, y_pred, average='weighted'))
def check_class_weight_balanced_linear_classifier(name, Classifier):
"""Test class weights with non-contiguous class labels."""
X = np.array([[-1.0, -1.0], [-1.0, 0], [-.8, -1.0],
[1.0, 1.0], [1.0, 0.0]])
y = np.array([1, 1, 1, -1, -1])
with ignore_warnings(category=DeprecationWarning):
classifier = Classifier()
if hasattr(classifier, "n_iter"):
            # This is a very small dataset; the default n_iter is likely too
            # small for convergence
classifier.set_params(n_iter=1000)
set_random_state(classifier)
# Let the model compute the class frequencies
classifier.set_params(class_weight='balanced')
coef_balanced = classifier.fit(X, y).coef_.copy()
# Count each label occurrence to reweight manually
n_samples = len(y)
n_classes = float(len(np.unique(y)))
class_weight = {1: n_samples / (np.sum(y == 1) * n_classes),
-1: n_samples / (np.sum(y == -1) * n_classes)}
classifier.set_params(class_weight=class_weight)
coef_manual = classifier.fit(X, y).coef_.copy()
assert_array_almost_equal(coef_balanced, coef_manual)
@ignore_warnings(category=DeprecationWarning)
def check_estimators_overwrite_params(name, Estimator):
X, y = make_blobs(random_state=0, n_samples=9)
y = multioutput_estimator_convert_y_2d(name, y)
# some want non-negative input
X -= X.min()
estimator = Estimator()
set_testing_parameters(estimator)
set_random_state(estimator)
# Make a physical copy of the original estimator parameters before fitting.
params = estimator.get_params()
original_params = deepcopy(params)
# Fit the model
estimator.fit(X, y)
# Compare the state of the model parameters with the original parameters
new_params = estimator.get_params()
for param_name, original_value in original_params.items():
new_value = new_params[param_name]
# We should never change or mutate the internal state of input
# parameters by default. To check this we use the joblib.hash function
# that introspects recursively any subobjects to compute a checksum.
# The only exception to this rule of immutable constructor parameters
# is possible RandomState instance but in this check we explicitly
# fixed the random_state params recursively to be integer seeds.
assert_equal(hash(new_value), hash(original_value),
"Estimator %s should not change or mutate "
" the parameter %s from %s to %s during fit."
% (name, param_name, original_value, new_value))
def check_no_fit_attributes_set_in_init(name, Estimator):
"""Check that Estimator.__init__ doesn't set trailing-_ attributes."""
estimator = Estimator()
for attr in dir(estimator):
if attr.endswith("_") and not attr.startswith("__"):
# This check is for properties, they can be listed in dir
# while at the same time have hasattr return False as long
# as the property getter raises an AttributeError
assert_false(
hasattr(estimator, attr),
"By convention, attributes ending with '_' are "
'estimated from data in scikit-learn. Consequently they '
'should not be initialized in the constructor of an '
'estimator but in the fit method. Attribute {!r} '
'was found in estimator {}'.format(attr, name))
def check_sparsify_coefficients(name, Estimator):
X = np.array([[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1],
[-1, -2], [2, 2], [-2, -2]])
y = [1, 1, 1, 2, 2, 2, 3, 3, 3]
est = Estimator()
est.fit(X, y)
pred_orig = est.predict(X)
# test sparsify with dense inputs
est.sparsify()
assert_true(sparse.issparse(est.coef_))
pred = est.predict(X)
assert_array_equal(pred, pred_orig)
# pickle and unpickle with sparse coef_
est = pickle.loads(pickle.dumps(est))
assert_true(sparse.issparse(est.coef_))
pred = est.predict(X)
assert_array_equal(pred, pred_orig)
def check_classifier_data_not_an_array(name, Estimator):
X = np.array([[3, 0], [0, 1], [0, 2], [1, 1], [1, 2], [2, 1]])
y = [1, 1, 1, 2, 2, 2]
y = multioutput_estimator_convert_y_2d(name, y)
check_estimators_data_not_an_array(name, Estimator, X, y)
def check_regressor_data_not_an_array(name, Estimator):
X, y = _boston_subset(n_samples=50)
y = multioutput_estimator_convert_y_2d(name, y)
check_estimators_data_not_an_array(name, Estimator, X, y)
@ignore_warnings(category=DeprecationWarning)
def check_estimators_data_not_an_array(name, Estimator, X, y):
if name in CROSS_DECOMPOSITION:
raise SkipTest
# separate estimators to control random seeds
estimator_1 = Estimator()
estimator_2 = Estimator()
set_testing_parameters(estimator_1)
set_testing_parameters(estimator_2)
set_random_state(estimator_1)
set_random_state(estimator_2)
y_ = NotAnArray(np.asarray(y))
X_ = NotAnArray(np.asarray(X))
# fit
estimator_1.fit(X_, y_)
pred1 = estimator_1.predict(X_)
estimator_2.fit(X, y)
pred2 = estimator_2.predict(X)
assert_array_almost_equal(pred1, pred2, 2, name)
def check_parameters_default_constructible(name, Estimator):
classifier = LinearDiscriminantAnalysis()
# test default-constructibility
# get rid of deprecation warnings
with ignore_warnings(category=DeprecationWarning):
if name in META_ESTIMATORS:
estimator = Estimator(classifier)
else:
estimator = Estimator()
# test cloning
clone(estimator)
# test __repr__
repr(estimator)
# test that set_params returns self
assert_true(estimator.set_params() is estimator)
# test if init does nothing but set parameters
# this is important for grid_search etc.
# We get the default parameters from init and then
# compare these against the actual values of the attributes.
# this comes from getattr. Gets rid of deprecation decorator.
init = getattr(estimator.__init__, 'deprecated_original',
estimator.__init__)
try:
def param_filter(p):
"""Identify hyper parameters of an estimator"""
return (p.name != 'self' and
p.kind != p.VAR_KEYWORD and
p.kind != p.VAR_POSITIONAL)
init_params = [p for p in signature(init).parameters.values()
if param_filter(p)]
except (TypeError, ValueError):
# init is not a python function.
# true for mixins
return
params = estimator.get_params()
if name in META_ESTIMATORS:
# they can need a non-default argument
init_params = init_params[1:]
for init_param in init_params:
assert_not_equal(init_param.default, init_param.empty,
"parameter %s for %s has no default value"
% (init_param.name, type(estimator).__name__))
assert_in(type(init_param.default),
[str, int, float, bool, tuple, type(None),
np.float64, types.FunctionType, Memory])
if init_param.name not in params.keys():
# deprecated parameter, not in get_params
assert_true(init_param.default is None)
continue
param_value = params[init_param.name]
if isinstance(param_value, np.ndarray):
assert_array_equal(param_value, init_param.default)
else:
assert_equal(param_value, init_param.default)
def multioutput_estimator_convert_y_2d(name, y):
    # Estimators with "MultiTask" in their name raise ValueError if y is 1-D,
    # so convert y to 2-D for those estimators.
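    # e.g. for name='MultiTaskLasso', a y of shape (n,) becomes shape (n, 1).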
if "MultiTask" in name:
return np.reshape(y, (-1, 1))
return y
@ignore_warnings(category=DeprecationWarning)
def check_non_transformer_estimators_n_iter(name, Estimator):
    # Test that non-transformer estimators with a max_iter parameter report
    # an n_iter_ attribute of at least 1.
    # The models listed below depend on external solvers like libsvm, where
    # accessing the number of iterations is non-trivial.
not_run_check_n_iter = ['Ridge', 'SVR', 'NuSVR', 'NuSVC',
'RidgeClassifier', 'SVC', 'RandomizedLasso',
'LogisticRegressionCV', 'LinearSVC',
'LogisticRegression']
# Tested in test_transformer_n_iter
not_run_check_n_iter += CROSS_DECOMPOSITION
if name in not_run_check_n_iter:
return
    # LassoLars stops early for the default alpha=1.0 on the iris dataset.
if name == 'LassoLars':
estimator = Estimator(alpha=0.)
else:
estimator = Estimator()
if hasattr(estimator, 'max_iter'):
iris = load_iris()
X, y_ = iris.data, iris.target
y_ = multioutput_estimator_convert_y_2d(name, y_)
set_random_state(estimator, 0)
if name == 'AffinityPropagation':
estimator.fit(X)
else:
estimator.fit(X, y_)
# HuberRegressor depends on scipy.optimize.fmin_l_bfgs_b
# which doesn't return a n_iter for old versions of SciPy.
if not (name == 'HuberRegressor' and estimator.n_iter_ is None):
assert_greater_equal(estimator.n_iter_, 1)
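# Illustrative sketch (the choice of Lasso is an assumption, not part of the
# checks): the convention verified above is that an estimator accepting a
# max_iter parameter reports the number of iterations it actually ran through
# the fitted attribute n_iter_.
def _example_n_iter_convention():
    from sklearn.linear_model import Lasso
    iris = load_iris()
    est = Lasso(max_iter=1000).fit(iris.data, iris.target)
    return est.n_iter_  # an integer >= 1 once fit has run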
@ignore_warnings(category=DeprecationWarning)
def check_transformer_n_iter(name, Estimator):
    # Test that transformers with a max_iter parameter expose a fitted
    # n_iter_ attribute of at least 1.
estimator = Estimator()
if hasattr(estimator, "max_iter"):
if name in CROSS_DECOMPOSITION:
# Check using default data
X = [[0., 0., 1.], [1., 0., 0.], [2., 2., 2.], [2., 5., 4.]]
y_ = [[0.1, -0.2], [0.9, 1.1], [0.1, -0.5], [0.3, -0.2]]
else:
X, y_ = make_blobs(n_samples=30, centers=[[0, 0, 0], [1, 1, 1]],
random_state=0, n_features=2, cluster_std=0.1)
X -= X.min() - 0.1
set_random_state(estimator, 0)
estimator.fit(X, y_)
        # These return an n_iter_ per component.
if name in CROSS_DECOMPOSITION:
for iter_ in estimator.n_iter_:
assert_greater_equal(iter_, 1)
else:
assert_greater_equal(estimator.n_iter_, 1)
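# Illustrative sketch (NMF is an assumed example, not part of the checks):
# iterative transformers follow the same convention and expose n_iter_ after
# fit; NMF additionally requires non-negative input, hence the shift below.
def _example_transformer_n_iter():
    from sklearn.decomposition import NMF
    X, _ = make_blobs(n_samples=30, centers=2, random_state=0, cluster_std=0.1)
    X -= X.min() - 0.1  # shift so all entries are strictly positive
    return NMF(max_iter=200, random_state=0).fit(X).n_iter_  # >= 1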
@ignore_warnings(category=DeprecationWarning)
def check_get_params_invariance(name, estimator):
# Checks if get_params(deep=False) is a subset of get_params(deep=True)
class T(BaseEstimator):
"""Mock classifier
"""
def __init__(self):
pass
def fit(self, X, y):
return self
def transform(self, X):
return X
if name in ('FeatureUnion', 'Pipeline'):
e = estimator([('clf', T())])
elif name in ('GridSearchCV', 'RandomizedSearchCV', 'SelectFromModel'):
return
else:
e = estimator()
shallow_params = e.get_params(deep=False)
deep_params = e.get_params(deep=True)
assert_true(all(item in deep_params.items() for item in
shallow_params.items()))
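# Illustrative example (Pipeline and LogisticRegression are assumed here, not
# part of the check): get_params(deep=True) adds the nested '<step>__<param>'
# entries on top of the shallow ones, so the shallow dict is always a subset.
def _example_get_params_invariance():
    from sklearn.pipeline import Pipeline
    from sklearn.linear_model import LogisticRegression
    pipe = Pipeline([('clf', LogisticRegression())])
    shallow = pipe.get_params(deep=False)  # e.g. {'steps': [...]}
    deep = pipe.get_params(deep=True)      # adds 'clf', 'clf__C', ...
    return all(item in deep.items() for item in shallow.items())  # True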
def check_classifiers_regression_target(name, Estimator):
# Check if classifier throws an exception when fed regression targets
boston = load_boston()
X, y = boston.data, boston.target
e = Estimator()
msg = 'Unknown label type: '
assert_raises_regex(ValueError, msg, e.fit, X, y)
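# Illustrative example (LogisticRegression is an assumed classifier, not part
# of the check): fitting a classifier on continuous targets should fail with
# the 'Unknown label type' ValueError exercised above.
def _example_regression_target_rejected():
    from sklearn.linear_model import LogisticRegression
    boston = load_boston()
    try:
        LogisticRegression().fit(boston.data, boston.target)
    except ValueError as exc:
        return 'Unknown label type' in str(exc)  # True
    return False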
@ignore_warnings(category=DeprecationWarning)
def check_decision_proba_consistency(name, Estimator):
# Check whether an estimator having both decision_function and
# predict_proba methods has outputs with perfect rank correlation.
centers = [(2, 2), (4, 4)]
X, y = make_blobs(n_samples=100, random_state=0, n_features=4,
centers=centers, cluster_std=1.0, shuffle=True)
X_test = np.random.randn(20, 2) + 4
estimator = Estimator()
set_testing_parameters(estimator)
if (hasattr(estimator, "decision_function") and
hasattr(estimator, "predict_proba")):
estimator.fit(X, y)
a = estimator.predict_proba(X_test)[:, 1]
b = estimator.decision_function(X_test)
assert_array_equal(rankdata(a), rankdata(b))
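# Illustrative example (LogisticRegression is an assumed estimator, not part of
# the check above): its predict_proba is a monotone transform of
# decision_function, so the two outputs must rank test points identically.
def _example_decision_proba_rank_agreement():
    from sklearn.linear_model import LogisticRegression
    X, y = make_blobs(n_samples=100, centers=[(2, 2), (4, 4)], random_state=0)
    clf = LogisticRegression().fit(X, y)
    a = clf.predict_proba(X)[:, 1]  # probability of the positive class
    b = clf.decision_function(X)    # signed distance from the separating plane
    return np.array_equal(rankdata(a), rankdata(b))  # True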
|
bsd-3-clause
|
zrhans/pythonanywhere
|
.virtualenvs/django19/lib/python3.4/site-packages/pandas/sandbox/qtpandas.py
|
13
|
4347
|
'''
Easy integration of DataFrame into pyqt framework
@author: Jev Kuznetsov
'''
# GH9615
import warnings
warnings.warn("The pandas.sandbox.qtpandas module is deprecated and will be "
"removed in a future version. We refer users to the external package "
"here: https://github.com/datalyze-solutions/pandas-qt")
try:
from PyQt4.QtCore import QAbstractTableModel, Qt, QVariant, QModelIndex
from PyQt4.QtGui import (
QApplication, QDialog, QVBoxLayout, QTableView, QWidget)
except ImportError:
from PySide.QtCore import QAbstractTableModel, Qt, QModelIndex
from PySide.QtGui import (
QApplication, QDialog, QVBoxLayout, QTableView, QWidget)
QVariant = lambda value=None: value
import numpy as np  # needed by testDf even when this module is merely imported
from pandas import DataFrame, Index
class DataFrameModel(QAbstractTableModel):
''' data model for a DataFrame class '''
def __init__(self):
super(DataFrameModel, self).__init__()
self.df = DataFrame()
def setDataFrame(self, dataFrame):
self.df = dataFrame
def signalUpdate(self):
        ''' tell attached views to update their data (a full, inefficient update) '''
self.layoutChanged.emit()
#------------- table display functions -----------------
def headerData(self, section, orientation, role=Qt.DisplayRole):
if role != Qt.DisplayRole:
return QVariant()
if orientation == Qt.Horizontal:
try:
return self.df.columns.tolist()[section]
except (IndexError, ):
return QVariant()
elif orientation == Qt.Vertical:
try:
# return self.df.index.tolist()
return self.df.index.tolist()[section]
except (IndexError, ):
return QVariant()
def data(self, index, role=Qt.DisplayRole):
if role != Qt.DisplayRole:
return QVariant()
if not index.isValid():
return QVariant()
return QVariant(str(self.df.ix[index.row(), index.column()]))
def flags(self, index):
flags = super(DataFrameModel, self).flags(index)
flags |= Qt.ItemIsEditable
return flags
def setData(self, index, value, role):
row = self.df.index[index.row()]
col = self.df.columns[index.column()]
if hasattr(value, 'toPyObject'):
# PyQt4 gets a QVariant
value = value.toPyObject()
else:
            # PySide gets a unicode string
dtype = self.df[col].dtype
if dtype != object:
value = None if value == '' else dtype.type(value)
self.df.set_value(row, col, value)
return True
def rowCount(self, index=QModelIndex()):
return self.df.shape[0]
def columnCount(self, index=QModelIndex()):
return self.df.shape[1]
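# Illustrative usage sketch (not part of the original module; requires a
# running QApplication): the model can be attached to any Qt item view, with
# setDataFrame swapping the data and signalUpdate telling views to repaint.
def _example_model_usage():
    model = DataFrameModel()
    model.setDataFrame(DataFrame({'a': [1, 2, 3], 'b': ['x', 'y', 'z']}))
    view = QTableView()   # needs QApplication(sys.argv) to exist first
    view.setModel(model)
    model.signalUpdate()  # full refresh after the DataFrame has changed
    return view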
class DataFrameWidget(QWidget):
''' a simple widget for using DataFrames in a gui '''
def __init__(self, dataFrame, parent=None):
super(DataFrameWidget, self).__init__(parent)
self.dataModel = DataFrameModel()
self.dataTable = QTableView()
self.dataTable.setModel(self.dataModel)
layout = QVBoxLayout()
layout.addWidget(self.dataTable)
self.setLayout(layout)
# Set DataFrame
self.setDataFrame(dataFrame)
def setDataFrame(self, dataFrame):
self.dataModel.setDataFrame(dataFrame)
self.dataModel.signalUpdate()
self.dataTable.resizeColumnsToContents()
#-----------------stand alone test code
def testDf():
''' creates test dataframe '''
data = {'int': [1, 2, 3], 'float': [1.5, 2.5, 3.5],
'string': ['a', 'b', 'c'], 'nan': [np.nan, np.nan, np.nan]}
return DataFrame(data, index=Index(['AAA', 'BBB', 'CCC']),
columns=['int', 'float', 'string', 'nan'])
class Form(QDialog):
def __init__(self, parent=None):
super(Form, self).__init__(parent)
df = testDf() # make up some data
widget = DataFrameWidget(df)
        # DataFrameWidget itself has no resizeColumnsToContents; resize the
        # inner table view (setDataFrame above already triggers this too).
        widget.dataTable.resizeColumnsToContents()
layout = QVBoxLayout()
layout.addWidget(widget)
self.setLayout(layout)
if __name__ == '__main__':
import sys
import numpy as np
app = QApplication(sys.argv)
form = Form()
form.show()
app.exec_()
|
apache-2.0
|