| repo_name | path | copies | size | content | license |
|---|---|---|---|---|---|
hamogu/doc | source/figures/schematic_pileup.py | 2 | 1265 | '''Generate schematic of a pile-up event
use: python schematic_pileup.py figurename.png
'''
import sys
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D # import required for projection='3d'
from figurescripts import savefig
event1 = np.array([[0.0, 0.0, 0.0],
[0.0, 1.4, 0.1],
[1.0, 0.0, 0.0]])
event2 = np.array([[0.0, 0.0, 0.0],
[0.8, 1.3, 0.0],
[0.4, 0.0, 0.0]])
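# event2 is drawn stacked on top of event1 (see the second bar3d call below)
# to illustrate two photons depositing charge in the same read-out frame.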
xpos = np.arange(3)
ypos = xpos.copy()
xpos, ypos = np.meshgrid(xpos, ypos)
xpos = xpos.flatten()
ypos = ypos.flatten()
z0 = np.zeros_like(event1).flatten()
dx = np.ones_like(z0)
dy = dx.copy()
ind = event1.flatten() > 0
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
ax.bar3d(xpos[ind], ypos[ind], z0[ind], dx[ind], dy[ind], event1.flatten()[ind], color='b', zsort='average', alpha=None)
ind = event2.flatten() > 0
ax.bar3d(xpos[ind], ypos[ind], event1.flatten()[ind], dx[ind], dy[ind], event2.flatten()[ind], color='r', zsort='average', alpha = None)
ax.set_xlim3d(3, 0)
ax.set_ylim3d(0, 3)
ax.set_zlabel('PHA [keV]')
ax.set_xlabel('pixel')
ax.set_ylabel('pixel')
ax.set_title('Piled-up event')
fig.subplots_adjust(0,0,1,1)
savefig(fig, sys.argv[1])
| gpl-2.0 |
zakkum42/Bosch | src/04-model/bosch_full_autoencoder_from_dir_with_fit_x.py | 1 | 20637 | import pandas as pd
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
import pickle
import os
import math
import glob
from sklearn.metrics import mean_squared_error
from include.dataset_fnames import generate_station_data_fname, generate_data_fname, generate_response_data_fname, train_categorical_onehot_filename, train_station_date_filename
from include.feature_lists import numeric_features, numeric_missing_features_list, numeric_features_to_work_on, categoric_features
import theano
import theano.tensor as T
import keras
from keras import backend as K
from keras.layers import Input, Dense, Dropout
from keras.models import Model, Sequential
from keras.callbacks import Callback
#from keras.losses import mean_squared_error
from keras.optimizers import Adam
from BoschNNModel1 import BoschNNModel, BoschNNInput, BoschNNOutput, BoschNNCallback
from sklearn.model_selection import KFold
from keras.callbacks import History, EarlyStopping
import random
from datetime import datetime
cv_splits = 3
def batch_count_csv(dirname):
fnames = glob.glob1(dirname, "train_numeric_*")
return len(fnames)
def batch_count_npz(dirname):
fnames = glob.glob1(dirname, "train*.npz")
return len(fnames)
def bosch_data_generator_from_csv(dirname, indices, shuffle=False, use_categoric_features=False, use_date_features=False):
fname = generate_data_fname(sample_type='train', data_type='numeric')
numeric_columns = pd.read_csv(fname, nrows=2).columns
numeric_fnames = sorted(glob.glob1(dirname, "train_numeric_*"))
if use_categoric_features:
fname = train_categorical_onehot_filename
categoric_columns = pd.read_csv(fname, nrows=2).columns
categoric_fnames = sorted(glob.glob1(dirname, "train_categorical_*"))
if use_date_features:
fname = train_station_date_filename
station_date_columns = pd.read_csv(fname, nrows=2).columns
station_date_fnames = sorted(glob.glob1(dirname, "train_station_date_*"))
    if shuffle:
        random.shuffle(indices)
for list_index in range(len(indices)):
t0 = datetime.now()
numeric_fname = os.path.join(dirname, numeric_fnames[indices[list_index]])
numeric_df = pd.read_csv(numeric_fname, names=numeric_columns, index_col='Id')
del numeric_df['Response']
# print datetime.now() - t0
if use_categoric_features:
t0 = datetime.now()
categoric_fname = os.path.join(dirname, categoric_fnames[indices[list_index]])
categoric_df = pd.read_csv(categoric_fname, names=categoric_columns, index_col='Id')
numeric_df = numeric_df.join(categoric_df, how='inner')
# print datetime.now() - t0
del categoric_df
if use_date_features:
t0 = datetime.now()
station_date_fname = os.path.join(dirname, station_date_fnames[indices[list_index]])
station_date_df = pd.read_csv(station_date_fname, names=station_date_columns, index_col='Id')
station_date_df = station_date_df / 1719.0 # Normalization
numeric_df = numeric_df.join(station_date_df, how='inner')
# print datetime.now() - t0
del station_date_df
yield numeric_df.values, numeric_df.values, list_index+1
del numeric_df
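# Illustrative use of the generator above (a sketch only; the directory name
# and batch indices are assumptions):
#
#     for X, y, n_done in bosch_data_generator_from_csv('bs30000', range(3)):
#         ...  # X and y are the same array, as an autoencoder expects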
def bosch_data_generator_from_npz(dirname, indices, shuffle=False, use_categoric_features=False, use_date_features=False):
assert (use_categoric_features == True), "Compressed numeric only values not implemented."
# TODO:
assert (use_date_features == False), "Compressed date values not implemented."
fname = generate_data_fname(sample_type='train', data_type='numeric')
numeric_columns = pd.read_csv(fname, nrows=2).columns
if use_categoric_features:
fname = train_categorical_onehot_filename
categoric_columns = pd.read_csv(fname, nrows=2).columns
compressed_fnames = sorted(glob.glob1(dirname, "train_numeric+categoric_*.npz"))
    if shuffle:
        random.shuffle(indices)
for list_index in range(len(indices)):
t0 = datetime.now()
compressed_fname = os.path.join(dirname, compressed_fnames[indices[list_index]])
dataz = np.load(compressed_fname)
data = dataz['data']
# print "Loaded in", datetime.now() - t0
yield data, data, list_index+1
del dataz
del data
def load_anomaly_data(use_categoric_features=False, use_date_features=False):
fname = generate_data_fname(sample_type='train', data_type='numeric')
numeric_columns = pd.read_csv(fname, nrows=2).columns
numeric_fname = "d:/Kaggle_ws/Bosch/input/train_numeric_headless_1.csv"
numeric_df = pd.read_csv(numeric_fname, names=numeric_columns, index_col='Id')
del numeric_df['Response']
if use_categoric_features:
fname = train_categorical_onehot_filename
categoric_columns = pd.read_csv(fname, nrows=2).columns
categoric_fname = "d:/Kaggle_ws/Bosch/input/train_categorical_onehot_headless_1.csv"
categoric_df = pd.read_csv(categoric_fname, names=categoric_columns, index_col='Id')
numeric_df = numeric_df.join(categoric_df, how='inner')
del categoric_df
if use_date_features:
fname = train_station_date_filename
station_date_columns = pd.read_csv(fname, nrows=2).columns
station_date_fname = "d:/Kaggle_ws/Bosch/input/train_station_date_headless_1.csv"
station_date_df = pd.read_csv(station_date_fname, names=station_date_columns, index_col='Id')
station_date_df = station_date_df / 1719.0 # Normalization
numeric_df = numeric_df.join(station_date_df, how='inner')
del station_date_df
return numeric_df.values
def my_mean_squared_error(y_true, y_pred):
mask = T.invert(T.isnan(y_true))
y_true = y_true[mask.nonzero()]
y_pred = y_pred[mask.nonzero()]
return K.mean(K.square(y_pred - y_true), axis=-1)
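# Plain-NumPy counterpart of the masked loss above (a minimal sketch for
# illustration; training itself uses the Theano version):
def masked_mse_numpy(y_true, y_pred):
    # NaNs in y_true mark unobserved features; drop them from the loss.
    mask = np.invert(np.isnan(y_true))
    return np.mean(np.square(y_pred[mask] - y_true[mask]))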
# [968, 484, 32, 16, 10, 16, 32, 484, 968]
def load_model(layer_list):
print "Building network with layers", layer_list
cols = layer_list[0]
model = Sequential() # was BoschNNModel()
# Create Input Layer
inputLayer = BoschNNInput(units=layer_list[1], input_shape=(cols,), name="first", activation='relu')
# Add layers to model
model.add(inputLayer)
for i in range(2, len(layer_list) -1):
model.add(Dense(layer_list[i], name='hidden_'+str(i), activation='relu'))
# Add Dropout Layer
model.add(Dropout(name="Dropout", rate=0.5))
# Create Output Layer
# outputLayer = BoschNNOutput(model, cols, name='last', activation='linear')
outputLayer = Dense(cols, name='last', activation='linear')
model.add(outputLayer)
return model
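# Illustrative call (the layer sizes come from the comment above and are
# assumptions, not the values used in the experiments below):
#
#     model = load_model([968, 484, 32, 16, 10, 16, 32, 484, 968])
#     # -> a 968-in/968-out autoencoder with a 10-unit bottleneck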
def train_autoencoder_model(dirname, model_name, hidden_layers, use_categoric_features=False, use_date_features=False, use_compressed=False):
fname = generate_data_fname(sample_type='train', data_type='numeric')
numeric_columns = pd.read_csv(fname, nrows=2).columns
    cols = len(numeric_columns) -1 -1  # one for 'Id', one for 'Response'
if use_categoric_features:
fname = train_categorical_onehot_filename
categoric_columns = pd.read_csv(fname, nrows=2).columns
cols = cols + len(categoric_columns) -1 # one for 'Id'
if use_date_features:
fname = train_station_date_filename
station_date_columns = pd.read_csv(fname, nrows=2).columns
cols = cols + len(station_date_columns) -1 # one for 'Id'
anomaly_df = load_anomaly_data(use_categoric_features, use_date_features)
layer_list = [cols] + eval(hidden_layers) + [cols]
# Load model from somewhere
boschAutoencoderModel = load_model(layer_list)
# Compile model
boschAutoencoderModel.compile(optimizer='adam', loss=my_mean_squared_error)
# this is what we have in the model
boschAutoencoderModel.summary()
# Initialize Callback for weight processing
# boschNNCallback = BoschNNCallback()
# boschModel.fit(X, y, epochs=10, batch_size=1, shuffle=False, verbose=True, callbacks=[boschNNCallback])
# boschAutoencoderModel.fit(X_train, X_train,
# epochs=5,
# batch_size=1,
# shuffle=False,
# # callbacks=[boschNNCallback],
# validation_data=(X_test, X_test))
    batch_count = -1  # total number of batch files available to the CV splitter
if not use_compressed:
batch_count = batch_count_csv(dirname)
else:
batch_count = batch_count_npz(dirname)
print batch_count
kf = KFold(n_splits=cv_splits)
    early_stopping = EarlyStopping(monitor='val_loss', min_delta=1e-6, patience=3, verbose=1, mode='auto')
final_mse = 0.0
best_cv_mse = 99999
cv_count = 1
for train_indices, test_indices in kf.split(range(batch_count)):
print "----------------------------------------------------------------------"
print "CV:", cv_count, "/", cv_splits
# Rewind model!
# Load model from somewhere
boschAutoencoderModel = load_model(layer_list)
# Compile model
# optimizer = Adam(lr=0.001, beta_1=0.9, beta_2=0.999, epsilon=1e-08, decay=0.001)
boschAutoencoderModel.compile(optimizer='adam', loss=my_mean_squared_error)
        # Make repeated passes (up to 50) over each training batch
        for top_epoch, _ in enumerate(range(50), 1):
print "Top epoch:", top_epoch
if not use_compressed:
generator = bosch_data_generator_from_csv(dirname, train_indices, shuffle=True, use_categoric_features=use_categoric_features, use_date_features=use_date_features)
else:
generator = bosch_data_generator_from_npz(dirname, train_indices, shuffle=True, use_categoric_features=use_categoric_features, use_date_features=use_date_features)
history = History()
for X, y, file_index in generator:
print "Set", file_index, "of", len(train_indices)
boschAutoencoderModel.fit(X, y,
# batch_size=4092,
batch_size=6138,
epochs=50,
# verbose=2,
validation_split=0.33,
shuffle=False,
                                      callbacks=[history, early_stopping]
)
print cv_count, top_epoch, file_index
print history.history
del X
del y
fname_model = os.path.join(model_name + "_CV_" + str(cv_count) + ".h5")
print "Saving model to", fname_model, "."
boschAutoencoderModel.save(fname_model)
print "Validating..."
cv_mse = 0.0
total = 0.0
if not use_compressed:
validation_generator = bosch_data_generator_from_csv(dirname, test_indices, use_categoric_features=use_categoric_features, use_date_features=use_date_features)
else:
validation_generator = bosch_data_generator_from_npz(dirname, test_indices, use_categoric_features=use_categoric_features, use_date_features=use_date_features)
for X, y, _ in validation_generator:
predictions = boschAutoencoderModel.predict(X)
y_pred = predictions[np.invert(np.isnan(X)).nonzero()]
y_true = y[np.invert(np.isnan(X)).nonzero()]
counts = y_true.shape[0]
mse_ = mean_squared_error(y_true, y_pred)
cv_mse += counts * mse_
total += counts
#
# mask = np.invert(np.isnan(X))
# predictions = boschAutoencoderModel.predict(X)
# y_pred = predictions[mask.nonzero()]
#
# del X
# del predictions
#
# y_true = y[mask.nonzero()]
#
# del y
# del mask
#
# counts = y_true.shape[0]
# mse_ = mean_squared_error(y_true, y_pred)
# cv_mse += counts * mse_
# total += counts
#
# del y_true
# del y_pred
cv_mse = cv_mse / total
print "CV MSE:", cv_mse
if cv_mse < best_cv_mse:
best_cv_mse = cv_mse
fname_model = os.path.join(model_name + "_best_model" + ".h5")
print "New best CV MSE. Saving model to", fname_model, "."
boschAutoencoderModel.save(fname_model)
predictions = boschAutoencoderModel.predict(anomaly_df)
y_pred = predictions[np.invert(np.isnan(anomaly_df)).nonzero()]
y_true = anomaly_df[np.invert(np.isnan(anomaly_df)).nonzero()]
a_mse_ = mean_squared_error(y_true, y_pred)
print "Anomaly error:", a_mse_
final_mse += cv_mse
cv_count += 1
print "Final MSE:", final_mse / cv_splits
return
def load_encoder_model(model_fname, bottleneck):
print "Loading", model_fname,
ae_model = keras.models.load_model(
model_fname,
custom_objects={"BoschNNInput":BoschNNInput, "my_mean_squared_error":my_mean_squared_error})
bottleneck_layer = 'hidden_' + str(bottleneck)
encoder_model = Model(inputs=ae_model.input, outputs=ae_model.get_layer(bottleneck_layer).output)
print "Done."
return encoder_model
def load_autoencoder_model(model_fname):
print "Loading", model_fname,
ae_model = keras.models.load_model(
model_fname,
custom_objects={"BoschNNInput":BoschNNInput, "my_mean_squared_error":my_mean_squared_error})
print "Done."
return ae_model
def evaluate_model(model_fname, use_categoric_features=False):
boschAutoencoderModel = keras.models.load_model(
model_fname,
custom_objects={"BoschNNInput":BoschNNInput, "my_mean_squared_error":my_mean_squared_error})
boschAutoencoderModel.summary()
anomaly_df = load_anomaly_data(use_categoric_features)
predictions = boschAutoencoderModel.predict(anomaly_df)
mask = np.isnan(anomaly_df)
predictions = boschAutoencoderModel.predict(anomaly_df)
y_pred = predictions[np.invert(mask).nonzero()]
y_true = anomaly_df[np.invert(mask).nonzero()]
a_mse_ = mean_squared_error(y_true, y_pred)
    # Alternative considered below: zero-fill the NaNs on both sides instead
    # of masking them (equivalent only because the filled entries match).
# predictions = boschAutoencoderModel.predict(anomaly_df)
# predictions[mask] = 0
# anomaly_df[mask] = 0
# a_mse_ = mean_squared_error(anomaly_df, predictions)
print "Anomaly error:", a_mse_
return
def evaluate2(model_fname, use_categoric_features=False):
anomaly_df = load_anomaly_data(use_categoric_features)
boschAutoencoderModel = keras.models.load_model(
model_fname,
custom_objects={"BoschNNInput":BoschNNInput, "my_mean_squared_error":my_mean_squared_error})
boschAutoencoderModel.summary()
    predictions = boschAutoencoderModel.predict(anomaly_df)
    mask = np.invert(np.isnan(anomaly_df))
    a_mse_ = mean_squared_error(anomaly_df[mask.nonzero()], predictions[mask.nonzero()])
    print "Anomaly error:", a_mse_
return
def predict_with_ae_model(dirname, model_fname, use_categoric_features=False, use_date_features=False, use_compressed=False):
ae_model = load_autoencoder_model(model_fname)
ae_model.summary()
batch_count = batch_count_csv(dirname)
generator = bosch_data_generator_from_csv(dirname, range(batch_count), use_categoric_features=use_categoric_features, use_date_features=use_date_features)
error_list = []
for X, y, file_index in generator:
print "Set", file_index, "of", batch_count,
for row_index in range(X.shape[0]):
Xr = np.reshape(X[row_index], (1, X.shape[1]))
yr = np.reshape(y[row_index], (1, X.shape[1]))
mask = np.isnan(Xr)
predictions = ae_model.predict(Xr)
y_pred = predictions[np.invert(mask).nonzero()]
y_true = yr[np.invert(mask).nonzero()]
if len(y_true) > 0: # Check for all input is NaN!
a_mse_ = mean_squared_error(y_true, y_pred)
else:
                a_mse_ = 0  # row has no observed features; define its error as zero
error_list.append(a_mse_)
if ((row_index % 1000) == 0): print "*",
print
np.save('errors_normal.npy', np.asmatrix(error_list, dtype='float32'))
pd.Series(data=error_list).to_csv('errors_normal.csv')
X = load_anomaly_data(use_categoric_features, use_date_features)
y = X
error_list = []
for row_index in range(X.shape[0]):
Xr = np.reshape(X[row_index], (1, X.shape[1]))
yr = np.reshape(y[row_index], (1, X.shape[1]))
mask = np.isnan(Xr)
predictions = ae_model.predict(Xr)
y_pred = predictions[np.invert(mask).nonzero()]
y_true = yr[np.invert(mask).nonzero()]
if len(y_true) > 0: # Check for all input is NaN!
a_mse_ = mean_squared_error(y_true, y_pred)
else:
            a_mse_ = 0  # row has no observed features; define its error as zero
error_list.append(a_mse_)
if ((row_index % 1000) == 0): print "*",
print
np.save('errors_anomaly.npy', np.asmatrix(error_list, dtype='float32'))
pd.Series(data=error_list).to_csv('errors_anomaly.csv')
return
def predict_with_encoder_model(dirname, model_fname, bottleneck, use_categoric_features=False, use_date_features=False, use_compressed=False):
encoder_model = load_encoder_model(model_fname, bottleneck=bottleneck)
encoder_model.summary()
output_feature_count = encoder_model.layers[-1].output_shape[1]
col_names = ['col_'+str(i) for i in range(output_feature_count)]
normal_df = pd.DataFrame(columns=col_names, dtype='float32')
batch_count = batch_count_csv(dirname)
if (not use_compressed):
generator = bosch_data_generator_from_csv(dirname, range(batch_count), use_categoric_features=use_categoric_features, use_date_features=use_date_features)
else:
generator = bosch_data_generator_from_npz(dirname, range(batch_count), use_categoric_features=use_categoric_features, use_date_features=use_date_features)
for X, _, file_index in generator:
print "Set", file_index, "of", batch_count
predictions = encoder_model.predict(X)
normal_df = normal_df.append(pd.DataFrame(data=predictions, columns=col_names, dtype='float32'))
normal_df['Response'] = 0
X = load_anomaly_data(use_categoric_features, use_date_features)
predictions = encoder_model.predict(X)
anomaly_df = pd.DataFrame(data=predictions, columns=col_names, dtype='float32')
anomaly_df['Response'] = 1
data_df = normal_df.append(anomaly_df)
data_df.to_csv("encoder_output.csv", index=False)
return
if __name__ == '__main__':
# Experiment 1 NO
# predict_with_encoder_model("bs30000", "experiment_1_no/BAE_model_cols_4-8_numeric_only_best_model.h5", 6, use_categoric_features=False, use_compressed=False)
# Experiment 2 NC
# predict_with_encoder_model("bs30000", "experiment_2_nc/BAE_model_cols_4-8_best_model.h5", 6, use_categoric_features=True, use_compressed=True)
# Experiment 3 NC
# predict_with_ae_model("bs30000", "experiment_5_nd_2-4/BAE_model_cols_2-4_numeric+date_best_model.h5", use_categoric_features=False, use_date_features=True, use_compressed=False)
# predict_with_encoder_model("bs30000", "experiment_5_nd_2-4/BAE_model_cols_2-4_numeric+date_best_model.h5", 6, use_categoric_features=False, use_date_features=True, use_compressed=False)
# exit (-1)
# Experiment 7 NC
predict_with_ae_model("bs30000", "experiment_7_nd_4-8/BAE_model_cols_4-8_numeric+date_best_model.h5", use_categoric_features=False, use_date_features=True, use_compressed=False)
predict_with_encoder_model("bs30000", "experiment_7_nd_4-8/BAE_model_cols_4-8_numeric+date_best_model.h5", 6, use_categoric_features=False, use_date_features=True, use_compressed=False)
exit (-1)
train_autoencoder_model('bs30000',
'BAE_model_cols_4-8_numeric+date',
'[cols/4, cols/8, 64, 32, 16, 6, 16, 32, 64, cols/8, cols/4]', # 15 M
# '[cols/2, cols/4, 64, 32, 16, 6, 16, 32, 64, cols/4, cols/2]', # 30M
use_categoric_features=False,
use_date_features=True,
use_compressed=False)
# fit2: train_autoencoder_model('bs50000', '[cols/2, cols/4, cols/8, 64, 32, 16, 6, 16, 32, 64, cols/8, cols/4, cols/2]')
| apache-2.0 |
potash/scikit-learn | sklearn/linear_model/sag.py | 14 | 11269 | """Solvers for Ridge and LogisticRegression using SAG algorithm"""
# Authors: Tom Dupre la Tour <tom.dupre-la-tour@m4x.org>
#
# License: BSD 3 clause
import numpy as np
import warnings
from ..exceptions import ConvergenceWarning
from ..utils import check_array
from ..utils.extmath import row_norms
from .base import make_dataset
from .sag_fast import sag
def get_auto_step_size(max_squared_sum, alpha_scaled, loss, fit_intercept):
"""Compute automatic step size for SAG solver
    The step size is set to 1 / (alpha_scaled + L + fit_intercept) for the
    squared loss, and to 4 / (4 * alpha_scaled + L + fit_intercept) for the
    log loss, where L is the maximum squared sum of X over all samples.
Parameters
----------
max_squared_sum : float
Maximum squared sum of X over samples.
alpha_scaled : float
Constant that multiplies the regularization term, scaled by
1. / n_samples, the number of samples.
loss : string, in {"log", "squared"}
The loss function used in SAG solver.
fit_intercept : bool
Specifies if a constant (a.k.a. bias or intercept) will be
added to the decision function.
Returns
-------
step_size : float
Step size used in SAG solver.
References
----------
Schmidt, M., Roux, N. L., & Bach, F. (2013).
Minimizing finite sums with the stochastic average gradient
https://hal.inria.fr/hal-00860051/document
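    Examples
    --------
    An illustrative call (the numbers are assumptions): with squared loss,
    an intercept and max_squared_sum = 4.0, the step size is
    1.0 / (4.0 + 1 + 0.5).

    >>> get_auto_step_size(4.0, 0.5, 'squared', True)  # doctest: +ELLIPSIS
    0.1818...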
"""
if loss in ('log', 'multinomial'):
# inverse Lipschitz constant for log loss
return 4.0 / (max_squared_sum + int(fit_intercept)
+ 4.0 * alpha_scaled)
elif loss == 'squared':
# inverse Lipschitz constant for squared loss
return 1.0 / (max_squared_sum + int(fit_intercept) + alpha_scaled)
else:
raise ValueError("Unknown loss function for SAG solver, got %s "
"instead of 'log' or 'squared'" % loss)
def sag_solver(X, y, sample_weight=None, loss='log', alpha=1.,
max_iter=1000, tol=0.001, verbose=0, random_state=None,
check_input=True, max_squared_sum=None,
warm_start_mem=None):
"""SAG solver for Ridge and LogisticRegression
SAG stands for Stochastic Average Gradient: the gradient of the loss is
    estimated one sample at a time and the model is updated along the way with
a constant learning rate.
IMPORTANT NOTE: 'sag' solver converges faster on columns that are on the
same scale. You can normalize the data by using
sklearn.preprocessing.StandardScaler on your data before passing it to the
fit method.
This implementation works with data represented as dense numpy arrays or
sparse scipy arrays of floating point values for the features. It will
fit the data according to squared loss or log loss.
The regularizer is a penalty added to the loss function that shrinks model
parameters towards the zero vector using the squared euclidean norm L2.
.. versionadded:: 0.17
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Training data
y : numpy array, shape (n_samples,)
Target values. With loss='multinomial', y must be label encoded
(see preprocessing.LabelEncoder).
sample_weight : array-like, shape (n_samples,), optional
Weights applied to individual samples (1. for unweighted).
loss : 'log' | 'squared' | 'multinomial'
Loss function that will be optimized:
-'log' is the binary logistic loss, as used in LogisticRegression.
-'squared' is the squared loss, as used in Ridge.
-'multinomial' is the multinomial logistic loss, as used in
LogisticRegression.
.. versionadded:: 0.18
*loss='multinomial'*
alpha : float, optional
Constant that multiplies the regularization term. Defaults to 1.
max_iter: int, optional
The max number of passes over the training data if the stopping
criteria is not reached. Defaults to 1000.
tol: double, optional
The stopping criteria for the weights. The iterations will stop when
max(change in weights) / max(weights) < tol. Defaults to .001
verbose: integer, optional
The verbosity level.
random_state : int seed, RandomState instance, or None (default)
The seed of the pseudo random number generator to use when
shuffling the data.
check_input : bool, default True
If False, the input arrays X and y will not be checked.
max_squared_sum : float, default None
Maximum squared sum of X over samples. If None, it will be computed,
going through all the samples. The value should be precomputed
to speed up cross validation.
warm_start_mem: dict, optional
The initialization parameters used for warm starting. Warm starting is
currently used in LogisticRegression but not in Ridge.
It contains:
- 'coef': the weight vector, with the intercept in last line
if the intercept is fitted.
- 'gradient_memory': the scalar gradient for all seen samples.
- 'sum_gradient': the sum of gradient over all seen samples,
for each feature.
- 'intercept_sum_gradient': the sum of gradient over all seen
samples, for the intercept.
- 'seen': array of boolean describing the seen samples.
- 'num_seen': the number of seen samples.
Returns
-------
coef_ : array, shape (n_features)
Weight vector.
n_iter_ : int
The number of full pass on all samples.
warm_start_mem : dict
Contains a 'coef' key with the fitted result, and possibly the
fitted intercept at the end of the array. Contains also other keys
used for warm starting.
Examples
--------
>>> import numpy as np
>>> from sklearn import linear_model
>>> n_samples, n_features = 10, 5
>>> np.random.seed(0)
>>> X = np.random.randn(n_samples, n_features)
>>> y = np.random.randn(n_samples)
>>> clf = linear_model.Ridge(solver='sag')
>>> clf.fit(X, y)
... #doctest: +NORMALIZE_WHITESPACE
Ridge(alpha=1.0, copy_X=True, fit_intercept=True, max_iter=None,
normalize=False, random_state=None, solver='sag', tol=0.001)
>>> X = np.array([[-1, -1], [-2, -1], [1, 1], [2, 1]])
>>> y = np.array([1, 1, 2, 2])
>>> clf = linear_model.LogisticRegression(solver='sag')
>>> clf.fit(X, y)
... #doctest: +NORMALIZE_WHITESPACE
LogisticRegression(C=1.0, class_weight=None, dual=False,
fit_intercept=True, intercept_scaling=1, max_iter=100,
multi_class='ovr', n_jobs=1, penalty='l2', random_state=None,
solver='sag', tol=0.0001, verbose=0, warm_start=False)
References
----------
Schmidt, M., Roux, N. L., & Bach, F. (2013).
Minimizing finite sums with the stochastic average gradient
https://hal.inria.fr/hal-00860051/document
See also
--------
Ridge, SGDRegressor, ElasticNet, Lasso, SVR, and
LogisticRegression, SGDClassifier, LinearSVC, Perceptron
"""
if warm_start_mem is None:
warm_start_mem = {}
# Ridge default max_iter is None
if max_iter is None:
max_iter = 1000
if check_input:
X = check_array(X, dtype=np.float64, accept_sparse='csr', order='C')
y = check_array(y, dtype=np.float64, ensure_2d=False, order='C')
n_samples, n_features = X.shape[0], X.shape[1]
# As in SGD, the alpha is scaled by n_samples.
alpha_scaled = float(alpha) / n_samples
# if loss == 'multinomial', y should be label encoded.
n_classes = int(y.max()) + 1 if loss == 'multinomial' else 1
# initialization
if sample_weight is None:
sample_weight = np.ones(n_samples, dtype=np.float64, order='C')
if 'coef' in warm_start_mem.keys():
coef_init = warm_start_mem['coef']
else:
# assume fit_intercept is False
coef_init = np.zeros((n_features, n_classes), dtype=np.float64,
order='C')
# coef_init contains possibly the intercept_init at the end.
# Note that Ridge centers the data before fitting, so fit_intercept=False.
fit_intercept = coef_init.shape[0] == (n_features + 1)
if fit_intercept:
intercept_init = coef_init[-1, :]
coef_init = coef_init[:-1, :]
else:
intercept_init = np.zeros(n_classes, dtype=np.float64)
if 'intercept_sum_gradient' in warm_start_mem.keys():
intercept_sum_gradient = warm_start_mem['intercept_sum_gradient']
else:
intercept_sum_gradient = np.zeros(n_classes, dtype=np.float64)
if 'gradient_memory' in warm_start_mem.keys():
gradient_memory_init = warm_start_mem['gradient_memory']
else:
gradient_memory_init = np.zeros((n_samples, n_classes),
dtype=np.float64, order='C')
if 'sum_gradient' in warm_start_mem.keys():
sum_gradient_init = warm_start_mem['sum_gradient']
else:
sum_gradient_init = np.zeros((n_features, n_classes),
dtype=np.float64, order='C')
if 'seen' in warm_start_mem.keys():
seen_init = warm_start_mem['seen']
else:
seen_init = np.zeros(n_samples, dtype=np.int32, order='C')
if 'num_seen' in warm_start_mem.keys():
num_seen_init = warm_start_mem['num_seen']
else:
num_seen_init = 0
dataset, intercept_decay = make_dataset(X, y, sample_weight, random_state)
if max_squared_sum is None:
max_squared_sum = row_norms(X, squared=True).max()
step_size = get_auto_step_size(max_squared_sum, alpha_scaled, loss,
fit_intercept)
if step_size * alpha_scaled == 1:
raise ZeroDivisionError("Current sag implementation does not handle "
"the case step_size * alpha_scaled == 1")
num_seen, n_iter_ = sag(dataset, coef_init,
intercept_init, n_samples,
n_features, n_classes, tol,
max_iter,
loss,
step_size, alpha_scaled,
sum_gradient_init,
gradient_memory_init,
seen_init,
num_seen_init,
fit_intercept,
intercept_sum_gradient,
intercept_decay,
verbose)
if n_iter_ == max_iter:
warnings.warn("The max_iter was reached which means "
"the coef_ did not converge", ConvergenceWarning)
if fit_intercept:
coef_init = np.vstack((coef_init, intercept_init))
warm_start_mem = {'coef': coef_init, 'sum_gradient': sum_gradient_init,
'intercept_sum_gradient': intercept_sum_gradient,
'gradient_memory': gradient_memory_init,
'seen': seen_init, 'num_seen': num_seen}
if loss == 'multinomial':
coef_ = coef_init.T
else:
coef_ = coef_init[:, 0]
return coef_, n_iter_, warm_start_mem
| bsd-3-clause |
r9y9/librosa | librosa/decompose.py | 2 | 18417 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Spectrogram decomposition
=========================
.. autosummary::
:toctree: generated/
decompose
hpss
nn_filter
"""
import numpy as np
import scipy.sparse
from scipy.ndimage import median_filter
import sklearn.decomposition
from . import core
from . import cache
from . import segment
from . import util
from .util.exceptions import ParameterError
__all__ = ['decompose', 'hpss', 'nn_filter']
def decompose(S, n_components=None, transformer=None, sort=False, fit=True, **kwargs):
"""Decompose a feature matrix.
Given a spectrogram `S`, produce a decomposition into `components`
and `activations` such that `S ~= components.dot(activations)`.
    By default, this is done with non-negative matrix factorization (NMF),
but any `sklearn.decomposition`-type object will work.
Parameters
----------
S : np.ndarray [shape=(n_features, n_samples), dtype=float]
The input feature matrix (e.g., magnitude spectrogram)
n_components : int > 0 [scalar] or None
number of desired components
if None, then `n_features` components are used
transformer : None or object
If None, use `sklearn.decomposition.NMF`
Otherwise, any object with a similar interface to NMF should work.
`transformer` must follow the scikit-learn convention, where
input data is `(n_samples, n_features)`.
`transformer.fit_transform()` will be run on `S.T` (not `S`),
the return value of which is stored (transposed) as `activations`
The components will be retrieved as `transformer.components_.T`
`S ~= np.dot(activations, transformer.components_).T`
or equivalently:
`S ~= np.dot(transformer.components_.T, activations.T)`
sort : bool
If `True`, components are sorted by ascending peak frequency.
.. note:: If used with `transformer`, sorting is applied to copies
of the decomposition parameters, and not to `transformer`'s
internal parameters.
fit : bool
If `True`, components are estimated from the input ``S``.
If `False`, components are assumed to be pre-computed and stored
in ``transformer``, and are not changed.
kwargs : Additional keyword arguments to the default transformer
`sklearn.decomposition.NMF`
Returns
-------
components: np.ndarray [shape=(n_features, n_components)]
matrix of components (basis elements).
activations: np.ndarray [shape=(n_components, n_samples)]
transformed matrix/activation matrix
Raises
------
ParameterError
if `fit` is False and no `transformer` object is provided.
See Also
--------
sklearn.decomposition : SciKit-Learn matrix decomposition modules
Examples
--------
Decompose a magnitude spectrogram into 32 components with NMF
>>> y, sr = librosa.load(librosa.util.example_audio_file())
>>> S = np.abs(librosa.stft(y))
>>> comps, acts = librosa.decompose.decompose(S, n_components=8)
>>> comps
array([[ 1.876e-01, 5.559e-02, ..., 1.687e-01, 4.907e-02],
[ 3.148e-01, 1.719e-01, ..., 2.314e-01, 9.493e-02],
...,
[ 1.561e-07, 8.564e-08, ..., 7.167e-08, 4.997e-08],
[ 1.531e-07, 7.880e-08, ..., 5.632e-08, 4.028e-08]])
>>> acts
array([[ 4.197e-05, 8.512e-03, ..., 3.056e-05, 9.159e-06],
[ 9.568e-06, 1.718e-02, ..., 3.322e-05, 7.869e-06],
...,
[ 5.982e-05, 1.311e-02, ..., -0.000e+00, 6.323e-06],
[ 3.782e-05, 7.056e-03, ..., 3.290e-05, -0.000e+00]])
Sort components by ascending peak frequency
>>> comps, acts = librosa.decompose.decompose(S, n_components=16,
... sort=True)
Or with sparse dictionary learning
>>> import sklearn.decomposition
>>> T = sklearn.decomposition.MiniBatchDictionaryLearning(n_components=16)
>>> scomps, sacts = librosa.decompose.decompose(S, transformer=T, sort=True)
>>> import matplotlib.pyplot as plt
>>> plt.figure(figsize=(10,8))
>>> plt.subplot(3, 1, 1)
>>> librosa.display.specshow(librosa.amplitude_to_db(S,
... ref=np.max),
... y_axis='log', x_axis='time')
>>> plt.title('Input spectrogram')
>>> plt.colorbar(format='%+2.0f dB')
>>> plt.subplot(3, 2, 3)
>>> librosa.display.specshow(librosa.amplitude_to_db(comps,
... ref=np.max),
... y_axis='log')
>>> plt.colorbar(format='%+2.0f dB')
>>> plt.title('Components')
>>> plt.subplot(3, 2, 4)
>>> librosa.display.specshow(acts, x_axis='time')
>>> plt.ylabel('Components')
>>> plt.title('Activations')
>>> plt.colorbar()
>>> plt.subplot(3, 1, 3)
>>> S_approx = comps.dot(acts)
>>> librosa.display.specshow(librosa.amplitude_to_db(S_approx,
... ref=np.max),
... y_axis='log', x_axis='time')
>>> plt.colorbar(format='%+2.0f dB')
>>> plt.title('Reconstructed spectrogram')
>>> plt.tight_layout()
"""
if transformer is None:
if fit is False:
raise ParameterError('fit must be True if transformer is None')
transformer = sklearn.decomposition.NMF(n_components=n_components,
**kwargs)
if n_components is None:
n_components = S.shape[0]
if fit:
activations = transformer.fit_transform(S.T).T
else:
activations = transformer.transform(S.T).T
components = transformer.components_.T
if sort:
components, idx = util.axis_sort(components, index=True)
activations = activations[idx]
return components, activations
@cache(level=30)
def hpss(S, kernel_size=31, power=2.0, mask=False, margin=1.0):
"""Median-filtering harmonic percussive source separation (HPSS).
If `margin = 1.0`, decomposes an input spectrogram `S = H + P`
where `H` contains the harmonic components,
and `P` contains the percussive components.
If `margin > 1.0`, decomposes an input spectrogram `S = H + P + R`
where `R` contains residual components not included in `H` or `P`.
This implementation is based upon the algorithm described by [1]_ and [2]_.
.. [1] Fitzgerald, Derry.
"Harmonic/percussive separation using median filtering."
13th International Conference on Digital Audio Effects (DAFX10),
Graz, Austria, 2010.
.. [2] Driedger, Müller, Disch.
"Extending harmonic-percussive separation of audio."
15th International Society for Music Information Retrieval Conference (ISMIR 2014),
Taipei, Taiwan, 2014.
Parameters
----------
S : np.ndarray [shape=(d, n)]
input spectrogram. May be real (magnitude) or complex.
kernel_size : int or tuple (kernel_harmonic, kernel_percussive)
kernel size(s) for the median filters.
- If scalar, the same size is used for both harmonic and percussive.
- If tuple, the first value specifies the width of the
harmonic filter, and the second value specifies the width
of the percussive filter.
power : float > 0 [scalar]
Exponent for the Wiener filter when constructing soft mask matrices.
mask : bool
Return the masking matrices instead of components.
Masking matrices contain non-negative real values that
can be used to measure the assignment of energy from `S`
into harmonic or percussive components.
Components can be recovered by multiplying `S * mask_H`
or `S * mask_P`.
margin : float or tuple (margin_harmonic, margin_percussive)
margin size(s) for the masks (as described in [2]_)
- If scalar, the same size is used for both harmonic and percussive.
- If tuple, the first value specifies the margin of the
harmonic mask, and the second value specifies the margin
of the percussive mask.
Returns
-------
harmonic : np.ndarray [shape=(d, n)]
harmonic component (or mask)
percussive : np.ndarray [shape=(d, n)]
percussive component (or mask)
See Also
--------
util.softmask
Notes
-----
This function caches at level 30.
Examples
--------
Separate into harmonic and percussive
>>> y, sr = librosa.load(librosa.util.example_audio_file(), duration=15)
>>> D = librosa.stft(y)
>>> H, P = librosa.decompose.hpss(D)
>>> import matplotlib.pyplot as plt
>>> plt.figure()
>>> plt.subplot(3, 1, 1)
>>> librosa.display.specshow(librosa.amplitude_to_db(D,
... ref=np.max),
... y_axis='log')
>>> plt.colorbar(format='%+2.0f dB')
>>> plt.title('Full power spectrogram')
>>> plt.subplot(3, 1, 2)
>>> librosa.display.specshow(librosa.amplitude_to_db(H,
... ref=np.max),
... y_axis='log')
>>> plt.colorbar(format='%+2.0f dB')
>>> plt.title('Harmonic power spectrogram')
>>> plt.subplot(3, 1, 3)
>>> librosa.display.specshow(librosa.amplitude_to_db(P,
... ref=np.max),
... y_axis='log')
>>> plt.colorbar(format='%+2.0f dB')
>>> plt.title('Percussive power spectrogram')
>>> plt.tight_layout()
Or with a narrower horizontal filter
>>> H, P = librosa.decompose.hpss(D, kernel_size=(13, 31))
Just get harmonic/percussive masks, not the spectra
>>> mask_H, mask_P = librosa.decompose.hpss(D, mask=True)
>>> mask_H
array([[ 1.000e+00, 1.469e-01, ..., 2.648e-03, 2.164e-03],
[ 1.000e+00, 2.368e-01, ..., 9.413e-03, 7.703e-03],
...,
[ 8.869e-01, 5.673e-02, ..., 4.603e-02, 1.247e-05],
[ 7.068e-01, 2.194e-02, ..., 4.453e-02, 1.205e-05]], dtype=float32)
>>> mask_P
array([[ 2.858e-05, 8.531e-01, ..., 9.974e-01, 9.978e-01],
[ 1.586e-05, 7.632e-01, ..., 9.906e-01, 9.923e-01],
...,
[ 1.131e-01, 9.433e-01, ..., 9.540e-01, 1.000e+00],
[ 2.932e-01, 9.781e-01, ..., 9.555e-01, 1.000e+00]], dtype=float32)
Separate into harmonic/percussive/residual components by using a margin > 1.0
>>> H, P = librosa.decompose.hpss(D, margin=3.0)
>>> R = D - (H+P)
>>> y_harm = librosa.core.istft(H)
>>> y_perc = librosa.core.istft(P)
>>> y_resi = librosa.core.istft(R)
Get a more isolated percussive component by widening its margin
>>> H, P = librosa.decompose.hpss(D, margin=(1.0,5.0))
"""
if np.iscomplexobj(S):
S, phase = core.magphase(S)
else:
phase = 1
if np.isscalar(kernel_size):
win_harm = kernel_size
win_perc = kernel_size
else:
win_harm = kernel_size[0]
win_perc = kernel_size[1]
if np.isscalar(margin):
margin_harm = margin
margin_perc = margin
else:
margin_harm = margin[0]
margin_perc = margin[1]
# margin minimum is 1.0
if margin_harm < 1 or margin_perc < 1:
raise ParameterError("Margins must be >= 1.0. "
"A typical range is between 1 and 10.")
# Compute median filters. Pre-allocation here preserves memory layout.
harm = np.empty_like(S)
harm[:] = median_filter(S, size=(1, win_harm), mode='reflect')
perc = np.empty_like(S)
perc[:] = median_filter(S, size=(win_perc, 1), mode='reflect')
split_zeros = (margin_harm == 1 and margin_perc == 1)
mask_harm = util.softmask(harm, perc * margin_harm,
power=power,
split_zeros=split_zeros)
mask_perc = util.softmask(perc, harm * margin_perc,
power=power,
split_zeros=split_zeros)
if mask:
return mask_harm, mask_perc
return ((S * mask_harm) * phase, (S * mask_perc) * phase)
@cache(level=30)
def nn_filter(S, rec=None, aggregate=None, axis=-1, **kwargs):
'''Filtering by nearest-neighbors.
Each data point (e.g, spectrogram column) is replaced
by aggregating its nearest neighbors in feature space.
This can be useful for de-noising a spectrogram or feature matrix.
The non-local means method [1]_ can be recovered by providing a
weighted recurrence matrix as input and specifying `aggregate=np.average`.
Similarly, setting `aggregate=np.median` produces sparse de-noising
as in REPET-SIM [2]_.
.. [1] Buades, A., Coll, B., & Morel, J. M.
(2005, June). A non-local algorithm for image denoising.
In Computer Vision and Pattern Recognition, 2005.
CVPR 2005. IEEE Computer Society Conference on (Vol. 2, pp. 60-65). IEEE.
.. [2] Rafii, Z., & Pardo, B.
(2012, October). "Music/Voice Separation Using the Similarity Matrix."
International Society for Music Information Retrieval Conference, 2012.
Parameters
----------
S : np.ndarray
The input data (spectrogram) to filter
rec : (optional) scipy.sparse.spmatrix or np.ndarray
Optionally, a pre-computed nearest-neighbor matrix
as provided by `librosa.segment.recurrence_matrix`
aggregate : function
aggregation function (default: `np.mean`)
If `aggregate=np.average`, then a weighted average is
computed according to the (per-row) weights in `rec`.
For all other aggregation functions, all neighbors
are treated equally.
axis : int
The axis along which to filter (by default, columns)
kwargs
Additional keyword arguments provided to
`librosa.segment.recurrence_matrix` if `rec` is not provided
Returns
-------
S_filtered : np.ndarray
The filtered data
Raises
------
ParameterError
if `rec` is provided and its shape is incompatible with `S`.
See also
--------
decompose
hpss
librosa.segment.recurrence_matrix
Notes
-----
This function caches at level 30.
Examples
--------
De-noise a chromagram by non-local median filtering.
By default this would use euclidean distance to select neighbors,
but this can be overridden directly by setting the `metric` parameter.
>>> y, sr = librosa.load(librosa.util.example_audio_file(),
... offset=30, duration=10)
>>> chroma = librosa.feature.chroma_cqt(y=y, sr=sr)
>>> chroma_med = librosa.decompose.nn_filter(chroma,
... aggregate=np.median,
... metric='cosine')
To use non-local means, provide an affinity matrix and `aggregate=np.average`.
>>> rec = librosa.segment.recurrence_matrix(chroma, mode='affinity',
... metric='cosine', sparse=True)
>>> chroma_nlm = librosa.decompose.nn_filter(chroma, rec=rec,
... aggregate=np.average)
>>> import matplotlib.pyplot as plt
>>> plt.figure(figsize=(10, 8))
>>> plt.subplot(5, 1, 1)
>>> librosa.display.specshow(chroma, y_axis='chroma')
>>> plt.colorbar()
>>> plt.title('Unfiltered')
>>> plt.subplot(5, 1, 2)
>>> librosa.display.specshow(chroma_med, y_axis='chroma')
>>> plt.colorbar()
>>> plt.title('Median-filtered')
>>> plt.subplot(5, 1, 3)
>>> librosa.display.specshow(chroma_nlm, y_axis='chroma')
>>> plt.colorbar()
>>> plt.title('Non-local means')
>>> plt.subplot(5, 1, 4)
>>> librosa.display.specshow(chroma - chroma_med,
... y_axis='chroma')
>>> plt.colorbar()
>>> plt.title('Original - median')
>>> plt.subplot(5, 1, 5)
>>> librosa.display.specshow(chroma - chroma_nlm,
... y_axis='chroma', x_axis='time')
>>> plt.colorbar()
>>> plt.title('Original - NLM')
>>> plt.tight_layout()
'''
if aggregate is None:
aggregate = np.mean
if rec is None:
kwargs = dict(kwargs)
kwargs['sparse'] = True
rec = segment.recurrence_matrix(S, axis=axis, **kwargs)
elif not scipy.sparse.issparse(rec):
rec = scipy.sparse.csr_matrix(rec)
if rec.shape[0] != S.shape[axis] or rec.shape[0] != rec.shape[1]:
raise ParameterError('Invalid self-similarity matrix shape '
'rec.shape={} for S.shape={}'.format(rec.shape,
S.shape))
return __nn_filter_helper(rec.data, rec.indices, rec.indptr,
S.swapaxes(0, axis), aggregate).swapaxes(0, axis)
def __nn_filter_helper(R_data, R_indices, R_ptr, S, aggregate):
'''Nearest-neighbor filter helper function.
This is an internal function, not for use outside of the decompose module.
It applies the nearest-neighbor filter to S, assuming that the first index
corresponds to observations.
Parameters
----------
R_data, R_indices, R_ptr : np.ndarrays
The `data`, `indices`, and `indptr` of a scipy.sparse matrix
S : np.ndarray
The observation data to filter
aggregate : callable
The aggregation operator
Returns
-------
S_out : np.ndarray like S
The filtered data array
'''
s_out = np.empty_like(S)
for i in range(len(R_ptr)-1):
# Get the non-zeros out of the recurrence matrix
targets = R_indices[R_ptr[i]:R_ptr[i+1]]
if not len(targets):
s_out[i] = S[i]
continue
neighbors = np.take(S, targets, axis=0)
if aggregate is np.average:
weights = R_data[R_ptr[i]:R_ptr[i+1]]
s_out[i] = aggregate(neighbors, axis=0, weights=weights)
else:
s_out[i] = aggregate(neighbors, axis=0)
return s_out
| isc |
flag0010/pop_gen_cnn | tajimas_D/generic_tajD.py | 2 | 3383 | from common import *
from itertools import izip
def tajD(N,S,k):
'''N is the number of indv
S is the number of seg. sites
k is the (ave number of pairwise nuc. diffs), e.g. total number of pairwise diffs divided by choose(N,2)
so:
i1 AT|T|GGCG|A|CAG|T
i2 AT|G|GGCG|C|CAG|A
i3 AT|G|GGCG|A|CAG|A
i4 AT|G|GGCG|C|CAG|T
N = 4, S = 3, k = 11/6 = 1.83333
D = 1.08976
'''
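    # Standard Tajima (1989) constants: a1 and a2 are harmonic sums; b1, b2,
    # c1, c2, e1, e2 normalize the variance so that
    # D = (k - S/a1) / sqrt(e1*S + e2*S*(S - 1)), as computed below.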
a1, a2 = 0.0, 0.0
for b in range(1,N):
a1 += 1. / b
a2 += 1. / (b * b)
b1 = (N + 1.) / (3. * (N - 1.))
b2 = (2. * N * N + 2. * N + 6.) / (9. * N * (N - 1.))
c1 = b1 - 1. / a1
c2 = b2 - (N + 2.) / (a1 * N) + a2 / (a1 * a1)
e1 = c1 / a1
e2 = c2 / (a1 * a1 + a2)
num = k - S / a1
den = (e1 * S + e2 * S * (S - 1))**0.5
D = num / den
return D
def calc_S_and_k_from_seqs(list_of_seqs):
###first a function of taking the "sequence configuration" and converting it to a tally of pairwise diffs
def pairwise_diffs2(x):
if len(x) == 1:
return 0
else:
q = 0.0
for i,j in pairwise(x):
q += i*j
return q
#now loop through sites and tally S and k
S, p = 0.0, 0.0
for n in izip(*list_of_seqs):
config = count_all(n).values()
diffs = pairwise_diffs2(config)
if diffs: ## all the non-seg sites return zero, which is False
p += diffs
S += 1
k = p / choose(len(list_of_seqs), 2)
return S, k
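# Quick check (matches the tajD docstring): the four 12-mers used in
# __main__ below give S = 3 segregating sites and k = 11/6 ~= 1.8333.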
def seq_boot(seqs):
n = zip(*seqs)
idxs = range(len(n))
while 1:
k = defaultdict(list)
idx_list = sampler(idxs, len(n), replacement=True)
for idx in idx_list:
for pos, val in enumerate(n[idx]):
k[pos].append(val)
yield map(lambda s: ''.join(s), k.values())
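# seq_boot resamples alignment columns with replacement, yielding one
# bootstrap replicate (a new list of sequences) per iteration.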
def permute(seqs, reps = 1000):
n = []
repnum=0
boot = seq_boot(seqs)
while repnum < reps:
rep = boot.next()
N = len(rep)
S, k = calc_S_and_k_from_seqs(rep)
try:
val = tajD(N,S,k)
except:
val = 'NA'
if val != 'NA':
n.append(val)
repnum+=1
#print repnum
n.sort()
#print len(n)
lower_idx, upper_idx = int(len(n) * 0.025)-1, len(n) - int(len(n) * 0.025)
mean = lambda s: sum(s) / float(len(s))
return mean(n), n[lower_idx], n[upper_idx], len(n)
if __name__ == '__main__':
seqs = ['ATTGGCGACAGT', 'ATGGGCGCCAGA', 'ATGGGCGACAGA', 'ATGGGCGCCAGT']
##works with non DNA seq data too
#seqs = ['00001001000101'*100,
# '01101001000101'*100,
# '00001000010111'*100,
# '01101001010101'*100]
N = len(seqs)
S, k = calc_S_and_k_from_seqs(seqs)
print N, S, k
real = tajD(N,S,k)
mean_perm, lower_ci, upper_ci, perms = permute(seqs)
print "real Tajima's D", real
print 'mean of bootstraps', mean_perm
print 'lower 0.025 CI', lower_ci
print 'upper 0.975 CI', upper_ci
print 'num. replicates', perms
if lower_ci <= 0 <= upper_ci: print 'not sig. different from zero'
else: print 'sig. different from zero'
#from matplotlib import pyplot as plt
#plt.hist(n)
#plt.show()
#idx = 0
#for i in seqs:
# print '>'+str(idx)
# print i
# idx+=1
| gpl-3.0 |
darioizzo/pykep | pykep/trajopt/_pl2pl_N_impulses.py | 2 | 11321 | from pykep.core import epoch, DAY2SEC, lambert_problem, propagate_lagrangian, SEC2DAY, AU, ic2par
from pykep.planet import jpl_lp
from math import pi, cos, sin, log, acos
from scipy.linalg import norm
class pl2pl_N_impulses(object):
"""
This class is a pygmo (http://esa.github.io/pygmo/) problem representing a single leg transfer
between two planets allowing up to a maximum number of impulsive Deep Space Maneuvers.
The decision vector is::
[t0,T] + [alpha,u,v,V_inf] * (N-2) + [alpha] + ([tf])
... in the units: [mjd2000, days] + [nd, nd, m/sec, nd] + [nd] + [mjd2000]
    Each time-of-flight can be decoded as T_n = T * log(alpha_n) / sum_i(log(alpha_i)).
.. note::
The resulting problem is box-bounded (unconstrained). The resulting trajectory is time-bounded.
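    Example of decoding the times-of-flight (an illustrative sketch; the
    numbers are assumptions, with N_max = 4 so that three alphas appear)::

        from math import log
        T_total, alphas = 300.0, [0.2, 0.5, 0.7]
        logs = [log(a) for a in alphas]
        T = [T_total * l / sum(logs) for l in logs]
        # each T_n is positive and sum(T) == T_total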
"""
def __init__(self,
start=jpl_lp('earth'),
target=jpl_lp('venus'),
N_max=3,
tof=[20., 400.],
vinf=[0., 4.],
phase_free=True,
multi_objective=False,
t0=None
):
"""
prob = pykep.trajopt.pl2pl_N_impulses(start=jpl_lp('earth'), target=jpl_lp('venus'), N_max=3, tof=[20., 400.], vinf=[0., 4.], phase_free=True, multi_objective=False, t0=None)
Args:
- start (``pykep.planet``): the starting planet
- target (``pykep.planet``): the target planet
- N_max (``int``): maximum number of impulses
- tof (``list``): the box bounds [lower,upper] for the time of flight (days)
- vinf (``list``): the box bounds [lower,upper] for each DV magnitude (km/sec)
- phase_free (``bool``): when True, no rendezvous condition are enforced and start and arrival anomalies will be free
- multi_objective (``bool``): when True, a multi-objective problem is constructed with DV and time of flight as objectives
- t0 (``list``): the box bounds on the launch window containing two pykep.epoch. This is not needed if phase_free is True.
"""
# Sanity checks
# 1) all planets need to have the same mu_central_body
if (start.mu_central_body != target.mu_central_body):
raise ValueError(
'Starting and ending pykep.planet must have the same mu_central_body')
# 2) Number of impulses must be at least 2
if N_max < 2:
raise ValueError('Number of impulses N is less than 2')
# 3) If phase_free is True, t0 does not make sense
if (t0 is None and not phase_free):
t0 = [epoch(0), epoch(1000)]
if (t0 is not None and phase_free):
raise ValueError('When phase_free is True no t0 can be specified')
        if t0 is not None:
            if (type(t0[0]) != type(epoch(0))):
                t0[0] = epoch(t0[0])
            if (type(t0[1]) != type(epoch(0))):
                t0[1] = epoch(t0[1])
self.obj_dim = multi_objective + 1
# We then define all class data members
self.start = start
self.target = target
self.N_max = N_max
self.phase_free = phase_free
self.multi_objective = multi_objective
self.vinf = [s * 1000 for s in vinf]
self.__common_mu = start.mu_central_body
# And we compute the bounds
if phase_free:
self._lb = [0, tof[0]] + [1e-3, 0.0, 0.0,
vinf[0] * 1000] * (N_max - 2) + [1e-3] + [0]
self._ub = [2 * start.compute_period(epoch(0)) * SEC2DAY, tof[1]] + [1.0-1e-3, 1.0, 1.0, vinf[
1] * 1000] * (N_max - 2) + [1.0-1e-3] + [2 * target.compute_period(epoch(0)) * SEC2DAY]
else:
self._lb = [t0[0].mjd2000, tof[0]] + \
[1e-3, 0.0, 0.0, vinf[0] * 1000] * (N_max - 2) + [1e-3]
self._ub = [t0[1].mjd2000, tof[1]] + \
[1.0-1e-3, 1.0, 1.0, vinf[1] * 1000] * (N_max - 2) + [1.0-1e-3]
def get_nobj(self):
return self.obj_dim
def get_bounds(self):
return (self._lb, self._ub)
def fitness(self, x):
# 1 - we 'decode' the chromosome into the various deep space
# maneuvers times (days) in the list T
T = list([0] * (self.N_max - 1))
for i in range(len(T)):
T[i] = log(x[2 + 4 * i])
total = sum(T)
T = [x[1] * time / total for time in T]
# 2 - We compute the starting and ending position
r_start, v_start = self.start.eph(epoch(x[0]))
if self.phase_free:
r_target, v_target = self.target.eph(epoch(x[-1]))
else:
r_target, v_target = self.target.eph(epoch(x[0] + x[1]))
# 3 - We loop across inner impulses
rsc = r_start
vsc = v_start
for i, time in enumerate(T[:-1]):
theta = 2 * pi * x[3 + 4 * i]
phi = acos(2 * x[4 + 4 * i] - 1) - pi / 2
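            # (u, v) -> (theta, phi) maps the unit square uniformly onto the
            # sphere, so the impulse direction is unbiased; x[5 + 4*i] sets
            # its magnitude.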
Vinfx = x[5 + 4 * i] * cos(phi) * cos(theta)
Vinfy = x[5 + 4 * i] * cos(phi) * sin(theta)
Vinfz = x[5 + 4 * i] * sin(phi)
# We apply the (i+1)-th impulse
vsc = [a + b for a, b in zip(vsc, [Vinfx, Vinfy, Vinfz])]
rsc, vsc = propagate_lagrangian(
rsc, vsc, T[i] * DAY2SEC, self.__common_mu)
cw = (ic2par(rsc, vsc, self.start.mu_central_body)[2] > pi / 2)
# We now compute the remaining two final impulses
# Lambert arc to reach seq[1]
dt = T[-1] * DAY2SEC
l = lambert_problem(rsc, r_target, dt, self.__common_mu, cw, False)
v_end_l = l.get_v2()[0]
v_beg_l = l.get_v1()[0]
DV1 = norm([a - b for a, b in zip(v_beg_l, vsc)])
DV2 = norm([a - b for a, b in zip(v_end_l, v_target)])
DV_others = sum(x[5::4])
if self.obj_dim == 1:
return (DV1 + DV2 + DV_others,)
else:
return (DV1 + DV2 + DV_others, x[1])
def plot(self, x, axes=None):
"""
ax = prob.plot_trajectory(x, axes=None)
- x: encoded trajectory
- axes: matplotlib axis where to plot. If None figure and axis will be created
- [out] ax: matplotlib axis where to plot
Plots the trajectory represented by a decision vector x on the 3d axis ax
Example::
ax = prob.plot(x)
"""
import matplotlib as mpl
from mpl_toolkits.mplot3d import Axes3D
import matplotlib.pyplot as plt
from pykep.orbit_plots import plot_planet, plot_lambert, plot_kepler
if axes is None:
mpl.rcParams['legend.fontsize'] = 10
fig = plt.figure()
axes = fig.gca(projection='3d')
axes.scatter(0, 0, 0, color='y')
# 1 - we 'decode' the chromosome recording the various deep space
# maneuvers timing (days) in the list T
T = list([0] * (self.N_max - 1))
for i in range(len(T)):
T[i] = log(x[2 + 4 * i])
total = sum(T)
T = [x[1] * time / total for time in T]
# 2 - We compute the starting and ending position
r_start, v_start = self.start.eph(epoch(x[0]))
if self.phase_free:
r_target, v_target = self.target.eph(epoch(x[-1]))
else:
r_target, v_target = self.target.eph(epoch(x[0] + x[1]))
plot_planet(self.start, t0=epoch(x[0]), color=(0.8, 0.6, 0.8), legend=True, units=AU, axes=axes, s=0)
plot_planet(self.target, t0=epoch(x[0] + x[1]), color=(0.8, 0.6, 0.8), legend=True, units=AU, axes=axes, s=0)
DV_list = x[5::4]
maxDV = max(DV_list)
DV_list = [s / maxDV * 30 for s in DV_list]
colors = ['b', 'r'] * (len(DV_list) + 1)
# 3 - We loop across inner impulses
rsc = r_start
vsc = v_start
for i, time in enumerate(T[:-1]):
theta = 2 * pi * x[3 + 4 * i]
phi = acos(2 * x[4 + 4 * i] - 1) - pi / 2
Vinfx = x[5 + 4 * i] * cos(phi) * cos(theta)
Vinfy = x[5 + 4 * i] * cos(phi) * sin(theta)
Vinfz = x[5 + 4 * i] * sin(phi)
# We apply the (i+1)-th impulse
vsc = [a + b for a, b in zip(vsc, [Vinfx, Vinfy, Vinfz])]
axes.scatter(rsc[0] / AU, rsc[1] / AU, rsc[2] /
AU, color='k', s=DV_list[i])
plot_kepler(rsc, vsc, T[i] * DAY2SEC, self.__common_mu,
N=200, color=colors[i], units=AU, axes=axes)
rsc, vsc = propagate_lagrangian(
rsc, vsc, T[i] * DAY2SEC, self.__common_mu)
cw = (ic2par(rsc, vsc, self.start.mu_central_body)[2] > pi / 2)
# We now compute the remaining two final impulses
# Lambert arc to reach seq[1]
dt = T[-1] * DAY2SEC
l = lambert_problem(rsc, r_target, dt, self.__common_mu, cw, False)
plot_lambert(l, sol=0, color=colors[
i + 1], legend=False, units=AU, axes=axes, N=200)
v_end_l = l.get_v2()[0]
v_beg_l = l.get_v1()[0]
DV1 = norm([a - b for a, b in zip(v_beg_l, vsc)])
DV2 = norm([a - b for a, b in zip(v_end_l, v_target)])
axes.scatter(rsc[0] / AU, rsc[1] / AU, rsc[2] / AU,
color='k', s=min(DV1 / maxDV * 30, 40))
axes.scatter(r_target[0] / AU, r_target[1] / AU,
r_target[2] / AU, color='k', s=min(DV2 / maxDV * 30, 40))
return axes
def pretty(self, x):
# 1 - we 'decode' the chromosome recording the various deep space
# maneuvers timing (days) in the list T
T = list([0] * (self.N_max - 1))
for i in range(len(T)):
T[i] = log(x[2 + 4 * i])
total = sum(T)
T = [x[1] * time / total for time in T]
# 2 - We compute the starting and ending position
r_start, v_start = self.start.eph(epoch(x[0]))
if self.phase_free:
r_target, v_target = self.target.eph(epoch(x[-1]))
else:
r_target, v_target = self.target.eph(epoch(x[0] + x[1]))
# 3 - We loop across inner impulses
rsc = r_start
vsc = v_start
for i, time in enumerate(T[:-1]):
theta = 2 * pi * x[3 + 4 * i]
phi = acos(2 * x[4 + 4 * i] - 1) - pi / 2
Vinfx = x[5 + 4 * i] * cos(phi) * cos(theta)
Vinfy = x[5 + 4 * i] * cos(phi) * sin(theta)
Vinfz = x[5 + 4 * i] * sin(phi)
# We apply the (i+1)-th impulse
vsc = [a + b for a, b in zip(vsc, [Vinfx, Vinfy, Vinfz])]
rsc, vsc = propagate_lagrangian(
rsc, vsc, T[i] * DAY2SEC, self.__common_mu)
cw = (ic2par(rsc, vsc, self.start.mu_central_body)[2] > pi / 2)
# We now compute the remaining two final impulses
# Lambert arc to reach seq[1]
dt = T[-1] * DAY2SEC
l = lambert_problem(rsc, r_target, dt, self.__common_mu, cw, False)
v_end_l = l.get_v2()[0]
v_beg_l = l.get_v1()[0]
DV1 = norm([a - b for a, b in zip(v_beg_l, vsc)])
DV2 = norm([a - b for a, b in zip(v_end_l, v_target)])
DV_others = list(x[5::4])
DV_others.extend([DV1, DV2])
print("Total DV (m/s): ", sum(DV_others))
print("Dvs (m/s): ", DV_others)
print("Tofs (days): ", T)
| gpl-3.0 |
ilo10/scikit-learn | sklearn/utils/tests/test_sparsefuncs.py | 157 | 13799 | import numpy as np
import scipy.sparse as sp
from scipy import linalg
from numpy.testing import assert_array_almost_equal, assert_array_equal
from sklearn.datasets import make_classification
from sklearn.utils.sparsefuncs import (mean_variance_axis,
inplace_column_scale,
inplace_row_scale,
inplace_swap_row, inplace_swap_column,
min_max_axis,
count_nonzero, csc_median_axis_0)
from sklearn.utils.sparsefuncs_fast import assign_rows_csr
from sklearn.utils.testing import assert_raises
def test_mean_variance_axis0():
X, _ = make_classification(5, 4, random_state=0)
# Sparsify the array a little bit
X[0, 0] = 0
X[2, 1] = 0
X[4, 3] = 0
X_lil = sp.lil_matrix(X)
X_lil[1, 0] = 0
X[1, 0] = 0
X_csr = sp.csr_matrix(X_lil)
X_means, X_vars = mean_variance_axis(X_csr, axis=0)
assert_array_almost_equal(X_means, np.mean(X, axis=0))
assert_array_almost_equal(X_vars, np.var(X, axis=0))
X_csc = sp.csc_matrix(X_lil)
X_means, X_vars = mean_variance_axis(X_csc, axis=0)
assert_array_almost_equal(X_means, np.mean(X, axis=0))
assert_array_almost_equal(X_vars, np.var(X, axis=0))
assert_raises(TypeError, mean_variance_axis, X_lil, axis=0)
X = X.astype(np.float32)
X_csr = X_csr.astype(np.float32)
    X_csc = X_csc.astype(np.float32)
X_means, X_vars = mean_variance_axis(X_csr, axis=0)
assert_array_almost_equal(X_means, np.mean(X, axis=0))
assert_array_almost_equal(X_vars, np.var(X, axis=0))
X_means, X_vars = mean_variance_axis(X_csc, axis=0)
assert_array_almost_equal(X_means, np.mean(X, axis=0))
assert_array_almost_equal(X_vars, np.var(X, axis=0))
assert_raises(TypeError, mean_variance_axis, X_lil, axis=0)
def test_mean_variance_illegal_axis():
X, _ = make_classification(5, 4, random_state=0)
# Sparsify the array a little bit
X[0, 0] = 0
X[2, 1] = 0
X[4, 3] = 0
X_csr = sp.csr_matrix(X)
assert_raises(ValueError, mean_variance_axis, X_csr, axis=-3)
assert_raises(ValueError, mean_variance_axis, X_csr, axis=2)
assert_raises(ValueError, mean_variance_axis, X_csr, axis=-1)
def test_mean_variance_axis1():
X, _ = make_classification(5, 4, random_state=0)
# Sparsify the array a little bit
X[0, 0] = 0
X[2, 1] = 0
X[4, 3] = 0
X_lil = sp.lil_matrix(X)
X_lil[1, 0] = 0
X[1, 0] = 0
X_csr = sp.csr_matrix(X_lil)
X_means, X_vars = mean_variance_axis(X_csr, axis=1)
assert_array_almost_equal(X_means, np.mean(X, axis=1))
assert_array_almost_equal(X_vars, np.var(X, axis=1))
X_csc = sp.csc_matrix(X_lil)
X_means, X_vars = mean_variance_axis(X_csc, axis=1)
assert_array_almost_equal(X_means, np.mean(X, axis=1))
assert_array_almost_equal(X_vars, np.var(X, axis=1))
assert_raises(TypeError, mean_variance_axis, X_lil, axis=1)
X = X.astype(np.float32)
X_csr = X_csr.astype(np.float32)
X_csc = X_csc.astype(np.float32)
X_means, X_vars = mean_variance_axis(X_csr, axis=1)
assert_array_almost_equal(X_means, np.mean(X, axis=1))
assert_array_almost_equal(X_vars, np.var(X, axis=1))
X_means, X_vars = mean_variance_axis(X_csc, axis=1)
assert_array_almost_equal(X_means, np.mean(X, axis=1))
assert_array_almost_equal(X_vars, np.var(X, axis=1))
assert_raises(TypeError, mean_variance_axis, X_lil, axis=1)
def test_densify_rows():
X = sp.csr_matrix([[0, 3, 0],
[2, 4, 0],
[0, 0, 0],
[9, 8, 7],
[4, 0, 5]], dtype=np.float64)
X_rows = np.array([0, 2, 3], dtype=np.intp)
out = np.ones((6, X.shape[1]), dtype=np.float64)
out_rows = np.array([1, 3, 4], dtype=np.intp)
expect = np.ones_like(out)
expect[out_rows] = X[X_rows, :].toarray()
assign_rows_csr(X, X_rows, out_rows, out)
assert_array_equal(out, expect)
def test_inplace_column_scale():
rng = np.random.RandomState(0)
X = sp.rand(100, 200, 0.05)
Xr = X.tocsr()
Xc = X.tocsc()
XA = X.toarray()
scale = rng.rand(200)
XA *= scale
inplace_column_scale(Xc, scale)
inplace_column_scale(Xr, scale)
assert_array_almost_equal(Xr.toarray(), Xc.toarray())
assert_array_almost_equal(XA, Xc.toarray())
assert_array_almost_equal(XA, Xr.toarray())
assert_raises(TypeError, inplace_column_scale, X.tolil(), scale)
X = X.astype(np.float32)
scale = scale.astype(np.float32)
Xr = X.tocsr()
Xc = X.tocsc()
XA = X.toarray()
XA *= scale
inplace_column_scale(Xc, scale)
inplace_column_scale(Xr, scale)
assert_array_almost_equal(Xr.toarray(), Xc.toarray())
assert_array_almost_equal(XA, Xc.toarray())
assert_array_almost_equal(XA, Xr.toarray())
assert_raises(TypeError, inplace_column_scale, X.tolil(), scale)
def test_inplace_row_scale():
rng = np.random.RandomState(0)
X = sp.rand(100, 200, 0.05)
Xr = X.tocsr()
Xc = X.tocsc()
XA = X.toarray()
scale = rng.rand(100)
XA *= scale.reshape(-1, 1)
inplace_row_scale(Xc, scale)
inplace_row_scale(Xr, scale)
assert_array_almost_equal(Xr.toarray(), Xc.toarray())
assert_array_almost_equal(XA, Xc.toarray())
assert_array_almost_equal(XA, Xr.toarray())
assert_raises(TypeError, inplace_row_scale, X.tolil(), scale)
X = X.astype(np.float32)
scale = scale.astype(np.float32)
Xr = X.tocsr()
Xc = X.tocsc()
XA = X.toarray()
XA *= scale.reshape(-1, 1)
inplace_row_scale(Xc, scale)
inplace_row_scale(Xr, scale)
assert_array_almost_equal(Xr.toarray(), Xc.toarray())
assert_array_almost_equal(XA, Xc.toarray())
assert_array_almost_equal(XA, Xr.toarray())
assert_raises(TypeError, inplace_row_scale, X.tolil(), scale)
def test_inplace_swap_row():
X = np.array([[0, 3, 0],
[2, 4, 0],
[0, 0, 0],
[9, 8, 7],
[4, 0, 5]], dtype=np.float64)
X_csr = sp.csr_matrix(X)
X_csc = sp.csc_matrix(X)
swap = linalg.get_blas_funcs(('swap',), (X,))
swap = swap[0]
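# get_blas_funcs returns the dtype-matched BLAS 'swap' routine, which
# exchanges two vectors in place; it is used below to build the expected
# dense result of each row swap.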
X[0], X[-1] = swap(X[0], X[-1])
inplace_swap_row(X_csr, 0, -1)
inplace_swap_row(X_csc, 0, -1)
assert_array_equal(X_csr.toarray(), X_csc.toarray())
assert_array_equal(X, X_csc.toarray())
assert_array_equal(X, X_csr.toarray())
X[2], X[3] = swap(X[2], X[3])
inplace_swap_row(X_csr, 2, 3)
inplace_swap_row(X_csc, 2, 3)
assert_array_equal(X_csr.toarray(), X_csc.toarray())
assert_array_equal(X, X_csc.toarray())
assert_array_equal(X, X_csr.toarray())
assert_raises(TypeError, inplace_swap_row, X_csr.tolil(), 0, 1)
X = np.array([[0, 3, 0],
[2, 4, 0],
[0, 0, 0],
[9, 8, 7],
[4, 0, 5]], dtype=np.float32)
X_csr = sp.csr_matrix(X)
X_csc = sp.csc_matrix(X)
swap = linalg.get_blas_funcs(('swap',), (X,))
swap = swap[0]
X[0], X[-1] = swap(X[0], X[-1])
inplace_swap_row(X_csr, 0, -1)
inplace_swap_row(X_csc, 0, -1)
assert_array_equal(X_csr.toarray(), X_csc.toarray())
assert_array_equal(X, X_csc.toarray())
assert_array_equal(X, X_csr.toarray())
X[2], X[3] = swap(X[2], X[3])
inplace_swap_row(X_csr, 2, 3)
inplace_swap_row(X_csc, 2, 3)
assert_array_equal(X_csr.toarray(), X_csc.toarray())
assert_array_equal(X, X_csc.toarray())
assert_array_equal(X, X_csr.toarray())
assert_raises(TypeError, inplace_swap_row, X_csr.tolil(), 0, 1)
def test_inplace_swap_column():
X = np.array([[0, 3, 0],
[2, 4, 0],
[0, 0, 0],
[9, 8, 7],
[4, 0, 5]], dtype=np.float64)
X_csr = sp.csr_matrix(X)
X_csc = sp.csc_matrix(X)
swap = linalg.get_blas_funcs(('swap',), (X,))
swap = swap[0]
X[:, 0], X[:, -1] = swap(X[:, 0], X[:, -1])
inplace_swap_column(X_csr, 0, -1)
inplace_swap_column(X_csc, 0, -1)
assert_array_equal(X_csr.toarray(), X_csc.toarray())
assert_array_equal(X, X_csc.toarray())
assert_array_equal(X, X_csr.toarray())
X[:, 0], X[:, 1] = swap(X[:, 0], X[:, 1])
inplace_swap_column(X_csr, 0, 1)
inplace_swap_column(X_csc, 0, 1)
assert_array_equal(X_csr.toarray(), X_csc.toarray())
assert_array_equal(X, X_csc.toarray())
assert_array_equal(X, X_csr.toarray())
assert_raises(TypeError, inplace_swap_column, X_csr.tolil(), 0, 1)
X = np.array([[0, 3, 0],
[2, 4, 0],
[0, 0, 0],
[9, 8, 7],
[4, 0, 5]], dtype=np.float32)
X_csr = sp.csr_matrix(X)
X_csc = sp.csc_matrix(X)
swap = linalg.get_blas_funcs(('swap',), (X,))
swap = swap[0]
X[:, 0], X[:, -1] = swap(X[:, 0], X[:, -1])
inplace_swap_column(X_csr, 0, -1)
inplace_swap_column(X_csc, 0, -1)
assert_array_equal(X_csr.toarray(), X_csc.toarray())
assert_array_equal(X, X_csc.toarray())
assert_array_equal(X, X_csr.toarray())
X[:, 0], X[:, 1] = swap(X[:, 0], X[:, 1])
inplace_swap_column(X_csr, 0, 1)
inplace_swap_column(X_csc, 0, 1)
assert_array_equal(X_csr.toarray(), X_csc.toarray())
assert_array_equal(X, X_csc.toarray())
assert_array_equal(X, X_csr.toarray())
assert_raises(TypeError, inplace_swap_column, X_csr.tolil(), 0, 1)
def test_min_max_axis0():
X = np.array([[0, 3, 0],
[2, -1, 0],
[0, 0, 0],
[9, 8, 7],
[4, 0, 5]], dtype=np.float64)
X_csr = sp.csr_matrix(X)
X_csc = sp.csc_matrix(X)
mins_csr, maxs_csr = min_max_axis(X_csr, axis=0)
assert_array_equal(mins_csr, X.min(axis=0))
assert_array_equal(maxs_csr, X.max(axis=0))
mins_csc, maxs_csc = min_max_axis(X_csc, axis=0)
assert_array_equal(mins_csc, X.min(axis=0))
assert_array_equal(maxs_csc, X.max(axis=0))
X = X.astype(np.float32)
X_csr = sp.csr_matrix(X)
X_csc = sp.csc_matrix(X)
mins_csr, maxs_csr = min_max_axis(X_csr, axis=0)
assert_array_equal(mins_csr, X.min(axis=0))
assert_array_equal(maxs_csr, X.max(axis=0))
mins_csc, maxs_csc = min_max_axis(X_csc, axis=0)
assert_array_equal(mins_csc, X.min(axis=0))
assert_array_equal(maxs_csc, X.max(axis=0))
def test_min_max_axis1():
X = np.array([[0, 3, 0],
[2, -1, 0],
[0, 0, 0],
[9, 8, 7],
[4, 0, 5]], dtype=np.float64)
X_csr = sp.csr_matrix(X)
X_csc = sp.csc_matrix(X)
mins_csr, maxs_csr = min_max_axis(X_csr, axis=1)
assert_array_equal(mins_csr, X.min(axis=1))
assert_array_equal(maxs_csr, X.max(axis=1))
mins_csc, maxs_csc = min_max_axis(X_csc, axis=1)
assert_array_equal(mins_csc, X.min(axis=1))
assert_array_equal(maxs_csc, X.max(axis=1))
X = X.astype(np.float32)
X_csr = sp.csr_matrix(X)
X_csc = sp.csc_matrix(X)
mins_csr, maxs_csr = min_max_axis(X_csr, axis=1)
assert_array_equal(mins_csr, X.min(axis=1))
assert_array_equal(maxs_csr, X.max(axis=1))
mins_csc, maxs_csc = min_max_axis(X_csc, axis=1)
assert_array_equal(mins_csc, X.min(axis=1))
assert_array_equal(maxs_csc, X.max(axis=1))
def test_min_max_axis_errors():
X = np.array([[0, 3, 0],
[2, -1, 0],
[0, 0, 0],
[9, 8, 7],
[4, 0, 5]], dtype=np.float64)
X_csr = sp.csr_matrix(X)
X_csc = sp.csc_matrix(X)
assert_raises(TypeError, min_max_axis, X_csr.tolil(), axis=0)
assert_raises(ValueError, min_max_axis, X_csr, axis=2)
assert_raises(ValueError, min_max_axis, X_csc, axis=-3)
def test_count_nonzero():
X = np.array([[0, 3, 0],
[2, -1, 0],
[0, 0, 0],
[9, 8, 7],
[4, 0, 5]], dtype=np.float64)
X_csr = sp.csr_matrix(X)
X_csc = sp.csc_matrix(X)
X_nonzero = X != 0
sample_weight = [.5, .2, .3, .1, .1]
X_nonzero_weighted = X_nonzero * np.array(sample_weight)[:, None]
for axis in [0, 1, -1, -2, None]:
assert_array_almost_equal(count_nonzero(X_csr, axis=axis),
X_nonzero.sum(axis=axis))
assert_array_almost_equal(count_nonzero(X_csr, axis=axis,
sample_weight=sample_weight),
X_nonzero_weighted.sum(axis=axis))
assert_raises(TypeError, count_nonzero, X_csc)
assert_raises(ValueError, count_nonzero, X_csr, axis=2)
def test_csc_row_median():
# Test csc_row_median actually calculates the median.
# Test that it gives the same output when X is dense.
rng = np.random.RandomState(0)
X = rng.rand(100, 50)
dense_median = np.median(X, axis=0)
csc = sp.csc_matrix(X)
sparse_median = csc_median_axis_0(csc)
assert_array_equal(sparse_median, dense_median)
# Test that it gives the same output when X is sparse
X = rng.rand(51, 100)
X[X < 0.7] = 0.0
ind = rng.randint(0, 50, 10)
X[ind] = -X[ind]
csc = sp.csc_matrix(X)
dense_median = np.median(X, axis=0)
sparse_median = csc_median_axis_0(csc)
assert_array_equal(sparse_median, dense_median)
# Test for toy data.
X = [[0, -2], [-1, -1], [1, 0], [2, 1]]
csc = sp.csc_matrix(X)
assert_array_equal(csc_median_axis_0(csc), np.array([0.5, -0.5]))
X = [[0, -2], [-1, -5], [1, -3]]
csc = sp.csc_matrix(X)
assert_array_equal(csc_median_axis_0(csc), np.array([0., -3]))
# Test that it raises an Error for non-csc matrices.
assert_raises(TypeError, csc_median_axis_0, sp.csr_matrix(X))
| bsd-3-clause |
zuku1985/scikit-learn | examples/linear_model/plot_ols_ridge_variance.py | 387 | 2060 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=========================================================
Ordinary Least Squares and Ridge Regression Variance
=========================================================
Due to the few points in each dimension and the straight
line that linear regression uses to follow these points
as well as it can, noise on the observations will cause
great variance as shown in the first plot. Every line's slope
can vary quite a bit for each prediction due to the noise
induced in the observations.
Ridge regression is basically minimizing a penalised version
of the least-squares function. The penalising `shrinks` the
value of the regression coefficients.
Despite the few data points in each dimension, the slope
of the prediction is much more stable and the variance
in the line itself is greatly reduced, in comparison to that
of the standard linear regression.
"""
print(__doc__)
# Code source: Gaël Varoquaux
# Modified for documentation by Jaques Grobler
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn import linear_model
X_train = np.c_[.5, 1].T
y_train = [.5, 1]
X_test = np.c_[0, 2].T
np.random.seed(0)
classifiers = dict(ols=linear_model.LinearRegression(),
ridge=linear_model.Ridge(alpha=.1))
fignum = 1
for name, clf in classifiers.items():
fig = plt.figure(fignum, figsize=(4, 3))
plt.clf()
plt.title(name)
ax = plt.axes([.12, .12, .8, .8])
for _ in range(6):
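# Each of the six fits sees a noisy perturbation of the two training
# points; every resulting prediction is drawn as one grey line.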
this_X = .1 * np.random.normal(size=(2, 1)) + X_train
clf.fit(this_X, y_train)
ax.plot(X_test, clf.predict(X_test), color='.5')
ax.scatter(this_X, y_train, s=3, c='.5', marker='o', zorder=10)
clf.fit(X_train, y_train)
ax.plot(X_test, clf.predict(X_test), linewidth=2, color='blue')
ax.scatter(X_train, y_train, s=30, c='r', marker='+', zorder=10)
ax.set_xticks(())
ax.set_yticks(())
ax.set_ylim((0, 1.6))
ax.set_xlabel('X')
ax.set_ylabel('y')
ax.set_xlim(0, 2)
fignum += 1
plt.show()
| bsd-3-clause |
sposs/DIRAC | Core/Utilities/Graphs/BarGraph.py | 10 | 6337 | ########################################################################
# $HeadURL$
########################################################################
""" BarGraph represents bar graphs with vertical bars both simple
and stacked.
The DIRAC Graphs package is derived from the GraphTool plotting package of the
CMS/Phedex Project by ... <to be added>
"""
__RCSID__ = "$Id$"
from DIRAC.Core.Utilities.Graphs.PlotBase import PlotBase
from DIRAC.Core.Utilities.Graphs.GraphUtilities import to_timestamp, pixelToPoint, \
PrettyDateLocator, PrettyDateFormatter, \
PrettyScalarFormatter
from pylab import setp
from matplotlib.patches import Polygon
from matplotlib.dates import date2num
import datetime
class BarGraph( PlotBase ):
"""
The BarGraph class is a straightforward bar graph; given a dictionary
of values, it takes the keys as the independent variable and the values
as the dependent variable.
"""
def __init__(self,data,ax,prefs,*args,**kw):
PlotBase.__init__(self,data,ax,prefs,*args,**kw)
if self.prefs.has_key('span'):
self.width = self.prefs['span']
else:
self.width = 1.0
if self.gdata.key_type == "time":
# Try to guess the time bin span
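# (this assumes evenly spaced keys: the full span covers nKeys - 1
# intervals, so dividing by nKeys - 1 yields the width of one bin)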
nKeys = self.gdata.getNumberOfKeys()
self.width = (max(self.gdata.all_keys)-min(self.gdata.all_keys))/(nKeys-1)
def draw( self ):
PlotBase.draw(self)
self.x_formatter_cb(self.ax)
if self.gdata.isEmpty():
return None
tmp_x = []; tmp_y = []
# Evaluate the bar width
width = float(self.width)
if self.gdata.key_type == 'time':
#width = (1 - self.bar_graph_space) * width / 86400.0
width = width / 86400.0
offset = 0
elif self.gdata.key_type == 'string':
self.bar_graph_space = 0.1
width = (1 - self.bar_graph_space) * width
offset = self.bar_graph_space / 2.0
else:
offset = 0
start_plot = 0
end_plot = 0
if "starttime" in self.prefs and "endtime" in self.prefs:
start_plot = date2num( datetime.datetime.fromtimestamp(to_timestamp(self.prefs['starttime'])))
end_plot = date2num( datetime.datetime.fromtimestamp(to_timestamp(self.prefs['endtime'])))
nKeys = self.gdata.getNumberOfKeys()
tmp_b = []
if self.prefs.has_key('log_yaxis'):
tmp_b = [0.001]*nKeys
ymin = 0.001
else:
tmp_b = [0.]*nKeys
ymin = 0.
self.polygons = []
self.lines = []
labels = self.gdata.getLabels()
labels.reverse()
# If it is a simple plot, no labels are used
# Evaluate the most appropriate color in this case
if self.gdata.isSimplePlot():
labels = [('SimplePlot',0.)]
color = self.prefs.get('plot_color','Default')
if color.find('#') != -1:
self.palette.setColor('SimplePlot',color)
else:
labels = [(color,0.)]
seq_b = [(self.gdata.max_num_key+width,0.0),(self.gdata.min_num_key,0.0)]
zorder = 0.0
dpi = self.prefs.get('dpi',100)
for label,num in labels:
color = self.palette.getColor(label)
ind = 0
tmp_x = []
tmp_y = []
tmp_t = []
plot_data = self.gdata.getPlotNumData(label)
for key, value, error in plot_data:
if value is None:
value = 0.
tmp_x.append( offset+key )
#tmp_y.append( ymin )
tmp_y.append( 0.001 )
tmp_x.append( offset+key )
tmp_y.append( float(value)+tmp_b[ind] )
tmp_x.append( offset+key+width )
tmp_y.append( float(value)+tmp_b[ind] )
tmp_x.append( offset+key+width )
#tmp_y.append( ymin )
tmp_y.append( 0.001 )
tmp_t.append(float(value)+tmp_b[ind])
ind += 1
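# Close the bar outline traced in (tmp_x, tmp_y) with the two baseline
# corners in seq_b, producing one polygon per label; decreasing zorder
# draws each successive label behind the previous one.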
seq_t = zip(tmp_x,tmp_y)
seq = seq_t+seq_b
poly = Polygon( seq, facecolor=color, fill=True,
linewidth=pixelToPoint(0.2,dpi),
zorder=zorder)
self.ax.add_patch( poly )
self.polygons.append( poly )
tmp_b = list(tmp_t)
zorder -= 0.1
tight_bars_flag = self.prefs.get('tight_bars',False)
if tight_bars_flag:
setp( self.polygons, linewidth=0. )
#pivots = keys
#for idx in range(len(pivots)):
# self.coords[ pivots[idx] ] = self.bars[idx]
ymax = max(tmp_b)
ymax *= 1.1
if self.prefs.has_key('log_yaxis'):
ymin = 0.001
else:
ymin = min( min(tmp_b), 0. )
ymin *= 1.1
xmax=max(tmp_x)
if self.log_xaxis:
xmin = 0.001
else:
xmin = 0
ymin = self.prefs.get( 'ymin', ymin )
ymax = self.prefs.get( 'ymax', ymax )
xmin = self.prefs.get( 'xmin', xmin )
xmax = self.prefs.get( 'xmax', xmax )
self.ax.set_xlim( xmin=xmin, xmax=xmax+offset )
self.ax.set_ylim( ymin=ymin, ymax=ymax )
if self.gdata.key_type == 'time':
if start_plot and end_plot:
self.ax.set_xlim( xmin=start_plot, xmax=end_plot)
else:
self.ax.set_xlim( xmin=min(tmp_x), xmax=max(tmp_x))
def x_formatter_cb( self, ax ):
if self.gdata.key_type == "string":
smap = self.gdata.getStringMap()
reverse_smap = {}
for key, val in smap.items():
reverse_smap[val] = key
ticks = smap.values()
ticks.sort()
ax.set_xticks( [i+.5 for i in ticks] )
ax.set_xticklabels( [reverse_smap[i] for i in ticks] )
labels = ax.get_xticklabels()
ax.grid( False )
if self.log_xaxis:
xmin = 0.001
else:
xmin = 0
ax.set_xlim( xmin=xmin,xmax=len(ticks) )
elif self.gdata.key_type == "time":
#ax.set_xlim( xmin=self.begin_num,xmax=self.end_num )
dl = PrettyDateLocator()
df = PrettyDateFormatter( dl )
ax.xaxis.set_major_locator( dl )
ax.xaxis.set_major_formatter( df )
ax.xaxis.set_clip_on(False)
sf = PrettyScalarFormatter( )
ax.yaxis.set_major_formatter( sf )
#labels = ax.get_xticklabels()
else:
try:
super(BarGraph, self).x_formatter_cb( ax )
except:
return None
| gpl-3.0 |
nitikayad96/chandra_suli | chandra_suli/check_hot_pixel_revised.py | 1 | 8674 | #!/usr/bin/env python
"""
Check to see if candidate transients are actually hot pixels
"""
import argparse
import glob
import os
import sys
import astropy.io.fits as pyfits
import numpy as np
from sklearn.cluster import DBSCAN
from sklearn.metrics.pairwise import euclidean_distances
from chandra_suli import logging_system
from chandra_suli.run_command import CommandRunner
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="Check to see if transient candidates are actually hot pixels")
parser.add_argument("--obsid", help="Observation ID Numbers", type=int, required=True)
parser.add_argument("--evtfile", help="Filtered CCD file", required=True)
parser.add_argument("--bbfile", help="Text file for one CCD with transient candidates listed", required=True)
parser.add_argument("--outfile", help="Name of output text file file", required=True)
parser.add_argument("--debug", help="Debug mode? (yes or no)", required=True)
# Get logger for this command
logger = logging_system.get_logger(os.path.basename(sys.argv[0]))
# Instance the command runner
runner = CommandRunner(logger)
args = parser.parse_args()
# Find region files for each candidate transient
evtfile = os.path.abspath(os.path.expandvars(os.path.expanduser(args.evtfile)))
bbfile = os.path.abspath(os.path.expandvars(os.path.expanduser(args.bbfile)))
# read BB data into array
bb_data = np.array(np.recfromtxt(bbfile, names=True), ndmin=1)
evt_file_name = os.path.splitext(os.path.basename(evtfile))[0]
reg_files = glob.glob('%s_candidate*reg' % evt_file_name)
# make sure files are sorted
def extract_number(s):
return int(os.path.splitext(s)[0].split("_")[-1])
reg_files_sorted = sorted(reg_files, key=extract_number)
# Find CCD number based on file name
names = os.path.splitext(evt_file_name)[0].split("_")
idx = names.index("ccd")
ccd_num = names[idx + 1]
def neighbor_pixel_check(cluster_coords):
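# Flag a cluster as a hot pixel only if every pair of events lies within
# 2 chip pixels of each other, i.e. the whole cluster is compact.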
hot_pix_flag = True
all_distances = euclidean_distances(cluster_coords, cluster_coords)
if args.debug == "yes":
print all_distances
for distances in all_distances:
for distance in distances:
if distance >= 2:
hot_pix_flag = False
break
if not hot_pix_flag:
break
return hot_pix_flag
with open(args.outfile, "w") as f:
# Pre-existing column names
existing_column_names = " ".join(bb_data.dtype.names)
f.write("# Candidate Obsid CCD %s Duration N_events Hot_Pixel_Flag\n" % existing_column_names)
for n, reg_file in enumerate(reg_files_sorted):
tstart = bb_data['Tstart'][n]
tstop = bb_data['Tstop'][n]
temp_reg_file = "temp_reg_%s.fits" % (n + 1)
# Make temporary fits region file with region determined by xtdac
cmd_line = 'ftcopy \"%s[EVENTS][regfilter(\'%s\') && (TIME >= %s) && (TIME <= %s)]\" %s clobber=yes ' \
% (evtfile, reg_file, tstart, tstop, temp_reg_file)
runner.run(cmd_line)
# Initialize hot_pix_flag to False
hot_pix_flag = False
# Open temporary region file to get chip coordinates
with pyfits.open(temp_reg_file, memmap=False) as reg:
chipx = reg['EVENTS'].data.chipx
chipy = reg['EVENTS'].data.chipy
# Count number of events in the region in this time interval
n_events = chipx.shape[0]
if len(chipx) == 0:
print "\n\n%s had no events between %s and %s!\n\n" % (reg_file, tstart, tstop)
elif len(chipx) < 15:
# Organize into list of coordinate pairs
coords = []
for i in range(len(chipx)):
temp = []
temp.append(chipx[i])
temp.append(chipy[i])
coords.append(temp)
# Run DBSCAN
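# eps=2 puts events within 2 chip pixels in the same cluster and
# min_samples=2 means a point needs one neighbour to be a core point;
# DBSCAN labels outliers as -1.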
db = DBSCAN(eps=2, min_samples=2).fit(coords)
l = db.labels_
csi = db.core_sample_indices_
c = db.components_
if args.debug == "yes":
print coords
print l
# Run this if more than one cluster found
if len(np.unique(l)) > 1:
labels_unique = np.unique(l)
if args.debug == "yes":
print labels_unique
if labels_unique[0] == -1:
labels_unique = labels_unique[1::]
if args.debug == "yes":
print labels_unique
hot_pix_flags = []
org_data_idx = []
# Create a list of indices corresponding to each label
# [[indices of items in cluster 0],[indices of items in cluster 1],...]
for i in labels_unique:
temp = []
for a, b in enumerate(l):
if b == i:
temp.append(a)
org_data_idx.append(temp)
# Get x and y coordinates for each label
for i in range(len(org_data_idx)):
x = []
y = []
cluster_coords = []
for idx in org_data_idx[i]:
cluster_coords.append(coords[idx])
x.append(coords[idx][0])
y.append(coords[idx][1])
if args.debug == "yes":
print cluster_coords
npx = np.array(x)
npy = np.array(y)
npxl = len(np.unique(npx))
npyl = len(np.unique(npy))
# Check if coordinates are all the same and adjust hot_pix_flag accordingly
if npxl == 1 and npyl == 1:
hot_pix_flags.append(True)
# If not, check for neighboring coordinates
else:
hot_pix_flag = neighbor_pixel_check(cluster_coords)
hot_pix_flags.append(hot_pix_flag)
# If every label is flagged as a hot pixel, flag transient as hot pixel
hot_pix_flags = np.array(hot_pix_flags)
flags = np.unique(hot_pix_flags)
if len(flags) == 1 and flags[0]:
hot_pix_flag = True
if args.debug == "yes":
print hot_pix_flags
print hot_pix_flag
# If only one cluster is found
else:
x = []
y = []
for i in range(len(coords)):
x.append(coords[i][0])
y.append(coords[i][1])
npxl = len(np.unique(np.array(x)))
npyl = len(np.unique(np.array(y)))
# Check that all coordinates in cluster are the same
if npxl == 1 and npyl == 1:
hot_pix_flag = True
# If all coordinates are not same, check for neighboring pixels
if not hot_pix_flag:
hot_pix_flag = neighbor_pixel_check(coords)
if args.debug == "yes":
print hot_pix_flag
# Write to outfile
temp_list = []
temp_list.append(str(n + 1))
temp_list.append(str(args.obsid))
temp_list.append(str(ccd_num))
for j in range(len(bb_data.dtype.names)):
temp_list.append(str(bb_data[n][j]))
# Fill Duration column
temp_list.append("%.1f" % (tstop - tstart))
# Fill N_events column
temp_list.append("%i" % (n_events))
# Fill "Hot_Pixel_Flag" column
temp_list.append(str(hot_pix_flag))
line = " ".join(temp_list)
f.write("%s\n" % line)
if args.debug == "no":
os.remove(temp_reg_file)
if args.debug == "yes":
print "NOTE: Debug mode, temporary region files not deleted"
| bsd-3-clause |
ScienceStacks/SciSheets | mysite/scisheets/plugins/exportTableToExcel.py | 2 | 1394 | """
Exports an Excel file
"""
import openpyxl
import pandas as pd
def _exportDataframeToExcel(df, filepath, worksheet=None):
"""
Export the specified columns to the excel file.
The existing worksheet is overwritten.
:param str filepath: full path to the Excel file
:param pandas.DataFrame df:
:param str worksheet: worksheet to import. Default is first.
:raises IOError, ValueError:
"""
if worksheet is None:
worksheet = 'Sheet1'
book = openpyxl.load_workbook(filepath)
# Delete the sheet to write, if it exists
if worksheet in book.get_sheet_names():
ws = book.get_sheet_by_name(worksheet)
book.remove_sheet(ws)
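# Attach the already-loaded workbook to the writer so that to_excel
# writes the sheet into the existing file instead of creating a new one.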
writer = pd.ExcelWriter(filepath, engine='openpyxl')
writer.book = book
writer.sheets = dict((ws.title, ws) for ws in book.worksheets)
df.to_excel(writer, worksheet, index=False)
writer.save()
#TODO: Eliminate column_name since have an ExtendedArray
def exportTableToExcel(s, filepath, worksheet=None, columns=None):
"""
Export the specified columns of the Table to the excel file.
The existing worksheet is overwritten.
:param API s:
:param str filepath: full path to CSV file
:param str worksheet: worksheet to import. Default is first.
:param list-of-str columns: columns of columns added
:raises ValueError:
"""
df = s.tableToDataframe(colnms=columns)
_exportDataframeToExcel(df, filepath, worksheet=worksheet)
| apache-2.0 |
jlegendary/scikit-learn | sklearn/decomposition/tests/test_incremental_pca.py | 297 | 8265 | """Tests for Incremental PCA."""
import numpy as np
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_raises
from sklearn import datasets
from sklearn.decomposition import PCA, IncrementalPCA
iris = datasets.load_iris()
def test_incremental_pca():
# Incremental PCA on dense arrays.
X = iris.data
batch_size = X.shape[0] // 3
ipca = IncrementalPCA(n_components=2, batch_size=batch_size)
pca = PCA(n_components=2)
pca.fit_transform(X)
X_transformed = ipca.fit_transform(X)
np.testing.assert_equal(X_transformed.shape, (X.shape[0], 2))
assert_almost_equal(ipca.explained_variance_ratio_.sum(),
pca.explained_variance_ratio_.sum(), 1)
for n_components in [1, 2, X.shape[1]]:
ipca = IncrementalPCA(n_components, batch_size=batch_size)
ipca.fit(X)
cov = ipca.get_covariance()
precision = ipca.get_precision()
assert_array_almost_equal(np.dot(cov, precision),
np.eye(X.shape[1]))
def test_incremental_pca_check_projection():
# Test that the projection of data is correct.
rng = np.random.RandomState(1999)
n, p = 100, 3
X = rng.randn(n, p) * .1
X[:10] += np.array([3, 4, 5])
Xt = 0.1 * rng.randn(1, p) + np.array([3, 4, 5])
# Get the reconstruction of the generated data X
# Note that Xt has the same "components" as X, just separated
# This is what we want to ensure is recreated correctly
Yt = IncrementalPCA(n_components=2).fit(X).transform(Xt)
# Normalize
Yt /= np.sqrt((Yt ** 2).sum())
# Make sure that the first element of Yt is ~1, this means
# the reconstruction worked as expected
assert_almost_equal(np.abs(Yt[0][0]), 1., 1)
def test_incremental_pca_inverse():
# Test that the projection of data can be inverted.
rng = np.random.RandomState(1999)
n, p = 50, 3
X = rng.randn(n, p) # spherical data
X[:, 1] *= .00001 # make middle component relatively small
X += [5, 4, 3] # make a large mean
# same check that we can find the original data from the transformed
# signal (since the data is almost of rank n_components)
ipca = IncrementalPCA(n_components=2, batch_size=10).fit(X)
Y = ipca.transform(X)
Y_inverse = ipca.inverse_transform(Y)
assert_almost_equal(X, Y_inverse, decimal=3)
def test_incremental_pca_validation():
# Test that n_components is >=1 and <= n_features.
X = [[0, 1], [1, 0]]
for n_components in [-1, 0, .99, 3]:
assert_raises(ValueError, IncrementalPCA(n_components,
batch_size=10).fit, X)
def test_incremental_pca_set_params():
# Test that components_ sign is stable over batch sizes.
rng = np.random.RandomState(1999)
n_samples = 100
n_features = 20
X = rng.randn(n_samples, n_features)
X2 = rng.randn(n_samples, n_features)
X3 = rng.randn(n_samples, n_features)
ipca = IncrementalPCA(n_components=20)
ipca.fit(X)
# Decreasing number of components
ipca.set_params(n_components=10)
assert_raises(ValueError, ipca.partial_fit, X2)
# Increasing number of components
ipca.set_params(n_components=15)
assert_raises(ValueError, ipca.partial_fit, X3)
# Returning to original setting
ipca.set_params(n_components=20)
ipca.partial_fit(X)
def test_incremental_pca_num_features_change():
# Test that changing n_components will raise an error.
rng = np.random.RandomState(1999)
n_samples = 100
X = rng.randn(n_samples, 20)
X2 = rng.randn(n_samples, 50)
ipca = IncrementalPCA(n_components=None)
ipca.fit(X)
assert_raises(ValueError, ipca.partial_fit, X2)
def test_incremental_pca_batch_signs():
# Test that components_ sign is stable over batch sizes.
rng = np.random.RandomState(1999)
n_samples = 100
n_features = 3
X = rng.randn(n_samples, n_features)
all_components = []
batch_sizes = np.arange(10, 20)
for batch_size in batch_sizes:
ipca = IncrementalPCA(n_components=None, batch_size=batch_size).fit(X)
all_components.append(ipca.components_)
for i, j in zip(all_components[:-1], all_components[1:]):
assert_almost_equal(np.sign(i), np.sign(j), decimal=6)
def test_incremental_pca_batch_values():
# Test that components_ values are stable over batch sizes.
rng = np.random.RandomState(1999)
n_samples = 100
n_features = 3
X = rng.randn(n_samples, n_features)
all_components = []
batch_sizes = np.arange(20, 40, 3)
for batch_size in batch_sizes:
ipca = IncrementalPCA(n_components=None, batch_size=batch_size).fit(X)
all_components.append(ipca.components_)
for i, j in zip(all_components[:-1], all_components[1:]):
assert_almost_equal(i, j, decimal=1)
def test_incremental_pca_partial_fit():
# Test that fit and partial_fit get equivalent results.
rng = np.random.RandomState(1999)
n, p = 50, 3
X = rng.randn(n, p) # spherical data
X[:, 1] *= .00001 # make middle component relatively small
X += [5, 4, 3] # make a large mean
# same check that we can find the original data from the transformed
# signal (since the data is almost of rank n_components)
batch_size = 10
ipca = IncrementalPCA(n_components=2, batch_size=batch_size).fit(X)
pipca = IncrementalPCA(n_components=2, batch_size=batch_size)
# Add one to make sure endpoint is included
batch_itr = np.arange(0, n + 1, batch_size)
for i, j in zip(batch_itr[:-1], batch_itr[1:]):
pipca.partial_fit(X[i:j, :])
assert_almost_equal(ipca.components_, pipca.components_, decimal=3)
def test_incremental_pca_against_pca_iris():
# Test that IncrementalPCA and PCA are approximate (to a sign flip).
X = iris.data
Y_pca = PCA(n_components=2).fit_transform(X)
Y_ipca = IncrementalPCA(n_components=2, batch_size=25).fit_transform(X)
assert_almost_equal(np.abs(Y_pca), np.abs(Y_ipca), 1)
def test_incremental_pca_against_pca_random_data():
# Test that IncrementalPCA and PCA are approximate (to a sign flip).
rng = np.random.RandomState(1999)
n_samples = 100
n_features = 3
X = rng.randn(n_samples, n_features) + 5 * rng.rand(1, n_features)
Y_pca = PCA(n_components=3).fit_transform(X)
Y_ipca = IncrementalPCA(n_components=3, batch_size=25).fit_transform(X)
assert_almost_equal(np.abs(Y_pca), np.abs(Y_ipca), 1)
def test_explained_variances():
# Test that PCA and IncrementalPCA calculations match
X = datasets.make_low_rank_matrix(1000, 100, tail_strength=0.,
effective_rank=10, random_state=1999)
prec = 3
n_samples, n_features = X.shape
for nc in [None, 99]:
pca = PCA(n_components=nc).fit(X)
ipca = IncrementalPCA(n_components=nc, batch_size=100).fit(X)
assert_almost_equal(pca.explained_variance_, ipca.explained_variance_,
decimal=prec)
assert_almost_equal(pca.explained_variance_ratio_,
ipca.explained_variance_ratio_, decimal=prec)
assert_almost_equal(pca.noise_variance_, ipca.noise_variance_,
decimal=prec)
def test_whitening():
# Test that PCA and IncrementalPCA transforms match to sign flip.
X = datasets.make_low_rank_matrix(1000, 10, tail_strength=0.,
effective_rank=2, random_state=1999)
prec = 3
n_samples, n_features = X.shape
for nc in [None, 9]:
pca = PCA(whiten=True, n_components=nc).fit(X)
ipca = IncrementalPCA(whiten=True, n_components=nc,
batch_size=250).fit(X)
Xt_pca = pca.transform(X)
Xt_ipca = ipca.transform(X)
assert_almost_equal(np.abs(Xt_pca), np.abs(Xt_ipca), decimal=prec)
Xinv_ipca = ipca.inverse_transform(Xt_ipca)
Xinv_pca = pca.inverse_transform(Xt_pca)
assert_almost_equal(X, Xinv_ipca, decimal=prec)
assert_almost_equal(X, Xinv_pca, decimal=prec)
assert_almost_equal(Xinv_pca, Xinv_ipca, decimal=prec)
| bsd-3-clause |
nickcdryan/hep_ml | setup.py | 3 | 1554 | from setuptools import setup
import codecs
with codecs.open('README.rst', encoding='utf-8') as readme_file:
long_description = readme_file.read()
setup(
name="hep_ml",
version='0.3.0',
description="Machine Learning for High Energy Physics",
long_description=long_description,
url='https://github.com/arogozhnikov/hep_ml',
# Author details
author='Alex Rogozhnikov',
author_email='axelr@yandex-team.ru',
# Choose your license
license='Apache 2.0',
packages=['hep_ml'],
package_dir={'hep_ml': 'hep_ml'},
classifiers=[
# Indicate who your project is intended for
'Intended Audience :: Science/Research',
# Pick your license as you wish (should match "license" above)
'License :: OSI Approved :: Apache Software License',
# Specify the Python versions you support here. In particular, ensure
# that you indicate whether you support Python 2, Python 3 or both.
'Programming Language :: Python :: 2.7 ',
'Programming Language :: Python :: 3.4 ',
],
# What does your project relate to?
keywords='machine learning, supervised learning, '
'uncorrelated methods of machine learning, high energy physics, particle physics',
# List run-time dependencies here. These will be installed by pip when your project is installed.
install_requires=[
'numpy >= 1.9',
'scipy >= 0.15.0',
'pandas >= 0.14.0',
'scikit-learn >= 0.15.2',
'theano >= 0.7',
'six',
],
) | apache-2.0 |
goyalankit/po-compiler | object_files/networkx-1.8.1/examples/drawing/sampson.py | 40 | 1379 | #!/usr/bin/env python
"""
Sampson's monastery data.
Shows how to read data from a zip file and plot multiple frames.
"""
__author__ = """Aric Hagberg (hagberg@lanl.gov)"""
# Copyright (C) 2010 by
# Aric Hagberg <hagberg@lanl.gov>
# Dan Schult <dschult@colgate.edu>
# Pieter Swart <swart@lanl.gov>
# All rights reserved.
# BSD license.
import zipfile, cStringIO
import networkx as nx
import matplotlib.pyplot as plt
zf = zipfile.ZipFile('sampson_data.zip') # zipfile object
e1=cStringIO.StringIO(zf.read('samplike1.txt')) # read info file
e2=cStringIO.StringIO(zf.read('samplike2.txt')) # read info file
e3=cStringIO.StringIO(zf.read('samplike3.txt')) # read info file
G1=nx.read_edgelist(e1,delimiter='\t')
G2=nx.read_edgelist(e2,delimiter='\t')
G3=nx.read_edgelist(e3,delimiter='\t')
pos=nx.spring_layout(G3,iterations=100)
plt.clf()
plt.subplot(221)
plt.title('samplike1')
nx.draw(G1,pos,node_size=50,with_labels=False)
plt.subplot(222)
plt.title('samplike2')
nx.draw(G2,pos,node_size=50,with_labels=False)
plt.subplot(223)
plt.title('samplike3')
nx.draw(G3,pos,node_size=50,with_labels=False)
plt.subplot(224)
plt.title('samplike1,2,3')
nx.draw(G3,pos,edgelist=G3.edges(),node_size=50,with_labels=False)
nx.draw_networkx_edges(G1,pos,alpha=0.25)
nx.draw_networkx_edges(G2,pos,alpha=0.25)
plt.savefig("sampson.png") # save as png
plt.show() # display
| apache-2.0 |
carthach/essentia | src/examples/python/musicbricks-tutorials/2-sinemodel_analsynth.py | 1 | 2076 |
import essentia
import essentia.streaming as es
# import matplotlib for plotting
import matplotlib.pyplot as plt
import numpy as np
import sys
# algorithm parameters
params = {'frameSize': 2048, 'hopSize': 512, 'startFromZero': False,
'sampleRate': 44100, 'maxnSines': 100, 'magnitudeThreshold': -74,
'minSineDur': 0.02, 'freqDevOffset': 10, 'freqDevSlope': 0.001}
# loop over all frames
audioout = np.array(0)
counter = 0
# input and output files
import os.path
tutorial_dir = os.path.dirname(os.path.realpath(__file__))
inputFilename = os.path.join(tutorial_dir, 'singing-female.wav')
outputFilename = os.path.join(tutorial_dir, 'singing-female-out-sinemodel.wav')
out = np.array(0)
loader = es.MonoLoader(filename = inputFilename, sampleRate = params['sampleRate'])
pool = essentia.Pool()
fcut = es.FrameCutter(frameSize = params['frameSize'], hopSize = params['hopSize'], startFromZero = False);
w = es.Windowing(type = "blackmanharris92");
fft = es.FFT(size = params['frameSize']);
smanal = es.SineModelAnal(sampleRate = params['sampleRate'], maxnSines = params['maxnSines'], magnitudeThreshold = params['magnitudeThreshold'], freqDevOffset = params['freqDevOffset'], freqDevSlope = params['freqDevSlope'])
smsyn = es.SineModelSynth(sampleRate = params['sampleRate'], fftSize = params['frameSize'], hopSize = params['hopSize'])
ifft = es.IFFT(size = params['frameSize']);
overl = es.OverlapAdd (frameSize = params['frameSize'], hopSize = params['hopSize'], gain = 1./params['frameSize'] );
awrite = es.MonoWriter (filename = outputFilename, sampleRate = params['sampleRate']);
# analysis
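# In streaming mode '>>' connects an algorithm output to another
# algorithm's input, or routes it into the pool under the given key;
# nothing executes until essentia.run() is called on the network below.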
loader.audio >> fcut.signal
fcut.frame >> w.frame
w.frame >> fft.frame
fft.fft >> smanal.fft
smanal.magnitudes >> (pool, 'magnitudes')
smanal.frequencies >> (pool, 'frequencies')
smanal.phases >> (pool, 'phases')
# synthesis
smanal.magnitudes >> smsyn.magnitudes
smanal.frequencies >> smsyn.frequencies
smanal.phases >> smsyn.phases
smsyn.fft >> ifft.fft
ifft.frame >> overl.frame
overl.signal >> awrite.audio
overl.signal >> (pool, 'audio')
essentia.run(loader)
| agpl-3.0 |
afrachioni/umbrella | tools/plot_pmf.py | 1 | 1619 | #!/usr/bin/env python
import matplotlib
matplotlib.use('Agg')
import pylab
import matplotlib.mlab as mlab
import matplotlib.pyplot as plt
import numpy
def rescale(arr):
min = 1e20
for x in arr:
if x < min:
min = x
for i in range(len(arr)):
arr[i] = (arr[i] - min) # / something
def flatten(arr):
for elem in arr:
elem[1] -= 0
#elem[1] -= lj(elem[0])
#############################
kt = 8.617e-4
#f = lambda x : 1 * kt * numpy.log(4 * numpy.pi * x**2)
f = lambda x : 0
sigma = 1
epsilon = 10 * kt
trunc = [0, 1]
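# Lennard-Jones potential shifted up by epsilon so the well minimum,
# located at r = 2**(1./6) * sigma, sits exactly at zero.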
lj = lambda x : 4 * epsilon * ((x / sigma)**(-12) - (x / sigma)**(-6)) + epsilon
x = numpy.arange(1, 3, 0.01)
y = map(lj, x)
plt.plot(x, y, color='black')
#################
## Trapezoids
#data = numpy.loadtxt("A_trap.txt")[(2*trunc[0]):(-2*trunc[1])]
#flatten(data)
#corrected = data[:,1] + map(f, data[:,0])
#rescale(corrected)
## line up tails
##corrected = map(lambda x : x + lj(data[-1,0]) - corrected[-1], corrected)
#plt.plot(data[:,0], corrected, 'b+')
#
#
#################
## Simpson's rule
#data = numpy.loadtxt("A_simps.txt")[(trunc[0]):(-trunc[1])]
#flatten(data)
#corrected = data[:,1] + map(f, data[:,0])
#rescale(corrected)
## line up tails
##corrected = map(lambda x : x + lj(data[-1,0]) - corrected[-1], corrected)
#plt.plot(data[:,0], corrected, 'rx')
data = numpy.loadtxt("wham_free.txt")[8:-8]
flatten(data)
corrected = data[:,1] + map(f, data[:,0])
rescale(corrected)
plt.plot(data[:,0], corrected, 'g-')
#x = numpy.arange(-1.5, 1.5, 0.05)
#y = x**2
#plt.plot(x, y)
plt.xlabel('x')
plt.ylabel('A')
plt.grid(True)
plotfname = "pmf.png"
pylab.savefig(plotfname)
| gpl-3.0 |
sumspr/scikit-learn | sklearn/tests/test_metaestimators.py | 226 | 4954 | """Common tests for metaestimators"""
import functools
import numpy as np
from sklearn.base import BaseEstimator
from sklearn.externals.six import iterkeys
from sklearn.datasets import make_classification
from sklearn.utils.testing import assert_true, assert_false, assert_raises
from sklearn.pipeline import Pipeline
from sklearn.grid_search import GridSearchCV, RandomizedSearchCV
from sklearn.feature_selection import RFE, RFECV
from sklearn.ensemble import BaggingClassifier
class DelegatorData(object):
def __init__(self, name, construct, skip_methods=(),
fit_args=make_classification()):
self.name = name
self.construct = construct
self.fit_args = fit_args
self.skip_methods = skip_methods
DELEGATING_METAESTIMATORS = [
DelegatorData('Pipeline', lambda est: Pipeline([('est', est)])),
DelegatorData('GridSearchCV',
lambda est: GridSearchCV(
est, param_grid={'param': [5]}, cv=2),
skip_methods=['score']),
DelegatorData('RandomizedSearchCV',
lambda est: RandomizedSearchCV(
est, param_distributions={'param': [5]}, cv=2, n_iter=1),
skip_methods=['score']),
DelegatorData('RFE', RFE,
skip_methods=['transform', 'inverse_transform', 'score']),
DelegatorData('RFECV', RFECV,
skip_methods=['transform', 'inverse_transform', 'score']),
DelegatorData('BaggingClassifier', BaggingClassifier,
skip_methods=['transform', 'inverse_transform', 'score',
'predict_proba', 'predict_log_proba', 'predict'])
]
def test_metaestimator_delegation():
# Ensures specified metaestimators have methods iff subestimator does
def hides(method):
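# Raising AttributeError from the property makes hasattr() return False,
# so a SubEstimator can pretend the named method does not exist.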
@property
def wrapper(obj):
if obj.hidden_method == method.__name__:
raise AttributeError('%r is hidden' % obj.hidden_method)
return functools.partial(method, obj)
return wrapper
class SubEstimator(BaseEstimator):
def __init__(self, param=1, hidden_method=None):
self.param = param
self.hidden_method = hidden_method
def fit(self, X, y=None, *args, **kwargs):
self.coef_ = np.arange(X.shape[1])
return True
def _check_fit(self):
if not hasattr(self, 'coef_'):
raise RuntimeError('Estimator is not fit')
@hides
def inverse_transform(self, X, *args, **kwargs):
self._check_fit()
return X
@hides
def transform(self, X, *args, **kwargs):
self._check_fit()
return X
@hides
def predict(self, X, *args, **kwargs):
self._check_fit()
return np.ones(X.shape[0])
@hides
def predict_proba(self, X, *args, **kwargs):
self._check_fit()
return np.ones(X.shape[0])
@hides
def predict_log_proba(self, X, *args, **kwargs):
self._check_fit()
return np.ones(X.shape[0])
@hides
def decision_function(self, X, *args, **kwargs):
self._check_fit()
return np.ones(X.shape[0])
@hides
def score(self, X, *args, **kwargs):
self._check_fit()
return 1.0
methods = [k for k in iterkeys(SubEstimator.__dict__)
if not k.startswith('_') and not k.startswith('fit')]
methods.sort()
for delegator_data in DELEGATING_METAESTIMATORS:
delegate = SubEstimator()
delegator = delegator_data.construct(delegate)
for method in methods:
if method in delegator_data.skip_methods:
continue
assert_true(hasattr(delegate, method))
assert_true(hasattr(delegator, method),
msg="%s does not have method %r when its delegate does"
% (delegator_data.name, method))
# delegation before fit raises an exception
assert_raises(Exception, getattr(delegator, method),
delegator_data.fit_args[0])
delegator.fit(*delegator_data.fit_args)
for method in methods:
if method in delegator_data.skip_methods:
continue
# smoke test delegation
getattr(delegator, method)(delegator_data.fit_args[0])
for method in methods:
if method in delegator_data.skip_methods:
continue
delegate = SubEstimator(hidden_method=method)
delegator = delegator_data.construct(delegate)
assert_false(hasattr(delegate, method))
assert_false(hasattr(delegator, method),
msg="%s has method %r when its delegate does not"
% (delegator_data.name, method))
| bsd-3-clause |
huobaowangxi/scikit-learn | examples/decomposition/plot_ica_vs_pca.py | 306 | 3329 | """
==========================
FastICA on 2D point clouds
==========================
This example illustrates visually in the feature space a comparison by
results using two different component analysis techniques.
:ref:`ICA` vs :ref:`PCA`.
Representing ICA in the feature space gives the view of 'geometric ICA':
ICA is an algorithm that finds directions in the feature space
corresponding to projections with high non-Gaussianity. These directions
need not be orthogonal in the original feature space, but they are
orthogonal in the whitened feature space, in which all directions
correspond to the same variance.
PCA, on the other hand, finds orthogonal directions in the raw feature
space that correspond to directions accounting for maximum variance.
Here we simulate independent sources using a highly non-Gaussian
process, two Student's t distributions with a low number of degrees of freedom (top left
figure). We mix them to create observations (top right figure).
In this raw observation space, directions identified by PCA are
represented by orange vectors. We represent the signal in the PCA space,
after whitening by the variance corresponding to the PCA vectors (lower
left). Running ICA corresponds to finding a rotation in this space to
identify the directions of largest non-Gaussianity (lower right).
"""
print(__doc__)
# Authors: Alexandre Gramfort, Gael Varoquaux
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn.decomposition import PCA, FastICA
###############################################################################
# Generate sample data
rng = np.random.RandomState(42)
S = rng.standard_t(1.5, size=(20000, 2))
S[:, 0] *= 2.
# Mix data
A = np.array([[1, 1], [0, 2]]) # Mixing matrix
X = np.dot(S, A.T) # Generate observations
pca = PCA()
S_pca_ = pca.fit(X).transform(X)
ica = FastICA(random_state=rng)
S_ica_ = ica.fit(X).transform(X) # Estimate the sources
S_ica_ /= S_ica_.std(axis=0)
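# ICA recovers the sources only up to scale and sign, so normalize the
# estimates to unit variance before comparing with the true sources.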
###############################################################################
# Plot results
def plot_samples(S, axis_list=None):
plt.scatter(S[:, 0], S[:, 1], s=2, marker='o', zorder=10,
color='steelblue', alpha=0.5)
if axis_list is not None:
colors = ['orange', 'red']
for color, axis in zip(colors, axis_list):
axis /= axis.std()
x_axis, y_axis = axis
# Trick to get legend to work
plt.plot(0.1 * x_axis, 0.1 * y_axis, linewidth=2, color=color)
plt.quiver(0, 0, x_axis, y_axis, zorder=11, width=0.01, scale=6,
color=color)
plt.hlines(0, -3, 3)
plt.vlines(0, -3, 3)
plt.xlim(-3, 3)
plt.ylim(-3, 3)
plt.xlabel('x')
plt.ylabel('y')
plt.figure()
plt.subplot(2, 2, 1)
plot_samples(S / S.std())
plt.title('True Independent Sources')
axis_list = [pca.components_.T, ica.mixing_]
plt.subplot(2, 2, 2)
plot_samples(X / np.std(X), axis_list=axis_list)
legend = plt.legend(['PCA', 'ICA'], loc='upper right')
legend.set_zorder(100)
plt.title('Observations')
plt.subplot(2, 2, 3)
plot_samples(S_pca_ / np.std(S_pca_, axis=0))
plt.title('PCA recovered signals')
plt.subplot(2, 2, 4)
plot_samples(S_ica_ / np.std(S_ica_))
plt.title('ICA recovered signals')
plt.subplots_adjust(0.09, 0.04, 0.94, 0.94, 0.26, 0.36)
plt.show()
| bsd-3-clause |
DuCorey/bokeh | bokeh/core/json_encoder.py | 5 | 7177 | ''' Provide a functions and classes to implement a custom JSON encoder for
serializing objects for BokehJS.
The primary interface is provided by the |serialize_json| function, which
uses the custom |BokehJSONEncoder| to produce JSON output.
In general, functions in this module convert values in the following way:
* Datetime values (Python, Pandas, NumPy) are converted to floating point
milliseconds since epoch.
* TimeDelta values are converted to absolute floating point milliseconds.
* RelativeDelta values are converted to dictionaries.
* Decimal values are converted to floating point.
* Sequences (Pandas Series, NumPy arrays, python sequences) that are passed
through this interface are converted to lists. Note, however, that arrays in
data sources inside Bokeh Documents are converted elsewhere, and by default
use a binary encoded format.
* Bokeh ``Model`` instances are usually serialized elsewhere in the context
of an entire Bokeh Document. Models passed through this interface are
converted to references.
* ``HasProps`` (that are not Bokeh models) are converted to key/value dicts of
all their properties and values.
* ``Color`` instances are converted to CSS color values.
.. |serialize_json| replace:: :class:`~bokeh.core.json_encoder.serialize_json`
.. |BokehJSONEncoder| replace:: :class:`~bokeh.core.json_encoder.BokehJSONEncoder`
'''
from __future__ import absolute_import
import logging
log = logging.getLogger(__name__)
import collections
import decimal
import json
import numpy as np
from ..settings import settings
from ..util.dependencies import import_optional
from ..util.serialization import convert_datetime_type, is_datetime_type, transform_series, transform_array
pd = import_optional('pandas')
rd = import_optional("dateutil.relativedelta")
class BokehJSONEncoder(json.JSONEncoder):
''' A custom ``json.JSONEncoder`` subclass for encoding objects in
accordance with the BokehJS protocol.
'''
def transform_python_types(self, obj):
''' Handle special scalars such as (Python, NumPy, or Pandas)
datetimes, or Decimal values.
Args:
obj (obj) :
The object to encode. Anything not specifically handled in
this method is passed on to the default system JSON encoder.
'''
# date/time values that get serialized as milliseconds
if is_datetime_type(obj):
return convert_datetime_type(obj)
# slice objects
elif isinstance(obj, slice):
return dict(start=obj.start, stop=obj.stop, step=obj.step)
# NumPy scalars
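# np.issubdtype matches any width of the given scalar kind, e.g. both
# np.float32 and np.float64 for the float check below.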
elif np.issubdtype(type(obj), np.float):
return float(obj)
elif np.issubdtype(type(obj), np.integer):
return int(obj)
elif np.issubdtype(type(obj), np.bool_):
return bool(obj)
# Decimal values
elif isinstance(obj, decimal.Decimal):
return float(obj)
# RelativeDelta gets serialized as a dict
elif rd and isinstance(obj, rd.relativedelta):
return dict(years=obj.years,
months=obj.months,
days=obj.days,
hours=obj.hours,
minutes=obj.minutes,
seconds=obj.seconds,
microseconds=obj.microseconds)
else:
return super(BokehJSONEncoder, self).default(obj)
def default(self, obj):
''' The required ``default`` method for JSONEncoder subclasses.
Args:
obj (obj) :
The object to encode. Anything not specifically handled in
this method is passed on to the default system JSON encoder.
'''
from ..model import Model
from ..colors import Color
from .has_props import HasProps
# array types -- use force_list here, only binary
# encoding CDS columns for now
if pd and isinstance(obj, (pd.Series, pd.Index)):
return transform_series(obj, force_list=True)
elif isinstance(obj, np.ndarray):
return transform_array(obj, force_list=True)
elif isinstance(obj, collections.deque):
return list(map(self.default, obj))
elif isinstance(obj, Model):
return obj.ref
elif isinstance(obj, HasProps):
return obj.properties_with_values(include_defaults=False)
elif isinstance(obj, Color):
return obj.to_css()
else:
return self.transform_python_types(obj)
def serialize_json(obj, pretty=False, indent=None, **kwargs):
''' Return a serialized JSON representation of objects, suitable to
send to BokehJS.
This function is typically used to serialize single python objects in
the manner expected by BokehJS. In particular, many datetime values are
automatically normalized to an expected format. Some Bokeh objects can
also be passed, but note that Bokeh models are typically properly
serialized in the context of an entire Bokeh document.
The resulting JSON always has sorted keys. By default, the output is
as compact as possible unless pretty output or indentation is requested.
Args:
obj (obj) : the object to serialize to JSON format
pretty (bool, optional) :
Whether to generate prettified output. If ``True``, spaces are
added after separators, and indentation and newlines
are applied. (default: False)
Pretty output can also be enabled with the environment variable
``BOKEH_PRETTY``, which overrides this argument, if set.
indent (int or None, optional) :
Amount of indentation to use in generated JSON output. If ``None``
then no indentation is used, unless pretty output is enabled,
in which case two spaces are used. (default: None)
Any additional keyword arguments are passed to ``json.dumps``, except for
some that are computed internally, and cannot be overridden:
* allow_nan
* indent
* separators
* sort_keys
Examples:
.. code-block:: python
>>> data = dict(b=np.datetime64('2017-01-01'), a = np.arange(3))
>>>print(serialize_json(data))
{"a":[0,1,2],"b":1483228800000.0}
>>> print(serialize_json(data, pretty=True))
{
"a": [
0,
1,
2
],
"b": 1483228800000.0
}
'''
# these args to json.dumps are computed internally and should not be passed along
for name in ['allow_nan', 'separators', 'sort_keys']:
if name in kwargs:
raise ValueError("The value of %r is computed internally, overriding is not permissable." % name)
pretty = settings.pretty(pretty)
if pretty:
separators=(",", ": ")
else:
separators=(",", ":")
if pretty and indent is None:
indent = 2
return json.dumps(obj, cls=BokehJSONEncoder, allow_nan=False, indent=indent, separators=separators, sort_keys=True, **kwargs)
| bsd-3-clause |
GuessWhoSamFoo/pandas | asv_bench/benchmarks/io/json.py | 5 | 4937 | import numpy as np
import pandas.util.testing as tm
from pandas import DataFrame, date_range, timedelta_range, concat, read_json
from ..pandas_vb_common import BaseIO
class ReadJSON(BaseIO):
fname = "__test__.json"
params = (['split', 'index', 'records'], ['int', 'datetime'])
param_names = ['orient', 'index']
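# asv runs every benchmark method once for each (orient, index)
# combination taken from `params`.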
def setup(self, orient, index):
N = 100000
indexes = {'int': np.arange(N),
'datetime': date_range('20000101', periods=N, freq='H')}
df = DataFrame(np.random.randn(N, 5),
columns=['float_{}'.format(i) for i in range(5)],
index=indexes[index])
df.to_json(self.fname, orient=orient)
def time_read_json(self, orient, index):
read_json(self.fname, orient=orient)
class ReadJSONLines(BaseIO):
fname = "__test_lines__.json"
params = ['int', 'datetime']
param_names = ['index']
def setup(self, index):
N = 100000
indexes = {'int': np.arange(N),
'datetime': date_range('20000101', periods=N, freq='H')}
df = DataFrame(np.random.randn(N, 5),
columns=['float_{}'.format(i) for i in range(5)],
index=indexes[index])
df.to_json(self.fname, orient='records', lines=True)
def time_read_json_lines(self, index):
read_json(self.fname, orient='records', lines=True)
def time_read_json_lines_concat(self, index):
concat(read_json(self.fname, orient='records', lines=True,
chunksize=25000))
def peakmem_read_json_lines(self, index):
read_json(self.fname, orient='records', lines=True)
def peakmem_read_json_lines_concat(self, index):
concat(read_json(self.fname, orient='records', lines=True,
chunksize=25000))
class ToJSON(BaseIO):
fname = "__test__.json"
params = ['split', 'columns', 'index']
param_names = ['orient']
def setup(self, orient):
N = 10**5
ncols = 5
index = date_range('20000101', periods=N, freq='H')
timedeltas = timedelta_range(start=1, periods=N, freq='s')
datetimes = date_range(start=1, periods=N, freq='s')
ints = np.random.randint(100000000, size=N)
floats = np.random.randn(N)
strings = tm.makeStringIndex(N)
self.df = DataFrame(np.random.randn(N, ncols), index=np.arange(N))
self.df_date_idx = DataFrame(np.random.randn(N, ncols), index=index)
self.df_td_int_ts = DataFrame({'td_1': timedeltas,
'td_2': timedeltas,
'int_1': ints,
'int_2': ints,
'ts_1': datetimes,
'ts_2': datetimes},
index=index)
self.df_int_floats = DataFrame({'int_1': ints,
'int_2': ints,
'int_3': ints,
'float_1': floats,
'float_2': floats,
'float_3': floats},
index=index)
self.df_int_float_str = DataFrame({'int_1': ints,
'int_2': ints,
'float_1': floats,
'float_2': floats,
'str_1': strings,
'str_2': strings},
index=index)
def time_floats_with_int_index(self, orient):
self.df.to_json(self.fname, orient=orient)
def time_floats_with_dt_index(self, orient):
self.df_date_idx.to_json(self.fname, orient=orient)
def time_delta_int_tstamp(self, orient):
self.df_td_int_ts.to_json(self.fname, orient=orient)
def time_float_int(self, orient):
self.df_int_floats.to_json(self.fname, orient=orient)
def time_float_int_str(self, orient):
self.df_int_float_str.to_json(self.fname, orient=orient)
def time_floats_with_int_index_lines(self, orient):
self.df.to_json(self.fname, orient='records', lines=True)
def time_floats_with_dt_index_lines(self, orient):
self.df_date_idx.to_json(self.fname, orient='records', lines=True)
def time_delta_int_tstamp_lines(self, orient):
self.df_td_int_ts.to_json(self.fname, orient='records', lines=True)
def time_float_int_lines(self, orient):
self.df_int_floats.to_json(self.fname, orient='records', lines=True)
def time_float_int_str_lines(self, orient):
self.df_int_float_str.to_json(self.fname, orient='records', lines=True)
from ..pandas_vb_common import setup # noqa: F401
| bsd-3-clause |
themrmax/scikit-learn | examples/linear_model/plot_lasso_model_selection.py | 39 | 5425 | """
===================================================
Lasso model selection: Cross-Validation / AIC / BIC
===================================================
Use the Akaike information criterion (AIC), the Bayes Information
criterion (BIC) and cross-validation to select an optimal value
of the regularization parameter alpha of the :ref:`lasso` estimator.
Results obtained with LassoLarsIC are based on AIC/BIC criteria.
Information-criterion based model selection is very fast, but it
relies on a proper estimation of degrees of freedom, are
derived for large samples (asymptotic results) and assume the model
is correct, i.e. that the data are actually generated by this model.
They also tend to break when the problem is badly conditioned
(more features than samples).
For cross-validation, we use 20-fold with 2 algorithms to compute the
Lasso path: coordinate descent, as implemented by the LassoCV class, and
Lars (least angle regression) as implemented by the LassoLarsCV class.
Both algorithms give roughly the same results. They differ with regard
to their execution speed and sources of numerical errors.
Lars computes a path solution only for each kink in the path. As a
result, it is very efficient when there are only a few kinks, which is
the case if there are few features or samples. Also, it is able to
compute the full path without setting any meta-parameter. By contrast,
coordinate descent computes the path points on a pre-specified
grid (here we use the default). Thus it is more efficient if the number
of grid points is smaller than the number of kinks in the path. Such a
strategy can be interesting if the number of features is really large
and there are enough samples to select a large amount of them. In terms of
numerical errors, for heavily correlated variables, Lars will accumulate
more errors, while the coordinate descent algorithm will only sample the
path on a grid.
Note how the optimal value of alpha varies for each fold. This
illustrates why nested cross-validation is necessary when trying to
evaluate the performance of a method for which a parameter is chosen by
cross-validation: this choice of parameter may not be optimal for unseen
data.
"""
print(__doc__)
# Author: Olivier Grisel, Gael Varoquaux, Alexandre Gramfort
# License: BSD 3 clause
import time
import numpy as np
import matplotlib.pyplot as plt
from sklearn.linear_model import LassoCV, LassoLarsCV, LassoLarsIC
from sklearn import datasets
diabetes = datasets.load_diabetes()
X = diabetes.data
y = diabetes.target
rng = np.random.RandomState(42)
X = np.c_[X, rng.randn(X.shape[0], 14)] # add some bad features
# normalize data as done by Lars to allow for comparison
X /= np.sqrt(np.sum(X ** 2, axis=0))
##############################################################################
# LassoLarsIC: least angle regression with BIC/AIC criterion
model_bic = LassoLarsIC(criterion='bic')
t1 = time.time()
model_bic.fit(X, y)
t_bic = time.time() - t1
alpha_bic_ = model_bic.alpha_
model_aic = LassoLarsIC(criterion='aic')
model_aic.fit(X, y)
alpha_aic_ = model_aic.alpha_
def plot_ic_criterion(model, name, color):
alpha_ = model.alpha_
alphas_ = model.alphas_
criterion_ = model.criterion_
plt.plot(-np.log10(alphas_), criterion_, '--', color=color,
linewidth=3, label='%s criterion' % name)
plt.axvline(-np.log10(alpha_), color=color, linewidth=3,
label='alpha: %s estimate' % name)
plt.xlabel('-log(alpha)')
plt.ylabel('criterion')
plt.figure()
plot_ic_criterion(model_aic, 'AIC', 'b')
plot_ic_criterion(model_bic, 'BIC', 'r')
plt.legend()
plt.title('Information-criterion for model selection (training time %.3fs)'
% t_bic)
##############################################################################
# LassoCV: coordinate descent
# Compute paths
print("Computing regularization path using the coordinate descent lasso...")
t1 = time.time()
model = LassoCV(cv=20).fit(X, y)
t_lasso_cv = time.time() - t1
# Display results
m_log_alphas = -np.log10(model.alphas_)
plt.figure()
ymin, ymax = 2300, 3800
plt.plot(m_log_alphas, model.mse_path_, ':')
plt.plot(m_log_alphas, model.mse_path_.mean(axis=-1), 'k',
label='Average across the folds', linewidth=2)
plt.axvline(-np.log10(model.alpha_), linestyle='--', color='k',
label='alpha: CV estimate')
plt.legend()
plt.xlabel('-log(alpha)')
plt.ylabel('Mean square error')
plt.title('Mean square error on each fold: coordinate descent '
'(train time: %.2fs)' % t_lasso_cv)
plt.axis('tight')
plt.ylim(ymin, ymax)
##############################################################################
# LassoLarsCV: least angle regression
# Compute paths
print("Computing regularization path using the Lars lasso...")
t1 = time.time()
model = LassoLarsCV(cv=20).fit(X, y)
t_lasso_lars_cv = time.time() - t1
# Display results
m_log_alphas = -np.log10(model.cv_alphas_)
plt.figure()
plt.plot(m_log_alphas, model.mse_path_, ':')
plt.plot(m_log_alphas, model.mse_path_.mean(axis=-1), 'k',
label='Average across the folds', linewidth=2)
plt.axvline(-np.log10(model.alpha_), linestyle='--', color='k',
label='alpha CV')
plt.legend()
plt.xlabel('-log(alpha)')
plt.ylabel('Mean square error')
plt.title('Mean square error on each fold: Lars (train time: %.2fs)'
% t_lasso_lars_cv)
plt.axis('tight')
plt.ylim(ymin, ymax)
plt.show()
| bsd-3-clause |
yyjiang/scikit-learn | examples/cluster/plot_digits_agglomeration.py | 377 | 1694 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=========================================================
Feature agglomeration
=========================================================
These images show how similar features are merged together using
feature agglomeration.
"""
print(__doc__)
# Code source: Gaël Varoquaux
# Modified for documentation by Jaques Grobler
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn import datasets, cluster
from sklearn.feature_extraction.image import grid_to_graph
digits = datasets.load_digits()
images = digits.images
X = np.reshape(images, (len(images), -1))
connectivity = grid_to_graph(*images[0].shape)
agglo = cluster.FeatureAgglomeration(connectivity=connectivity,
n_clusters=32)
agglo.fit(X)
X_reduced = agglo.transform(X)
X_restored = agglo.inverse_transform(X_reduced)
images_restored = np.reshape(X_restored, images.shape)
plt.figure(1, figsize=(4, 3.5))
plt.clf()
plt.subplots_adjust(left=.01, right=.99, bottom=.01, top=.91)
for i in range(4):
plt.subplot(3, 4, i + 1)
plt.imshow(images[i], cmap=plt.cm.gray, vmax=16, interpolation='nearest')
plt.xticks(())
plt.yticks(())
if i == 1:
plt.title('Original data')
plt.subplot(3, 4, 4 + i + 1)
plt.imshow(images_restored[i], cmap=plt.cm.gray, vmax=16,
interpolation='nearest')
if i == 1:
plt.title('Agglomerated data')
plt.xticks(())
plt.yticks(())
plt.subplot(3, 4, 10)
plt.imshow(np.reshape(agglo.labels_, images[0].shape),
interpolation='nearest', cmap=plt.cm.spectral)
plt.xticks(())
plt.yticks(())
plt.title('Labels')
plt.show()
| bsd-3-clause |
jshiv/turntable | test/lib/python2.7/site-packages/numpy/fft/fftpack.py | 35 | 42179 | """
Discrete Fourier Transforms
Routines in this module:
fft(a, n=None, axis=-1)
ifft(a, n=None, axis=-1)
rfft(a, n=None, axis=-1)
irfft(a, n=None, axis=-1)
hfft(a, n=None, axis=-1)
ihfft(a, n=None, axis=-1)
fftn(a, s=None, axes=None)
ifftn(a, s=None, axes=None)
rfftn(a, s=None, axes=None)
irfftn(a, s=None, axes=None)
fft2(a, s=None, axes=(-2,-1))
ifft2(a, s=None, axes=(-2, -1))
rfft2(a, s=None, axes=(-2,-1))
irfft2(a, s=None, axes=(-2, -1))
i = inverse transform
r = transform of purely real data
h = Hermitian transform
n = n-dimensional transform
2 = 2-dimensional transform
(Note: 2D routines are just nD routines with different default
behavior.)
The underlying code for these functions is an f2c-translated and modified
version of the FFTPACK routines.
"""
from __future__ import division, absolute_import, print_function
__all__ = ['fft', 'ifft', 'rfft', 'irfft', 'hfft', 'ihfft', 'rfftn',
'irfftn', 'rfft2', 'irfft2', 'fft2', 'ifft2', 'fftn', 'ifftn']
from numpy.core import asarray, zeros, swapaxes, shape, conjugate, \
take
from . import fftpack_lite as fftpack
_fft_cache = {}
_real_fft_cache = {}
def _raw_fft(a, n=None, axis=-1, init_function=fftpack.cffti,
work_function=fftpack.cfftf, fft_cache = _fft_cache ):
a = asarray(a)
if n is None:
n = a.shape[axis]
if n < 1:
raise ValueError("Invalid number of FFT data points (%d) specified." % n)
try:
# Thread-safety note: We rely on list.pop() here to atomically
# retrieve-and-remove a wsave from the cache. This ensures that no
# other thread can get the same wsave while we're using it.
wsave = fft_cache.setdefault(n, []).pop()
except (IndexError):
wsave = init_function(n)
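    # Crop the input if it is longer than n, or zero-pad it with trailing
    # zeros if it is shorter, so that exactly n points are transformed.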
if a.shape[axis] != n:
s = list(a.shape)
if s[axis] > n:
index = [slice(None)]*len(s)
index[axis] = slice(0, n)
a = a[index]
else:
index = [slice(None)]*len(s)
index[axis] = slice(0, s[axis])
s[axis] = n
z = zeros(s, a.dtype.char)
z[index] = a
a = z
if axis != -1:
a = swapaxes(a, axis, -1)
r = work_function(a, wsave)
if axis != -1:
r = swapaxes(r, axis, -1)
# As soon as we put wsave back into the cache, another thread could pick it
# up and start using it, so we must not do this until after we're
# completely done using it ourselves.
fft_cache[n].append(wsave)
return r
def fft(a, n=None, axis=-1):
"""
Compute the one-dimensional discrete Fourier Transform.
This function computes the one-dimensional *n*-point discrete Fourier
Transform (DFT) with the efficient Fast Fourier Transform (FFT)
algorithm [CT].
Parameters
----------
a : array_like
Input array, can be complex.
n : int, optional
Length of the transformed axis of the output.
If `n` is smaller than the length of the input, the input is cropped.
If it is larger, the input is padded with zeros. If `n` is not given,
the length of the input along the axis specified by `axis` is used.
axis : int, optional
Axis over which to compute the FFT. If not given, the last axis is
used.
Returns
-------
out : complex ndarray
The truncated or zero-padded input, transformed along the axis
indicated by `axis`, or the last one if `axis` is not specified.
Raises
------
IndexError
if `axes` is larger than the last axis of `a`.
See Also
--------
numpy.fft : for definition of the DFT and conventions used.
ifft : The inverse of `fft`.
fft2 : The two-dimensional FFT.
fftn : The *n*-dimensional FFT.
rfftn : The *n*-dimensional FFT of real input.
fftfreq : Frequency bins for given FFT parameters.
Notes
-----
FFT (Fast Fourier Transform) refers to a way the discrete Fourier
Transform (DFT) can be calculated efficiently, by using symmetries in the
calculated terms. The symmetry is highest when `n` is a power of 2, and
the transform is therefore most efficient for these sizes.
The DFT is defined, with the conventions used in this implementation, in
the documentation for the `numpy.fft` module.
References
----------
.. [CT] Cooley, James W., and John W. Tukey, 1965, "An algorithm for the
machine calculation of complex Fourier series," *Math. Comput.*
19: 297-301.
Examples
--------
>>> np.fft.fft(np.exp(2j * np.pi * np.arange(8) / 8))
array([ -3.44505240e-16 +1.14383329e-17j,
8.00000000e+00 -5.71092652e-15j,
2.33482938e-16 +1.22460635e-16j,
1.64863782e-15 +1.77635684e-15j,
9.95839695e-17 +2.33482938e-16j,
0.00000000e+00 +1.66837030e-15j,
1.14383329e-17 +1.22460635e-16j,
-1.64863782e-15 +1.77635684e-15j])
>>> import matplotlib.pyplot as plt
>>> t = np.arange(256)
>>> sp = np.fft.fft(np.sin(t))
>>> freq = np.fft.fftfreq(t.shape[-1])
>>> plt.plot(freq, sp.real, freq, sp.imag)
[<matplotlib.lines.Line2D object at 0x...>, <matplotlib.lines.Line2D object at 0x...>]
>>> plt.show()
In this example, real input has an FFT which is Hermitian, i.e., symmetric
in the real part and anti-symmetric in the imaginary part, as described in
the `numpy.fft` documentation.
"""
return _raw_fft(a, n, axis, fftpack.cffti, fftpack.cfftf, _fft_cache)
def ifft(a, n=None, axis=-1):
"""
Compute the one-dimensional inverse discrete Fourier Transform.
This function computes the inverse of the one-dimensional *n*-point
discrete Fourier transform computed by `fft`. In other words,
``ifft(fft(a)) == a`` to within numerical accuracy.
For a general description of the algorithm and definitions,
see `numpy.fft`.
The input should be ordered in the same way as is returned by `fft`,
i.e., ``a[0]`` should contain the zero frequency term,
``a[1:n/2+1]`` should contain the positive-frequency terms, and
``a[n/2+1:]`` should contain the negative-frequency terms, in order of
decreasingly negative frequency. See `numpy.fft` for details.
Parameters
----------
a : array_like
Input array, can be complex.
n : int, optional
Length of the transformed axis of the output.
If `n` is smaller than the length of the input, the input is cropped.
If it is larger, the input is padded with zeros. If `n` is not given,
the length of the input along the axis specified by `axis` is used.
See notes about padding issues.
axis : int, optional
Axis over which to compute the inverse DFT. If not given, the last
axis is used.
Returns
-------
out : complex ndarray
The truncated or zero-padded input, transformed along the axis
indicated by `axis`, or the last one if `axis` is not specified.
Raises
------
IndexError
If `axes` is larger than the last axis of `a`.
See Also
--------
numpy.fft : An introduction, with definitions and general explanations.
fft : The one-dimensional (forward) FFT, of which `ifft` is the inverse
ifft2 : The two-dimensional inverse FFT.
ifftn : The n-dimensional inverse FFT.
Notes
-----
If the input parameter `n` is larger than the size of the input, the input
is padded by appending zeros at the end. Even though this is the common
approach, it might lead to surprising results. If a different padding is
desired, it must be performed before calling `ifft`.
Examples
--------
>>> np.fft.ifft([0, 4, 0, 0])
array([ 1.+0.j, 0.+1.j, -1.+0.j, 0.-1.j])
Create and plot a band-limited signal with random phases:
>>> import matplotlib.pyplot as plt
>>> t = np.arange(400)
>>> n = np.zeros((400,), dtype=complex)
>>> n[40:60] = np.exp(1j*np.random.uniform(0, 2*np.pi, (20,)))
>>> s = np.fft.ifft(n)
>>> plt.plot(t, s.real, 'b-', t, s.imag, 'r--')
[<matplotlib.lines.Line2D object at 0x...>, <matplotlib.lines.Line2D object at 0x...>]
>>> plt.legend(('real', 'imaginary'))
<matplotlib.legend.Legend object at 0x...>
>>> plt.show()
"""
a = asarray(a).astype(complex)
if n is None:
n = shape(a)[axis]
return _raw_fft(a, n, axis, fftpack.cffti, fftpack.cfftb, _fft_cache) / n
def rfft(a, n=None, axis=-1):
"""
Compute the one-dimensional discrete Fourier Transform for real input.
This function computes the one-dimensional *n*-point discrete Fourier
Transform (DFT) of a real-valued array by means of an efficient algorithm
called the Fast Fourier Transform (FFT).
Parameters
----------
a : array_like
Input array
n : int, optional
Number of points along transformation axis in the input to use.
If `n` is smaller than the length of the input, the input is cropped.
If it is larger, the input is padded with zeros. If `n` is not given,
the length of the input along the axis specified by `axis` is used.
axis : int, optional
Axis over which to compute the FFT. If not given, the last axis is
used.
Returns
-------
out : complex ndarray
The truncated or zero-padded input, transformed along the axis
indicated by `axis`, or the last one if `axis` is not specified.
If `n` is even, the length of the transformed axis is ``(n/2)+1``.
If `n` is odd, the length is ``(n+1)/2``.
Raises
------
IndexError
If `axis` is larger than the last axis of `a`.
See Also
--------
numpy.fft : For definition of the DFT and conventions used.
irfft : The inverse of `rfft`.
fft : The one-dimensional FFT of general (complex) input.
fftn : The *n*-dimensional FFT.
rfftn : The *n*-dimensional FFT of real input.
Notes
-----
When the DFT is computed for purely real input, the output is
Hermitian-symmetric, i.e. the negative frequency terms are just the complex
conjugates of the corresponding positive-frequency terms, and the
negative-frequency terms are therefore redundant. This function does not
compute the negative frequency terms, and the length of the transformed
axis of the output is therefore ``n//2 + 1``.
When ``A = rfft(a)`` and fs is the sampling frequency, ``A[0]`` contains
the zero-frequency term 0*fs, which is real due to Hermitian symmetry.
If `n` is even, ``A[-1]`` contains the term representing both positive
and negative Nyquist frequency (+fs/2 and -fs/2), and must also be purely
real. If `n` is odd, there is no term at fs/2; ``A[-1]`` contains
the largest positive frequency (fs/2*(n-1)/n), and is complex in the
general case.
If the input `a` contains an imaginary part, it is silently discarded.
Examples
--------
>>> np.fft.fft([0, 1, 0, 0])
array([ 1.+0.j, 0.-1.j, -1.+0.j, 0.+1.j])
>>> np.fft.rfft([0, 1, 0, 0])
array([ 1.+0.j, 0.-1.j, -1.+0.j])
Notice how the final element of the `fft` output is the complex conjugate
of the second element, for real input. For `rfft`, this symmetry is
exploited to compute only the non-negative frequency terms.
"""
a = asarray(a).astype(float)
return _raw_fft(a, n, axis, fftpack.rffti, fftpack.rfftf, _real_fft_cache)
def irfft(a, n=None, axis=-1):
"""
Compute the inverse of the n-point DFT for real input.
This function computes the inverse of the one-dimensional *n*-point
discrete Fourier Transform of real input computed by `rfft`.
In other words, ``irfft(rfft(a), len(a)) == a`` to within numerical
accuracy. (See Notes below for why ``len(a)`` is necessary here.)
The input is expected to be in the form returned by `rfft`, i.e. the
real zero-frequency term followed by the complex positive frequency terms
in order of increasing frequency. Since the discrete Fourier Transform of
real input is Hermitian-symmetric, the negative frequency terms are taken
to be the complex conjugates of the corresponding positive frequency terms.
Parameters
----------
a : array_like
The input array.
n : int, optional
Length of the transformed axis of the output.
For `n` output points, ``n//2+1`` input points are necessary. If the
input is longer than this, it is cropped. If it is shorter than this,
it is padded with zeros. If `n` is not given, it is determined from
the length of the input along the axis specified by `axis`.
axis : int, optional
Axis over which to compute the inverse FFT. If not given, the last
axis is used.
Returns
-------
out : ndarray
The truncated or zero-padded input, transformed along the axis
indicated by `axis`, or the last one if `axis` is not specified.
The length of the transformed axis is `n`, or, if `n` is not given,
``2*(m-1)`` where ``m`` is the length of the transformed axis of the
input. To get an odd number of output points, `n` must be specified.
Raises
------
IndexError
If `axis` is larger than the last axis of `a`.
See Also
--------
numpy.fft : For definition of the DFT and conventions used.
rfft : The one-dimensional FFT of real input, of which `irfft` is inverse.
fft : The one-dimensional FFT.
irfft2 : The inverse of the two-dimensional FFT of real input.
irfftn : The inverse of the *n*-dimensional FFT of real input.
Notes
-----
Returns the real valued `n`-point inverse discrete Fourier transform
of `a`, where `a` contains the non-negative frequency terms of a
Hermitian-symmetric sequence. `n` is the length of the result, not the
input.
If you specify an `n` such that `a` must be zero-padded or truncated, the
extra/removed values will be added/removed at high frequencies. One can
thus resample a series to `m` points via Fourier interpolation by:
``a_resamp = irfft(rfft(a), m)``.
Examples
--------
>>> np.fft.ifft([1, -1j, -1, 1j])
array([ 0.+0.j, 1.+0.j, 0.+0.j, 0.+0.j])
>>> np.fft.irfft([1, -1j, -1])
array([ 0., 1., 0., 0.])
Notice how the last term in the input to the ordinary `ifft` is the
complex conjugate of the second term, and the output has zero imaginary
part everywhere. When calling `irfft`, the negative frequencies are not
specified, and the output array is purely real.
"""
a = asarray(a).astype(complex)
if n is None:
n = (shape(a)[axis] - 1) * 2
return _raw_fft(a, n, axis, fftpack.rffti, fftpack.rfftb,
_real_fft_cache) / n
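# --- Editor's sketch, not part of NumPy: the Fourier-interpolation resampling
# trick mentioned in the Notes of `irfft` above. The sample values are made up
# for illustration. Upsampling a 4-point sine to 8 points recovers the original
# samples at the even indices; note the extra amplitude rescaling by the length
# ratio m/n, which the one-liner in the Notes leaves to the caller:
#
#     >>> import numpy as np
#     >>> a = np.array([0., 1., 0., -1.])           # one period of a sine
#     >>> a_resamp = np.fft.irfft(np.fft.rfft(a), 8) * (8 / 4.)
#     >>> np.allclose(a_resamp[::2], a)
#     True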
def hfft(a, n=None, axis=-1):
"""
Compute the FFT of a signal which has Hermitian symmetry (real spectrum).
Parameters
----------
a : array_like
The input array.
n : int, optional
Length of the transformed axis of the output.
For `n` output points, ``n//2+1`` input points are necessary. If the
input is longer than this, it is cropped. If it is shorter than this,
it is padded with zeros. If `n` is not given, it is determined from
the length of the input along the axis specified by `axis`.
axis : int, optional
Axis over which to compute the FFT. If not given, the last
axis is used.
Returns
-------
out : ndarray
The truncated or zero-padded input, transformed along the axis
indicated by `axis`, or the last one if `axis` is not specified.
The length of the transformed axis is `n`, or, if `n` is not given,
``2*(m-1)`` where ``m`` is the length of the transformed axis of the
input. To get an odd number of output points, `n` must be specified.
Raises
------
IndexError
If `axis` is larger than the last axis of `a`.
See also
--------
rfft : Compute the one-dimensional FFT for real input.
ihfft : The inverse of `hfft`.
Notes
-----
`hfft`/`ihfft` are a pair analogous to `rfft`/`irfft`, but for the
opposite case: here the signal has Hermitian symmetry in the time domain
and is real in the frequency domain. So here it's `hfft` for which
you must supply the length of the result if it is to be odd:
``ihfft(hfft(a), len(a)) == a``, within numerical accuracy.
Examples
--------
>>> signal = np.array([1, 2, 3, 4, 3, 2])
>>> np.fft.fft(signal)
array([ 15.+0.j, -4.+0.j, 0.+0.j, -1.-0.j, 0.+0.j, -4.+0.j])
>>> np.fft.hfft(signal[:4]) # Input first half of signal
array([ 15., -4., 0., -1., 0., -4.])
>>> np.fft.hfft(signal, 6) # Input entire signal and truncate
array([ 15., -4., 0., -1., 0., -4.])
>>> signal = np.array([[1, 1.j], [-1.j, 2]])
>>> np.conj(signal.T) - signal # check Hermitian symmetry
array([[ 0.-0.j, 0.+0.j],
[ 0.+0.j, 0.-0.j]])
>>> freq_spectrum = np.fft.hfft(signal)
>>> freq_spectrum
array([[ 1., 1.],
[ 2., -2.]])
"""
a = asarray(a).astype(complex)
if n is None:
n = (shape(a)[axis] - 1) * 2
return irfft(conjugate(a), n, axis) * n
def ihfft(a, n=None, axis=-1):
"""
Compute the inverse FFT of a signal which has Hermitian symmetry.
Parameters
----------
a : array_like
Input array.
n : int, optional
Length of the inverse FFT.
Number of points along transformation axis in the input to use.
If `n` is smaller than the length of the input, the input is cropped.
If it is larger, the input is padded with zeros. If `n` is not given,
the length of the input along the axis specified by `axis` is used.
axis : int, optional
Axis over which to compute the inverse FFT. If not given, the last
axis is used.
Returns
-------
out : complex ndarray
The truncated or zero-padded input, transformed along the axis
indicated by `axis`, or the last one if `axis` is not specified.
If `n` is even, the length of the transformed axis is ``(n/2)+1``.
If `n` is odd, the length is ``(n+1)/2``.
See also
--------
hfft, irfft
Notes
-----
`hfft`/`ihfft` are a pair analogous to `rfft`/`irfft`, but for the
opposite case: here the signal has Hermitian symmetry in the time domain
and is real in the frequency domain. So here it's `hfft` for which
you must supply the length of the result if it is to be odd:
``ihfft(hfft(a), len(a)) == a``, within numerical accuracy.
Examples
--------
>>> spectrum = np.array([ 15, -4, 0, -1, 0, -4])
>>> np.fft.ifft(spectrum)
array([ 1.+0.j, 2.-0.j, 3.+0.j, 4.+0.j, 3.+0.j, 2.-0.j])
>>> np.fft.ihfft(spectrum)
array([ 1.-0.j, 2.-0.j, 3.-0.j, 4.-0.j])
"""
a = asarray(a).astype(float)
if n is None:
n = shape(a)[axis]
return conjugate(rfft(a, n, axis))/n
def _cook_nd_args(a, s=None, axes=None, invreal=0):
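    # Normalize the shared (s, axes) arguments of the n-dimensional
    # transforms: `axes` defaults to the last len(s) axes (or all axes),
    # `s` defaults to the input shape along those axes, and for the inverse
    # real transform (invreal) the original length of the last axis is
    # recovered from the Hermitian half-spectrum as 2*(m - 1).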
if s is None:
shapeless = 1
if axes is None:
s = list(a.shape)
else:
s = take(a.shape, axes)
else:
shapeless = 0
s = list(s)
if axes is None:
axes = list(range(-len(s), 0))
if len(s) != len(axes):
raise ValueError("Shape and axes have different lengths.")
if invreal and shapeless:
s[-1] = (a.shape[axes[-1]] - 1) * 2
return s, axes
def _raw_fftnd(a, s=None, axes=None, function=fft):
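    # Apply the 1-d transform `function` along each requested axis in turn,
    # iterating in reverse so that the last axis is transformed first.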
a = asarray(a)
s, axes = _cook_nd_args(a, s, axes)
itl = list(range(len(axes)))
itl.reverse()
for ii in itl:
a = function(a, n=s[ii], axis=axes[ii])
return a
def fftn(a, s=None, axes=None):
"""
Compute the N-dimensional discrete Fourier Transform.
This function computes the *N*-dimensional discrete Fourier Transform over
any number of axes in an *M*-dimensional array by means of the Fast Fourier
Transform (FFT).
Parameters
----------
a : array_like
Input array, can be complex.
s : sequence of ints, optional
Shape (length of each transformed axis) of the output
(`s[0]` refers to axis 0, `s[1]` to axis 1, etc.).
This corresponds to `n` for `fft(x, n)`.
Along any axis, if the given shape is smaller than that of the input,
the input is cropped. If it is larger, the input is padded with zeros.
if `s` is not given, the shape of the input along the axes specified
by `axes` is used.
axes : sequence of ints, optional
Axes over which to compute the FFT. If not given, the last ``len(s)``
axes are used, or all axes if `s` is also not specified.
Repeated indices in `axes` means that the transform over that axis is
performed multiple times.
Returns
-------
out : complex ndarray
The truncated or zero-padded input, transformed along the axes
indicated by `axes`, or by a combination of `s` and `a`,
as explained in the parameters section above.
Raises
------
ValueError
If `s` and `axes` have different length.
IndexError
If an element of `axes` is larger than the number of axes of `a`.
See Also
--------
numpy.fft : Overall view of discrete Fourier transforms, with definitions
and conventions used.
ifftn : The inverse of `fftn`, the inverse *n*-dimensional FFT.
fft : The one-dimensional FFT, with definitions and conventions used.
rfftn : The *n*-dimensional FFT of real input.
fft2 : The two-dimensional FFT.
fftshift : Shifts zero-frequency terms to centre of array
Notes
-----
The output, analogously to `fft`, contains the term for zero frequency in
the low-order corner of all axes, the positive frequency terms in the
first half of all axes, the term for the Nyquist frequency in the middle
of all axes and the negative frequency terms in the second half of all
axes, in order of decreasingly negative frequency.
See `numpy.fft` for details, definitions and conventions used.
Examples
--------
>>> a = np.mgrid[:3, :3, :3][0]
>>> np.fft.fftn(a, axes=(1, 2))
array([[[ 0.+0.j, 0.+0.j, 0.+0.j],
[ 0.+0.j, 0.+0.j, 0.+0.j],
[ 0.+0.j, 0.+0.j, 0.+0.j]],
[[ 9.+0.j, 0.+0.j, 0.+0.j],
[ 0.+0.j, 0.+0.j, 0.+0.j],
[ 0.+0.j, 0.+0.j, 0.+0.j]],
[[ 18.+0.j, 0.+0.j, 0.+0.j],
[ 0.+0.j, 0.+0.j, 0.+0.j],
[ 0.+0.j, 0.+0.j, 0.+0.j]]])
>>> np.fft.fftn(a, (2, 2), axes=(0, 1))
array([[[ 2.+0.j, 2.+0.j, 2.+0.j],
[ 0.+0.j, 0.+0.j, 0.+0.j]],
[[-2.+0.j, -2.+0.j, -2.+0.j],
[ 0.+0.j, 0.+0.j, 0.+0.j]]])
>>> import matplotlib.pyplot as plt
>>> [X, Y] = np.meshgrid(2 * np.pi * np.arange(200) / 12,
... 2 * np.pi * np.arange(200) / 34)
>>> S = np.sin(X) + np.cos(Y) + np.random.uniform(0, 1, X.shape)
>>> FS = np.fft.fftn(S)
>>> plt.imshow(np.log(np.abs(np.fft.fftshift(FS))**2))
<matplotlib.image.AxesImage object at 0x...>
>>> plt.show()
"""
return _raw_fftnd(a, s, axes, fft)
def ifftn(a, s=None, axes=None):
"""
Compute the N-dimensional inverse discrete Fourier Transform.
This function computes the inverse of the N-dimensional discrete
Fourier Transform over any number of axes in an M-dimensional array by
means of the Fast Fourier Transform (FFT). In other words,
``ifftn(fftn(a)) == a`` to within numerical accuracy.
For a description of the definitions and conventions used, see `numpy.fft`.
The input, analogously to `ifft`, should be ordered in the same way as is
returned by `fftn`, i.e. it should have the term for zero frequency
in all axes in the low-order corner, the positive frequency terms in the
first half of all axes, the term for the Nyquist frequency in the middle
of all axes and the negative frequency terms in the second half of all
axes, in order of decreasingly negative frequency.
Parameters
----------
a : array_like
Input array, can be complex.
s : sequence of ints, optional
Shape (length of each transformed axis) of the output
(``s[0]`` refers to axis 0, ``s[1]`` to axis 1, etc.).
This corresponds to ``n`` for ``ifft(x, n)``.
Along any axis, if the given shape is smaller than that of the input,
the input is cropped. If it is larger, the input is padded with zeros.
if `s` is not given, the shape of the input along the axes specified
by `axes` is used. See notes for issue on `ifft` zero padding.
axes : sequence of ints, optional
Axes over which to compute the IFFT. If not given, the last ``len(s)``
axes are used, or all axes if `s` is also not specified.
Repeated indices in `axes` means that the inverse transform over that
axis is performed multiple times.
Returns
-------
out : complex ndarray
The truncated or zero-padded input, transformed along the axes
indicated by `axes`, or by a combination of `s` or `a`,
as explained in the parameters section above.
Raises
------
ValueError
If `s` and `axes` have different length.
IndexError
If an element of `axes` is larger than the number of axes of `a`.
See Also
--------
numpy.fft : Overall view of discrete Fourier transforms, with definitions
and conventions used.
fftn : The forward *n*-dimensional FFT, of which `ifftn` is the inverse.
ifft : The one-dimensional inverse FFT.
ifft2 : The two-dimensional inverse FFT.
ifftshift : Undoes `fftshift`, shifts zero-frequency terms to beginning
of array.
Notes
-----
See `numpy.fft` for definitions and conventions used.
Zero-padding, analogously with `ifft`, is performed by appending zeros to
the input along the specified dimension. Although this is the common
approach, it might lead to surprising results. If another form of zero
padding is desired, it must be performed before `ifftn` is called.
Examples
--------
>>> a = np.eye(4)
>>> np.fft.ifftn(np.fft.fftn(a, axes=(0,)), axes=(1,))
array([[ 1.+0.j, 0.+0.j, 0.+0.j, 0.+0.j],
[ 0.+0.j, 1.+0.j, 0.+0.j, 0.+0.j],
[ 0.+0.j, 0.+0.j, 1.+0.j, 0.+0.j],
[ 0.+0.j, 0.+0.j, 0.+0.j, 1.+0.j]])
Create and plot an image with band-limited frequency content:
>>> import matplotlib.pyplot as plt
>>> n = np.zeros((200,200), dtype=complex)
>>> n[60:80, 20:40] = np.exp(1j*np.random.uniform(0, 2*np.pi, (20, 20)))
>>> im = np.fft.ifftn(n).real
>>> plt.imshow(im)
<matplotlib.image.AxesImage object at 0x...>
>>> plt.show()
"""
return _raw_fftnd(a, s, axes, ifft)
def fft2(a, s=None, axes=(-2, -1)):
"""
Compute the 2-dimensional discrete Fourier Transform
This function computes the *n*-dimensional discrete Fourier Transform
over any axes in an *M*-dimensional array by means of the
Fast Fourier Transform (FFT). By default, the transform is computed over
the last two axes of the input array, i.e., a 2-dimensional FFT.
Parameters
----------
a : array_like
Input array, can be complex
s : sequence of ints, optional
Shape (length of each transformed axis) of the output
(`s[0]` refers to axis 0, `s[1]` to axis 1, etc.).
This corresponds to `n` for `fft(x, n)`.
Along each axis, if the given shape is smaller than that of the input,
the input is cropped. If it is larger, the input is padded with zeros.
if `s` is not given, the shape of the input along the axes specified
by `axes` is used.
axes : sequence of ints, optional
Axes over which to compute the FFT. If not given, the last two
axes are used. A repeated index in `axes` means the transform over
that axis is performed multiple times. A one-element sequence means
that a one-dimensional FFT is performed.
Returns
-------
out : complex ndarray
The truncated or zero-padded input, transformed along the axes
indicated by `axes`, or the last two axes if `axes` is not given.
Raises
------
ValueError
If `s` and `axes` have different length, or `axes` not given and
``len(s) != 2``.
IndexError
If an element of `axes` is larger than the number of axes of `a`.
See Also
--------
numpy.fft : Overall view of discrete Fourier transforms, with definitions
and conventions used.
ifft2 : The inverse two-dimensional FFT.
fft : The one-dimensional FFT.
fftn : The *n*-dimensional FFT.
fftshift : Shifts zero-frequency terms to the center of the array.
For two-dimensional input, swaps first and third quadrants, and second
and fourth quadrants.
Notes
-----
`fft2` is just `fftn` with a different default for `axes`.
The output, analogously to `fft`, contains the term for zero frequency in
the low-order corner of the transformed axes, the positive frequency terms
in the first half of these axes, the term for the Nyquist frequency in the
middle of the axes and the negative frequency terms in the second half of
the axes, in order of decreasingly negative frequency.
See `fftn` for details and a plotting example, and `numpy.fft` for
definitions and conventions used.
Examples
--------
>>> a = np.mgrid[:5, :5][0]
>>> np.fft.fft2(a)
array([[ 50.0 +0.j , 0.0 +0.j , 0.0 +0.j ,
0.0 +0.j , 0.0 +0.j ],
[-12.5+17.20477401j, 0.0 +0.j , 0.0 +0.j ,
0.0 +0.j , 0.0 +0.j ],
[-12.5 +4.0614962j , 0.0 +0.j , 0.0 +0.j ,
0.0 +0.j , 0.0 +0.j ],
[-12.5 -4.0614962j , 0.0 +0.j , 0.0 +0.j ,
0.0 +0.j , 0.0 +0.j ],
[-12.5-17.20477401j, 0.0 +0.j , 0.0 +0.j ,
0.0 +0.j , 0.0 +0.j ]])
"""
return _raw_fftnd(a, s, axes, fft)
def ifft2(a, s=None, axes=(-2, -1)):
"""
Compute the 2-dimensional inverse discrete Fourier Transform.
This function computes the inverse of the 2-dimensional discrete Fourier
Transform over any number of axes in an M-dimensional array by means of
the Fast Fourier Transform (FFT). In other words, ``ifft2(fft2(a)) == a``
to within numerical accuracy. By default, the inverse transform is
computed over the last two axes of the input array.
The input, analogously to `ifft`, should be ordered in the same way as is
returned by `fft2`, i.e. it should have the term for zero frequency
in the low-order corner of the two axes, the positive frequency terms in
the first half of these axes, the term for the Nyquist frequency in the
middle of the axes and the negative frequency terms in the second half of
both axes, in order of decreasingly negative frequency.
Parameters
----------
a : array_like
Input array, can be complex.
s : sequence of ints, optional
Shape (length of each axis) of the output (``s[0]`` refers to axis 0,
``s[1]`` to axis 1, etc.). This corresponds to `n` for ``ifft(x, n)``.
Along each axis, if the given shape is smaller than that of the input,
the input is cropped. If it is larger, the input is padded with zeros.
if `s` is not given, the shape of the input along the axes specified
by `axes` is used. See notes for issue on `ifft` zero padding.
axes : sequence of ints, optional
Axes over which to compute the FFT. If not given, the last two
axes are used. A repeated index in `axes` means the transform over
that axis is performed multiple times. A one-element sequence means
that a one-dimensional FFT is performed.
Returns
-------
out : complex ndarray
The truncated or zero-padded input, transformed along the axes
indicated by `axes`, or the last two axes if `axes` is not given.
Raises
------
ValueError
If `s` and `axes` have different length, or `axes` not given and
``len(s) != 2``.
IndexError
If an element of `axes` is larger than the number of axes of `a`.
See Also
--------
numpy.fft : Overall view of discrete Fourier transforms, with definitions
and conventions used.
fft2 : The forward 2-dimensional FFT, of which `ifft2` is the inverse.
ifftn : The inverse of the *n*-dimensional FFT.
fft : The one-dimensional FFT.
ifft : The one-dimensional inverse FFT.
Notes
-----
`ifft2` is just `ifftn` with a different default for `axes`.
See `ifftn` for details and a plotting example, and `numpy.fft` for
definition and conventions used.
Zero-padding, analogously with `ifft`, is performed by appending zeros to
the input along the specified dimension. Although this is the common
approach, it might lead to surprising results. If another form of zero
padding is desired, it must be performed before `ifft2` is called.
Examples
--------
>>> a = 4 * np.eye(4)
>>> np.fft.ifft2(a)
array([[ 1.+0.j, 0.+0.j, 0.+0.j, 0.+0.j],
[ 0.+0.j, 0.+0.j, 0.+0.j, 1.+0.j],
[ 0.+0.j, 0.+0.j, 1.+0.j, 0.+0.j],
[ 0.+0.j, 1.+0.j, 0.+0.j, 0.+0.j]])
"""
return _raw_fftnd(a, s, axes, ifft)
def rfftn(a, s=None, axes=None):
"""
Compute the N-dimensional discrete Fourier Transform for real input.
This function computes the N-dimensional discrete Fourier Transform over
any number of axes in an M-dimensional real array by means of the Fast
Fourier Transform (FFT). By default, all axes are transformed, with the
real transform performed over the last axis, while the remaining
transforms are complex.
Parameters
----------
a : array_like
Input array, taken to be real.
s : sequence of ints, optional
Shape (length along each transformed axis) to use from the input.
(``s[0]`` refers to axis 0, ``s[1]`` to axis 1, etc.).
The final element of `s` corresponds to `n` for ``rfft(x, n)``, while
for the remaining axes, it corresponds to `n` for ``fft(x, n)``.
Along any axis, if the given shape is smaller than that of the input,
the input is cropped. If it is larger, the input is padded with zeros.
if `s` is not given, the shape of the input along the axes specified
by `axes` is used.
axes : sequence of ints, optional
Axes over which to compute the FFT. If not given, the last ``len(s)``
axes are used, or all axes if `s` is also not specified.
Returns
-------
out : complex ndarray
The truncated or zero-padded input, transformed along the axes
indicated by `axes`, or by a combination of `s` and `a`,
as explained in the parameters section above.
The length of the last axis transformed will be ``s[-1]//2+1``,
while the remaining transformed axes will have lengths according to
`s`, or unchanged from the input.
Raises
------
ValueError
If `s` and `axes` have different length.
IndexError
If an element of `axes` is larger than the number of axes of `a`.
See Also
--------
irfftn : The inverse of `rfftn`, i.e. the inverse of the n-dimensional FFT
of real input.
fft : The one-dimensional FFT, with definitions and conventions used.
rfft : The one-dimensional FFT of real input.
fftn : The n-dimensional FFT.
rfft2 : The two-dimensional FFT of real input.
Notes
-----
The transform for real input is performed over the last transformation
axis, as by `rfft`, then the transform over the remaining axes is
performed as by `fftn`. The order of the output is as for `rfft` for the
final transformation axis, and as for `fftn` for the remaining
transformation axes.
See `fft` for details, definitions and conventions used.
Examples
--------
>>> a = np.ones((2, 2, 2))
>>> np.fft.rfftn(a)
array([[[ 8.+0.j, 0.+0.j],
[ 0.+0.j, 0.+0.j]],
[[ 0.+0.j, 0.+0.j],
[ 0.+0.j, 0.+0.j]]])
>>> np.fft.rfftn(a, axes=(2, 0))
array([[[ 4.+0.j, 0.+0.j],
[ 4.+0.j, 0.+0.j]],
[[ 0.+0.j, 0.+0.j],
[ 0.+0.j, 0.+0.j]]])
"""
a = asarray(a).astype(float)
s, axes = _cook_nd_args(a, s, axes)
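    # Real-input transform over the last axis first; the remaining axes are
    # then handled by the complex FFT, as described in the Notes above.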
a = rfft(a, s[-1], axes[-1])
for ii in range(len(axes)-1):
a = fft(a, s[ii], axes[ii])
return a
def rfft2(a, s=None, axes=(-2, -1)):
"""
Compute the 2-dimensional FFT of a real array.
Parameters
----------
a : array
Input array, taken to be real.
s : sequence of ints, optional
Shape of the FFT.
axes : sequence of ints, optional
Axes over which to compute the FFT.
Returns
-------
out : ndarray
The result of the real 2-D FFT.
See Also
--------
rfftn : Compute the N-dimensional discrete Fourier Transform for real
input.
Notes
-----
This is really just `rfftn` with different default behavior.
For more details see `rfftn`.
"""
return rfftn(a, s, axes)
def irfftn(a, s=None, axes=None):
"""
Compute the inverse of the N-dimensional FFT of real input.
This function computes the inverse of the N-dimensional discrete
Fourier Transform for real input over any number of axes in an
M-dimensional array by means of the Fast Fourier Transform (FFT). In
other words, ``irfftn(rfftn(a), a.shape) == a`` to within numerical
accuracy. (The ``a.shape`` is necessary like ``len(a)`` is for `irfft`,
and for the same reason.)
The input should be ordered in the same way as is returned by `rfftn`,
i.e. as for `irfft` for the final transformation axis, and as for `ifftn`
along all the other axes.
Parameters
----------
a : array_like
Input array.
s : sequence of ints, optional
Shape (length of each transformed axis) of the output
(``s[0]`` refers to axis 0, ``s[1]`` to axis 1, etc.). `s` is also the
number of input points used along this axis, except for the last axis,
where ``s[-1]//2+1`` points of the input are used.
Along any axis, if the shape indicated by `s` is smaller than that of
the input, the input is cropped. If it is larger, the input is padded
with zeros. If `s` is not given, the shape of the input along the
axes specified by `axes` is used.
axes : sequence of ints, optional
Axes over which to compute the inverse FFT. If not given, the last
`len(s)` axes are used, or all axes if `s` is also not specified.
Repeated indices in `axes` means that the inverse transform over that
axis is performed multiple times.
Returns
-------
out : ndarray
The truncated or zero-padded input, transformed along the axes
indicated by `axes`, or by a combination of `s` or `a`,
as explained in the parameters section above.
The length of each transformed axis is as given by the corresponding
element of `s`, or the length of the input in every axis except for the
last one if `s` is not given. In the final transformed axis the length
of the output when `s` is not given is ``2*(m-1)`` where ``m`` is the
length of the final transformed axis of the input. To get an odd
number of output points in the final axis, `s` must be specified.
Raises
------
ValueError
If `s` and `axes` have different length.
IndexError
If an element of `axes` is larger than the number of axes of `a`.
See Also
--------
rfftn : The forward n-dimensional FFT of real input,
of which `ifftn` is the inverse.
fft : The one-dimensional FFT, with definitions and conventions used.
irfft : The inverse of the one-dimensional FFT of real input.
irfft2 : The inverse of the two-dimensional FFT of real input.
Notes
-----
See `fft` for definitions and conventions used.
See `rfft` for definitions and conventions used for real input.
Examples
--------
>>> a = np.zeros((3, 2, 2))
>>> a[0, 0, 0] = 3 * 2 * 2
>>> np.fft.irfftn(a)
array([[[ 1., 1.],
[ 1., 1.]],
[[ 1., 1.],
[ 1., 1.]],
[[ 1., 1.],
[ 1., 1.]]])
"""
a = asarray(a).astype(complex)
s, axes = _cook_nd_args(a, s, axes, invreal=1)
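    # Reverse of `rfftn`: invert the complex transforms over the leading
    # axes first, then apply the real inverse transform over the last axis.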
for ii in range(len(axes)-1):
a = ifft(a, s[ii], axes[ii])
a = irfft(a, s[-1], axes[-1])
return a
def irfft2(a, s=None, axes=(-2, -1)):
"""
Compute the 2-dimensional inverse FFT of a real array.
Parameters
----------
a : array_like
The input array
s : sequence of ints, optional
Shape of the inverse FFT.
axes : sequence of ints, optional
The axes over which to compute the inverse fft.
Default is the last two axes.
Returns
-------
out : ndarray
The result of the inverse real 2-D FFT.
See Also
--------
irfftn : Compute the inverse of the N-dimensional FFT of real input.
Notes
-----
This is really `irfftn` with different defaults.
For more details see `irfftn`.
"""
return irfftn(a, s, axes)
| mit |
arnavd96/Cinemiezer | myvenv/lib/python3.4/site-packages/moviepy/video/io/sliders.py | 16 | 2196 | import matplotlib.pyplot as plt
from matplotlib.widgets import Slider, Button
def sliders(f, sliders_properties, wait_for_validation = False):
""" A light GUI to manually explore and tune the outputs of
a function.
sliders_properties is a list of dicts (keyword arguments for Slider)
def volume(x,y,z):
return x*y*z
intervals = [ { 'label' : 'width', 'valmin': 1 , 'valmax': 5 },
{ 'label' : 'height', 'valmin': 1 , 'valmax': 5 },
{ 'label' : 'depth', 'valmin': 1 , 'valmax': 5 } ]
sliders(volume, intervals)
"""
nVars = len(sliders_properties)
slider_width = 1.0/nVars
# CREATE THE CANVAS
figure,ax = plt.subplots(1)
figure.canvas.set_window_title("Inputs for '%s'" % f.__name__)
# choose an appropriate height
width,height = figure.get_size_inches()
height = min(0.5*nVars,8)
figure.set_size_inches(width,height,forward = True)
# hide the axis
ax.set_frame_on(False)
ax.get_xaxis().set_visible(False)
ax.get_yaxis().set_visible(False)
# CREATE THE SLIDERS
sliders = []
for i, properties in enumerate(sliders_properties):
ax = plt.axes([0.1 , 0.95-0.9*(i+1)*slider_width,
0.8 , 0.8* slider_width])
if not isinstance(properties, dict):
properties = dict(zip(['label', 'valmin', 'valmax', 'valinit'],
properties))
sliders.append( Slider(ax=ax, **properties) )
# CREATE THE CALLBACK FUNCTIONS
def on_changed(event) :
res = f(*(s.val for s in sliders))
if res is not None:
print( res )
def on_key_press(event):
if event.key == 'enter':
on_changed(event)
figure.canvas.mpl_connect('key_press_event', on_key_press)
# AUTOMATIC UPDATE ?
if not wait_for_validation:
for s in sliders :
s.on_changed(on_changed)
# DISPLAY THE SLIDERS
plt.show()
| mit |
cmohl2013/permutation_test | permutation_test/functions.py | 1 | 14269 | #import pandas as pd
import numpy as np
import random
import permutation_test.ap as ap
#import warnings
from operator import mul
from fractions import Fraction
import functools
from operator import itemgetter
import collections
from numpy.random import normal
def permutationtest(data, ref_data, detailed=False, n_combinations_max=20000, verbose=True, n_bins=None):
'''
Christoph Moehl, Image and Data Analysis Facility, DZNE Bonn, Germany
christoph.moehl(at)dzne.de
Implementation of Fisher's permutation test.
If detailed is False, only (two-sided) p_value is returned,
i.e. the probability that data is not different from ref_data
If detailed is True, one-sided p values and histogram data of
mean differences is returned in a dict:
hist_data: distribution of mean differences for all permutations
p_value: two sided p_value (the probability that data is not
different from ref_data )
p_value_lower_than: the probability that mean of data is not
lower than mean of ref_data
p_value_greater_than: the probability that mean of data is
not greater than mean of ref_data
If the number of possible combinations is greater than n_combinations_max,
a random subsample of size n_combinations_max is taken for histogram calculation.
According to the following publications:
Fisher, R. A. (1935). The design of experiments. 1935.
Oliver and Boyd, Edinburgh.
Ernst, M. D. (2004). Permutation methods: a basis for exact inference.
Statistical Science, 19(4), 676-685
'''
mean_diffs = getMeanDiffListForAllPermutations(data, ref_data\
, n_combinations_max=n_combinations_max)
print('nr of mean diffs: ' + str(len(mean_diffs)))
freq, vals, edges = getHistogramFreqAndCenters(mean_diffs, n_bins=n_bins)
if (not is_list_of_tuples(data)) and (not is_list_of_tuples(ref_data)):
#if data without error
mean_diff = getDiffOfMean(data, ref_data)
p_value_lower_than, p_value_greater_than = calc_pval(mean_diff, freq, vals)
else:
#if data with error
freq_s, _, _ = getHistForDatWithErr(data, ref_data, edges)
p_value_lower_than, p_value_greater_than = calc_pval_with_err(freq_s, freq, vals)
mean_diff = np.average(vals, weights=freq_s) #ca. mean diff for report
p_value = min((p_value_lower_than, p_value_greater_than))
if verbose:
p = ap.AFigure()
print('\n\n Distribution of mean differences')
print(p.plot(vals, freq, marker = '*'))
print('mean difference of tested dataset: ' + str(mean_diff))
print('p_value: ' + str(p_value))
print('p_lower_than (probability that mean of test data is not lower than mean of ref data): ' + str(p_value_lower_than))
print('p_value_greater_than (probability that mean of test data is not greater than mean of ref data): ' + str(p_value_greater_than))
if detailed:
result = {'hist_data' : (freq, vals)\
, 'mean_difference' : mean_diff\
, 'p_value' : p_value\
, 'p_value_lower_than' : p_value_lower_than\
, 'p_value_greater_than' : p_value_greater_than\
}
return result
return p_value
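# --- Editor's usage sketch, not part of the package; the two samples below
# are made up for illustration:
#
#     >>> data = [2.1, 2.4, 1.9, 2.6, 2.3]
#     >>> ref_data = [1.1, 1.4, 0.9, 1.3, 1.2]
#     >>> p = permutationtest(data, ref_data, verbose=False)
#
# `p` is the two-sided p-value, i.e. the probability that `data` is not
# different from `ref_data`, estimated from the permutation distribution
# of mean differences.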
def calc_pval_with_err(freq_s, freq, vals):
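    # `freq_s` is the distribution of the (randomized) observed mean
    # difference. The one-sided p-value is the null cumulative distribution
    # averaged over the bins, weighted by freq_s * freq.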
bin_width = getBinWidth(vals)
cum_freq = np.cumsum(freq) * bin_width #cumulative histogram values
weights = freq_s * freq
p_value_lower_than = np.average(cum_freq, weights=weights)
p_value_greater_than = 1-p_value_lower_than
return p_value_lower_than, p_value_greater_than
def calc_pval(mean_diff, freq, vals):
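    # Locate `mean_diff` within the normalized null histogram (freq, vals)
    # and read the one-sided tail probabilities off its cumulative sum.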
bin_width = getBinWidth(vals)
cum_freq = np.cumsum(freq) * bin_width #cumulative histogram values
greater_than_index = mean_diff > vals
lower_than_index = mean_diff < vals
if lower_than_index.all() and not greater_than_index.all():
p_value_lower_than = 0
p_value_greater_than = 1
elif not lower_than_index.all() and greater_than_index.all():
p_value_lower_than = 1
p_value_greater_than = 0
else:
p_value_lower_than = cum_freq[lower_than_index][0]
p_value_greater_than = 1-cum_freq[greater_than_index][-1]
return p_value_lower_than, p_value_greater_than
def getBinWidth(vals):
if len(vals)<2:
return 1
return vals[1]-vals[0]
# def getPermutations(lst, num=None):
# if num is None:
# return list(itertools.permutations(lst, len(lst)))
# return list(itertools.permutations(lst, num))
def getHistForDatWithErr(lst_1, lst_2, bin_edges, n_samples=10000):
mdiffs = [getDiffOfMeanRandomized(lst_1, lst_2) for i in range(n_samples)]
freq, bin_centers, edge = getHistogramFreqAndCenters(mdiffs, bin_edges=bin_edges)
return freq, bin_centers, edge
def getDiffOfMeanRandomized(lst_1, lst_2):
'''
randomized diff of mean for lists of (value, error) tuples
is calculated.
'''
if not is_list_of_tuples(lst_1) or not is_list_of_tuples(lst_2):
raise ValueError('input must be lists of tuples but is %s and %s'
% (lst_1, lst_2))
lst_1r, lst_2r = randomize_permutation_data((lst_1, lst_2))
return getDiffOfMean(lst_1r, lst_2r)
def getDiffOfMean(lst_1, lst_2):
'''
result = mean(lst_1) - mean(lst_2)
'''
if (len(lst_1)==0) or (len(lst_2)==0):
raise ValueError('empty list')
out = np.nanmean(lst_1) - np.nanmean(lst_2)
if np.isnan(out):
raise ValueError('one or more of the average values are NaN')
return out
def is_list_of_tuples(data, n=2):
try:
nr_el = list(map(len, data))
is_list_of_tuples = (np.array(nr_el)==n).all()
except:
is_list_of_tuples = False
return is_list_of_tuples
def check_data_format(permutations):
'''
returns 1 if data points are single values
returns 2 if the data points are tuples with (value,error)
returns 0 if data structure is not valid
'''
has_tuples = False
has_single_vals = False
for perm in permutations:
if len(perm)!=2:
#only 2 groups allowed
return 0
for group in perm:
for d in group:
if isinstance(d, collections.Iterable):
if len(d)!=2:
return 0
has_tuples = True
else:
has_single_vals = True
if has_tuples and not has_single_vals:
return 2
if not has_tuples and has_single_vals:
return 1
if has_tuples and has_single_vals:
return 0
def randomize_data_point(datapoint):
val = datapoint[0]
err = datapoint[1]
return normal(val, err)
def randomize_permutation_data(perm):
a = [randomize_data_point(dat) for dat in perm[0]]
b = [randomize_data_point(dat) for dat in perm[1]]
return (a, b)
def getMeanDiffListForAllPermutations(lst_1, lst_2, n_combinations_max = 20000):
'''
lst_1 and lst_2 are fused, and mean differences are calculated
for all possible permutations (or a random subsample of at most
n_combinations_max of them).
If the data points are (value, error) tuples, the data is randomized,
i.e. each (val, err) tuple is replaced by a normally distributed random
variable with mu=val and sigma=err.
'''
perms = getPerms(lst_1 + lst_2, len(lst_1), n_combinations_max=n_combinations_max)
format = check_data_format(perms)
if format == 1:
#if single values
mean_diffs = [getDiffOfMean(perm[0], perm[1]) for perm in perms]
return mean_diffs
if format == 2:
#if (value,error) tuples
perms_rand = [randomize_permutation_data(perm) for perm in perms]
mean_diffs = [getDiffOfMean(perm[0], perm[1]) for perm in perms_rand]
return mean_diffs
def permutations(n, g):
'''
returns a generator of permutations
n : number of elements (integer)
g : list of values to permute
example:
In [27]: import permutation_test as p
In [28]: perm_generator = p.permutations(3,[10,11,12,13])
In [29]: perms = list(perm_generator)
In [30]: perms
Out[30]: [[10, 11, 12], [10, 11, 13], [10, 12, 13], [11, 12, 13]]
'''
if n == 0:
yield []
for j, x in enumerate(g):
for v in permutations(n-1, g[j+1:]):
yield [x] + v
def getPerms(dat, n_of_group_a, n_combinations_max = 20000):
'''
combined permutation for 2 lists
dat: list to be permuted
n_of_group_a: index for splitting the list
If nr of possible combinations exceeds n_combinations_max, a random subsample
of size n_combinations_max is chosen.
example:
In [33]: p.getPerms([1,2,3,4,5],3)
Out[33]:
[([1, 2, 3], [4, 5]),
([1, 2, 4], [3, 5]),
([1, 2, 5], [3, 4]),
([1, 3, 4], [2, 5]),
([1, 3, 5], [2, 4]),
([1, 4, 5], [2, 3]),
([2, 3, 4], [1, 5]),
([2, 3, 5], [1, 4]),
([2, 4, 5], [1, 3]),
([3, 4, 5], [1, 2])]
'''
dat_index = range(len(dat))
#nr of possible combinations
n_combinations = nCk(len(dat_index), n_of_group_a)
if n_combinations < n_combinations_max:
perm_list = list(permutations(n_of_group_a, dat_index))
else:
perms_iter = permutations(n_of_group_a, dat_index)
print('taking random subsample of size %s from %s possible permutations' % (n_combinations_max, n_combinations))
perm_list = iter_sample_fast(perms_iter, n_combinations_max)
combi = []
for perm in perm_list:
group_b = [dat[i] for i in dat_index if not i in perm]
if len(group_b) == 0:
raise ValueError('empty group in permutation split')
group_a = list(itemgetter(*perm)(dat))
combi.append((group_a, group_b))
return combi
def calc_bin_number(data):
'''
optimal bin number according to Freedman-Diaconis rule
'''
if len(np.unique(data))==1:
#if all values in the dataset are identical
return 1
#inter quartile range
iqr = np.percentile(data, 75) - np.percentile(data, 25)
n = len(data)
h = 2 * iqr/(float(n)**(1.0/3.0)) #bin width
n_bins = int(round((max(data) - min(data))/h))
# print 'n: ' + str(n)
# print 'iqr: ' + str(iqr)
# print 'n bins: ' + str(n_bins)
return n_bins
def getHistogramFreqAndCenters(data, n_bins=None, bin_edges=None):
'''
calculation of histogram (frequency and bin positions) with auto bin number
'''
if n_bins is None:
n_bins = calc_bin_number(data)
if n_bins < 10:
warn_message = \
'WARNING: Bin number is only %s : statistical analysis might be affected.' % n_bins
print(warn_message)
#warnings.warn(warn_message, UserWarning)
if bin_edges is not None:
freq, bin_edges = np.histogram(data\
, bins=bin_edges[1:-1]\
, normed=True)
else:
freq, bin_edges = np.histogram(data\
, bins=n_bins\
, normed=True)
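    # Pad the histogram with one empty bin on each side so that values just
    # outside the observed range map to zero probability density.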
freq = np.concatenate([[0],freq,[0]])
bin_width = getBinWidth(bin_edges)
bin_edge_lowest = bin_edges[0] - bin_width
bin_edge_highest = bin_edges[-1] + bin_width
bin_edges = np.concatenate([[bin_edge_lowest],bin_edges,[bin_edge_highest]])
bin_centers = np.diff(bin_edges)/2 + bin_edges[:-1]
return (freq, bin_centers, bin_edges)
def iter_sample_fast(iterable, samplesize):
'''
random subsampling from iterator
code snippet copied from DzinX on StackOverflow:
http://stackoverflow.com/questions/12581437/python-random-sample-with-a-generator
'''
results = []
iterator = iterable#iter(iterable)
# Fill in the first samplesize elements:
try:
for _ in range(samplesize):
results.append(next(iterator))
except StopIteration:
raise ValueError("Sample larger than population.")
random.shuffle(results) # Randomize their positions
for i, v in enumerate(iterator, samplesize):
r = random.randint(0, i)
if r < samplesize:
results[r] = v # at a decreasing rate, replace random items
return results
def nCk(n,k):
'''
calculate number of combinations 'n over k' (binomial coefficient)
'''
return int( functools.reduce(mul, (Fraction(n-i, i+1) for i in list(range(k))), 1) )
def benjamini_hochberg_procedure(pvalues, alpha=0.05):
'''
returns a boolean array indicating if the null hypothesis of this test
can be rejected.
Controls the false-discovery rate
'''
pvalues = np.array(pvalues)
m = pvalues.size
ranks = np.argsort(pvalues)
inverse_ranks = np.zeros(m, dtype=int)
inverse_ranks[ranks] = np.arange(m)
sorted_pvalues = pvalues[ranks]
found = False
for k, p in enumerate(sorted_pvalues):
t = float(k+1)/m*alpha
#print(k+1, float(k+1)/m, p, t)
if p > t:
found = True
break
if not found:
k = m
reject_null_hypo = False*np.ones_like(pvalues, dtype=bool)
reject_null_hypo[:k] = True
return reject_null_hypo[inverse_ranks]
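# --- Editor's usage sketch, not part of the package; the p-values are made
# up for illustration:
#
#     >>> pvals = [0.001, 0.008, 0.039, 0.041, 0.06, 0.074, 0.205]
#     >>> rejected = benjamini_hochberg_procedure(pvals, alpha=0.05)
#
# `rejected` is a boolean mask, aligned with the input order, marking the
# null hypotheses that are rejected while controlling the false-discovery
# rate at `alpha`.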
| mit |
samzhang111/scikit-learn | examples/covariance/plot_robust_vs_empirical_covariance.py | 248 | 6359 | r"""
=======================================
Robust vs Empirical covariance estimate
=======================================
The usual covariance maximum likelihood estimate is very sensitive to the
presence of outliers in the data set. In such a case, it would be better to
use a robust estimator of covariance to guarantee that the estimation is
resistant to "erroneous" observations in the data set.
Minimum Covariance Determinant Estimator
----------------------------------------
The Minimum Covariance Determinant estimator is a robust, high-breakdown point
(i.e. it can be used to estimate the covariance matrix of highly contaminated
datasets, up to
:math:`\frac{n_\text{samples} - n_\text{features}-1}{2}` outliers) estimator of
covariance. The idea is to find
:math:`\frac{n_\text{samples} + n_\text{features}+1}{2}`
observations whose empirical covariance has the smallest determinant, yielding
a "pure" subset of observations from which to compute standards estimates of
location and covariance. After a correction step aiming at compensating the
fact that the estimates were learned from only a portion of the initial data,
we end up with robust estimates of the data set location and covariance.
The Minimum Covariance Determinant estimator (MCD) has been introduced by
P.J. Rousseeuw in [1]_.
Evaluation
----------
In this example, we compare the estimation errors that are made when using
various types of location and covariance estimates on contaminated Gaussian
distributed data sets:
- The mean and the empirical covariance of the full dataset, which break
down as soon as there are outliers in the data set
- The robust MCD, that has a low error provided
:math:`n_\text{samples} > 5n_\text{features}`
- The mean and the empirical covariance of the observations that are known
to be good ones. This can be considered as a "perfect" MCD estimation,
so one can trust our implementation by comparing to this case.
References
----------
.. [1] P. J. Rousseeuw. Least median of squares regression. J. Am
Stat Ass, 79:871, 1984.
.. [2] Johanna Hardin, David M Rocke. Journal of Computational and
Graphical Statistics. December 1, 2005, 14(4): 928-946.
.. [3] Zoubir A., Koivunen V., Chakhchoukh Y. and Muma M. (2012). Robust
estimation in signal processing: A tutorial-style treatment of
fundamental concepts. IEEE Signal Processing Magazine 29(4), 61-80.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.font_manager
from sklearn.covariance import EmpiricalCovariance, MinCovDet
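# A minimal standalone sketch of the comparison below (data and numbers are
# illustrative): on a small contaminated sample the robust MCD location stays
# near the true mean while the plain empirical mean is dragged away:
#
#     rng0 = np.random.RandomState(0)
#     X0 = rng0.randn(100, 2)
#     X0[:10] += 10.                                  # 10% gross outliers
#     MinCovDet().fit(X0).location_                   # close to [0, 0]
#     EmpiricalCovariance().fit(X0).location_         # shifted toward the outliers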
# example settings
n_samples = 80
n_features = 5
repeat = 10
range_n_outliers = np.concatenate(
    (np.linspace(0, n_samples / 8, 5),
     np.linspace(n_samples / 8, n_samples / 2, 5)[1:-1])).astype(int)  # counts must be integers for indexing
# definition of arrays to store results
err_loc_mcd = np.zeros((range_n_outliers.size, repeat))
err_cov_mcd = np.zeros((range_n_outliers.size, repeat))
err_loc_emp_full = np.zeros((range_n_outliers.size, repeat))
err_cov_emp_full = np.zeros((range_n_outliers.size, repeat))
err_loc_emp_pure = np.zeros((range_n_outliers.size, repeat))
err_cov_emp_pure = np.zeros((range_n_outliers.size, repeat))
# computation
for i, n_outliers in enumerate(range_n_outliers):
for j in range(repeat):
rng = np.random.RandomState(i * j)
# generate data
X = rng.randn(n_samples, n_features)
# add some outliers
outliers_index = rng.permutation(n_samples)[:n_outliers]
        outliers_offset = 10. * \
            (rng.randint(2, size=(n_outliers, n_features)) - 0.5)  # draw from the same seeded RNG
X[outliers_index] += outliers_offset
inliers_mask = np.ones(n_samples).astype(bool)
inliers_mask[outliers_index] = False
# fit a Minimum Covariance Determinant (MCD) robust estimator to data
mcd = MinCovDet().fit(X)
# compare raw robust estimates with the true location and covariance
err_loc_mcd[i, j] = np.sum(mcd.location_ ** 2)
err_cov_mcd[i, j] = mcd.error_norm(np.eye(n_features))
# compare estimators learned from the full data set with true
# parameters
err_loc_emp_full[i, j] = np.sum(X.mean(0) ** 2)
err_cov_emp_full[i, j] = EmpiricalCovariance().fit(X).error_norm(
np.eye(n_features))
# compare with an empirical covariance learned from a pure data set
# (i.e. "perfect" mcd)
pure_X = X[inliers_mask]
pure_location = pure_X.mean(0)
pure_emp_cov = EmpiricalCovariance().fit(pure_X)
err_loc_emp_pure[i, j] = np.sum(pure_location ** 2)
err_cov_emp_pure[i, j] = pure_emp_cov.error_norm(np.eye(n_features))
# Display results
font_prop = matplotlib.font_manager.FontProperties(size=11)
plt.subplot(2, 1, 1)
plt.errorbar(range_n_outliers, err_loc_mcd.mean(1),
yerr=err_loc_mcd.std(1) / np.sqrt(repeat),
label="Robust location", color='m')
plt.errorbar(range_n_outliers, err_loc_emp_full.mean(1),
yerr=err_loc_emp_full.std(1) / np.sqrt(repeat),
label="Full data set mean", color='green')
plt.errorbar(range_n_outliers, err_loc_emp_pure.mean(1),
yerr=err_loc_emp_pure.std(1) / np.sqrt(repeat),
label="Pure data set mean", color='black')
plt.title("Influence of outliers on the location estimation")
plt.ylabel(r"Error ($||\mu - \hat{\mu}||_2^2$)")
plt.legend(loc="upper left", prop=font_prop)
plt.subplot(2, 1, 2)
x_size = range_n_outliers.size
plt.errorbar(range_n_outliers, err_cov_mcd.mean(1),
yerr=err_cov_mcd.std(1),
label="Robust covariance (mcd)", color='m')
plt.errorbar(range_n_outliers[:(x_size // 5 + 1)],
             err_cov_emp_full.mean(1)[:(x_size // 5 + 1)],
             yerr=err_cov_emp_full.std(1)[:(x_size // 5 + 1)],
             label="Full data set empirical covariance", color='green')
plt.plot(range_n_outliers[(x_size // 5):(x_size // 2 - 1)],
         err_cov_emp_full.mean(1)[(x_size // 5):(x_size // 2 - 1)], color='green',
         ls='--')
plt.errorbar(range_n_outliers, err_cov_emp_pure.mean(1),
yerr=err_cov_emp_pure.std(1),
label="Pure data set empirical covariance", color='black')
plt.title("Influence of outliers on the covariance estimation")
plt.xlabel("Amount of contamination (%)")
plt.ylabel("RMSE")
plt.legend(loc="upper center", prop=font_prop)
plt.show()
| bsd-3-clause |
anirudhjayaraman/scikit-learn | sklearn/preprocessing/tests/test_function_transformer.py | 176 | 2169 | from nose.tools import assert_equal
import numpy as np
from sklearn.preprocessing import FunctionTransformer
def _make_func(args_store, kwargs_store, func=lambda X, *a, **k: X):
def _func(X, *args, **kwargs):
args_store.append(X)
args_store.extend(args)
kwargs_store.update(kwargs)
return func(X)
return _func
def test_delegate_to_func():
# (args|kwargs)_store will hold the positional and keyword arguments
# passed to the function inside the FunctionTransformer.
args_store = []
kwargs_store = {}
X = np.arange(10).reshape((5, 2))
np.testing.assert_array_equal(
FunctionTransformer(_make_func(args_store, kwargs_store)).transform(X),
X,
'transform should have returned X unchanged',
)
    # The function should only have received X.
assert_equal(
args_store,
[X],
'Incorrect positional arguments passed to func: {args}'.format(
args=args_store,
),
)
assert_equal(
kwargs_store,
{},
'Unexpected keyword arguments passed to func: {args}'.format(
args=kwargs_store,
),
)
# reset the argument stores.
args_store[:] = [] # python2 compatible inplace list clear.
kwargs_store.clear()
y = object()
np.testing.assert_array_equal(
FunctionTransformer(
_make_func(args_store, kwargs_store),
pass_y=True,
).transform(X, y),
X,
'transform should have returned X unchanged',
)
    # The function should have received X and y.
assert_equal(
args_store,
[X, y],
'Incorrect positional arguments passed to func: {args}'.format(
args=args_store,
),
)
assert_equal(
kwargs_store,
{},
'Unexpected keyword arguments passed to func: {args}'.format(
args=kwargs_store,
),
)
def test_np_log():
X = np.arange(10).reshape((5, 2))
# Test that the numpy.log example still works.
np.testing.assert_array_equal(
FunctionTransformer(np.log1p).transform(X),
np.log1p(X),
)
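# Hedged round-trip sketch (not part of the test suite above): composing a
# FunctionTransformer with its inverse function recovers the input, e.g.
#
#     np.testing.assert_allclose(
#         FunctionTransformer(np.expm1).transform(
#             FunctionTransformer(np.log1p).transform(X)),
#         X)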
| bsd-3-clause |
bthirion/scikit-learn | sklearn/gaussian_process/gaussian_process.py | 17 | 34869 | # -*- coding: utf-8 -*-
# Author: Vincent Dubourg <vincent.dubourg@gmail.com>
# (mostly translation, see implementation details)
# License: BSD 3 clause
from __future__ import print_function
import numpy as np
from scipy import linalg, optimize
from ..base import BaseEstimator, RegressorMixin
from ..metrics.pairwise import manhattan_distances
from ..utils import check_random_state, check_array, check_X_y
from ..utils.validation import check_is_fitted
from . import regression_models as regression
from . import correlation_models as correlation
from ..utils import deprecated
MACHINE_EPSILON = np.finfo(np.double).eps
@deprecated("l1_cross_distances was deprecated in version 0.18 "
"and will be removed in 0.20.")
def l1_cross_distances(X):
"""
Computes the nonzero componentwise L1 cross-distances between the vectors
in X.
Parameters
----------
X : array_like
An array with shape (n_samples, n_features)
Returns
-------
D : array with shape (n_samples * (n_samples - 1) / 2, n_features)
The array of componentwise L1 cross-distances.
ij : arrays with shape (n_samples * (n_samples - 1) / 2, 2)
The indices i and j of the vectors in X associated to the cross-
distances in D: D[k] = np.abs(X[ij[k, 0]] - Y[ij[k, 1]]).
"""
X = check_array(X)
n_samples, n_features = X.shape
n_nonzero_cross_dist = n_samples * (n_samples - 1) // 2
    ij = np.zeros((n_nonzero_cross_dist, 2), dtype=int)
D = np.zeros((n_nonzero_cross_dist, n_features))
ll_1 = 0
for k in range(n_samples - 1):
ll_0 = ll_1
ll_1 = ll_0 + n_samples - k - 1
ij[ll_0:ll_1, 0] = k
ij[ll_0:ll_1, 1] = np.arange(k + 1, n_samples)
D[ll_0:ll_1] = np.abs(X[k] - X[(k + 1):n_samples])
return D, ij
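# Hedged usage sketch for l1_cross_distances (toy input): for
# X = np.array([[0., 0.], [1., 0.], [1., 2.]]) the three unordered pairs give
# D = [[1., 0.], [1., 2.], [0., 2.]] and ij = [[0, 1], [0, 2], [1, 2]],
# i.e. n_samples * (n_samples - 1) / 2 = 3 rows, one per pair with i < j.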
@deprecated("GaussianProcess was deprecated in version 0.18 and will be "
"removed in 0.20. Use the GaussianProcessRegressor instead.")
class GaussianProcess(BaseEstimator, RegressorMixin):
"""The legacy Gaussian Process model class.
.. deprecated:: 0.18
This class will be removed in 0.20.
Use the :class:`GaussianProcessRegressor` instead.
Read more in the :ref:`User Guide <gaussian_process>`.
Parameters
----------
regr : string or callable, optional
A regression function returning an array of outputs of the linear
regression functional basis. The number of observations n_samples
should be greater than the size p of this basis.
Default assumes a simple constant regression trend.
Available built-in regression models are::
'constant', 'linear', 'quadratic'
corr : string or callable, optional
A stationary autocorrelation function returning the autocorrelation
between two points x and x'.
Default assumes a squared-exponential autocorrelation model.
Built-in correlation models are::
'absolute_exponential', 'squared_exponential',
'generalized_exponential', 'cubic', 'linear'
beta0 : double array_like, optional
The regression weight vector to perform Ordinary Kriging (OK).
Default assumes Universal Kriging (UK) so that the vector beta of
regression weights is estimated using the maximum likelihood
principle.
storage_mode : string, optional
A string specifying whether the Cholesky decomposition of the
correlation matrix should be stored in the class (storage_mode =
'full') or not (storage_mode = 'light').
Default assumes storage_mode = 'full', so that the
Cholesky decomposition of the correlation matrix is stored.
This might be a useful parameter when one is not interested in the
        MSE and only plans to estimate the BLUP, for which the correlation
matrix is not required.
verbose : boolean, optional
A boolean specifying the verbose level.
Default is verbose = False.
theta0 : double array_like, optional
An array with shape (n_features, ) or (1, ).
The parameters in the autocorrelation model.
If thetaL and thetaU are also specified, theta0 is considered as
the starting point for the maximum likelihood estimation of the
best set of parameters.
Default assumes isotropic autocorrelation model with theta0 = 1e-1.
thetaL : double array_like, optional
An array with shape matching theta0's.
Lower bound on the autocorrelation parameters for maximum
likelihood estimation.
Default is None, so that it skips maximum likelihood estimation and
it uses theta0.
thetaU : double array_like, optional
An array with shape matching theta0's.
Upper bound on the autocorrelation parameters for maximum
likelihood estimation.
Default is None, so that it skips maximum likelihood estimation and
it uses theta0.
normalize : boolean, optional
        Input X and observations y are centered and scaled with respect to
        the means and standard deviations estimated from the n_samples
observations provided.
Default is normalize = True so that data is normalized to ease
maximum likelihood estimation.
nugget : double or ndarray, optional
Introduce a nugget effect to allow smooth predictions from noisy
data. If nugget is an ndarray, it must be the same length as the
number of data points used for the fit.
The nugget is added to the diagonal of the assumed training covariance;
in this way it acts as a Tikhonov regularization in the problem. In
the special case of the squared exponential correlation function, the
nugget mathematically represents the variance of the input values.
Default assumes a nugget close to machine precision for the sake of
robustness (nugget = 10. * MACHINE_EPSILON).
optimizer : string, optional
A string specifying the optimization algorithm to be used.
Default uses 'fmin_cobyla' algorithm from scipy.optimize.
Available optimizers are::
'fmin_cobyla', 'Welch'
        'Welch' optimizer is due to Welch et al., see reference [WBSWM1992]_.
It consists in iterating over several one-dimensional optimizations
instead of running one single multi-dimensional optimization.
random_start : int, optional
The number of times the Maximum Likelihood Estimation should be
performed from a random starting point.
The first MLE always uses the specified starting point (theta0),
the next starting points are picked at random according to an
exponential distribution (log-uniform on [thetaL, thetaU]).
Default does not use random starting point (random_start = 1).
random_state : int, RandomState instance or None, optional (default=None)
The generator used to shuffle the sequence of coordinates of theta in
the Welch optimizer. If int, random_state is the seed used by the
random number generator; If RandomState instance, random_state is the
random number generator; If None, the random number generator is the
RandomState instance used by `np.random`.
Attributes
----------
theta_ : array
Specified theta OR the best set of autocorrelation parameters (the \
sought maximizer of the reduced likelihood function).
reduced_likelihood_function_value_ : array
The optimal reduced likelihood function value.
Examples
--------
>>> import numpy as np
>>> from sklearn.gaussian_process import GaussianProcess
>>> X = np.array([[1., 3., 5., 6., 7., 8.]]).T
>>> y = (X * np.sin(X)).ravel()
>>> gp = GaussianProcess(theta0=0.1, thetaL=.001, thetaU=1.)
>>> gp.fit(X, y) # doctest: +ELLIPSIS
GaussianProcess(beta0=None...
...
Notes
-----
    The present implementation is based on a translation of the DACE
Matlab toolbox, see reference [NLNS2002]_.
References
----------
    .. [NLNS2002] `S.N. Lophaven, H.B. Nielsen and J. Sondergaard. DACE - A
        MATLAB Kriging Toolbox.` (2002)
http://imedea.uib-csic.es/master/cambioglobal/Modulo_V_cod101615/Lab/lab_maps/krigging/DACE-krigingsoft/dace/dace.pdf
.. [WBSWM1992] `W.J. Welch, R.J. Buck, J. Sacks, H.P. Wynn, T.J. Mitchell,
and M.D. Morris (1992). Screening, predicting, and computer
experiments. Technometrics, 34(1) 15--25.`
http://www.jstor.org/stable/1269548
"""
_regression_types = {
'constant': regression.constant,
'linear': regression.linear,
'quadratic': regression.quadratic}
_correlation_types = {
'absolute_exponential': correlation.absolute_exponential,
'squared_exponential': correlation.squared_exponential,
'generalized_exponential': correlation.generalized_exponential,
'cubic': correlation.cubic,
'linear': correlation.linear}
_optimizer_types = [
'fmin_cobyla',
'Welch']
def __init__(self, regr='constant', corr='squared_exponential', beta0=None,
storage_mode='full', verbose=False, theta0=1e-1,
thetaL=None, thetaU=None, optimizer='fmin_cobyla',
random_start=1, normalize=True,
nugget=10. * MACHINE_EPSILON, random_state=None):
self.regr = regr
self.corr = corr
self.beta0 = beta0
self.storage_mode = storage_mode
self.verbose = verbose
self.theta0 = theta0
self.thetaL = thetaL
self.thetaU = thetaU
self.normalize = normalize
self.nugget = nugget
self.optimizer = optimizer
self.random_start = random_start
self.random_state = random_state
def fit(self, X, y):
"""
The Gaussian Process model fitting method.
Parameters
----------
X : double array_like
An array with shape (n_samples, n_features) with the input at which
observations were made.
y : double array_like
An array with shape (n_samples, ) or shape (n_samples, n_targets)
with the observations of the output to be predicted.
Returns
-------
gp : self
A fitted Gaussian Process model object awaiting data to perform
predictions.
"""
# Run input checks
self._check_params()
self.random_state = check_random_state(self.random_state)
# Force data to 2D numpy.array
X, y = check_X_y(X, y, multi_output=True, y_numeric=True)
self.y_ndim_ = y.ndim
if y.ndim == 1:
y = y[:, np.newaxis]
# Check shapes of DOE & observations
n_samples, n_features = X.shape
_, n_targets = y.shape
# Run input checks
self._check_params(n_samples)
# Normalize data or don't
if self.normalize:
X_mean = np.mean(X, axis=0)
X_std = np.std(X, axis=0)
y_mean = np.mean(y, axis=0)
y_std = np.std(y, axis=0)
X_std[X_std == 0.] = 1.
y_std[y_std == 0.] = 1.
# center and scale X if necessary
X = (X - X_mean) / X_std
y = (y - y_mean) / y_std
else:
X_mean = np.zeros(1)
X_std = np.ones(1)
y_mean = np.zeros(1)
y_std = np.ones(1)
# Calculate matrix of distances D between samples
D, ij = l1_cross_distances(X)
if (np.min(np.sum(D, axis=1)) == 0.
and self.corr != correlation.pure_nugget):
raise Exception("Multiple input features cannot have the same"
" target value.")
# Regression matrix and parameters
F = self.regr(X)
n_samples_F = F.shape[0]
if F.ndim > 1:
p = F.shape[1]
else:
p = 1
if n_samples_F != n_samples:
raise Exception("Number of rows in F and X do not match. Most "
"likely something is going wrong with the "
"regression model.")
if p > n_samples_F:
raise Exception(("Ordinary least squares problem is undetermined "
"n_samples=%d must be greater than the "
"regression model size p=%d.") % (n_samples, p))
if self.beta0 is not None:
if self.beta0.shape[0] != p:
raise Exception("Shapes of beta0 and F do not match.")
# Set attributes
self.X = X
self.y = y
self.D = D
self.ij = ij
self.F = F
self.X_mean, self.X_std = X_mean, X_std
self.y_mean, self.y_std = y_mean, y_std
# Determine Gaussian Process model parameters
if self.thetaL is not None and self.thetaU is not None:
# Maximum Likelihood Estimation of the parameters
if self.verbose:
print("Performing Maximum Likelihood Estimation of the "
"autocorrelation parameters...")
self.theta_, self.reduced_likelihood_function_value_, par = \
self._arg_max_reduced_likelihood_function()
if np.isinf(self.reduced_likelihood_function_value_):
raise Exception("Bad parameter region. "
"Try increasing upper bound")
else:
# Given parameters
if self.verbose:
print("Given autocorrelation parameters. "
"Computing Gaussian Process model parameters...")
self.theta_ = self.theta0
self.reduced_likelihood_function_value_, par = \
self.reduced_likelihood_function()
if np.isinf(self.reduced_likelihood_function_value_):
raise Exception("Bad point. Try increasing theta0.")
self.beta = par['beta']
self.gamma = par['gamma']
self.sigma2 = par['sigma2']
self.C = par['C']
self.Ft = par['Ft']
self.G = par['G']
if self.storage_mode == 'light':
# Delete heavy data (it will be computed again if required)
# (it is required only when MSE is wanted in self.predict)
if self.verbose:
print("Light storage mode specified. "
"Flushing autocorrelation matrix...")
self.D = None
self.ij = None
self.F = None
self.C = None
self.Ft = None
self.G = None
return self
def predict(self, X, eval_MSE=False, batch_size=None):
"""
This function evaluates the Gaussian Process model at x.
Parameters
----------
X : array_like
An array with shape (n_eval, n_features) giving the point(s) at
which the prediction(s) should be made.
eval_MSE : boolean, optional
A boolean specifying whether the Mean Squared Error should be
evaluated or not.
            Default assumes eval_MSE = False and evaluates only the BLUP (mean
prediction).
batch_size : integer, optional
An integer giving the maximum number of points that can be
evaluated simultaneously (depending on the available memory).
Default is None so that all given points are evaluated at the same
time.
Returns
-------
y : array_like, shape (n_samples, ) or (n_samples, n_targets)
An array with shape (n_eval, ) if the Gaussian Process was trained
on an array of shape (n_samples, ) or an array with shape
(n_eval, n_targets) if the Gaussian Process was trained on an array
of shape (n_samples, n_targets) with the Best Linear Unbiased
Prediction at x.
MSE : array_like, optional (if eval_MSE == True)
An array with shape (n_eval, ) or (n_eval, n_targets) as with y,
with the Mean Squared Error at x.
"""
check_is_fitted(self, "X")
# Check input shapes
X = check_array(X)
n_eval, _ = X.shape
n_samples, n_features = self.X.shape
n_samples_y, n_targets = self.y.shape
# Run input checks
self._check_params(n_samples)
if X.shape[1] != n_features:
raise ValueError(("The number of features in X (X.shape[1] = %d) "
"should match the number of features used "
"for fit() "
"which is %d.") % (X.shape[1], n_features))
if batch_size is None:
# No memory management
# (evaluates all given points in a single batch run)
# Normalize input
X = (X - self.X_mean) / self.X_std
# Initialize output
y = np.zeros(n_eval)
if eval_MSE:
MSE = np.zeros(n_eval)
# Get pairwise componentwise L1-distances to the input training set
dx = manhattan_distances(X, Y=self.X, sum_over_features=False)
# Get regression function and correlation
f = self.regr(X)
r = self.corr(self.theta_, dx).reshape(n_eval, n_samples)
# Scaled predictor
y_ = np.dot(f, self.beta) + np.dot(r, self.gamma)
# Predictor
y = (self.y_mean + self.y_std * y_).reshape(n_eval, n_targets)
if self.y_ndim_ == 1:
y = y.ravel()
# Mean Squared Error
if eval_MSE:
C = self.C
if C is None:
# Light storage mode (need to recompute C, F, Ft and G)
if self.verbose:
print("This GaussianProcess used 'light' storage mode "
"at instantiation. Need to recompute "
"autocorrelation matrix...")
reduced_likelihood_function_value, par = \
self.reduced_likelihood_function()
self.C = par['C']
self.Ft = par['Ft']
self.G = par['G']
rt = linalg.solve_triangular(self.C, r.T, lower=True)
if self.beta0 is None:
# Universal Kriging
u = linalg.solve_triangular(self.G.T,
np.dot(self.Ft.T, rt) - f.T,
lower=True)
else:
# Ordinary Kriging
u = np.zeros((n_targets, n_eval))
MSE = np.dot(self.sigma2.reshape(n_targets, 1),
(1. - (rt ** 2.).sum(axis=0)
+ (u ** 2.).sum(axis=0))[np.newaxis, :])
MSE = np.sqrt((MSE ** 2.).sum(axis=0) / n_targets)
# Mean Squared Error might be slightly negative depending on
# machine precision: force to zero!
MSE[MSE < 0.] = 0.
if self.y_ndim_ == 1:
MSE = MSE.ravel()
return y, MSE
else:
return y
else:
# Memory management
if type(batch_size) is not int or batch_size <= 0:
raise Exception("batch_size must be a positive integer")
if eval_MSE:
y, MSE = np.zeros(n_eval), np.zeros(n_eval)
for k in range(max(1, int(n_eval / batch_size))):
batch_from = k * batch_size
batch_to = min([(k + 1) * batch_size + 1, n_eval + 1])
y[batch_from:batch_to], MSE[batch_from:batch_to] = \
self.predict(X[batch_from:batch_to],
eval_MSE=eval_MSE, batch_size=None)
return y, MSE
else:
y = np.zeros(n_eval)
for k in range(max(1, int(n_eval / batch_size))):
batch_from = k * batch_size
batch_to = min([(k + 1) * batch_size + 1, n_eval + 1])
y[batch_from:batch_to] = \
self.predict(X[batch_from:batch_to],
eval_MSE=eval_MSE, batch_size=None)
return y
def reduced_likelihood_function(self, theta=None):
"""
This function determines the BLUP parameters and evaluates the reduced
likelihood function for the given autocorrelation parameters theta.
Maximizing this function wrt the autocorrelation parameters theta is
equivalent to maximizing the likelihood of the assumed joint Gaussian
distribution of the observations y evaluated onto the design of
experiments X.
Parameters
----------
theta : array_like, optional
An array containing the autocorrelation parameters at which the
Gaussian Process model parameters should be determined.
Default uses the built-in autocorrelation parameters
(ie ``theta = self.theta_``).
Returns
-------
reduced_likelihood_function_value : double
The value of the reduced likelihood function associated to the
given autocorrelation parameters theta.
par : dict
A dictionary containing the requested Gaussian Process model
parameters:
sigma2
Gaussian Process variance.
beta
Generalized least-squares regression weights for
Universal Kriging or given beta0 for Ordinary
Kriging.
gamma
Gaussian Process weights.
C
Cholesky decomposition of the correlation matrix [R].
Ft
Solution of the linear equation system : [R] x Ft = F
G
QR decomposition of the matrix Ft.
"""
check_is_fitted(self, "X")
if theta is None:
# Use built-in autocorrelation parameters
theta = self.theta_
# Initialize output
reduced_likelihood_function_value = - np.inf
par = {}
# Retrieve data
n_samples = self.X.shape[0]
D = self.D
ij = self.ij
F = self.F
if D is None:
# Light storage mode (need to recompute D, ij and F)
D, ij = l1_cross_distances(self.X)
if (np.min(np.sum(D, axis=1)) == 0.
and self.corr != correlation.pure_nugget):
raise Exception("Multiple X are not allowed")
F = self.regr(self.X)
# Set up R
r = self.corr(theta, D)
R = np.eye(n_samples) * (1. + self.nugget)
R[ij[:, 0], ij[:, 1]] = r
R[ij[:, 1], ij[:, 0]] = r
# Cholesky decomposition of R
try:
C = linalg.cholesky(R, lower=True)
except linalg.LinAlgError:
return reduced_likelihood_function_value, par
# Get generalized least squares solution
Ft = linalg.solve_triangular(C, F, lower=True)
Q, G = linalg.qr(Ft, mode='economic')
sv = linalg.svd(G, compute_uv=False)
rcondG = sv[-1] / sv[0]
if rcondG < 1e-10:
# Check F
sv = linalg.svd(F, compute_uv=False)
condF = sv[0] / sv[-1]
if condF > 1e15:
raise Exception("F is too ill conditioned. Poor combination "
"of regression model and observations.")
else:
# Ft is too ill conditioned, get out (try different theta)
return reduced_likelihood_function_value, par
Yt = linalg.solve_triangular(C, self.y, lower=True)
if self.beta0 is None:
# Universal Kriging
beta = linalg.solve_triangular(G, np.dot(Q.T, Yt))
else:
# Ordinary Kriging
beta = np.array(self.beta0)
rho = Yt - np.dot(Ft, beta)
sigma2 = (rho ** 2.).sum(axis=0) / n_samples
# The determinant of R is equal to the squared product of the diagonal
# elements of its Cholesky decomposition C
detR = (np.diag(C) ** (2. / n_samples)).prod()
# Compute/Organize output
reduced_likelihood_function_value = - sigma2.sum() * detR
par['sigma2'] = sigma2 * self.y_std ** 2.
par['beta'] = beta
par['gamma'] = linalg.solve_triangular(C.T, rho)
par['C'] = C
par['Ft'] = Ft
par['G'] = G
return reduced_likelihood_function_value, par
def _arg_max_reduced_likelihood_function(self):
"""
This function estimates the autocorrelation parameters theta as the
maximizer of the reduced likelihood function.
(Minimization of the opposite reduced likelihood function is used for
convenience)
Parameters
----------
self : All parameters are stored in the Gaussian Process model object.
Returns
-------
optimal_theta : array_like
The best set of autocorrelation parameters (the sought maximizer of
the reduced likelihood function).
optimal_reduced_likelihood_function_value : double
The optimal reduced likelihood function value.
optimal_par : dict
The BLUP parameters associated to thetaOpt.
"""
# Initialize output
best_optimal_theta = []
best_optimal_rlf_value = []
best_optimal_par = []
if self.verbose:
print("The chosen optimizer is: " + str(self.optimizer))
if self.random_start > 1:
print(str(self.random_start) + " random starts are required.")
percent_completed = 0.
# Force optimizer to fmin_cobyla if the model is meant to be isotropic
if self.optimizer == 'Welch' and self.theta0.size == 1:
self.optimizer = 'fmin_cobyla'
if self.optimizer == 'fmin_cobyla':
def minus_reduced_likelihood_function(log10t):
return - self.reduced_likelihood_function(
theta=10. ** log10t)[0]
constraints = []
for i in range(self.theta0.size):
constraints.append(lambda log10t, i=i:
log10t[i] - np.log10(self.thetaL[0, i]))
constraints.append(lambda log10t, i=i:
np.log10(self.thetaU[0, i]) - log10t[i])
for k in range(self.random_start):
if k == 0:
# Use specified starting point as first guess
theta0 = self.theta0
else:
# Generate a random starting point log10-uniformly
# distributed between bounds
log10theta0 = (np.log10(self.thetaL)
+ self.random_state.rand(*self.theta0.shape)
* np.log10(self.thetaU / self.thetaL))
theta0 = 10. ** log10theta0
# Run Cobyla
try:
log10_optimal_theta = \
optimize.fmin_cobyla(minus_reduced_likelihood_function,
np.log10(theta0).ravel(), constraints,
iprint=0)
except ValueError as ve:
print("Optimization failed. Try increasing the ``nugget``")
raise ve
optimal_theta = 10. ** log10_optimal_theta
optimal_rlf_value, optimal_par = \
self.reduced_likelihood_function(theta=optimal_theta)
# Compare the new optimizer to the best previous one
if k > 0:
if optimal_rlf_value > best_optimal_rlf_value:
best_optimal_rlf_value = optimal_rlf_value
best_optimal_par = optimal_par
best_optimal_theta = optimal_theta
else:
best_optimal_rlf_value = optimal_rlf_value
best_optimal_par = optimal_par
best_optimal_theta = optimal_theta
if self.verbose and self.random_start > 1:
if (20 * k) / self.random_start > percent_completed:
percent_completed = (20 * k) / self.random_start
print("%s completed" % (5 * percent_completed))
optimal_rlf_value = best_optimal_rlf_value
optimal_par = best_optimal_par
optimal_theta = best_optimal_theta
elif self.optimizer == 'Welch':
# Backup of the given attributes
theta0, thetaL, thetaU = self.theta0, self.thetaL, self.thetaU
corr = self.corr
verbose = self.verbose
# This will iterate over fmin_cobyla optimizer
self.optimizer = 'fmin_cobyla'
self.verbose = False
# Initialize under isotropy assumption
if verbose:
print("Initialize under isotropy assumption...")
self.theta0 = check_array(self.theta0.min())
self.thetaL = check_array(self.thetaL.min())
self.thetaU = check_array(self.thetaU.max())
theta_iso, optimal_rlf_value_iso, par_iso = \
self._arg_max_reduced_likelihood_function()
optimal_theta = theta_iso + np.zeros(theta0.shape)
# Iterate over all dimensions of theta allowing for anisotropy
if verbose:
print("Now improving allowing for anisotropy...")
for i in self.random_state.permutation(theta0.size):
if verbose:
print("Proceeding along dimension %d..." % (i + 1))
self.theta0 = check_array(theta_iso)
self.thetaL = check_array(thetaL[0, i])
self.thetaU = check_array(thetaU[0, i])
def corr_cut(t, d):
return corr(check_array(np.hstack([optimal_theta[0][0:i],
t[0],
optimal_theta[0][(i +
1)::]])),
d)
self.corr = corr_cut
optimal_theta[0, i], optimal_rlf_value, optimal_par = \
self._arg_max_reduced_likelihood_function()
# Restore the given attributes
self.theta0, self.thetaL, self.thetaU = theta0, thetaL, thetaU
self.corr = corr
self.optimizer = 'Welch'
self.verbose = verbose
else:
raise NotImplementedError("This optimizer ('%s') is not "
"implemented yet. Please contribute!"
% self.optimizer)
return optimal_theta, optimal_rlf_value, optimal_par
def _check_params(self, n_samples=None):
# Check regression model
if not callable(self.regr):
if self.regr in self._regression_types:
self.regr = self._regression_types[self.regr]
else:
raise ValueError("regr should be one of %s or callable, "
"%s was given."
% (self._regression_types.keys(), self.regr))
# Check regression weights if given (Ordinary Kriging)
if self.beta0 is not None:
self.beta0 = np.atleast_2d(self.beta0)
if self.beta0.shape[1] != 1:
# Force to column vector
self.beta0 = self.beta0.T
# Check correlation model
if not callable(self.corr):
if self.corr in self._correlation_types:
self.corr = self._correlation_types[self.corr]
else:
raise ValueError("corr should be one of %s or callable, "
"%s was given."
% (self._correlation_types.keys(), self.corr))
# Check storage mode
if self.storage_mode != 'full' and self.storage_mode != 'light':
raise ValueError("Storage mode should either be 'full' or "
"'light', %s was given." % self.storage_mode)
# Check correlation parameters
self.theta0 = np.atleast_2d(self.theta0)
lth = self.theta0.size
if self.thetaL is not None and self.thetaU is not None:
self.thetaL = np.atleast_2d(self.thetaL)
self.thetaU = np.atleast_2d(self.thetaU)
if self.thetaL.size != lth or self.thetaU.size != lth:
raise ValueError("theta0, thetaL and thetaU must have the "
"same length.")
if np.any(self.thetaL <= 0) or np.any(self.thetaU < self.thetaL):
raise ValueError("The bounds must satisfy O < thetaL <= "
"thetaU.")
elif self.thetaL is None and self.thetaU is None:
if np.any(self.theta0 <= 0):
raise ValueError("theta0 must be strictly positive.")
elif self.thetaL is None or self.thetaU is None:
raise ValueError("thetaL and thetaU should either be both or "
"neither specified.")
# Force verbose type to bool
self.verbose = bool(self.verbose)
# Force normalize type to bool
self.normalize = bool(self.normalize)
# Check nugget value
self.nugget = np.asarray(self.nugget)
        if np.any(self.nugget < 0.):
raise ValueError("nugget must be positive or zero.")
if (n_samples is not None
and self.nugget.shape not in [(), (n_samples,)]):
raise ValueError("nugget must be either a scalar "
"or array of length n_samples.")
# Check optimizer
if self.optimizer not in self._optimizer_types:
raise ValueError("optimizer should be one of %s"
% self._optimizer_types)
# Force random_start type to int
self.random_start = int(self.random_start)
| bsd-3-clause |
PythonCharmers/bokeh | bokeh/compat/mplexporter/tools.py | 75 | 1732 | """
Tools for matplotlib plot exporting
"""
def ipynb_vega_init():
"""Initialize the IPython notebook display elements
This function borrows heavily from the excellent vincent package:
http://github.com/wrobstory/vincent
"""
try:
from IPython.core.display import display, HTML
except ImportError:
print('IPython Notebook could not be loaded.')
require_js = '''
if (window['d3'] === undefined) {{
require.config({{ paths: {{d3: "http://d3js.org/d3.v3.min"}} }});
require(["d3"], function(d3) {{
window.d3 = d3;
{0}
}});
}};
if (window['topojson'] === undefined) {{
require.config(
{{ paths: {{topojson: "http://d3js.org/topojson.v1.min"}} }}
);
require(["topojson"], function(topojson) {{
window.topojson = topojson;
}});
}};
'''
d3_geo_projection_js_url = "http://d3js.org/d3.geo.projection.v0.min.js"
d3_layout_cloud_js_url = ("http://wrobstory.github.io/d3-cloud/"
"d3.layout.cloud.js")
topojson_js_url = "http://d3js.org/topojson.v1.min.js"
vega_js_url = 'http://trifacta.github.com/vega/vega.js'
dep_libs = '''$.getScript("%s", function() {
$.getScript("%s", function() {
$.getScript("%s", function() {
$.getScript("%s", function() {
$([IPython.events]).trigger("vega_loaded.vincent");
})
})
})
});''' % (d3_geo_projection_js_url, d3_layout_cloud_js_url,
topojson_js_url, vega_js_url)
load_js = require_js.format(dep_libs)
html = '<script>'+load_js+'</script>'
display(HTML(html))
| bsd-3-clause |
prheenan/BioModel | BellZhurkov/Python/TestExamples/TestUtil/Bell_Test_Data.py | 1 | 2506 | # force floating point division. Can still use integer with //
from __future__ import division
# This file is used for importing the common utilities classes.
import numpy as np
import matplotlib.pyplot as plt
import sys
class BellData:
def __init__(self,forces,RatesFold,RatesUnfold=None):
self.Forces = forces
self.RatesFold = RatesFold
self.RatesUnfold=RatesUnfold
def Schlierf2006Figure1a():
"""
From
Schlierf, Michael, and Matthias Rief.
"Single-Molecule Unfolding Force Distributions Reveal a Funnel-Shaped
Energy Landscape."
Biophysical Journal 90, no. 4 (February 15, 2006)
"""
# pulling velocities (x axis of figure 1a)
Velocities = np.array([0.2,0.4,0.8,2,4]) * 1e-6
# unfolding forces (y axis of figure 1a)
UnfoldingForces = np.array([42,46,52,62,68]) * 1e-12
return BellData(UnfoldingForces,Velocities)
def Woodside2014FoldingAndUnfoldingData():
"""
From:
Woodside, Michael T., and Steven M. Block.
"Reconstructing Folding Energy Landscapes by Single-Molecule Force Spectroscopy"
Annual Review of Biophysics 43, no. 1 (2014): 19-39.
doi:10.1146/annurev-biophys-051013-022754.
"""
# write down figure 6a, pp 28
Forces = [11.25,
11.50,
11.75,
12.0,
12.25,
12.75,
13.25,
13.5,
13.75,
14.25,
14.75]
Forces= np.array(Forces)
# write down the folding and unfolding rates as decaying/increasing
# exponentials, based on their starting value in the graph
ForceDiff = max(Forces)-min(Forces)
    # we have
    # y ~ exp(+/- F/tau)
    # so y_f/y_i = exp(+/- (F_f-F_i)/tau)
    # so tau = +/- (F_f-F_i)/np.log(y_f/y_i); apparently the signs work themselves
    # out the way tau is used below
    yf = 300
    yi = 8
    # get the 'taus' for the exponential decay/growth (units of force, pN here)
    tauFold = ForceDiff/np.log(yf/yi)
    tauUnfold = ForceDiff/np.log(yf/yi)
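    # Sanity check of the comment above (numbers follow from the values used
    # here, not from the cited paper): ForceDiff = 14.75 - 11.25 = 3.5 pN and
    # np.log(300/8) ~= 3.62, so tau ~= 0.97 pN; the folding rate then decays
    # from 300 at 11.25 pN to 300*np.exp(-3.5/0.97) ~= 8 at 14.75 pN, while
    # the unfolding rate grows from 8 up to 300 over the same force range.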
# get the offset force array; use this to set up the data, which we
# assume decays or increases from our 'zero point' (first measured data)
forceOffset = (Forces-Forces[0])
# folding is decaying / decaying / *has* minus sign
Folding = yf * np.exp( -forceOffset/tauFold )
# unfolding is increasing / growing / *doesnt have* minus sign
Unfolding = yi * np.exp( forceOffset/tauUnfold)
return BellData(Forces*1e-12,Folding,Unfolding)
| gpl-2.0 |
sgenoud/scikit-learn | examples/svm/plot_svm_nonlinear.py | 5 | 1067 | """
==============
Non-linear SVM
==============
Perform binary classification using non-linear SVC
with RBF kernel. The target to predict is a XOR of the
inputs.
The color map illustrates the decision function learn by the SVC.
"""
print(__doc__)
import numpy as np
import pylab as pl
from sklearn import svm
xx, yy = np.meshgrid(np.linspace(-3, 3, 500),
np.linspace(-3, 3, 500))
np.random.seed(0)
X = np.random.randn(300, 2)
Y = np.logical_xor(X[:, 0] > 0, X[:, 1] > 0)
# fit the model
clf = svm.NuSVC()
clf.fit(X, Y)
# plot the decision function for each datapoint on the grid
Z = clf.decision_function(np.c_[xx.ravel(), yy.ravel()])
Z = Z.reshape(xx.shape)
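# decision_function is evaluated at every node of the 500x500 grid built above
# and reshaped back to the grid, which is what lets imshow render the learned
# surface; the grid spacing only affects rendering resolution, not the model.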
pl.imshow(Z, interpolation='nearest',
extent=(xx.min(), xx.max(), yy.min(), yy.max()),
aspect='auto', origin='lower', cmap=pl.cm.PuOr_r)
contours = pl.contour(xx, yy, Z, levels=[0], linewidths=2,
                      linestyles='--')
pl.scatter(X[:, 0], X[:, 1], s=30, c=Y, cmap=pl.cm.Paired)
pl.xticks(())
pl.yticks(())
pl.axis([-3, 3, -3, 3])
pl.show()
| bsd-3-clause |
gpospelov/BornAgain | devtools/mpi/pytests/mpi_cpp_test.py | 3 | 2236 | # Mixture of cylinders and prisms without interference
from mpi4py import MPI # this line has to be first
import numpy
import matplotlib
import pylab
from libBornAgainCore import *
comm = MPI.COMM_WORLD
world_size = comm.Get_size()
world_rank = comm.Get_rank()
def get_sample():
"""
Build and return the sample representing cylinders and pyramids on top of
substrate without interference.
"""
# defining materials
m_air = HomogeneousMaterial("Air", 0.0, 0.0)
m_substrate = HomogeneousMaterial("Substrate", 6e-6, 2e-8)
m_particle = HomogeneousMaterial("Particle", 6e-4, 2e-8)
# collection of particles
cylinder_ff = FormFactorCylinder(5*nanometer, 5*nanometer)
cylinder = Particle(m_particle, cylinder_ff)
prism_ff = FormFactorPrism3(10*nanometer, 5*nanometer)
prism = Particle(m_particle, prism_ff)
particle_layout = ParticleLayout()
particle_layout.addParticle(cylinder, 0.5)
particle_layout.addParticle(prism, 0.5)
# air layer with particles and substrate form multi layer
air_layer = Layer(m_air)
air_layer.addLayout(particle_layout)
substrate_layer = Layer(m_substrate)
multi_layer = MultiLayer()
multi_layer.addLayer(air_layer)
multi_layer.addLayer(substrate_layer)
return multi_layer
def get_simulation():
"""
Create and return GISAS simulation with beam and detector defined
"""
simulation = GISASSimulation()
simulation.setDetectorParameters(100, -1.0*degree, 1.0*degree, 100, 0.0*degree, 2.0*degree)
simulation.setBeamParameters(1.0*angstrom, 0.2*degree, 0.0*degree)
sample = get_sample()
simulation.setSample(sample)
return simulation
def run_simulation():
"""
Run simulation and plot results
"""
simulation = get_simulation()
if(world_size == 1):
print("Not an MPI environment, run with 'mpirun -n 4 python ompi_sim_example.py'")
exit(0)
simulation.runMPISimulation()
if(world_rank == 0):
sumresult = simulation.result().array()
print(sumresult)
# pylab.imshow(sumresult + 1, norm=matplotlib.colors.LogNorm(), extent=[-1.0, 1.0, 0, 2.0])
# pylab.show()
if __name__ == '__main__':
run_simulation()
| gpl-3.0 |
mardom/GalSim | devel/external/test_cf/test_cf.py | 1 | 3573 | # Copyright 2012, 2013 The GalSim developers:
# https://github.com/GalSim-developers
#
# This file is part of GalSim: The modular galaxy image simulation toolkit.
#
# GalSim is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# GalSim is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GalSim. If not, see <http://www.gnu.org/licenses/>
#
# Just some temporary testing stuff
if __name__ == "__main__":
import numpy as np
import matplotlib.pyplot as plt
import galsim
IMSIZE=40
# First make a noise field with an even number of elements
enoise = galsim.ImageViewD(np.random.randn(IMSIZE, IMSIZE))
encf = galsim.correlatednoise.CorrFunc(enoise)
    print(encf.SBProfile.xValue(galsim.PositionD(0, 0)))
# Then make a noise field with an odd number of elements
onoise = galsim.ImageViewD(np.random.randn(IMSIZE + 1, IMSIZE + 1))
oncf = galsim.correlatednoise.CorrFunc(onoise)
    print(oncf.SBProfile.xValue(galsim.PositionD(0, 0)))
testim = galsim.ImageD(10, 10)
cv = encf.SBProfile.getCovarianceMatrix(testim.view(), dx=1.)
#plt.pcolor(cv.array); plt.colorbar()
# Construct an image with noise correlated in the y direction
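    # Adding each pixel to its vertical neighbour (np.roll along axis=0) gives
    # adjacent rows a shared noise component, so the measured correlation
    # function should show excess power one pixel away along y; the x-direction
    # block below mirrors this with axis=1.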
plt.figure()
ynoise = galsim.ImageViewD(enoise.array[:, :] + np.roll(enoise.array, 1, axis=0))
plt.pcolor(ynoise.array); plt.colorbar()
plt.title('Noise correlated in y direction')
plt.savefig('ynoise.png')
yncf = galsim.correlatednoise.CorrFunc(ynoise)
yim = galsim.ImageD(IMSIZE, IMSIZE)
yncf.draw(yim, dx=1.)
plt.figure()
plt.pcolor(yim.array); plt.colorbar()
plt.title('CorrFunc with noise correlated in y direction')
plt.savefig('ycorrfunc.png')
ycv = yncf.SBProfile.getCovarianceMatrix(testim.view(), dx=1.)
plt.figure()
plt.pcolor(ycv.array); plt.colorbar()
plt.title('CorrFunc.getCovarianceMatrix() with noise correlated in y direction')
plt.savefig('ycovmatrix.png')
# Then construct an image with noise correlated in the x direction
plt.figure()
xnoise = galsim.ImageViewD(enoise.array[:, :] + np.roll(enoise.array, 1, axis=1))
plt.pcolor(xnoise.array); plt.colorbar()
plt.title('Noise correlated in x direction')
plt.savefig('xnoise.png')
xncf = galsim.correlatednoise.CorrFunc(xnoise)
xim = galsim.ImageD(IMSIZE, IMSIZE)
xncf.draw(xim, dx=1.)
plt.figure()
plt.pcolor(xim.array); plt.colorbar()
plt.title('CorrFunc with noise correlated in x direction')
plt.savefig('xcorrfunc.png')
xcv = xncf.SBProfile.getCovarianceMatrix(testim.view(), dx=1.)
plt.figure()
plt.pcolor(xcv.array); plt.colorbar()
plt.title('CorrFunc.getCovarianceMatrix() with noise correlated in x direction')
plt.savefig('xcovmatrix.png')
# Plot a hires, rotated, ycf
yim2 = galsim.ImageD(IMSIZE * 3, IMSIZE * 3)
yncf2 = yncf.createRotated(20. * galsim.degrees)
yncf2.draw(yim2, dx=0.4)
plt.figure()
plt.pcolor(yim2.array); plt.colorbar()
plt.title('CorrFunc with noise correlated in y direction (at 0.4 pix resolution, rotated by 20 deg)')
plt.savefig('ycorrfunc_hires_rot20.png')
| gpl-3.0 |
akshayparopkari/phylotoast | bin/LDA.py | 2 | 11875 | #!/usr/bin/env python
"""
Abstract: This script calculates and returns LDA plots based on normalized relative
abundances or distance matrices (for e.g. unifrac distance matrix).
"""
import sys
import argparse
from itertools import cycle
from phylotoast import util, biom_calc as bc, graph_util as gu
errors = []
try:
import matplotlib as mpl
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
mpl.rc("font", family="Arial") # define font for figure text
except ImportError as ie:
errors.append(ie)
try:
import numpy as np
except ImportError as ie:
errors.append(ie)
try:
import pandas as pd
except ImportError as ie:
errors.append(ie)
try:
import biom
except ImportError as ie:
errors.append(ie)
try:
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis as LDA
except ImportError as ie:
errors.append(ie)
try:
from palettable.colorbrewer.qualitative import Set3_12
except ImportError as ie:
errors.append("No module named palettable")
if len(errors) != 0:
for item in errors:
print("Import Error:", item)
sys.exit()
def plot_LDA(X_lda, y_lda, class_colors, exp_var, style, fig_size, label_pad,
font_size, sids, dim=2, zangles=None, pt_size=250, out_fp=""):
"""
Plot transformed LDA data.
"""
cats = class_colors.keys()
fig = plt.figure(figsize=fig_size)
if dim == 3:
try:
assert X_lda.shape[1] >= 3
except AssertionError:
sys.exit("\nLinear Discriminant Analysis requires at least 4 groups of "
"samples to create a 3D figure. Please update group information or "
"use the default 2D view of the results.\n")
if sids is not None:
print("\nPoint annotations are available only for 2D figures.\n")
ax = fig.add_subplot(111, projection="3d")
ax.view_init(elev=zangles[1], azim=zangles[0])
try:
ax.set_zlabel("LD3 (Percent Explained Variance: {:.3f}%)".
format(exp_var[2]*100), fontsize=font_size, labelpad=label_pad)
except:
ax.set_zlabel("LD3", fontsize=font_size, labelpad=label_pad)
for i, target_name in zip(range(len(cats)), cats):
cat_x = X_lda[:, 0][y_lda == target_name]
cat_y = X_lda[:, 1][y_lda == target_name]
cat_z = X_lda[:, 2][y_lda == target_name]
ax.scatter(xs=cat_x, ys=cat_y, zs=cat_z, label=target_name,
c=class_colors[target_name], alpha=0.85, s=pt_size, edgecolors="k",
zdir="z")
else:
ax = fig.add_subplot(111)
for i, target_name in zip(range(len(cats)), cats):
cat_x = X_lda[:, 0][y_lda == target_name]
if X_lda.shape[1] == 1:
cat_y = np.ones((cat_x.shape[0], 1)) + i
else:
cat_y = X_lda[:, 1][y_lda == target_name]
ax.scatter(x=cat_x, y=cat_y, label=target_name, alpha=0.85, s=pt_size,
color=class_colors[target_name], edgecolors="k")
# Annotate data points with sample IDs
if sids is not None:
for sample, point, group in zip(sids, X_lda, y_lda):
try:
assert len(point) >= 2
except AssertionError:
point = (point[0], cats.index(group)+1)
finally:
ax.annotate(s=sample, xy=point[:2], xytext=(0, -15), ha="center",
va="center", textcoords="offset points")
if X_lda.shape[1] == 1:
plt.ylim((0.5, 2.5))
try:
ax.set_xlabel("LD1 (Percent Explained Variance: {:.3f}%)".
format(exp_var[0]*100), fontsize=font_size, labelpad=label_pad)
except:
ax.set_xlabel("LD1", fontsize=font_size, labelpad=label_pad)
try:
ax.set_ylabel("LD2 (Percent Explained Variance: {:.3f}%)".
format(exp_var[1]*100), fontsize=font_size, labelpad=label_pad)
except:
ax.set_ylabel("LD2", fontsize=font_size, labelpad=label_pad)
leg = plt.legend(loc="best", scatterpoints=3, frameon=True, framealpha=1, fontsize=15)
leg.get_frame().set_edgecolor('k')
if dim == 2 and style:
gu.ggplot2_style(ax)
fc = "0.8"
else:
fc = "none"
# save or display result
if out_fp:
plt.savefig(out_fp, facecolor=fc, edgecolor="none", dpi=300, bbox_inches="tight",
pad_inches=0.1)
else:
plt.show()
def run_LDA(df):
"""
Run LinearDiscriminantAnalysis on input dataframe (df) and return
transformed data, scalings and explained variance by discriminants.
"""
# Prep variables for sklearn LDA
X = df.iloc[:, 1:df.shape[1]].values # input data matrix
y = df["Condition"].values # data categories list
# Calculate LDA
sklearn_lda = LDA()
X_lda_sklearn = sklearn_lda.fit_transform(X, y)
try:
exp_var = sklearn_lda.explained_variance_ratio_
except AttributeError as ae:
print("\n{}: explained variance cannot be computed.\nPlease check this GitHub PR:"
" https://github.com/scikit-learn/scikit-learn/pull/6027".format(ae))
return X_lda_sklearn, y, "NA"
return X_lda_sklearn, y, exp_var
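# Hedged usage sketch for run_LDA (column names are illustrative; only the
# leading "Condition" column is assumed, matching the callers below):
#
#     toy = pd.DataFrame({"Condition": ["A", "A", "B", "B"],
#                         "otu1": [0.1, 0.2, 0.8, 0.9],
#                         "otu2": [0.9, 0.8, 0.2, 0.1]})
#     X_lda, y_lda, exp_var = run_LDA(toy)
#
# X_lda has one row per sample and at most len(set(y_lda)) - 1 discriminant
# axes, which is why the 3D view above needs at least four sample groups.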
def handle_program_options():
parser = argparse.ArgumentParser(description="This script calculates and returns LDA "
"plots based on normalized relative "
"abundances or distance matrices "
"(for e.g. unifrac distance matrix).")
parser.add_argument("-m", "--map_fp", required=True,
help="Metadata mapping file. [REQUIRED]")
parser.add_argument("-g", "--group_by", required=True,
help="A column name in the mapping file containing categorical "
"values that will be used to identify groups. Each sample "
"ID must have a group entry. Default is no categories and "
"all the data will be treated as a single group. [REQUIRED]")
parser.add_argument("-c", "--colors",
help="A column name in the mapping file containing hexadecimal "
"(#FF0000) color values that will be used to color the "
"groups. Each sample ID must have a color entry.")
parser.add_argument("-ot", "--otu_table", help="Input biom file format OTU table.")
parser.add_argument("-dm", "--dist_matrix_file", help="Input distance matrix file.")
parser.add_argument("--save_lda_input",
help="Save a CSV-format file of the transposed LDA-input table to"
" the file specifed by this option.")
parser.add_argument("--plot_title", default=None,
help="Plot title. Default is no title.")
parser.add_argument("-o", "--out_fp", default="",
help="The path and file name to save the LDA plot under. If "
"specified, the figure will be saved directly instead of "
"opening a window in which the plot can be viewed before "
"saving.")
parser.add_argument("-d", "--dimensions", default=2, type=int, choices=[2, 3],
help="Choose whether to plot 2D or 3D. Default is a 2D view.")
parser.add_argument("-s", "--point_size", default=100, type=int,
help="Specify the size of the circles representing each of the "
"samples in the plot")
parser.add_argument("--z_angles", type=float, nargs=2, default=[45., 30.],
help="Specify the azimuth and elevation angles for a 3D plot.")
parser.add_argument("--figsize", default=[14, 8], type=int, nargs=2,
help="Specify the 'width height' in inches for LDA plots. Default"
" figure size is 14x8 inches.")
parser.add_argument("--font_size", default=12, type=int,
help="Sets the font size for text elements in the plot.")
parser.add_argument("--label_padding", default=15, type=int,
help="Sets the spacing in points between the each axis and its "
"label.")
parser.add_argument("--annotate_points", action="store_true",
help="If specified, each data point will be labeled with its "
"sample ID. Default is False.")
parser.add_argument("--ggplot2_style", action="store_true",
help="Apply ggplot2 styling to the figure. Default is False.")
return parser.parse_args()
def main():
args = handle_program_options()
# Parse and read mapping file
try:
header, imap = util.parse_map_file(args.map_fp)
category_idx = header.index(args.group_by)
except IOError as ioe:
err_msg = "\nError in metadata mapping filepath (-m): {}\n"
sys.exit(err_msg.format(ioe))
# Obtain group colors
try:
assert args.colors is not None
except AssertionError:
categories = {v[category_idx] for k, v in imap.items()}
color_cycle = cycle(Set3_12.hex_colors)
        class_colors = {c: next(color_cycle) for c in categories}
else:
class_colors = util.color_mapping(imap, header, args.group_by, args.colors)
if args.dist_matrix_file:
try:
dm_data = pd.read_csv(args.dist_matrix_file, sep="\t", index_col=0)
except IOError as ioe:
err_msg = "\nError with unifrac distance matrix file (-d): {}\n"
sys.exit(err_msg.format(ioe))
dm_data.insert(0, "Condition", [imap[str(sid)][category_idx] for sid in dm_data.index])
if args.annotate_points:
sampleids = [str(sid) for sid in dm_data.index]
else:
sampleids = None
if args.save_lda_input:
dm_data.to_csv(args.save_lda_input, sep="\t")
# Run LDA
X_lda, y_lda, exp_var = run_LDA(dm_data)
else:
# Load biom file and calculate relative abundance
try:
biomf = biom.load_table(args.otu_table)
except IOError as ioe:
err_msg = "\nError with biom format file (-d): {}\n"
sys.exit(err_msg.format(ioe))
# Get normalized relative abundances
rel_abd = bc.relative_abundance(biomf)
rel_abd = bc.arcsine_sqrt_transform(rel_abd)
df_rel_abd = pd.DataFrame(rel_abd).T
df_rel_abd.insert(0, "Condition", [imap[sid][category_idx]
for sid in df_rel_abd.index])
if args.annotate_points:
sampleids = df_rel_abd.index
else:
sampleids = None
if args.save_lda_input:
df_rel_abd.to_csv(args.save_lda_input, sep="\t")
# Run LDA
X_lda, y_lda, exp_var = run_LDA(df_rel_abd)
# Plot LDA
if args.dimensions == 3:
plot_LDA(X_lda, y_lda, class_colors, exp_var, style=args.ggplot2_style,
fig_size=args.figsize, label_pad=args.label_padding,
font_size=args.font_size, sids=sampleids, dim=3,
zangles=args.z_angles, pt_size=args.point_size, out_fp=args.out_fp)
else:
plot_LDA(X_lda, y_lda, class_colors, exp_var, style=args.ggplot2_style,
fig_size=args.figsize, label_pad=args.label_padding,
font_size=args.font_size, sids=sampleids, pt_size=args.point_size,
out_fp=args.out_fp)
if __name__ == "__main__":
sys.exit(main())
| mit |
gengliangwang/spark | python/pyspark/pandas/internal.py | 1 | 57972 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
An internal immutable DataFrame with some metadata to manage indexes.
"""
import re
from typing import Dict, List, Optional, Sequence, Tuple, Union, TYPE_CHECKING, cast
from itertools import accumulate
import py4j
import numpy as np
import pandas as pd
from pandas.api.types import CategoricalDtype, is_datetime64_dtype, is_datetime64tz_dtype
from pyspark import sql as spark
from pyspark._globals import _NoValue, _NoValueType
from pyspark.sql import functions as F, Window
from pyspark.sql.functions import PandasUDFType, pandas_udf
from pyspark.sql.types import BooleanType, DataType, StructField, StructType, LongType
# For running doctests and reference resolution in PyCharm.
from pyspark import pandas as ps # noqa: F401
if TYPE_CHECKING:
# This is required in old Python 3.5 to prevent circular reference.
from pyspark.pandas.series import Series # noqa: F401 (SPARK-34943)
from pyspark.pandas.config import get_option
from pyspark.pandas.typedef import (
Dtype,
as_spark_type,
extension_dtypes,
infer_pd_series_spark_type,
spark_type_to_pandas_dtype,
)
from pyspark.pandas.utils import (
column_labels_level,
default_session,
is_name_like_tuple,
is_testing,
lazy_property,
name_like_string,
scol_for,
spark_column_equals,
verify_temp_column_name,
)
# A function to turn given numbers to Spark columns that represent pandas-on-Spark index.
SPARK_INDEX_NAME_FORMAT = "__index_level_{}__".format
SPARK_DEFAULT_INDEX_NAME = SPARK_INDEX_NAME_FORMAT(0)
# A pattern to check if the name of a Spark column is a pandas-on-Spark index name or not.
SPARK_INDEX_NAME_PATTERN = re.compile(r"__index_level_[0-9]+__")
NATURAL_ORDER_COLUMN_NAME = "__natural_order__"
HIDDEN_COLUMNS = {NATURAL_ORDER_COLUMN_NAME}
DEFAULT_SERIES_NAME = 0
SPARK_DEFAULT_SERIES_NAME = str(DEFAULT_SERIES_NAME)
class InternalFrame(object):
"""
The internal immutable DataFrame which manages Spark DataFrame and column names and index
information.
.. note:: this is an internal class. It is not supposed to be exposed to users and users
    should not directly access it.
The internal immutable DataFrame represents the index information for a DataFrame it belongs to.
For instance, if we have a pandas-on-Spark DataFrame as below, pandas DataFrame does not
store the index as columns.
>>> psdf = ps.DataFrame({
... 'A': [1, 2, 3, 4],
... 'B': [5, 6, 7, 8],
... 'C': [9, 10, 11, 12],
... 'D': [13, 14, 15, 16],
... 'E': [17, 18, 19, 20]}, columns = ['A', 'B', 'C', 'D', 'E'])
>>> psdf # doctest: +NORMALIZE_WHITESPACE
A B C D E
0 1 5 9 13 17
1 2 6 10 14 18
2 3 7 11 15 19
3 4 8 12 16 20
However, all columns including index column are also stored in Spark DataFrame internally
as below.
>>> psdf._internal.to_internal_spark_frame.show() # doctest: +NORMALIZE_WHITESPACE
+-----------------+---+---+---+---+---+
|__index_level_0__| A| B| C| D| E|
+-----------------+---+---+---+---+---+
| 0| 1| 5| 9| 13| 17|
| 1| 2| 6| 10| 14| 18|
| 2| 3| 7| 11| 15| 19|
| 3| 4| 8| 12| 16| 20|
+-----------------+---+---+---+---+---+
In order to fill this gap, the current metadata is used by mapping Spark's internal column
to pandas-on-Spark's index. See the method below:
* `spark_frame` represents the internal Spark DataFrame
* `data_spark_column_names` represents non-indexing Spark column names
* `data_spark_columns` represents non-indexing Spark columns
* `data_dtypes` represents external non-indexing dtypes
* `index_spark_column_names` represents internal index Spark column names
* `index_spark_columns` represents internal index Spark columns
* `index_dtypes` represents external index dtypes
* `spark_column_names` represents all columns
* `index_names` represents the external index name as a label
    * `to_internal_spark_frame` represents the Spark DataFrame derived from the metadata. Includes the index.
    * `to_pandas_frame` represents the pandas DataFrame derived from the metadata.
>>> internal = psdf._internal
>>> internal.spark_frame.show() # doctest: +NORMALIZE_WHITESPACE +ELLIPSIS
+-----------------+---+---+---+---+---+-----------------+
|__index_level_0__| A| B| C| D| E|__natural_order__|
+-----------------+---+---+---+---+---+-----------------+
| 0| 1| 5| 9| 13| 17| ...|
| 1| 2| 6| 10| 14| 18| ...|
| 2| 3| 7| 11| 15| 19| ...|
| 3| 4| 8| 12| 16| 20| ...|
+-----------------+---+---+---+---+---+-----------------+
>>> internal.data_spark_column_names
['A', 'B', 'C', 'D', 'E']
>>> internal.index_spark_column_names
['__index_level_0__']
>>> internal.spark_column_names
['__index_level_0__', 'A', 'B', 'C', 'D', 'E']
>>> internal.index_names
[None]
>>> internal.data_dtypes
[dtype('int64'), dtype('int64'), dtype('int64'), dtype('int64'), dtype('int64')]
>>> internal.index_dtypes
[dtype('int64')]
>>> internal.to_internal_spark_frame.show() # doctest: +NORMALIZE_WHITESPACE
+-----------------+---+---+---+---+---+
|__index_level_0__| A| B| C| D| E|
+-----------------+---+---+---+---+---+
| 0| 1| 5| 9| 13| 17|
| 1| 2| 6| 10| 14| 18|
| 2| 3| 7| 11| 15| 19|
| 3| 4| 8| 12| 16| 20|
+-----------------+---+---+---+---+---+
>>> internal.to_pandas_frame
A B C D E
0 1 5 9 13 17
1 2 6 10 14 18
2 3 7 11 15 19
3 4 8 12 16 20
    In case the index is set to one of the existing columns, as below:
>>> psdf1 = psdf.set_index("A")
>>> psdf1 # doctest: +NORMALIZE_WHITESPACE
B C D E
A
1 5 9 13 17
2 6 10 14 18
3 7 11 15 19
4 8 12 16 20
>>> psdf1._internal.to_internal_spark_frame.show() # doctest: +NORMALIZE_WHITESPACE
+---+---+---+---+---+
| A| B| C| D| E|
+---+---+---+---+---+
| 1| 5| 9| 13| 17|
| 2| 6| 10| 14| 18|
| 3| 7| 11| 15| 19|
| 4| 8| 12| 16| 20|
+---+---+---+---+---+
>>> internal = psdf1._internal
>>> internal.spark_frame.show() # doctest: +NORMALIZE_WHITESPACE +ELLIPSIS
+-----------------+---+---+---+---+---+-----------------+
|__index_level_0__| A| B| C| D| E|__natural_order__|
+-----------------+---+---+---+---+---+-----------------+
| 0| 1| 5| 9| 13| 17| ...|
| 1| 2| 6| 10| 14| 18| ...|
| 2| 3| 7| 11| 15| 19| ...|
| 3| 4| 8| 12| 16| 20| ...|
+-----------------+---+---+---+---+---+-----------------+
>>> internal.data_spark_column_names
['B', 'C', 'D', 'E']
>>> internal.index_spark_column_names
['A']
>>> internal.spark_column_names
['A', 'B', 'C', 'D', 'E']
>>> internal.index_names
[('A',)]
>>> internal.data_dtypes
[dtype('int64'), dtype('int64'), dtype('int64'), dtype('int64')]
>>> internal.index_dtypes
[dtype('int64')]
>>> internal.to_internal_spark_frame.show() # doctest: +NORMALIZE_WHITESPACE
+---+---+---+---+---+
| A| B| C| D| E|
+---+---+---+---+---+
| 1| 5| 9| 13| 17|
| 2| 6| 10| 14| 18|
| 3| 7| 11| 15| 19|
| 4| 8| 12| 16| 20|
+---+---+---+---+---+
>>> internal.to_pandas_frame # doctest: +NORMALIZE_WHITESPACE
B C D E
A
1 5 9 13 17
2 6 10 14 18
3 7 11 15 19
4 8 12 16 20
    In case the index becomes a multi-index, as below:
>>> psdf2 = psdf.set_index("A", append=True)
>>> psdf2 # doctest: +NORMALIZE_WHITESPACE
B C D E
A
0 1 5 9 13 17
1 2 6 10 14 18
2 3 7 11 15 19
3 4 8 12 16 20
>>> psdf2._internal.to_internal_spark_frame.show() # doctest: +NORMALIZE_WHITESPACE
+-----------------+---+---+---+---+---+
|__index_level_0__| A| B| C| D| E|
+-----------------+---+---+---+---+---+
| 0| 1| 5| 9| 13| 17|
| 1| 2| 6| 10| 14| 18|
| 2| 3| 7| 11| 15| 19|
| 3| 4| 8| 12| 16| 20|
+-----------------+---+---+---+---+---+
>>> internal = psdf2._internal
>>> internal.spark_frame.show() # doctest: +NORMALIZE_WHITESPACE +ELLIPSIS
+-----------------+---+---+---+---+---+-----------------+
|__index_level_0__| A| B| C| D| E|__natural_order__|
+-----------------+---+---+---+---+---+-----------------+
| 0| 1| 5| 9| 13| 17| ...|
| 1| 2| 6| 10| 14| 18| ...|
| 2| 3| 7| 11| 15| 19| ...|
| 3| 4| 8| 12| 16| 20| ...|
+-----------------+---+---+---+---+---+-----------------+
>>> internal.data_spark_column_names
['B', 'C', 'D', 'E']
>>> internal.index_spark_column_names
['__index_level_0__', 'A']
>>> internal.spark_column_names
['__index_level_0__', 'A', 'B', 'C', 'D', 'E']
>>> internal.index_names
[None, ('A',)]
>>> internal.data_dtypes
[dtype('int64'), dtype('int64'), dtype('int64'), dtype('int64')]
>>> internal.index_dtypes
[dtype('int64'), dtype('int64')]
>>> internal.to_internal_spark_frame.show() # doctest: +NORMALIZE_WHITESPACE
+-----------------+---+---+---+---+---+
|__index_level_0__| A| B| C| D| E|
+-----------------+---+---+---+---+---+
| 0| 1| 5| 9| 13| 17|
| 1| 2| 6| 10| 14| 18|
| 2| 3| 7| 11| 15| 19|
| 3| 4| 8| 12| 16| 20|
+-----------------+---+---+---+---+---+
>>> internal.to_pandas_frame # doctest: +NORMALIZE_WHITESPACE
B C D E
A
0 1 5 9 13 17
1 2 6 10 14 18
2 3 7 11 15 19
3 4 8 12 16 20
    For multi-level columns, it also holds column_labels:
>>> columns = pd.MultiIndex.from_tuples([('X', 'A'), ('X', 'B'),
... ('Y', 'C'), ('Y', 'D')])
>>> psdf3 = ps.DataFrame([
... [1, 2, 3, 4],
... [5, 6, 7, 8],
... [9, 10, 11, 12],
... [13, 14, 15, 16],
... [17, 18, 19, 20]], columns = columns)
>>> psdf3 # doctest: +NORMALIZE_WHITESPACE
X Y
A B C D
0 1 2 3 4
1 5 6 7 8
2 9 10 11 12
3 13 14 15 16
4 17 18 19 20
>>> internal = psdf3._internal
>>> internal.spark_frame.show() # doctest: +NORMALIZE_WHITESPACE +ELLIPSIS
+-----------------+------+------+------+------+-----------------+
|__index_level_0__|(X, A)|(X, B)|(Y, C)|(Y, D)|__natural_order__|
+-----------------+------+------+------+------+-----------------+
| 0| 1| 2| 3| 4| ...|
| 1| 5| 6| 7| 8| ...|
| 2| 9| 10| 11| 12| ...|
| 3| 13| 14| 15| 16| ...|
| 4| 17| 18| 19| 20| ...|
+-----------------+------+------+------+------+-----------------+
>>> internal.data_spark_column_names
['(X, A)', '(X, B)', '(Y, C)', '(Y, D)']
>>> internal.column_labels
[('X', 'A'), ('X', 'B'), ('Y', 'C'), ('Y', 'D')]
For Series, it also holds scol to represent the column.
>>> psseries = psdf1.B
>>> psseries
A
1 5
2 6
3 7
4 8
Name: B, dtype: int64
>>> internal = psseries._internal
>>> internal.spark_frame.show() # doctest: +NORMALIZE_WHITESPACE +ELLIPSIS
+-----------------+---+---+---+---+---+-----------------+
|__index_level_0__| A| B| C| D| E|__natural_order__|
+-----------------+---+---+---+---+---+-----------------+
| 0| 1| 5| 9| 13| 17| ...|
| 1| 2| 6| 10| 14| 18| ...|
| 2| 3| 7| 11| 15| 19| ...|
| 3| 4| 8| 12| 16| 20| ...|
+-----------------+---+---+---+---+---+-----------------+
>>> internal.data_spark_column_names
['B']
>>> internal.index_spark_column_names
['A']
>>> internal.spark_column_names
['A', 'B']
>>> internal.index_names
[('A',)]
>>> internal.data_dtypes
[dtype('int64')]
>>> internal.index_dtypes
[dtype('int64')]
>>> internal.to_internal_spark_frame.show() # doctest: +NORMALIZE_WHITESPACE
+---+---+
| A| B|
+---+---+
| 1| 5|
| 2| 6|
| 3| 7|
| 4| 8|
+---+---+
>>> internal.to_pandas_frame # doctest: +NORMALIZE_WHITESPACE
B
A
1 5
2 6
3 7
4 8
"""
def __init__(
self,
spark_frame: spark.DataFrame,
index_spark_columns: Optional[List[spark.Column]],
index_names: Optional[List[Optional[Tuple]]] = None,
index_dtypes: Optional[List[Dtype]] = None,
column_labels: Optional[List[Tuple]] = None,
data_spark_columns: Optional[List[spark.Column]] = None,
data_dtypes: Optional[List[Dtype]] = None,
column_label_names: Optional[List[Optional[Tuple]]] = None,
):
"""
Create a new internal immutable DataFrame to manage Spark DataFrame, column fields and
index fields and names.
:param spark_frame: Spark DataFrame to be managed.
:param index_spark_columns: list of Spark Column
Spark Columns for the index.
:param index_names: list of tuples
the index names.
:param index_dtypes: list of dtypes
the index dtypes.
        :param column_labels: list of tuples with the same length as `data_spark_columns`
            The multi-level column labels as tuples.
        :param data_spark_columns: list of Spark Column
            Spark Columns to appear as columns. If this is None, it is calculated
            from spark_frame.
:param data_dtypes: list of dtypes.
the data dtypes.
        :param column_label_names: Names for each of the column index levels.
        See the examples below for what each parameter means.
>>> column_labels = pd.MultiIndex.from_tuples(
... [('a', 'x'), ('a', 'y'), ('b', 'z')], names=["column_labels_a", "column_labels_b"])
>>> row_index = pd.MultiIndex.from_tuples(
... [('foo', 'bar'), ('foo', 'bar'), ('zoo', 'bar')],
... names=["row_index_a", "row_index_b"])
>>> psdf = ps.DataFrame(
... [[1, 2, 3], [4, 5, 6], [7, 8, 9]], index=row_index, columns=column_labels)
>>> psdf.set_index(('a', 'x'), append=True, inplace=True)
>>> psdf # doctest: +NORMALIZE_WHITESPACE
column_labels_a a b
column_labels_b y z
row_index_a row_index_b (a, x)
foo bar 1 2 3
4 5 6
zoo bar 7 8 9
>>> internal = psdf._internal
>>> internal.spark_frame.show() # doctest: +NORMALIZE_WHITESPACE +ELLIPSIS
+-----------------+-----------------+------+------+------+...
|__index_level_0__|__index_level_1__|(a, x)|(a, y)|(b, z)|...
+-----------------+-----------------+------+------+------+...
| foo| bar| 1| 2| 3|...
| foo| bar| 4| 5| 6|...
| zoo| bar| 7| 8| 9|...
+-----------------+-----------------+------+------+------+...
>>> internal.index_spark_columns # doctest: +SKIP
[Column<'__index_level_0__'>, Column<'__index_level_1__'>, Column<'(a, x)'>]
>>> internal.index_names
[('row_index_a',), ('row_index_b',), ('a', 'x')]
>>> internal.index_dtypes
[dtype('O'), dtype('O'), dtype('int64')]
>>> internal.column_labels
[('a', 'y'), ('b', 'z')]
>>> internal.data_spark_columns # doctest: +SKIP
[Column<'(a, y)'>, Column<'(b, z)'>]
>>> internal.data_dtypes
[dtype('int64'), dtype('int64')]
>>> internal.column_label_names
[('column_labels_a',), ('column_labels_b',)]
"""
assert isinstance(spark_frame, spark.DataFrame)
assert not spark_frame.isStreaming, "pandas-on-Spark does not support Structured Streaming."
if not index_spark_columns:
if data_spark_columns is not None:
if column_labels is not None:
data_spark_columns = [
scol.alias(name_like_string(label))
for scol, label in zip(data_spark_columns, column_labels)
]
spark_frame = spark_frame.select(data_spark_columns)
assert not any(SPARK_INDEX_NAME_PATTERN.match(name) for name in spark_frame.columns), (
"Index columns should not appear in columns of the Spark DataFrame. Avoid "
"index column names [%s]." % SPARK_INDEX_NAME_PATTERN
)
# Create default index.
spark_frame = InternalFrame.attach_default_index(spark_frame)
index_spark_columns = [scol_for(spark_frame, SPARK_DEFAULT_INDEX_NAME)]
if data_spark_columns is not None:
data_spark_columns = [
scol_for(spark_frame, col)
for col in spark_frame.columns
if col != SPARK_DEFAULT_INDEX_NAME
]
if NATURAL_ORDER_COLUMN_NAME not in spark_frame.columns:
spark_frame = spark_frame.withColumn(
NATURAL_ORDER_COLUMN_NAME, F.monotonically_increasing_id()
)
self._sdf = spark_frame # type: spark.DataFrame
# index_spark_columns
assert all(
isinstance(index_scol, spark.Column) for index_scol in index_spark_columns
), index_spark_columns
self._index_spark_columns = index_spark_columns # type: List[spark.Column]
# index_names
if not index_names:
index_names = [None] * len(index_spark_columns)
assert len(index_spark_columns) == len(index_names), (
len(index_spark_columns),
len(index_names),
)
assert all(
is_name_like_tuple(index_name, check_type=True) for index_name in index_names
), index_names
self._index_names = index_names # type: List[Optional[Tuple]]
# index_dtypes
if not index_dtypes:
index_dtypes = [None] * len(index_spark_columns)
assert len(index_spark_columns) == len(index_dtypes), (
len(index_spark_columns),
len(index_dtypes),
)
index_dtypes = [
spark_type_to_pandas_dtype(spark_frame.select(scol).schema[0].dataType)
if dtype is None or dtype == np.dtype("object")
else dtype
for dtype, scol in zip(index_dtypes, index_spark_columns)
]
assert all(
isinstance(dtype, Dtype.__args__) # type: ignore
and (dtype == np.dtype("object") or as_spark_type(dtype, raise_error=False) is not None)
for dtype in index_dtypes
), index_dtypes
self._index_dtypes = index_dtypes # type: List[Dtype]
        # data_spark_columns
if data_spark_columns is None:
data_spark_columns = [
scol_for(spark_frame, col)
for col in spark_frame.columns
if all(
not spark_column_equals(scol_for(spark_frame, col), index_scol)
for index_scol in index_spark_columns
)
and col not in HIDDEN_COLUMNS
]
self._data_spark_columns = data_spark_columns # type: List[spark.Column]
else:
assert all(isinstance(scol, spark.Column) for scol in data_spark_columns)
self._data_spark_columns = data_spark_columns
# column_labels
if column_labels is None:
self._column_labels = [
(col,) for col in spark_frame.select(self._data_spark_columns).columns
] # type: List[Tuple]
else:
assert len(column_labels) == len(self._data_spark_columns), (
len(column_labels),
len(self._data_spark_columns),
)
if len(column_labels) == 1:
column_label = column_labels[0]
assert is_name_like_tuple(column_label, check_type=True), column_label
else:
assert all(
is_name_like_tuple(column_label, check_type=True)
for column_label in column_labels
), column_labels
assert len(set(len(label) for label in column_labels)) <= 1, column_labels
self._column_labels = column_labels
# data_dtypes
if not data_dtypes:
data_dtypes = [None] * len(data_spark_columns)
assert len(data_spark_columns) == len(data_dtypes), (
len(data_spark_columns),
len(data_dtypes),
)
data_dtypes = [
spark_type_to_pandas_dtype(spark_frame.select(scol).schema[0].dataType)
if dtype is None or dtype == np.dtype("object")
else dtype
for dtype, scol in zip(data_dtypes, data_spark_columns)
]
assert all(
isinstance(dtype, Dtype.__args__) # type: ignore
and (dtype == np.dtype("object") or as_spark_type(dtype, raise_error=False) is not None)
for dtype in data_dtypes
), data_dtypes
self._data_dtypes = data_dtypes # type: List[Dtype]
# column_label_names
if column_label_names is None:
self._column_label_names = [None] * column_labels_level(
self._column_labels
) # type: List[Optional[Tuple]]
else:
if len(self._column_labels) > 0:
assert len(column_label_names) == column_labels_level(self._column_labels), (
len(column_label_names),
column_labels_level(self._column_labels),
)
else:
assert len(column_label_names) > 0, len(column_label_names)
assert all(
is_name_like_tuple(column_label_name, check_type=True)
for column_label_name in column_label_names
), column_label_names
self._column_label_names = column_label_names
@staticmethod
def attach_default_index(
sdf: spark.DataFrame, default_index_type: Optional[str] = None
) -> spark.DataFrame:
"""
        This method attaches a default index to a Spark DataFrame. Spark has no notion
        of an index, so a corresponding column has to be generated.
        There are several types of default index that can be configured by
        `compute.default_index_type`.
>>> spark_frame = ps.range(10).to_spark()
>>> spark_frame
DataFrame[id: bigint]
It adds the default index column '__index_level_0__'.
>>> spark_frame = InternalFrame.attach_default_index(spark_frame)
>>> spark_frame
DataFrame[__index_level_0__: bigint, id: bigint]
It throws an exception if the given column name already exists.
>>> InternalFrame.attach_default_index(spark_frame)
... # doctest: +ELLIPSIS
Traceback (most recent call last):
...
AssertionError: '__index_level_0__' already exists...
"""
index_column = SPARK_DEFAULT_INDEX_NAME
assert (
index_column not in sdf.columns
), "'%s' already exists in the Spark column names '%s'" % (index_column, sdf.columns)
if default_index_type is None:
default_index_type = get_option("compute.default_index_type")
if default_index_type == "sequence":
return InternalFrame.attach_sequence_column(sdf, column_name=index_column)
elif default_index_type == "distributed-sequence":
return InternalFrame.attach_distributed_sequence_column(sdf, column_name=index_column)
elif default_index_type == "distributed":
return InternalFrame.attach_distributed_column(sdf, column_name=index_column)
else:
raise ValueError(
"'compute.default_index_type' should be one of 'sequence',"
" 'distributed-sequence' and 'distributed'"
)
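    # Rough behavioral sketch of the three default index types (a summary, see the
    # `compute.default_index_type` documentation for the authoritative details):
    # - "sequence": consecutive 0..n-1 via a window over a single ordering; exact,
    #   but can bottleneck on large data.
    # - "distributed-sequence": consecutive 0..n-1 computed per partition, with
    #   per-partition offsets added afterwards; runs in parallel.
    # - "distributed": monotonically increasing ids that are not consecutive.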
@staticmethod
def attach_sequence_column(sdf: spark.DataFrame, column_name: str) -> spark.DataFrame:
scols = [scol_for(sdf, column) for column in sdf.columns]
sequential_index = (
F.row_number().over(Window.orderBy(F.monotonically_increasing_id())).cast("long") - 1
)
return sdf.select(sequential_index.alias(column_name), *scols)
@staticmethod
def attach_distributed_column(sdf: spark.DataFrame, column_name: str) -> spark.DataFrame:
scols = [scol_for(sdf, column) for column in sdf.columns]
return sdf.select(F.monotonically_increasing_id().alias(column_name), *scols)
@staticmethod
def attach_distributed_sequence_column(
sdf: spark.DataFrame, column_name: str
) -> spark.DataFrame:
"""
        This method attaches a Spark column that holds a sequence generated in a distributed manner.
        This is equivalent to the column assigned when the default index type is 'distributed-sequence'.
>>> sdf = ps.DataFrame(['a', 'b', 'c']).to_spark()
>>> sdf = InternalFrame.attach_distributed_sequence_column(sdf, column_name="sequence")
>>> sdf.show() # doctest: +NORMALIZE_WHITESPACE
+--------+---+
|sequence| 0|
+--------+---+
| 0| a|
| 1| b|
| 2| c|
+--------+---+
"""
if len(sdf.columns) > 0:
try:
jdf = sdf._jdf.toDF() # type: ignore
sql_ctx = sdf.sql_ctx
encoders = sql_ctx._jvm.org.apache.spark.sql.Encoders # type: ignore
encoder = encoders.tuple(jdf.exprEnc(), encoders.scalaLong())
jrdd = jdf.localCheckpoint(False).rdd().zipWithIndex()
df = spark.DataFrame(
sql_ctx.sparkSession._jsparkSession.createDataset( # type: ignore
jrdd, encoder
).toDF(),
sql_ctx,
)
columns = df.columns
return df.selectExpr(
"`{}` as `{}`".format(columns[1], column_name), "`{}`.*".format(columns[0])
)
except py4j.protocol.Py4JError:
if is_testing():
raise
return InternalFrame._attach_distributed_sequence_column(sdf, column_name)
else:
cnt = sdf.count()
if cnt > 0:
return default_session().range(cnt).toDF(column_name)
else:
return default_session().createDataFrame(
[], schema=StructType().add(column_name, data_type=LongType(), nullable=False)
)
@staticmethod
def _attach_distributed_sequence_column(
sdf: spark.DataFrame, column_name: str
) -> spark.DataFrame:
"""
>>> sdf = ps.DataFrame(['a', 'b', 'c']).to_spark()
>>> sdf = InternalFrame._attach_distributed_sequence_column(sdf, column_name="sequence")
>>> sdf.sort("sequence").show() # doctest: +NORMALIZE_WHITESPACE
+--------+---+
|sequence| 0|
+--------+---+
| 0| a|
| 1| b|
| 2| c|
+--------+---+
"""
scols = [scol_for(sdf, column) for column in sdf.columns]
spark_partition_column = verify_temp_column_name(sdf, "__spark_partition_id__")
offset_column = verify_temp_column_name(sdf, "__offset__")
row_number_column = verify_temp_column_name(sdf, "__row_number__")
        # 1. Calculates the row count for each partition ID. `counts` here is, for instance,
# {
# 1: 83,
# 6: 83,
# 3: 83,
# ...
# }
sdf = sdf.withColumn(spark_partition_column, F.spark_partition_id())
# Checkpoint the DataFrame to fix the partition ID.
sdf = sdf.localCheckpoint(eager=False)
counts = map(
lambda x: (x["key"], x["count"]),
sdf.groupby(sdf[spark_partition_column].alias("key")).count().collect(),
)
        # 2. Calculates the cumulative sum in order of partition id.
        # Note that it does not matter whether the partition id ordering is meaningful.
        # We just need a one-by-one sequential id.
# sort by partition key.
sorted_counts = sorted(counts, key=lambda x: x[0])
# get cumulative sum in an order of partition key.
cumulative_counts = [0] + list(accumulate(map(lambda count: count[1], sorted_counts)))
# zip it with partition key.
sums = dict(zip(map(lambda count: count[0], sorted_counts), cumulative_counts))
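        # Illustrative (hypothetical) numbers: if counts were {1: 83, 6: 83, 3: 83},
        # sorted_counts is [(1, 83), (3, 83), (6, 83)], cumulative_counts is
        # [0, 83, 166, 249], and sums is {1: 0, 3: 83, 6: 166} -- i.e. rows in
        # partition 3 start at global offset 83.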
# 3. Attach offset for each partition.
@pandas_udf(LongType(), PandasUDFType.SCALAR)
def offset(id: pd.Series) -> pd.Series:
current_partition_offset = sums[id.iloc[0]]
return pd.Series(current_partition_offset).repeat(len(id))
sdf = sdf.withColumn(offset_column, offset(spark_partition_column))
# 4. Calculate row_number in each partition.
w = Window.partitionBy(spark_partition_column).orderBy(F.monotonically_increasing_id())
row_number = F.row_number().over(w)
sdf = sdf.withColumn(row_number_column, row_number)
# 5. Calculate the index.
return sdf.select(
(sdf[offset_column] + sdf[row_number_column] - 1).alias(column_name), *scols
)
def spark_column_for(self, label: Tuple) -> spark.Column:
""" Return Spark Column for the given column label. """
column_labels_to_scol = dict(zip(self.column_labels, self.data_spark_columns))
if label in column_labels_to_scol:
return column_labels_to_scol[label]
else:
raise KeyError(name_like_string(label))
def spark_column_name_for(self, label_or_scol: Union[Tuple, spark.Column]) -> str:
""" Return the actual Spark column name for the given column label. """
if isinstance(label_or_scol, spark.Column):
scol = label_or_scol
else:
scol = self.spark_column_for(label_or_scol)
return self.spark_frame.select(scol).columns[0]
def spark_type_for(self, label_or_scol: Union[Tuple, spark.Column]) -> DataType:
""" Return DataType for the given column label. """
if isinstance(label_or_scol, spark.Column):
scol = label_or_scol
else:
scol = self.spark_column_for(label_or_scol)
return self.spark_frame.select(scol).schema[0].dataType
def spark_column_nullable_for(self, label_or_scol: Union[Tuple, spark.Column]) -> bool:
""" Return nullability for the given column label. """
if isinstance(label_or_scol, spark.Column):
scol = label_or_scol
else:
scol = self.spark_column_for(label_or_scol)
return self.spark_frame.select(scol).schema[0].nullable
def dtype_for(self, label: Tuple) -> Dtype:
""" Return dtype for the given column label. """
column_labels_to_dtype = dict(zip(self.column_labels, self.data_dtypes))
if label in column_labels_to_dtype:
return column_labels_to_dtype[label]
else:
raise KeyError(name_like_string(label))
@property
def spark_frame(self) -> spark.DataFrame:
""" Return the managed Spark DataFrame. """
return self._sdf
@lazy_property
def data_spark_column_names(self) -> List[str]:
""" Return the managed column field names. """
return self.spark_frame.select(self.data_spark_columns).columns
@property
def data_spark_columns(self) -> List[spark.Column]:
""" Return Spark Columns for the managed data columns. """
return self._data_spark_columns
@property
def index_spark_column_names(self) -> List[str]:
""" Return the managed index field names. """
return self.spark_frame.select(self.index_spark_columns).columns
@property
def index_spark_columns(self) -> List[spark.Column]:
""" Return Spark Columns for the managed index columns. """
return self._index_spark_columns
@lazy_property
def spark_column_names(self) -> List[str]:
""" Return all the field names including index field names. """
return self.spark_frame.select(self.spark_columns).columns
@lazy_property
def spark_columns(self) -> List[spark.Column]:
""" Return Spark Columns for the managed columns including index columns. """
index_spark_columns = self.index_spark_columns
return index_spark_columns + [
spark_column
for spark_column in self.data_spark_columns
if all(
not spark_column_equals(spark_column, scol)
for scol in index_spark_columns
)
]
@property
def index_names(self) -> List[Optional[Tuple]]:
""" Return the managed index names. """
return self._index_names
@lazy_property
def index_level(self) -> int:
""" Return the level of the index. """
return len(self._index_names)
@property
def column_labels(self) -> List[Tuple]:
""" Return the managed column index. """
return self._column_labels
@lazy_property
def column_labels_level(self) -> int:
""" Return the level of the column index. """
return len(self._column_label_names)
@property
def column_label_names(self) -> List[Optional[Tuple]]:
""" Return names of the index levels. """
return self._column_label_names
@property
def index_dtypes(self) -> List[Dtype]:
""" Return dtypes for the managed index columns. """
return self._index_dtypes
@property
def data_dtypes(self) -> List[Dtype]:
""" Return dtypes for the managed columns. """
return self._data_dtypes
@lazy_property
def to_internal_spark_frame(self) -> spark.DataFrame:
"""
Return as Spark DataFrame. This contains index columns as well
        and should only be used for internal purposes.
"""
index_spark_columns = self.index_spark_columns
data_columns = []
for spark_column in self.data_spark_columns:
if all(
not spark_column_equals(spark_column, scol)
for scol in index_spark_columns
):
data_columns.append(spark_column)
return self.spark_frame.select(index_spark_columns + data_columns)
@lazy_property
def to_pandas_frame(self) -> pd.DataFrame:
""" Return as pandas DataFrame. """
sdf = self.to_internal_spark_frame
pdf = sdf.toPandas()
if len(pdf) == 0 and len(sdf.schema) > 0:
pdf = pdf.astype(
{field.name: spark_type_to_pandas_dtype(field.dataType) for field in sdf.schema}
)
return InternalFrame.restore_index(pdf, **self.arguments_for_restore_index)
@lazy_property
def arguments_for_restore_index(self) -> Dict:
""" Create arguments for `restore_index`. """
column_names = []
ext_dtypes = {
col: dtype
for col, dtype in zip(self.index_spark_column_names, self.index_dtypes)
if isinstance(dtype, extension_dtypes)
}
categorical_dtypes = {
col: dtype
for col, dtype in zip(self.index_spark_column_names, self.index_dtypes)
if isinstance(dtype, CategoricalDtype)
}
for spark_column, column_name, dtype in zip(
self.data_spark_columns, self.data_spark_column_names, self.data_dtypes
):
for index_spark_column_name, index_spark_column in zip(
self.index_spark_column_names, self.index_spark_columns
):
if spark_column_equals(spark_column, index_spark_column):
column_names.append(index_spark_column_name)
break
else:
column_names.append(column_name)
if isinstance(dtype, extension_dtypes):
ext_dtypes[column_name] = dtype
elif isinstance(dtype, CategoricalDtype):
categorical_dtypes[column_name] = dtype
return dict(
index_columns=self.index_spark_column_names,
index_names=self.index_names,
data_columns=column_names,
column_labels=self.column_labels,
column_label_names=self.column_label_names,
ext_dtypes=ext_dtypes,
categorical_dtypes=categorical_dtypes,
)
@staticmethod
def restore_index(
pdf: pd.DataFrame,
*,
index_columns: List[str],
index_names: List[Tuple],
data_columns: List[str],
column_labels: List[Tuple],
column_label_names: List[Tuple],
ext_dtypes: Dict[str, Dtype] = None,
categorical_dtypes: Dict[str, CategoricalDtype] = None
) -> pd.DataFrame:
"""
Restore pandas DataFrame indices using the metadata.
:param pdf: the pandas DataFrame to be processed.
:param index_columns: the original column names for index columns.
:param index_names: the index names after restored.
:param data_columns: the original column names for data columns.
:param column_labels: the column labels after restored.
:param column_label_names: the column label names after restored.
:param ext_dtypes: the map from the original column names to extension data types.
:param categorical_dtypes: the map from the original column names to categorical types.
:return: the restored pandas DataFrame
>>> pdf = pd.DataFrame({"index": [10, 20, 30], "a": ['a', 'b', 'c'], "b": [0, 2, 1]})
>>> InternalFrame.restore_index(
... pdf,
... index_columns=["index"],
... index_names=[("idx",)],
... data_columns=["a", "b", "index"],
... column_labels=[("x",), ("y",), ("z",)],
... column_label_names=[("lv1",)],
... ext_dtypes=None,
... categorical_dtypes={"b": CategoricalDtype(categories=["i", "j", "k"])}
... ) # doctest: +NORMALIZE_WHITESPACE
lv1 x y z
idx
10 a i 10
20 b k 20
30 c j 30
"""
if ext_dtypes is not None and len(ext_dtypes) > 0:
pdf = pdf.astype(ext_dtypes, copy=True)
if categorical_dtypes is not None:
for col, dtype in categorical_dtypes.items():
pdf[col] = pd.Categorical.from_codes(
pdf[col], categories=dtype.categories, ordered=dtype.ordered
)
append = False
for index_field in index_columns:
drop = index_field not in data_columns
pdf = pdf.set_index(index_field, drop=drop, append=append)
append = True
pdf = pdf[data_columns]
pdf.index.names = [
name if name is None or len(name) > 1 else name[0] for name in index_names
]
names = [name if name is None or len(name) > 1 else name[0] for name in column_label_names]
if len(column_label_names) > 1:
pdf.columns = pd.MultiIndex.from_tuples(column_labels, names=names)
else:
pdf.columns = pd.Index(
[None if label is None else label[0] for label in column_labels], name=names[0],
)
return pdf
@lazy_property
def resolved_copy(self) -> "InternalFrame":
""" Copy the immutable InternalFrame with the updates resolved. """
sdf = self.spark_frame.select(self.spark_columns + list(HIDDEN_COLUMNS))
return self.copy(
spark_frame=sdf,
index_spark_columns=[scol_for(sdf, col) for col in self.index_spark_column_names],
data_spark_columns=[scol_for(sdf, col) for col in self.data_spark_column_names],
)
def with_new_sdf(
self,
spark_frame: spark.DataFrame,
*,
index_dtypes: Optional[List[Dtype]] = None,
data_columns: Optional[List[str]] = None,
data_dtypes: Optional[List[Dtype]] = None
) -> "InternalFrame":
""" Copy the immutable InternalFrame with the updates by the specified Spark DataFrame.
:param spark_frame: the new Spark DataFrame
:param index_dtypes: the index dtypes. If None, the original dtyeps are used.
:param data_columns: the new column names. If None, the original one is used.
:param data_dtypes: the data dtypes. If None, the original dtyeps are used.
:return: the copied InternalFrame.
"""
if index_dtypes is None:
index_dtypes = self.index_dtypes
else:
assert len(index_dtypes) == len(self.index_dtypes), (
len(index_dtypes),
len(self.index_dtypes),
)
if data_columns is None:
data_columns = self.data_spark_column_names
else:
assert len(data_columns) == len(self.column_labels), (
len(data_columns),
len(self.column_labels),
)
if data_dtypes is None:
data_dtypes = self.data_dtypes
else:
assert len(data_dtypes) == len(self.column_labels), (
len(data_dtypes),
len(self.column_labels),
)
sdf = spark_frame.drop(NATURAL_ORDER_COLUMN_NAME)
return self.copy(
spark_frame=sdf,
index_spark_columns=[scol_for(sdf, col) for col in self.index_spark_column_names],
index_dtypes=index_dtypes,
data_spark_columns=[scol_for(sdf, col) for col in data_columns],
data_dtypes=data_dtypes,
)
def with_new_columns(
self,
scols_or_pssers: Sequence[Union[spark.Column, "Series"]],
*,
column_labels: Optional[List[Tuple]] = None,
data_dtypes: Optional[List[Dtype]] = None,
column_label_names: Union[Optional[List[Optional[Tuple]]], _NoValueType] = _NoValue,
keep_order: bool = True
) -> "InternalFrame":
"""
        Copy the immutable InternalFrame with updates from the specified Spark Columns or Series.
:param scols_or_pssers: the new Spark Columns or Series.
:param column_labels: the new column index.
            If None, the column_labels of the corresponding `scols_or_pssers` are used
            if they are Series; otherwise the original ones are used.
        :param data_dtypes: the new dtypes.
            If None, the dtypes of the corresponding `scols_or_pssers` are used if they
            are Series; otherwise the dtypes will be inferred from the corresponding
            `scols_or_pssers`.
:param column_label_names: the new names of the column index levels.
:return: the copied InternalFrame.
"""
from pyspark.pandas.series import Series
if column_labels is None:
if all(isinstance(scol_or_psser, Series) for scol_or_psser in scols_or_pssers):
column_labels = [cast(Series, psser)._column_label for psser in scols_or_pssers]
else:
assert len(scols_or_pssers) == len(self.column_labels), (
len(scols_or_pssers),
len(self.column_labels),
)
column_labels = []
for scol_or_psser, label in zip(scols_or_pssers, self.column_labels):
if isinstance(scol_or_psser, Series):
column_labels.append(scol_or_psser._column_label)
else:
column_labels.append(label)
else:
assert len(scols_or_pssers) == len(column_labels), (
len(scols_or_pssers),
len(column_labels),
)
data_spark_columns = []
for scol_or_psser in scols_or_pssers:
if isinstance(scol_or_psser, Series):
scol = scol_or_psser.spark.column
else:
scol = scol_or_psser
data_spark_columns.append(scol)
if data_dtypes is None:
data_dtypes = []
for scol_or_psser in scols_or_pssers:
if isinstance(scol_or_psser, Series):
data_dtypes.append(scol_or_psser.dtype)
else:
data_dtypes.append(None)
else:
assert len(scols_or_pssers) == len(data_dtypes), (
len(scols_or_pssers),
len(data_dtypes),
)
sdf = self.spark_frame
if not keep_order:
sdf = self.spark_frame.select(self.index_spark_columns + data_spark_columns)
index_spark_columns = [scol_for(sdf, col) for col in self.index_spark_column_names]
data_spark_columns = [
scol_for(sdf, col) for col in self.spark_frame.select(data_spark_columns).columns
]
else:
index_spark_columns = self.index_spark_columns
if column_label_names is _NoValue:
column_label_names = self._column_label_names
return self.copy(
spark_frame=sdf,
index_spark_columns=index_spark_columns,
column_labels=column_labels,
data_spark_columns=data_spark_columns,
data_dtypes=data_dtypes,
column_label_names=column_label_names,
)
def with_filter(self, pred: Union[spark.Column, "Series"]) -> "InternalFrame":
""" Copy the immutable InternalFrame with the updates by the predicate.
:param pred: the predicate to filter.
:return: the copied InternalFrame.
"""
from pyspark.pandas.series import Series
if isinstance(pred, Series):
assert isinstance(pred.spark.data_type, BooleanType), pred.spark.data_type
condition = pred.spark.column
else:
spark_type = self.spark_frame.select(pred).schema[0].dataType
assert isinstance(spark_type, BooleanType), spark_type
condition = pred
return self.with_new_sdf(self.spark_frame.filter(condition).select(self.spark_columns))
def with_new_spark_column(
self,
column_label: Tuple,
scol: spark.Column,
*,
dtype: Optional[Dtype] = None,
keep_order: bool = True
) -> "InternalFrame":
"""
        Copy the immutable InternalFrame with updates from the specified Spark Column.
:param column_label: the column label to be updated.
:param scol: the new Spark Column
:param dtype: the new dtype.
            If not specified, the dtype will be inferred from the Spark Column.
:return: the copied InternalFrame.
"""
assert column_label in self.column_labels, column_label
idx = self.column_labels.index(column_label)
data_spark_columns = self.data_spark_columns.copy()
data_spark_columns[idx] = scol
data_dtypes = self.data_dtypes.copy()
data_dtypes[idx] = dtype
return self.with_new_columns(
data_spark_columns, data_dtypes=data_dtypes, keep_order=keep_order
)
def select_column(self, column_label: Tuple) -> "InternalFrame":
"""
Copy the immutable InternalFrame with the specified column.
:param column_label: the column label to use.
:return: the copied InternalFrame.
"""
assert column_label in self.column_labels, column_label
return self.copy(
column_labels=[column_label],
data_spark_columns=[self.spark_column_for(column_label)],
data_dtypes=[self.dtype_for(column_label)],
column_label_names=None,
)
def copy(
self,
*,
spark_frame: Union[spark.DataFrame, _NoValueType] = _NoValue,
index_spark_columns: Union[List[spark.Column], _NoValueType] = _NoValue,
index_names: Union[Optional[List[Optional[Tuple]]], _NoValueType] = _NoValue,
index_dtypes: Union[Optional[List[Dtype]], _NoValueType] = _NoValue,
column_labels: Union[Optional[List[Tuple]], _NoValueType] = _NoValue,
data_spark_columns: Union[Optional[List[spark.Column]], _NoValueType] = _NoValue,
data_dtypes: Union[Optional[List[Dtype]], _NoValueType] = _NoValue,
column_label_names: Union[Optional[List[Optional[Tuple]]], _NoValueType] = _NoValue
) -> "InternalFrame":
""" Copy the immutable InternalFrame.
:param spark_frame: the new Spark DataFrame. If not specified, the original one is used.
:param index_spark_columns: the list of Spark Column.
If not specified, the original ones are used.
:param index_names: the index names. If not specified, the original ones are used.
        :param index_dtypes: the index dtypes. If not specified, the original dtypes are used.
:param column_labels: the new column labels. If not specified, the original ones are used.
:param data_spark_columns: the new Spark Columns.
If not specified, the original ones are used.
        :param data_dtypes: the data dtypes. If not specified, the original dtypes are used.
:param column_label_names: the new names of the column index levels.
If not specified, the original ones are used.
:return: the copied immutable InternalFrame.
"""
if spark_frame is _NoValue:
spark_frame = self.spark_frame
if index_spark_columns is _NoValue:
index_spark_columns = self.index_spark_columns
if index_names is _NoValue:
index_names = self.index_names
if index_dtypes is _NoValue:
index_dtypes = self.index_dtypes
if column_labels is _NoValue:
column_labels = self.column_labels
if data_spark_columns is _NoValue:
data_spark_columns = self.data_spark_columns
if data_dtypes is _NoValue:
data_dtypes = self.data_dtypes
if column_label_names is _NoValue:
column_label_names = self.column_label_names
return InternalFrame(
spark_frame=cast(spark.DataFrame, spark_frame),
index_spark_columns=cast(List[spark.Column], index_spark_columns),
index_names=cast(Optional[List[Optional[Tuple]]], index_names),
index_dtypes=cast(Optional[List[Dtype]], index_dtypes),
column_labels=cast(Optional[List[Tuple]], column_labels),
data_spark_columns=cast(Optional[List[spark.Column]], data_spark_columns),
data_dtypes=cast(Optional[List[Dtype]], data_dtypes),
column_label_names=cast(Optional[List[Optional[Tuple]]], column_label_names),
)
@staticmethod
def from_pandas(pdf: pd.DataFrame) -> "InternalFrame":
""" Create an immutable DataFrame from pandas DataFrame.
:param pdf: :class:`pd.DataFrame`
:return: the created immutable DataFrame
"""
index_names = [
name if name is None or isinstance(name, tuple) else (name,) for name in pdf.index.names
]
columns = pdf.columns
if isinstance(columns, pd.MultiIndex):
column_labels = columns.tolist()
else:
column_labels = [(col,) for col in columns]
column_label_names = [
name if name is None or isinstance(name, tuple) else (name,) for name in columns.names
]
(
pdf,
index_columns,
index_dtypes,
data_columns,
data_dtypes,
) = InternalFrame.prepare_pandas_frame(pdf)
schema = StructType(
[
StructField(
name, infer_pd_series_spark_type(col, dtype), nullable=bool(col.isnull().any()),
)
for (name, col), dtype in zip(pdf.iteritems(), index_dtypes + data_dtypes)
]
)
sdf = default_session().createDataFrame(pdf, schema=schema)
return InternalFrame(
spark_frame=sdf,
index_spark_columns=[scol_for(sdf, col) for col in index_columns],
index_names=index_names,
index_dtypes=index_dtypes,
column_labels=column_labels,
data_spark_columns=[scol_for(sdf, col) for col in data_columns],
data_dtypes=data_dtypes,
column_label_names=column_label_names,
)
@staticmethod
def prepare_pandas_frame(
pdf: pd.DataFrame, *, retain_index: bool = True
) -> Tuple[pd.DataFrame, List[str], List[Dtype], List[str], List[Dtype]]:
"""
Prepare pandas DataFrame for creating Spark DataFrame.
:param pdf: the pandas DataFrame to be prepared.
:param retain_index: whether the indices should be retained.
:return: the tuple of
- the prepared pandas dataFrame
- index column names for Spark DataFrame
- index dtypes of the given pandas DataFrame
- data column names for Spark DataFrame
- data dtypes of the given pandas DataFrame
>>> pdf = pd.DataFrame(
... {("x", "a"): ['a', 'b', 'c'],
... ("y", "b"): pd.Categorical(["i", "k", "j"], categories=["i", "j", "k"])},
... index=[10, 20, 30])
>>> prepared, index_columns, index_dtypes, data_columns, data_dtypes = (
... InternalFrame.prepare_pandas_frame(pdf))
>>> prepared
__index_level_0__ (x, a) (y, b)
0 10 a 0
1 20 b 2
2 30 c 1
>>> index_columns
['__index_level_0__']
>>> index_dtypes
[dtype('int64')]
>>> data_columns
['(x, a)', '(y, b)']
>>> data_dtypes
[dtype('O'), CategoricalDtype(categories=['i', 'j', 'k'], ordered=False)]
"""
pdf = pdf.copy()
data_columns = [name_like_string(col) for col in pdf.columns]
pdf.columns = data_columns
if retain_index:
index_nlevels = pdf.index.nlevels
index_columns = [SPARK_INDEX_NAME_FORMAT(i) for i in range(index_nlevels)]
pdf.index.names = index_columns
reset_index = pdf.reset_index()
else:
index_nlevels = 0
index_columns = []
reset_index = pdf
index_dtypes = list(reset_index.dtypes)[:index_nlevels]
data_dtypes = list(reset_index.dtypes)[index_nlevels:]
for name, col in reset_index.iteritems():
dt = col.dtype
if is_datetime64_dtype(dt) or is_datetime64tz_dtype(dt):
continue
elif isinstance(dt, CategoricalDtype):
col = col.cat.codes
reset_index[name] = col.replace({np.nan: None})
return reset_index, index_columns, index_dtypes, data_columns, data_dtypes
def _test() -> None:
import os
import doctest
import sys
from pyspark.sql import SparkSession
import pyspark.pandas.internal
os.chdir(os.environ["SPARK_HOME"])
globs = pyspark.pandas.internal.__dict__.copy()
globs["ps"] = pyspark.pandas
spark = (
SparkSession.builder.master("local[4]")
.appName("pyspark.pandas.internal tests")
.getOrCreate()
)
(failure_count, test_count) = doctest.testmod(
pyspark.pandas.internal,
globs=globs,
optionflags=doctest.ELLIPSIS | doctest.NORMALIZE_WHITESPACE,
)
spark.stop()
if failure_count:
sys.exit(-1)
if __name__ == "__main__":
_test()
| apache-2.0 |
ah-anssi/SecuML | SecuML/core/ActiveLearning/QueryStrategies/AnnotationQueries/AladinAnnotationQueries.py | 1 | 12484 | # SecuML
# Copyright (C) 2016-2017 ANSSI
#
# SecuML is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# SecuML is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with SecuML. If not, see <http://www.gnu.org/licenses/>.
import copy
import numpy as np
import pandas as pd
import time
from SecuML.core.Classification.Configuration.GaussianNaiveBayesConfiguration import GaussianNaiveBayesConfiguration
from SecuML.core.Classification.Configuration.TestConfiguration.UnlabeledLabeledConf import UnlabeledLabeledConf
from SecuML.core.Classification.Classifiers.GaussianNaiveBayes import GaussianNaiveBayes
from SecuML.core.Classification.ClassifierDatasets import ClassifierDatasets
from SecuML.core.Clustering.Evaluation.PerformanceIndicators import PerformanceIndicators
from SecuML.core.Tools import matrix_tools
from .AnnotationQueries import AnnotationQueries
class AladinAtLeastTwoFamilies(Exception):
def __str__(self):
return '''Aladin requires that the initial annotated dataset
contains at least two different families.'''
class AladinAnnotationQueries(AnnotationQueries):
def __init__(self, iteration, conf):
AnnotationQueries.__init__(self, iteration, 'aladin')
self.num_annotations = conf.num_annotations
self.conf = conf
self.checkInputData()
def runModels(self):
self.datasets = self.iteration.update_model.datasets
self.getLogisticRegressionResults()
self.runNaiveBayes()
def generateAnnotationQueries(self):
self.computeScores()
self.generateQueriesFromScores()
def checkInputData(self):
train_instances = self.iteration.datasets.getAnnotatedInstances()
num_families = len(set(train_instances.annotations.getFamilies()))
if num_families < 2:
raise AladinAtLeastTwoFamilies()
def getLogisticRegressionResults(self):
multiclass = self.iteration.update_model.models['multiclass']
self.lr_predicted_proba = multiclass.testing_monitoring.predictions_monitoring.all_predicted_proba
self.lr_predicted_labels = multiclass.testing_monitoring.predictions_monitoring.predictions[
'predictions']
self.lr_class_labels = multiclass.class_labels
self.lr_time = multiclass.training_execution_time
self.lr_time += multiclass.testing_execution_time
def createNaiveBayesConf(self):
test_conf = UnlabeledLabeledConf()
naive_bayes_conf = GaussianNaiveBayesConfiguration(
4, False, True, test_conf)
return naive_bayes_conf
def runNaiveBayes(self):
naive_bayes_conf = self.createNaiveBayesConf()
# Update training data - the naive Bayes classifier is trained on all the data
self.datasets.test_instances.annotations.setFamilies(
list(self.lr_predicted_labels))
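        # (i.e. the annotated instances plus the unlabelled pool pseudo-labelled
        # with the multi-class logistic regression predictions set just above)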
all_datasets = ClassifierDatasets(None, naive_bayes_conf.sample_weight)
train_instances = copy.deepcopy(self.datasets.train_instances)
train_instances.union(self.datasets.test_instances)
all_datasets.setDatasets(train_instances, None)
self.evalClusteringPerf(all_datasets.train_instances)
# Train the naive Bayes detection model and predict
self.naive_bayes = GaussianNaiveBayes(naive_bayes_conf)
self.naive_bayes.training(all_datasets)
self.nb_time = self.naive_bayes.training_execution_time
num_test_instances = self.datasets.test_instances.numInstances()
self.datasets.test_instances.annotations.setFamilies(
[None] * num_test_instances)
if num_test_instances == 0:
self.nb_predicted_log_proba = []
else:
self.nb_predicted_log_proba = self.naive_bayes.pipeline.predict_log_proba(
self.datasets.test_instances.features.getValues())
start_time = time.time()
if num_test_instances == 0:
self.nb_predicted_labels = []
else:
self.nb_predicted_labels = self.naive_bayes.pipeline.predict(
self.datasets.test_instances.features.getValues())
self.nb_time += time.time() - start_time
self.nb_class_labels = self.naive_bayes.class_labels
def evalClusteringPerf(self, instances):
self.clustering_perf = PerformanceIndicators()
self.clustering_perf.generateEvaluation(
instances.ground_truth.getFamilies(), instances.annotations.getFamilies())
def computeScores(self):
self.createScoresDataFrame()
self.computeUncertaintyScores()
self.computeAnomalousScores()
def createScoresDataFrame(self):
test_instances = self.datasets.test_instances
num_test_instances = self.datasets.test_instances.numInstances()
self.scores = pd.DataFrame(np.zeros((num_test_instances, 5)),
index=test_instances.ids.getIds(),
columns=['lr_prediction', 'lr_score', 'nb_prediction', 'nb_score', 'queried'])
self.scores['queried'] = [False] * num_test_instances
    # Uncertain instances have a small difference between the probability of belonging
    # to the most likely family and that of the second most likely family.
# In Aladin, the authors consider this measure of uncertainty instead of the entropy
# used by Pelleg and Moore (Active Learning for Anomaly and Rare-Category Detection).
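    # Hypothetical example: with predicted probabilities [0.50, 0.45, 0.05] the
    # score is 0.50 - 0.45 = 0.05 (very uncertain), while [0.90, 0.05, 0.05]
    # gives 0.90 - 0.05 = 0.85 (confident); lower scores are queried first.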
def computeUncertaintyScores(self):
self.scores['lr_prediction'] = self.lr_predicted_labels
lr_scores = []
for i, predicted_label in enumerate(self.scores['lr_prediction']):
predicted_label_index = np.where(
self.lr_class_labels == predicted_label)[0]
predicted_proba = self.lr_predicted_proba[i, predicted_label_index]
proba = predicted_proba - self.lr_predicted_proba[i, :]
proba[predicted_label_index] = 2
score = np.min(proba)
lr_scores.append(score)
self.scores['lr_score'] = lr_scores
# Anomalous instances have a low probability of belonging to the assigned family
def computeAnomalousScores(self):
self.scores['nb_prediction'] = self.nb_predicted_labels
for c in set(self.nb_predicted_labels):
c_instances = self.scores.loc[self.scores['nb_prediction']
== c].index.values
c_features = self.datasets.test_instances.features.getInstancesFromIds(
c_instances)
c_likelihood = self.naive_bayes.logLikelihood(c_features, c)
self.scores.loc[self.scores['nb_prediction']
== c, 'nb_score'] = c_likelihood
def generateQueriesFromScores(self):
assert(np.array_equal(self.lr_class_labels, self.nb_class_labels))
lr_predicted_proba_df = self.generateLrPredictedProbaDataFrame()
num_families = len(self.lr_class_labels)
self.annotation_queries = []
        # The number of annotation queries does not exceed the number of families
if self.num_annotations <= num_families:
if self.iteration.iteration_number % 2 == 0:
classifier = 'lr'
else:
classifier = 'nb'
matrix_tools.sortDataFrame(
self.scores, classifier + '_score', True, True)
selected_instances = self.scores.index.tolist()[
:self.num_annotations]
for instance_id in selected_instances:
query = self.generateAnnotationQuery(
instance_id, 0, None, None)
self.annotation_queries.append(query)
return
# Otherwise
num_uncertain = [0] * num_families
num_anomalous = [0] * num_families
families_scores = self.generateFamiliesScoresTables()
num_annotations = 0
stop = False
selected_instances = []
while not stop:
for i, family in enumerate(list(self.lr_class_labels)):
if num_uncertain[i] <= num_anomalous[i]:
classifier = 'lr'
num_uncertain[i] += 1
else:
classifier = 'nb'
num_anomalous[i] += 1
scores = families_scores[classifier][i]
selected_rows = scores.loc[scores['queried'] == False]
if len(selected_rows) > 0:
query = selected_rows.index.tolist()[0]
else:
# No anomalous or uncertain instances available for annotation
# Select the most likely instance according to the logistic regression output
self.conf.logger.debug(
family + ': no anomalous, no uncertain instances')
selected_rows = lr_predicted_proba_df.loc[lr_predicted_proba_df['queried'] == False]
selected_rows = matrix_tools.sortDataFrame(
selected_rows, family, False, False)
selection = selected_rows.index.tolist()
# Break condition - There is no instance left in the unlabelled pool
if len(selection) == 0:
stop = True
break
else:
query = selection[0]
# Add annotation query and set queried = True
num_annotations += 1
selected_instances.append(query)
for c in ['nb', 'lr']:
predicted_class = self.scores.loc[query, c + '_prediction']
predicted_class_index = np.where(
self.lr_class_labels == predicted_class)[0][0]
families_scores[c][predicted_class_index].set_value(
query, 'queried', True)
self.scores.set_value(query, 'queried', True)
lr_predicted_proba_df.set_value(query, 'queried', True)
# Break condition - self.num_annotations instances have been queried
if num_annotations >= self.num_annotations:
stop = True
break
for instance_id in selected_instances:
query = self.generateAnnotationQuery(instance_id, 0, None, None)
self.annotation_queries.append(query)
def generateLrPredictedProbaDataFrame(self):
num_test_instances = self.datasets.test_instances.numInstances()
lr_predicted_proba_df = pd.DataFrame(np.zeros((num_test_instances, len(self.lr_class_labels) + 1)),
index=self.datasets.test_instances.ids.getIds(),
columns=list(self.lr_class_labels) + ['queried'])
if num_test_instances > 0:
lr_predicted_proba_df.iloc[:, :-1] = self.lr_predicted_proba
lr_predicted_proba_df['queried'] = [False] * num_test_instances
return lr_predicted_proba_df
def generateFamiliesScoresTables(self, classifier=None):
if classifier is None:
families_scores = {}
families_scores['lr'] = self.generateFamiliesScoresTables('lr')
families_scores['nb'] = self.generateFamiliesScoresTables('nb')
return families_scores
families_scores = []
for i, family in enumerate(list(self.lr_class_labels)):
selection = self.scores[classifier + '_prediction']
if selection.shape[0] > 0:
family_scores = self.scores.loc[self.scores[classifier +
'_prediction'] == family]
family_scores = matrix_tools.sortDataFrame(
family_scores, classifier + '_score', True, False)
else:
family_scores = pd.DataFrame(
columns=self.scores.columns.values)
families_scores.append(family_scores)
return families_scores
| gpl-2.0 |
jpn--/larch | tests/test_nnnl.py | 1 | 3786 | from larch.model import Model
import numpy
from pytest import approx
import pandas
from larch.data_services.examples import ITINERARY_RAW
from larch.data_services import H5PodCE
from larch import DataService  # Model is already imported from larch.model above
from larch.util.common_functions import fourier_series, piecewise_linear
import pytest
@pytest.mark.skip(reason="NNNL is not functioning correctly presently")
def test_2_tier():
df = ITINERARY_RAW()
df['outDepTimeHours'] = numpy.round(df['outDepTime'] / 3600).astype(int)
pod_ce = H5PodCE.from_dataframe(
df,
caselabels='individual',
altlabels='alternative',
)
_, sa = pod_ce.new_alternative_codes(
groupby=[
# 'nFlights',
'outDepTimeHours',
],
name='AltsByFlightsDepHours',
create_index='AltsByFlightsDepHours_idx',
groupby_prefixes=[
# "F",
"H",
],
overwrite=True,
)
dx = DataService.from_CE_and_SA(pod_ce, sa)
m = Model(dataservice=dx)
	from .. import P, X, PX
m.utility_ca = (
+ sum(PX(f'normalize({i})') for i in [
'totalTripDurationMinutes',
'log(totalPrice)',
'nFlights',
])
+ fourier_series("outDepTime/86400", length=4)
+ fourier_series("outArrTime/86400", length=4)
)
m.availability_var = 'is_avail'
m.choice_ca_var = 'choice'
m.magic_nesting(sa)
m.unmangle()
m.estimate()
assert m.loglike() == approx(-1552.7201587407562)
from larch.model.nnnl import NNNL
n = NNNL(m)
n.finalize()
n.load_data()
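	# NNNL (non-normalized nested logit) scales the lower-level coefficients by
	# the nest parameter, so the NL estimates are divided by MU before loading;
	# this is an equivalence check against the NL model, not a re-estimation.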
n.set_values(m.pf.value / m.pf.value['MU_outDepTimeHours'])
n.set_value('MU_outDepTimeHours', m.pf.value['MU_outDepTimeHours'])
assert n.loglike(n.pf.value) == approx( m.loglike(m.pf.value) )
numpy.testing.assert_array_almost_equal(
n.d_logsums(),
m.d_logsums()
)
@pytest.mark.skip(reason="NNNL is not functioning correctly presently")
def test_3_tier():
df = ITINERARY_RAW()
df['outDepTimeHours'] = numpy.round(df['outDepTime'] / 3600).astype(int)
pod_ce = H5PodCE.from_dataframe(
df,
caselabels='individual',
altlabels='alternative',
)
_, sa = pod_ce.new_alternative_codes(
groupby=[
'nFlights',
'outDepTimeHours',
],
name='AltsByFlightsDepHours',
create_index='AltsByFlightsDepHours_idx',
groupby_prefixes=[
"F",
"H",
],
overwrite=True,
)
dx = DataService.from_CE_and_SA(pod_ce, sa)
m = Model(dataservice=dx)
	from .. import P, X, PX
m.utility_ca = (
+ sum(PX(f'normalize({i})') for i in [
'totalTripDurationMinutes',
'log(totalPrice)',
'nFlights',
])
+ fourier_series("outDepTime/86400", length=4)
+ fourier_series("outArrTime/86400", length=4)
)
m.availability_var = 'is_avail'
m.choice_ca_var = 'choice'
m.magic_nesting(sa)
m.unmangle()
m.set_values({
'MU_nFlights': 0.75,
'MU_outDepTimeHours': 0.5,
'cos_2πoutArrTime/86400': -0.24352756182332694,
'cos_2πoutDepTime/86400': -0.5941495103991815,
'cos_4πoutArrTime/86400': 0.07133782207978602,
'cos_4πoutDepTime/86400': -0.45117462119982826,
'normalize(log(totalPrice))': -1.225219579598701,
'normalize(nFlights)': -2.3288120787774154,
'normalize(totalTripDurationMinutes)': -0.44072158058922295,
'sin_2πoutArrTime/86400': -1.0494189248239782,
'sin_2πoutDepTime/86400': 0.06320548375577714,
'sin_4πoutArrTime/86400': -0.4007737199632005,
'sin_4πoutDepTime/86400': -0.15198982460097238,
})
m.load_data()
assert approx(-1555.1201442643503) == m.loglike()
from larch.model.nnnl import NNNL
n = NNNL(m)
n.finalize()
n.load_data()
n.set_values(m.pf.value / m.pf.value['MU_outDepTimeHours'])
n.set_value('MU_outDepTimeHours', m.pf.value['MU_outDepTimeHours'] / m.pf.value['MU_nFlights'])
n.set_value('MU_nFlights', m.pf.value['MU_nFlights'])
assert n.loglike(n.pf.value) == approx( m.loglike(m.pf.value) )
numpy.testing.assert_array_almost_equal(
n.d_logsums(),
m.d_logsums()
)
| gpl-3.0 |
ngoix/OCRF | sklearn/feature_selection/tests/test_chi2.py | 56 | 2400 | """
Tests for chi2, currently the only feature selection function designed
specifically to work with sparse matrices.
"""
import numpy as np
from scipy.sparse import coo_matrix, csr_matrix
import scipy.stats
from sklearn.feature_selection import SelectKBest, chi2
from sklearn.feature_selection.univariate_selection import _chisquare
from nose.tools import assert_raises
from numpy.testing import assert_equal, assert_array_almost_equal
# Feature 0 is highly informative for class 1;
# feature 1 is the same everywhere;
# feature 2 is a bit informative for class 2.
X = [[2, 1, 2],
[9, 1, 1],
[6, 1, 2],
[0, 1, 2]]
y = [0, 1, 2, 2]
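# A quick intuition check (illustrative, not asserted in the tests below):
# chi2(X, y)[0] gives per-feature statistics; the constant feature 1 scores 0,
# while the highly informative feature 0 scores highest.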
def mkchi2(k):
"""Make k-best chi2 selector"""
return SelectKBest(chi2, k=k)
def test_chi2():
# Test Chi2 feature extraction
    chi2 = mkchi2(k=1).fit(X, y)
assert_equal(chi2.get_support(indices=True), [0])
assert_equal(chi2.transform(X), np.array(X)[:, [0]])
chi2 = mkchi2(k=2).fit(X, y)
assert_equal(sorted(chi2.get_support(indices=True)), [0, 2])
Xsp = csr_matrix(X, dtype=np.float64)
chi2 = mkchi2(k=2).fit(Xsp, y)
assert_equal(sorted(chi2.get_support(indices=True)), [0, 2])
Xtrans = chi2.transform(Xsp)
assert_equal(Xtrans.shape, [Xsp.shape[0], 2])
# == doesn't work on scipy.sparse matrices
Xtrans = Xtrans.toarray()
Xtrans2 = mkchi2(k=2).fit_transform(Xsp, y).toarray()
assert_equal(Xtrans, Xtrans2)
def test_chi2_coo():
# Check that chi2 works with a COO matrix
# (as returned by CountVectorizer, DictVectorizer)
Xcoo = coo_matrix(X)
mkchi2(k=2).fit_transform(Xcoo, y)
# if we got here without an exception, we're safe
def test_chi2_negative():
# Check for proper error on negative numbers in the input X.
X, y = [[0, 1], [-1e-20, 1]], [0, 1]
for X in (X, np.array(X), csr_matrix(X)):
assert_raises(ValueError, chi2, X, y)
def test_chisquare():
# Test replacement for scipy.stats.chisquare against the original.
obs = np.array([[2., 2.],
[1., 1.]])
exp = np.array([[1.5, 1.5],
[1.5, 1.5]])
# call SciPy first because our version overwrites obs
chi_scp, p_scp = scipy.stats.chisquare(obs, exp)
chi_our, p_our = _chisquare(obs, exp)
assert_array_almost_equal(chi_scp, chi_our)
assert_array_almost_equal(p_scp, p_our)
| bsd-3-clause |
eg-zhang/scikit-learn | sklearn/__check_build/__init__.py | 345 | 1671 | """ Module to give helpful messages to the user that did not
compile the scikit properly.
"""
import os
INPLACE_MSG = """
It appears that you are importing a local scikit-learn source tree. For
this, you need to have an inplace install. Maybe you are in the source
directory and you need to try from another location."""
STANDARD_MSG = """
If you have used an installer, please check that it is suited for your
Python version, your operating system and your platform."""
def raise_build_error(e):
# Raise a comprehensible error and list the contents of the
# directory to help debugging on the mailing list.
local_dir = os.path.split(__file__)[0]
msg = STANDARD_MSG
if local_dir == "sklearn/__check_build":
# Picking up the local install: this will work only if the
# install is an 'inplace build'
msg = INPLACE_MSG
dir_content = list()
for i, filename in enumerate(os.listdir(local_dir)):
if ((i + 1) % 3):
dir_content.append(filename.ljust(26))
else:
dir_content.append(filename + '\n')
raise ImportError("""%s
___________________________________________________________________________
Contents of %s:
%s
___________________________________________________________________________
It seems that scikit-learn has not been built correctly.
If you have installed scikit-learn from source, please do not forget
to build the package before using it: run `python setup.py install` or
`make` in the source directory.
%s""" % (e, local_dir, ''.join(dir_content).strip(), msg))
try:
from ._check_build import check_build
except ImportError as e:
raise_build_error(e)
| bsd-3-clause |
mhdella/ThinkStats2 | code/timeseries.py | 66 | 18035 | """This file contains code for use with "Think Stats",
by Allen B. Downey, available from greenteapress.com
Copyright 2014 Allen B. Downey
License: GNU GPLv3 http://www.gnu.org/licenses/gpl.html
"""
from __future__ import print_function
import pandas
import numpy as np
import statsmodels.formula.api as smf
import statsmodels.tsa.stattools as smtsa
import matplotlib.pyplot as pyplot
import thinkplot
import thinkstats2
FORMATS = ['png']
def ReadData():
"""Reads data about cannabis transactions.
http://zmjones.com/static/data/mj-clean.csv
returns: DataFrame
"""
transactions = pandas.read_csv('mj-clean.csv', parse_dates=[5])
return transactions
def tmean(series):
"""Computes a trimmed mean.
series: Series
returns: float
"""
t = series.values
n = len(t)
if n <= 3:
return t.mean()
    trim = max(1, n // 10)
return np.mean(sorted(t)[trim:n-trim])
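# Minimal sketch (added for illustration; not in the original file): a single
# large outlier drags the plain mean upward but is trimmed away by tmean.
def _tmean_example():
    series = pandas.Series([1, 2, 2, 3, 2, 3, 2, 1, 2, 3, 100])
    # n=11, so tmean drops one value from each end (the 1 and the 100)
    return series.mean(), tmean(series)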
def GroupByDay(transactions, func=np.mean):
"""Groups transactions by day and compute the daily mean ppg.
transactions: DataFrame of transactions
returns: DataFrame of daily prices
"""
groups = transactions[['date', 'ppg']].groupby('date')
daily = groups.aggregate(func)
daily['date'] = daily.index
start = daily.date[0]
one_year = np.timedelta64(1, 'Y')
daily['years'] = (daily.date - start) / one_year
return daily
def GroupByQualityAndDay(transactions):
"""Divides transactions by quality and computes mean daily price.
transaction: DataFrame of transactions
returns: map from quality to time series of ppg
"""
groups = transactions.groupby('quality')
dailies = {}
for name, group in groups:
dailies[name] = GroupByDay(group)
return dailies
def PlotDailies(dailies):
"""Makes a plot with daily prices for different qualities.
dailies: map from name to DataFrame
"""
thinkplot.PrePlot(rows=3)
for i, (name, daily) in enumerate(dailies.items()):
thinkplot.SubPlot(i+1)
title = 'price per gram ($)' if i == 0 else ''
thinkplot.Config(ylim=[0, 20], title=title)
thinkplot.Scatter(daily.ppg, s=10, label=name)
if i == 2:
pyplot.xticks(rotation=30)
else:
thinkplot.Config(xticks=[])
thinkplot.Save(root='timeseries1',
formats=FORMATS)
def RunLinearModel(daily):
"""Runs a linear model of prices versus years.
daily: DataFrame of daily prices
returns: model, results
"""
model = smf.ols('ppg ~ years', data=daily)
results = model.fit()
return model, results
def PlotFittedValues(model, results, label=''):
"""Plots original data and fitted values.
model: StatsModel model object
results: StatsModel results object
"""
years = model.exog[:, 1]
values = model.endog
thinkplot.Scatter(years, values, s=15, label=label)
thinkplot.Plot(years, results.fittedvalues, label='model')
def PlotResiduals(model, results):
"""Plots the residuals of a model.
model: StatsModel model object
results: StatsModel results object
"""
years = model.exog[:, 1]
thinkplot.Plot(years, results.resid, linewidth=0.5, alpha=0.5)
def PlotResidualPercentiles(model, results, index=1, num_bins=20):
"""Plots percentiles of the residuals.
model: StatsModel model object
results: StatsModel results object
index: which exogenous variable to use
num_bins: how many bins to divide the x-axis into
"""
exog = model.exog[:, index]
resid = results.resid.values
df = pandas.DataFrame(dict(exog=exog, resid=resid))
bins = np.linspace(np.min(exog), np.max(exog), num_bins)
indices = np.digitize(exog, bins)
groups = df.groupby(indices)
means = [group.exog.mean() for _, group in groups][1:-1]
cdfs = [thinkstats2.Cdf(group.resid) for _, group in groups][1:-1]
thinkplot.PrePlot(3)
for percent in [75, 50, 25]:
percentiles = [cdf.Percentile(percent) for cdf in cdfs]
label = '%dth' % percent
thinkplot.Plot(means, percentiles, label=label)
def SimulateResults(daily, iters=101, func=RunLinearModel):
"""Run simulations based on resampling residuals.
daily: DataFrame of daily prices
iters: number of simulations
func: function that fits a model to the data
returns: list of result objects
"""
_, results = func(daily)
fake = daily.copy()
result_seq = []
for _ in range(iters):
fake.ppg = results.fittedvalues + thinkstats2.Resample(results.resid)
_, fake_results = func(fake)
result_seq.append(fake_results)
return result_seq
def SimulateIntervals(daily, iters=101, func=RunLinearModel):
"""Run simulations based on different subsets of the data.
daily: DataFrame of daily prices
iters: number of simulations
func: function that fits a model to the data
returns: list of result objects
"""
result_seq = []
starts = np.linspace(0, len(daily), iters).astype(int)
for start in starts[:-2]:
subset = daily[start:]
_, results = func(subset)
fake = subset.copy()
for _ in range(iters):
fake.ppg = (results.fittedvalues +
thinkstats2.Resample(results.resid))
_, fake_results = func(fake)
result_seq.append(fake_results)
return result_seq
def GeneratePredictions(result_seq, years, add_resid=False):
"""Generates an array of predicted values from a list of model results.
When add_resid is False, predictions represent sampling error only.
When add_resid is True, they also include residual error (which is
more relevant to prediction).
result_seq: list of model results
years: sequence of times (in years) to make predictions for
add_resid: boolean, whether to add in resampled residuals
returns: sequence of predictions
"""
n = len(years)
d = dict(Intercept=np.ones(n), years=years, years2=years**2)
predict_df = pandas.DataFrame(d)
predict_seq = []
for fake_results in result_seq:
predict = fake_results.predict(predict_df)
if add_resid:
predict += thinkstats2.Resample(fake_results.resid, n)
predict_seq.append(predict)
return predict_seq
def GenerateSimplePrediction(results, years):
"""Generates a simple prediction.
results: results object
years: sequence of times (in years) to make predictions for
returns: sequence of predicted values
"""
n = len(years)
inter = np.ones(n)
d = dict(Intercept=inter, years=years, years2=years**2)
predict_df = pandas.DataFrame(d)
predict = results.predict(predict_df)
return predict
def PlotPredictions(daily, years, iters=101, percent=90, func=RunLinearModel):
"""Plots predictions.
daily: DataFrame of daily prices
years: sequence of times (in years) to make predictions for
iters: number of simulations
percent: what percentile range to show
func: function that fits a model to the data
"""
result_seq = SimulateResults(daily, iters=iters, func=func)
p = (100 - percent) / 2
percents = p, 100-p
predict_seq = GeneratePredictions(result_seq, years, add_resid=True)
low, high = thinkstats2.PercentileRows(predict_seq, percents)
thinkplot.FillBetween(years, low, high, alpha=0.3, color='gray')
predict_seq = GeneratePredictions(result_seq, years, add_resid=False)
low, high = thinkstats2.PercentileRows(predict_seq, percents)
thinkplot.FillBetween(years, low, high, alpha=0.5, color='gray')
def PlotIntervals(daily, years, iters=101, percent=90, func=RunLinearModel):
"""Plots predictions based on different intervals.
daily: DataFrame of daily prices
years: sequence of times (in years) to make predictions for
iters: number of simulations
percent: what percentile range to show
func: function that fits a model to the data
"""
result_seq = SimulateIntervals(daily, iters=iters, func=func)
p = (100 - percent) / 2
percents = p, 100-p
predict_seq = GeneratePredictions(result_seq, years, add_resid=True)
low, high = thinkstats2.PercentileRows(predict_seq, percents)
thinkplot.FillBetween(years, low, high, alpha=0.2, color='gray')
def Correlate(dailies):
"""Compute the correlation matrix between prices for difference qualities.
dailies: map from quality to time series of ppg
returns: correlation matrix
"""
df = pandas.DataFrame()
for name, daily in dailies.items():
df[name] = daily.ppg
return df.corr()
def CorrelateResid(dailies):
"""Compute the correlation matrix between residuals.
dailies: map from quality to time series of ppg
returns: correlation matrix
"""
df = pandas.DataFrame()
for name, daily in dailies.items():
_, results = RunLinearModel(daily)
df[name] = results.resid
return df.corr()
def TestCorrelateResid(dailies, iters=101):
"""Tests observed correlations.
dailies: map from quality to time series of ppg
iters: number of simulations
"""
t = []
names = ['high', 'medium', 'low']
for name in names:
daily = dailies[name]
t.append(SimulateResults(daily, iters=iters))
corr = CorrelateResid(dailies)
arrays = []
for result_seq in zip(*t):
df = pandas.DataFrame()
for name, results in zip(names, result_seq):
df[name] = results.resid
opp_sign = corr * df.corr() < 0
arrays.append((opp_sign.astype(int)))
print(np.sum(arrays))
def RunModels(dailies):
"""Runs linear regression for each group in dailies.
dailies: map from group name to DataFrame
"""
rows = []
for daily in dailies.values():
_, results = RunLinearModel(daily)
intercept, slope = results.params
p1, p2 = results.pvalues
r2 = results.rsquared
s = r'%0.3f (%0.2g) & %0.3f (%0.2g) & %0.3f \\'
row = s % (intercept, p1, slope, p2, r2)
rows.append(row)
# print results in a LaTeX table
print(r'\begin{tabular}{|c|c|c|}')
print(r'\hline')
print(r'intercept & slope & $R^2$ \\ \hline')
for row in rows:
print(row)
print(r'\hline')
print(r'\end{tabular}')
def FillMissing(daily, span=30):
"""Fills missing values with an exponentially weighted moving average.
Resulting DataFrame has new columns 'ewma' and 'resid'.
daily: DataFrame of daily prices
span: window size (sort of) passed to ewma
returns: new DataFrame of daily prices
"""
dates = pandas.date_range(daily.index.min(), daily.index.max())
reindexed = daily.reindex(dates)
ewma = pandas.ewma(reindexed.ppg, span=span)
resid = (reindexed.ppg - ewma).dropna()
fake_data = ewma + thinkstats2.Resample(resid, len(reindexed))
reindexed.ppg.fillna(fake_data, inplace=True)
reindexed['ewma'] = ewma
reindexed['resid'] = reindexed.ppg - ewma
return reindexed
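# Hypothetical sketch (added; the dates and prices are made up): FillMissing
# reindexes to a continuous date range and fills the resulting gaps.
def _fill_missing_example():
    index = pandas.to_datetime(['2014-01-01', '2014-01-02', '2014-01-05'])
    daily = pandas.DataFrame(dict(ppg=[10.0, 11.0, 10.5]), index=index)
    filled = FillMissing(daily, span=2)
    assert not filled.ppg.isnull().any()  # the Jan 3-4 gap is now filled
    return filled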
def AddWeeklySeasonality(daily):
"""Adds a weekly pattern.
daily: DataFrame of daily prices
returns: new DataFrame of daily prices
"""
frisat = (daily.index.dayofweek==4) | (daily.index.dayofweek==5)
fake = daily.copy()
fake.ppg[frisat] += np.random.uniform(0, 2, frisat.sum())
return fake
def PrintSerialCorrelations(dailies):
"""Prints a table of correlations with different lags.
dailies: map from category name to DataFrame of daily prices
"""
filled_dailies = {}
for name, daily in dailies.items():
filled_dailies[name] = FillMissing(daily, span=30)
# print serial correlations for raw price data
for name, filled in filled_dailies.items():
corr = thinkstats2.SerialCorr(filled.ppg, lag=1)
print(name, corr)
rows = []
for lag in [1, 7, 30, 365]:
row = [str(lag)]
for name, filled in filled_dailies.items():
corr = thinkstats2.SerialCorr(filled.resid, lag)
row.append('%.2g' % corr)
rows.append(row)
print(r'\begin{tabular}{|c|c|c|c|}')
print(r'\hline')
print(r'lag & high & medium & low \\ \hline')
for row in rows:
print(' & '.join(row) + r' \\')
print(r'\hline')
print(r'\end{tabular}')
filled = filled_dailies['high']
acf = smtsa.acf(filled.resid, nlags=365, unbiased=True)
print('%0.3f, %0.3f, %0.3f, %0.3f, %0.3f' %
(acf[0], acf[1], acf[7], acf[30], acf[365]))
def SimulateAutocorrelation(daily, iters=1001, nlags=40):
"""Resample residuals, compute autocorrelation, and plot percentiles.
daily: DataFrame
iters: number of simulations to run
nlags: maximum lags to compute autocorrelation
"""
# run simulations
t = []
for _ in range(iters):
filled = FillMissing(daily, span=30)
resid = thinkstats2.Resample(filled.resid)
acf = smtsa.acf(resid, nlags=nlags, unbiased=True)[1:]
t.append(np.abs(acf))
high = thinkstats2.PercentileRows(t, [97.5])[0]
low = -high
lags = range(1, nlags+1)
thinkplot.FillBetween(lags, low, high, alpha=0.2, color='gray')
def PlotAutoCorrelation(dailies, nlags=40, add_weekly=False):
"""Plots autocorrelation functions.
dailies: map from category name to DataFrame of daily prices
nlags: number of lags to compute
add_weekly: boolean, whether to add a simulated weekly pattern
"""
thinkplot.PrePlot(3)
daily = dailies['high']
SimulateAutocorrelation(daily)
for name, daily in dailies.items():
if add_weekly:
daily = AddWeeklySeasonality(daily)
filled = FillMissing(daily, span=30)
acf = smtsa.acf(filled.resid, nlags=nlags, unbiased=True)
lags = np.arange(len(acf))
thinkplot.Plot(lags[1:], acf[1:], label=name)
def MakeAcfPlot(dailies):
"""Makes a figure showing autocorrelation functions.
dailies: map from category name to DataFrame of daily prices
"""
axis = [0, 41, -0.2, 0.2]
thinkplot.PrePlot(cols=2)
PlotAutoCorrelation(dailies, add_weekly=False)
thinkplot.Config(axis=axis,
loc='lower right',
ylabel='correlation',
xlabel='lag (day)')
thinkplot.SubPlot(2)
PlotAutoCorrelation(dailies, add_weekly=True)
thinkplot.Save(root='timeseries9',
axis=axis,
loc='lower right',
xlabel='lag (days)',
formats=FORMATS)
def PlotRollingMean(daily, name):
"""Plots rolling mean and EWMA.
daily: DataFrame of daily prices
"""
dates = pandas.date_range(daily.index.min(), daily.index.max())
reindexed = daily.reindex(dates)
thinkplot.PrePlot(cols=2)
thinkplot.Scatter(reindexed.ppg, s=15, alpha=0.1, label=name)
roll_mean = pandas.rolling_mean(reindexed.ppg, 30)
thinkplot.Plot(roll_mean, label='rolling mean')
pyplot.xticks(rotation=30)
thinkplot.Config(ylabel='price per gram ($)')
thinkplot.SubPlot(2)
thinkplot.Scatter(reindexed.ppg, s=15, alpha=0.1, label=name)
ewma = pandas.ewma(reindexed.ppg, span=30)
thinkplot.Plot(ewma, label='EWMA')
pyplot.xticks(rotation=30)
thinkplot.Save(root='timeseries10',
formats=FORMATS)
def PlotFilled(daily, name):
"""Plots the EWMA and filled data.
daily: DataFrame of daily prices
"""
filled = FillMissing(daily, span=30)
thinkplot.Scatter(filled.ppg, s=15, alpha=0.3, label=name)
thinkplot.Plot(filled.ewma, label='EWMA', alpha=0.4)
pyplot.xticks(rotation=30)
thinkplot.Save(root='timeseries8',
ylabel='price per gram ($)',
formats=FORMATS)
def PlotLinearModel(daily, name):
"""Plots a linear fit to a sequence of prices, and the residuals.
daily: DataFrame of daily prices
name: string
"""
model, results = RunLinearModel(daily)
PlotFittedValues(model, results, label=name)
thinkplot.Save(root='timeseries2',
title='fitted values',
xlabel='years',
xlim=[-0.1, 3.8],
ylabel='price per gram ($)',
formats=FORMATS)
PlotResidualPercentiles(model, results)
thinkplot.Save(root='timeseries3',
title='residuals',
xlabel='years',
ylabel='price per gram ($)',
formats=FORMATS)
#years = np.linspace(0, 5, 101)
#predict = GenerateSimplePrediction(results, years)
def main(name):
thinkstats2.RandomSeed(18)
transactions = ReadData()
dailies = GroupByQualityAndDay(transactions)
PlotDailies(dailies)
RunModels(dailies)
PrintSerialCorrelations(dailies)
MakeAcfPlot(dailies)
name = 'high'
daily = dailies[name]
PlotLinearModel(daily, name)
PlotRollingMean(daily, name)
PlotFilled(daily, name)
years = np.linspace(0, 5, 101)
thinkplot.Scatter(daily.years, daily.ppg, alpha=0.1, label=name)
PlotPredictions(daily, years)
xlim = years[0]-0.1, years[-1]+0.1
thinkplot.Save(root='timeseries4',
title='predictions',
xlabel='years',
xlim=xlim,
ylabel='price per gram ($)',
formats=FORMATS)
name = 'medium'
daily = dailies[name]
thinkplot.Scatter(daily.years, daily.ppg, alpha=0.1, label=name)
PlotIntervals(daily, years)
PlotPredictions(daily, years)
xlim = years[0]-0.1, years[-1]+0.1
thinkplot.Save(root='timeseries5',
title='predictions',
xlabel='years',
xlim=xlim,
ylabel='price per gram ($)',
formats=FORMATS)
if __name__ == '__main__':
import sys
main(*sys.argv)
| gpl-3.0 |
NunoEdgarGub1/scikit-learn | examples/model_selection/randomized_search.py | 201 | 3214 | """
=========================================================================
Comparing randomized search and grid search for hyperparameter estimation
=========================================================================
Compare randomized search and grid search for optimizing hyperparameters of a
random forest.
All parameters that influence the learning are searched simultaneously
(except for the number of estimators, which poses a time / quality tradeoff).
The randomized search and the grid search explore exactly the same space of
parameters. The resulting parameter settings are quite similar, while the run
time for randomized search is drastically lower.
The performance is slightly worse for the randomized search, though this
is most likely a noise effect and would not carry over to a held-out test set.
Note that in practice, one would not search over this many different parameters
simultaneously using grid search, but pick only the ones deemed most important.
"""
print(__doc__)
import numpy as np
from time import time
from operator import itemgetter
from scipy.stats import randint as sp_randint
from sklearn.grid_search import GridSearchCV, RandomizedSearchCV
from sklearn.datasets import load_digits
from sklearn.ensemble import RandomForestClassifier
# get some data
digits = load_digits()
X, y = digits.data, digits.target
# build a classifier
clf = RandomForestClassifier(n_estimators=20)
# Utility function to report best scores
def report(grid_scores, n_top=3):
top_scores = sorted(grid_scores, key=itemgetter(1), reverse=True)[:n_top]
for i, score in enumerate(top_scores):
print("Model with rank: {0}".format(i + 1))
print("Mean validation score: {0:.3f} (std: {1:.3f})".format(
score.mean_validation_score,
np.std(score.cv_validation_scores)))
print("Parameters: {0}".format(score.parameters))
print("")
# specify parameters and distributions to sample from
param_dist = {"max_depth": [3, None],
"max_features": sp_randint(1, 11),
"min_samples_split": sp_randint(1, 11),
"min_samples_leaf": sp_randint(1, 11),
"bootstrap": [True, False],
"criterion": ["gini", "entropy"]}
# run randomized search
n_iter_search = 20
random_search = RandomizedSearchCV(clf, param_distributions=param_dist,
n_iter=n_iter_search)
start = time()
random_search.fit(X, y)
print("RandomizedSearchCV took %.2f seconds for %d candidates"
" parameter settings." % ((time() - start), n_iter_search))
report(random_search.grid_scores_)
# use a full grid over all parameters
param_grid = {"max_depth": [3, None],
"max_features": [1, 3, 10],
"min_samples_split": [1, 3, 10],
"min_samples_leaf": [1, 3, 10],
"bootstrap": [True, False],
"criterion": ["gini", "entropy"]}
# run grid search
grid_search = GridSearchCV(clf, param_grid=param_grid)
start = time()
grid_search.fit(X, y)
print("GridSearchCV took %.2f seconds for %d candidate parameter settings."
% (time() - start, len(grid_search.grid_scores_)))
report(grid_search.grid_scores_)
| bsd-3-clause |
syedjafri/ThinkStats2 | code/scatter.py | 69 | 4281 | """This file contains code for use with "Think Stats",
by Allen B. Downey, available from greenteapress.com
Copyright 2010 Allen B. Downey
License: GNU GPLv3 http://www.gnu.org/licenses/gpl.html
"""
from __future__ import print_function
import sys
import numpy as np
import math
import brfss
import thinkplot
import thinkstats2
def GetHeightWeight(df, hjitter=0.0, wjitter=0.0):
"""Get sequences of height and weight.
df: DataFrame with htm3 and wtkg2
hjitter: float magnitude of random noise added to heights
wjitter: float magnitude of random noise added to weights
returns: tuple of sequences (heights, weights)
"""
heights = df.htm3
if hjitter:
heights = thinkstats2.Jitter(heights, hjitter)
weights = df.wtkg2
if wjitter:
weights = thinkstats2.Jitter(weights, wjitter)
return heights, weights
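# Sketch (added for illustration; assumes thinkstats2.Jitter adds small
# random noise of the given magnitude): jitter spreads identical rounded
# measurements so they do not overplot.
def _jitter_example():
    heights = np.array([170.0, 170.0, 180.0])
    return thinkstats2.Jitter(heights, 1.3)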
def ScatterPlot(heights, weights, alpha=1.0):
"""Make a scatter plot and save it.
heights: sequence of float
weights: sequence of float
alpha: float
"""
thinkplot.Scatter(heights, weights, alpha=alpha)
thinkplot.Config(xlabel='height (cm)',
ylabel='weight (kg)',
axis=[140, 210, 20, 200],
legend=False)
def HexBin(heights, weights, bins=None):
"""Make a hexbin plot and save it.
heights: sequence of float
weights: sequence of float
bins: 'log' or None for linear
"""
thinkplot.HexBin(heights, weights, bins=bins)
thinkplot.Config(xlabel='height (cm)',
ylabel='weight (kg)',
axis=[140, 210, 20, 200],
legend=False)
def MakeFigures(df):
"""Make scatterplots.
"""
sample = thinkstats2.SampleRows(df, 5000)
# simple scatter plot
thinkplot.PrePlot(cols=2)
heights, weights = GetHeightWeight(sample)
ScatterPlot(heights, weights)
# scatter plot with jitter
thinkplot.SubPlot(2)
heights, weights = GetHeightWeight(sample, hjitter=1.3, wjitter=0.5)
ScatterPlot(heights, weights)
thinkplot.Save(root='scatter1')
# with jitter and transparency
thinkplot.PrePlot(cols=2)
ScatterPlot(heights, weights, alpha=0.1)
# hexbin plot
thinkplot.SubPlot(2)
heights, weights = GetHeightWeight(df, hjitter=1.3, wjitter=0.5)
HexBin(heights, weights)
thinkplot.Save(root='scatter2')
def BinnedPercentiles(df):
"""Bin the data by height and plot percentiles of weight for eachbin.
df: DataFrame
"""
cdf = thinkstats2.Cdf(df.htm3)
print('Fraction between 140 and 200 cm', cdf[200] - cdf[140])
bins = np.arange(135, 210, 5)
indices = np.digitize(df.htm3, bins)
groups = df.groupby(indices)
heights = [group.htm3.mean() for i, group in groups][1:-1]
cdfs = [thinkstats2.Cdf(group.wtkg2) for i, group in groups][1:-1]
thinkplot.PrePlot(3)
for percent in [75, 50, 25]:
weights = [cdf.Percentile(percent) for cdf in cdfs]
label = '%dth' % percent
thinkplot.Plot(heights, weights, label=label)
thinkplot.Save(root='scatter3',
xlabel='height (cm)',
ylabel='weight (kg)')
def Correlations(df):
print('pandas cov', df.htm3.cov(df.wtkg2))
#print('NumPy cov', np.cov(df.htm3, df.wtkg2, ddof=0))
print('thinkstats2 Cov', thinkstats2.Cov(df.htm3, df.wtkg2))
print()
print('pandas corr', df.htm3.corr(df.wtkg2))
#print('NumPy corrcoef', np.corrcoef(df.htm3, df.wtkg2, ddof=0))
print('thinkstats2 Corr', thinkstats2.Corr(df.htm3, df.wtkg2))
print()
print('pandas corr spearman', df.htm3.corr(df.wtkg2, method='spearman'))
print('thinkstats2 SpearmanCorr',
thinkstats2.SpearmanCorr(df.htm3, df.wtkg2))
    print('thinkstats2 SpearmanCorr log wtkg2',
thinkstats2.SpearmanCorr(df.htm3, np.log(df.wtkg2)))
print()
    print('thinkstats2 Corr log wtkg2',
thinkstats2.Corr(df.htm3, np.log(df.wtkg2)))
print()
def main(script):
thinkstats2.RandomSeed(17)
df = brfss.ReadBrfss(nrows=None)
df = df.dropna(subset=['htm3', 'wtkg2'])
Correlations(df)
return
MakeFigures(df)
BinnedPercentiles(df)
if __name__ == '__main__':
main(*sys.argv)
| gpl-3.0 |
kjung/scikit-learn | sklearn/ensemble/tests/test_gradient_boosting.py | 43 | 39945 | """
Testing for the gradient boosting module (sklearn.ensemble.gradient_boosting).
"""
import warnings
import numpy as np
from itertools import product
from scipy.sparse import csr_matrix
from scipy.sparse import csc_matrix
from scipy.sparse import coo_matrix
from sklearn import datasets
from sklearn.base import clone
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.ensemble import GradientBoostingRegressor
from sklearn.ensemble.gradient_boosting import ZeroEstimator
from sklearn.metrics import mean_squared_error
from sklearn.utils import check_random_state, tosequence
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_warns
from sklearn.utils.testing import skip_if_32bit
from sklearn.exceptions import DataConversionWarning
from sklearn.exceptions import NotFittedError
# toy sample
X = [[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]]
y = [-1, -1, -1, 1, 1, 1]
T = [[-1, -1], [2, 2], [3, 2]]
true_result = [-1, 1, 1]
rng = np.random.RandomState(0)
# also load the boston dataset
# and randomly permute it
boston = datasets.load_boston()
perm = rng.permutation(boston.target.size)
boston.data = boston.data[perm]
boston.target = boston.target[perm]
# also load the iris dataset
# and randomly permute it
iris = datasets.load_iris()
perm = rng.permutation(iris.target.size)
iris.data = iris.data[perm]
iris.target = iris.target[perm]
def check_classification_toy(presort, loss):
# Check classification on a toy dataset.
clf = GradientBoostingClassifier(loss=loss, n_estimators=10,
random_state=1, presort=presort)
assert_raises(ValueError, clf.predict, T)
clf.fit(X, y)
assert_array_equal(clf.predict(T), true_result)
assert_equal(10, len(clf.estimators_))
deviance_decrease = (clf.train_score_[:-1] - clf.train_score_[1:])
assert_true(np.any(deviance_decrease >= 0.0))
leaves = clf.apply(X)
assert_equal(leaves.shape, (6, 10, 1))
def test_classification_toy():
for presort, loss in product(('auto', True, False),
('deviance', 'exponential')):
yield check_classification_toy, presort, loss
def test_parameter_checks():
# Check input parameter validation.
assert_raises(ValueError,
GradientBoostingClassifier(n_estimators=0).fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(n_estimators=-1).fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(learning_rate=0.0).fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(learning_rate=-1.0).fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(loss='foobar').fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(min_samples_split=0.0).fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(min_samples_split=-1.0).fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(min_samples_split=1.1).fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(min_samples_leaf=0).fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(min_samples_leaf=-1.0).fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(min_weight_fraction_leaf=-1.).fit,
X, y)
assert_raises(ValueError,
GradientBoostingClassifier(min_weight_fraction_leaf=0.6).fit,
X, y)
assert_raises(ValueError,
GradientBoostingClassifier(subsample=0.0).fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(subsample=1.1).fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(subsample=-0.1).fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(max_depth=-0.1).fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(max_depth=0).fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(init={}).fit, X, y)
# test fit before feature importance
assert_raises(ValueError,
lambda: GradientBoostingClassifier().feature_importances_)
# deviance requires ``n_classes >= 2``.
assert_raises(ValueError,
lambda X, y: GradientBoostingClassifier(
loss='deviance').fit(X, y),
X, [0, 0, 0, 0])
def test_loss_function():
assert_raises(ValueError,
GradientBoostingClassifier(loss='ls').fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(loss='lad').fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(loss='quantile').fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(loss='huber').fit, X, y)
assert_raises(ValueError,
GradientBoostingRegressor(loss='deviance').fit, X, y)
assert_raises(ValueError,
GradientBoostingRegressor(loss='exponential').fit, X, y)
def check_classification_synthetic(presort, loss):
# Test GradientBoostingClassifier on synthetic dataset used by
# Hastie et al. in ESLII Example 12.7.
X, y = datasets.make_hastie_10_2(n_samples=12000, random_state=1)
X_train, X_test = X[:2000], X[2000:]
y_train, y_test = y[:2000], y[2000:]
gbrt = GradientBoostingClassifier(n_estimators=100, min_samples_split=2,
max_depth=1, loss=loss,
learning_rate=1.0, random_state=0)
gbrt.fit(X_train, y_train)
error_rate = (1.0 - gbrt.score(X_test, y_test))
assert_less(error_rate, 0.09)
gbrt = GradientBoostingClassifier(n_estimators=200, min_samples_split=2,
max_depth=1, loss=loss,
learning_rate=1.0, subsample=0.5,
random_state=0,
presort=presort)
gbrt.fit(X_train, y_train)
error_rate = (1.0 - gbrt.score(X_test, y_test))
assert_less(error_rate, 0.08)
def test_classification_synthetic():
    for presort, loss in product(('auto', True, False),
                                 ('deviance', 'exponential')):
yield check_classification_synthetic, presort, loss
def check_boston(presort, loss, subsample):
# Check consistency on dataset boston house prices with least squares
# and least absolute deviation.
ones = np.ones(len(boston.target))
last_y_pred = None
for sample_weight in None, ones, 2 * ones:
clf = GradientBoostingRegressor(n_estimators=100,
loss=loss,
max_depth=4,
subsample=subsample,
min_samples_split=2,
random_state=1,
presort=presort)
assert_raises(ValueError, clf.predict, boston.data)
clf.fit(boston.data, boston.target,
sample_weight=sample_weight)
leaves = clf.apply(boston.data)
assert_equal(leaves.shape, (506, 100))
y_pred = clf.predict(boston.data)
mse = mean_squared_error(boston.target, y_pred)
assert_less(mse, 6.0)
if last_y_pred is not None:
assert_array_almost_equal(last_y_pred, y_pred)
last_y_pred = y_pred
def test_boston():
for presort, loss, subsample in product(('auto', True, False),
('ls', 'lad', 'huber'),
(1.0, 0.5)):
yield check_boston, presort, loss, subsample
def check_iris(presort, subsample, sample_weight):
# Check consistency on dataset iris.
clf = GradientBoostingClassifier(n_estimators=100,
loss='deviance',
random_state=1,
subsample=subsample,
presort=presort)
clf.fit(iris.data, iris.target, sample_weight=sample_weight)
score = clf.score(iris.data, iris.target)
assert_greater(score, 0.9)
leaves = clf.apply(iris.data)
assert_equal(leaves.shape, (150, 100, 3))
def test_iris():
ones = np.ones(len(iris.target))
for presort, subsample, sample_weight in product(('auto', True, False),
(1.0, 0.5),
(None, ones)):
yield check_iris, presort, subsample, sample_weight
def test_regression_synthetic():
# Test on synthetic regression datasets used in Leo Breiman,
    # "Bagging Predictors". Machine Learning 24(2): 123-140 (1996).
random_state = check_random_state(1)
regression_params = {'n_estimators': 100, 'max_depth': 4,
'min_samples_split': 2, 'learning_rate': 0.1,
'loss': 'ls'}
# Friedman1
X, y = datasets.make_friedman1(n_samples=1200,
random_state=random_state,
noise=1.0)
X_train, y_train = X[:200], y[:200]
X_test, y_test = X[200:], y[200:]
for presort in True, False:
clf = GradientBoostingRegressor(presort=presort)
clf.fit(X_train, y_train)
mse = mean_squared_error(y_test, clf.predict(X_test))
assert_less(mse, 5.0)
# Friedman2
X, y = datasets.make_friedman2(n_samples=1200, random_state=random_state)
X_train, y_train = X[:200], y[:200]
X_test, y_test = X[200:], y[200:]
for presort in True, False:
regression_params['presort'] = presort
clf = GradientBoostingRegressor(**regression_params)
clf.fit(X_train, y_train)
mse = mean_squared_error(y_test, clf.predict(X_test))
assert_less(mse, 1700.0)
# Friedman3
X, y = datasets.make_friedman3(n_samples=1200, random_state=random_state)
X_train, y_train = X[:200], y[:200]
X_test, y_test = X[200:], y[200:]
for presort in True, False:
regression_params['presort'] = presort
clf = GradientBoostingRegressor(**regression_params)
clf.fit(X_train, y_train)
mse = mean_squared_error(y_test, clf.predict(X_test))
assert_less(mse, 0.015)
def test_feature_importances():
X = np.array(boston.data, dtype=np.float32)
y = np.array(boston.target, dtype=np.float32)
for presort in True, False:
clf = GradientBoostingRegressor(n_estimators=100, max_depth=5,
min_samples_split=2, random_state=1,
presort=presort)
clf.fit(X, y)
assert_true(hasattr(clf, 'feature_importances_'))
# XXX: Remove this test in 0.19 after transform support to estimators
# is removed.
X_new = assert_warns(
DeprecationWarning, clf.transform, X, threshold="mean")
assert_less(X_new.shape[1], X.shape[1])
feature_mask = (
clf.feature_importances_ > clf.feature_importances_.mean())
assert_array_almost_equal(X_new, X[:, feature_mask])
def test_probability_log():
# Predict probabilities.
clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
assert_raises(ValueError, clf.predict_proba, T)
clf.fit(X, y)
assert_array_equal(clf.predict(T), true_result)
# check if probabilities are in [0, 1].
y_proba = clf.predict_proba(T)
assert_true(np.all(y_proba >= 0.0))
assert_true(np.all(y_proba <= 1.0))
# derive predictions from probabilities
y_pred = clf.classes_.take(y_proba.argmax(axis=1), axis=0)
assert_array_equal(y_pred, true_result)
def test_check_inputs():
# Test input checks (shape and type of X and y).
clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
assert_raises(ValueError, clf.fit, X, y + [0, 1])
clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
assert_raises(ValueError, clf.fit, X, y,
sample_weight=([1] * len(y)) + [0, 1])
def test_check_inputs_predict():
# X has wrong shape
clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
clf.fit(X, y)
x = np.array([1.0, 2.0])[:, np.newaxis]
assert_raises(ValueError, clf.predict, x)
x = np.array([[]])
assert_raises(ValueError, clf.predict, x)
x = np.array([1.0, 2.0, 3.0])[:, np.newaxis]
assert_raises(ValueError, clf.predict, x)
clf = GradientBoostingRegressor(n_estimators=100, random_state=1)
clf.fit(X, rng.rand(len(X)))
x = np.array([1.0, 2.0])[:, np.newaxis]
assert_raises(ValueError, clf.predict, x)
x = np.array([[]])
assert_raises(ValueError, clf.predict, x)
x = np.array([1.0, 2.0, 3.0])[:, np.newaxis]
assert_raises(ValueError, clf.predict, x)
def test_check_max_features():
# test if max_features is valid.
clf = GradientBoostingRegressor(n_estimators=100, random_state=1,
max_features=0)
assert_raises(ValueError, clf.fit, X, y)
clf = GradientBoostingRegressor(n_estimators=100, random_state=1,
max_features=(len(X[0]) + 1))
assert_raises(ValueError, clf.fit, X, y)
clf = GradientBoostingRegressor(n_estimators=100, random_state=1,
max_features=-0.1)
assert_raises(ValueError, clf.fit, X, y)
def test_max_feature_regression():
# Test to make sure random state is set properly.
X, y = datasets.make_hastie_10_2(n_samples=12000, random_state=1)
X_train, X_test = X[:2000], X[2000:]
y_train, y_test = y[:2000], y[2000:]
gbrt = GradientBoostingClassifier(n_estimators=100, min_samples_split=5,
max_depth=2, learning_rate=.1,
max_features=2, random_state=1)
gbrt.fit(X_train, y_train)
deviance = gbrt.loss_(y_test, gbrt.decision_function(X_test))
assert_true(deviance < 0.5, "GB failed with deviance %.4f" % deviance)
def test_max_feature_auto():
# Test if max features is set properly for floats and str.
X, y = datasets.make_hastie_10_2(n_samples=12000, random_state=1)
_, n_features = X.shape
X_train = X[:2000]
y_train = y[:2000]
gbrt = GradientBoostingClassifier(n_estimators=1, max_features='auto')
gbrt.fit(X_train, y_train)
assert_equal(gbrt.max_features_, int(np.sqrt(n_features)))
gbrt = GradientBoostingRegressor(n_estimators=1, max_features='auto')
gbrt.fit(X_train, y_train)
assert_equal(gbrt.max_features_, n_features)
gbrt = GradientBoostingRegressor(n_estimators=1, max_features=0.3)
gbrt.fit(X_train, y_train)
assert_equal(gbrt.max_features_, int(n_features * 0.3))
gbrt = GradientBoostingRegressor(n_estimators=1, max_features='sqrt')
gbrt.fit(X_train, y_train)
assert_equal(gbrt.max_features_, int(np.sqrt(n_features)))
gbrt = GradientBoostingRegressor(n_estimators=1, max_features='log2')
gbrt.fit(X_train, y_train)
assert_equal(gbrt.max_features_, int(np.log2(n_features)))
gbrt = GradientBoostingRegressor(n_estimators=1,
max_features=0.01 / X.shape[1])
gbrt.fit(X_train, y_train)
assert_equal(gbrt.max_features_, 1)
def test_staged_predict():
# Test whether staged decision function eventually gives
# the same prediction.
X, y = datasets.make_friedman1(n_samples=1200,
random_state=1, noise=1.0)
X_train, y_train = X[:200], y[:200]
X_test = X[200:]
clf = GradientBoostingRegressor()
# test raise ValueError if not fitted
assert_raises(ValueError, lambda X: np.fromiter(
clf.staged_predict(X), dtype=np.float64), X_test)
clf.fit(X_train, y_train)
y_pred = clf.predict(X_test)
# test if prediction for last stage equals ``predict``
for y in clf.staged_predict(X_test):
assert_equal(y.shape, y_pred.shape)
assert_array_equal(y_pred, y)
def test_staged_predict_proba():
# Test whether staged predict proba eventually gives
# the same prediction.
X, y = datasets.make_hastie_10_2(n_samples=1200,
random_state=1)
X_train, y_train = X[:200], y[:200]
X_test, y_test = X[200:], y[200:]
clf = GradientBoostingClassifier(n_estimators=20)
# test raise NotFittedError if not fitted
assert_raises(NotFittedError, lambda X: np.fromiter(
clf.staged_predict_proba(X), dtype=np.float64), X_test)
clf.fit(X_train, y_train)
# test if prediction for last stage equals ``predict``
for y_pred in clf.staged_predict(X_test):
assert_equal(y_test.shape, y_pred.shape)
assert_array_equal(clf.predict(X_test), y_pred)
# test if prediction for last stage equals ``predict_proba``
for staged_proba in clf.staged_predict_proba(X_test):
assert_equal(y_test.shape[0], staged_proba.shape[0])
assert_equal(2, staged_proba.shape[1])
assert_array_equal(clf.predict_proba(X_test), staged_proba)
def test_staged_functions_defensive():
# test that staged_functions make defensive copies
rng = np.random.RandomState(0)
X = rng.uniform(size=(10, 3))
y = (4 * X[:, 0]).astype(np.int) + 1 # don't predict zeros
for estimator in [GradientBoostingRegressor(),
GradientBoostingClassifier()]:
estimator.fit(X, y)
for func in ['predict', 'decision_function', 'predict_proba']:
staged_func = getattr(estimator, "staged_" + func, None)
if staged_func is None:
# regressor has no staged_predict_proba
continue
with warnings.catch_warnings(record=True):
staged_result = list(staged_func(X))
staged_result[1][:] = 0
assert_true(np.all(staged_result[0] != 0))
def test_serialization():
# Check model serialization.
clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
clf.fit(X, y)
assert_array_equal(clf.predict(T), true_result)
assert_equal(100, len(clf.estimators_))
try:
import cPickle as pickle
except ImportError:
import pickle
serialized_clf = pickle.dumps(clf, protocol=pickle.HIGHEST_PROTOCOL)
clf = None
clf = pickle.loads(serialized_clf)
assert_array_equal(clf.predict(T), true_result)
assert_equal(100, len(clf.estimators_))
def test_degenerate_targets():
# Check if we can fit even though all targets are equal.
clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
# classifier should raise exception
assert_raises(ValueError, clf.fit, X, np.ones(len(X)))
clf = GradientBoostingRegressor(n_estimators=100, random_state=1)
clf.fit(X, np.ones(len(X)))
clf.predict([rng.rand(2)])
assert_array_equal(np.ones((1,), dtype=np.float64),
clf.predict([rng.rand(2)]))
def test_quantile_loss():
# Check if quantile loss with alpha=0.5 equals lad.
clf_quantile = GradientBoostingRegressor(n_estimators=100, loss='quantile',
max_depth=4, alpha=0.5,
random_state=7)
clf_quantile.fit(boston.data, boston.target)
y_quantile = clf_quantile.predict(boston.data)
clf_lad = GradientBoostingRegressor(n_estimators=100, loss='lad',
max_depth=4, random_state=7)
clf_lad.fit(boston.data, boston.target)
y_lad = clf_lad.predict(boston.data)
assert_array_almost_equal(y_quantile, y_lad, decimal=4)
def test_symbol_labels():
# Test with non-integer class labels.
clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
symbol_y = tosequence(map(str, y))
clf.fit(X, symbol_y)
assert_array_equal(clf.predict(T), tosequence(map(str, true_result)))
assert_equal(100, len(clf.estimators_))
def test_float_class_labels():
# Test with float class labels.
clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
float_y = np.asarray(y, dtype=np.float32)
clf.fit(X, float_y)
assert_array_equal(clf.predict(T),
np.asarray(true_result, dtype=np.float32))
assert_equal(100, len(clf.estimators_))
def test_shape_y():
# Test with float class labels.
clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
y_ = np.asarray(y, dtype=np.int32)
y_ = y_[:, np.newaxis]
# This will raise a DataConversionWarning that we want to
# "always" raise, elsewhere the warnings gets ignored in the
# later tests, and the tests that check for this warning fail
assert_warns(DataConversionWarning, clf.fit, X, y_)
assert_array_equal(clf.predict(T), true_result)
assert_equal(100, len(clf.estimators_))
def test_mem_layout():
# Test with different memory layouts of X and y
X_ = np.asfortranarray(X)
clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
clf.fit(X_, y)
assert_array_equal(clf.predict(T), true_result)
assert_equal(100, len(clf.estimators_))
X_ = np.ascontiguousarray(X)
clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
clf.fit(X_, y)
assert_array_equal(clf.predict(T), true_result)
assert_equal(100, len(clf.estimators_))
y_ = np.asarray(y, dtype=np.int32)
y_ = np.ascontiguousarray(y_)
clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
clf.fit(X, y_)
assert_array_equal(clf.predict(T), true_result)
assert_equal(100, len(clf.estimators_))
y_ = np.asarray(y, dtype=np.int32)
y_ = np.asfortranarray(y_)
clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
clf.fit(X, y_)
assert_array_equal(clf.predict(T), true_result)
assert_equal(100, len(clf.estimators_))
def test_oob_improvement():
# Test if oob improvement has correct shape and regression test.
clf = GradientBoostingClassifier(n_estimators=100, random_state=1,
subsample=0.5)
clf.fit(X, y)
assert_equal(clf.oob_improvement_.shape[0], 100)
# hard-coded regression test - change if modification in OOB computation
assert_array_almost_equal(clf.oob_improvement_[:5],
np.array([0.19, 0.15, 0.12, -0.12, -0.11]),
decimal=2)
def test_oob_improvement_raise():
# Test if oob improvement has correct shape.
clf = GradientBoostingClassifier(n_estimators=100, random_state=1,
subsample=1.0)
clf.fit(X, y)
assert_raises(AttributeError, lambda: clf.oob_improvement_)
def test_oob_multiclass_iris():
# Check OOB improvement on multi-class dataset.
clf = GradientBoostingClassifier(n_estimators=100, loss='deviance',
random_state=1, subsample=0.5)
clf.fit(iris.data, iris.target)
score = clf.score(iris.data, iris.target)
assert_greater(score, 0.9)
assert_equal(clf.oob_improvement_.shape[0], clf.n_estimators)
# hard-coded regression test - change if modification in OOB computation
# FIXME: the following snippet does not yield the same results on 32 bits
# assert_array_almost_equal(clf.oob_improvement_[:5],
# np.array([12.68, 10.45, 8.18, 6.43, 5.13]),
# decimal=2)
def test_verbose_output():
# Check verbose=1 does not cause error.
from sklearn.externals.six.moves import cStringIO as StringIO
import sys
old_stdout = sys.stdout
sys.stdout = StringIO()
clf = GradientBoostingClassifier(n_estimators=100, random_state=1,
verbose=1, subsample=0.8)
clf.fit(X, y)
verbose_output = sys.stdout
sys.stdout = old_stdout
# check output
verbose_output.seek(0)
header = verbose_output.readline().rstrip()
# with OOB
true_header = ' '.join(['%10s'] + ['%16s'] * 3) % (
'Iter', 'Train Loss', 'OOB Improve', 'Remaining Time')
assert_equal(true_header, header)
n_lines = sum(1 for l in verbose_output.readlines())
# one for 1-10 and then 9 for 20-100
assert_equal(10 + 9, n_lines)
def test_more_verbose_output():
# Check verbose=2 does not cause error.
from sklearn.externals.six.moves import cStringIO as StringIO
import sys
old_stdout = sys.stdout
sys.stdout = StringIO()
clf = GradientBoostingClassifier(n_estimators=100, random_state=1,
verbose=2)
clf.fit(X, y)
verbose_output = sys.stdout
sys.stdout = old_stdout
# check output
verbose_output.seek(0)
header = verbose_output.readline().rstrip()
# no OOB
true_header = ' '.join(['%10s'] + ['%16s'] * 2) % (
'Iter', 'Train Loss', 'Remaining Time')
assert_equal(true_header, header)
n_lines = sum(1 for l in verbose_output.readlines())
# 100 lines for n_estimators==100
assert_equal(100, n_lines)
def test_warm_start():
# Test if warm start equals fit.
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
for Cls in [GradientBoostingRegressor, GradientBoostingClassifier]:
est = Cls(n_estimators=200, max_depth=1)
est.fit(X, y)
est_ws = Cls(n_estimators=100, max_depth=1, warm_start=True)
est_ws.fit(X, y)
est_ws.set_params(n_estimators=200)
est_ws.fit(X, y)
assert_array_almost_equal(est_ws.predict(X), est.predict(X))
def test_warm_start_n_estimators():
# Test if warm start equals fit - set n_estimators.
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
for Cls in [GradientBoostingRegressor, GradientBoostingClassifier]:
est = Cls(n_estimators=300, max_depth=1)
est.fit(X, y)
est_ws = Cls(n_estimators=100, max_depth=1, warm_start=True)
est_ws.fit(X, y)
est_ws.set_params(n_estimators=300)
est_ws.fit(X, y)
assert_array_almost_equal(est_ws.predict(X), est.predict(X))
def test_warm_start_max_depth():
# Test if possible to fit trees of different depth in ensemble.
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
for Cls in [GradientBoostingRegressor, GradientBoostingClassifier]:
est = Cls(n_estimators=100, max_depth=1, warm_start=True)
est.fit(X, y)
est.set_params(n_estimators=110, max_depth=2)
est.fit(X, y)
# last 10 trees have different depth
assert_equal(est.estimators_[0, 0].max_depth, 1)
for i in range(1, 11):
assert_equal(est.estimators_[-i, 0].max_depth, 2)
def test_warm_start_clear():
# Test if fit clears state.
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
for Cls in [GradientBoostingRegressor, GradientBoostingClassifier]:
est = Cls(n_estimators=100, max_depth=1)
est.fit(X, y)
est_2 = Cls(n_estimators=100, max_depth=1, warm_start=True)
est_2.fit(X, y) # inits state
est_2.set_params(warm_start=False)
est_2.fit(X, y) # clears old state and equals est
assert_array_almost_equal(est_2.predict(X), est.predict(X))
def test_warm_start_zero_n_estimators():
# Test if warm start with zero n_estimators raises error
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
for Cls in [GradientBoostingRegressor, GradientBoostingClassifier]:
est = Cls(n_estimators=100, max_depth=1, warm_start=True)
est.fit(X, y)
est.set_params(n_estimators=0)
assert_raises(ValueError, est.fit, X, y)
def test_warm_start_smaller_n_estimators():
# Test if warm start with smaller n_estimators raises error
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
for Cls in [GradientBoostingRegressor, GradientBoostingClassifier]:
est = Cls(n_estimators=100, max_depth=1, warm_start=True)
est.fit(X, y)
est.set_params(n_estimators=99)
assert_raises(ValueError, est.fit, X, y)
def test_warm_start_equal_n_estimators():
# Test if warm start with equal n_estimators does nothing
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
for Cls in [GradientBoostingRegressor, GradientBoostingClassifier]:
est = Cls(n_estimators=100, max_depth=1)
est.fit(X, y)
est2 = clone(est)
est2.set_params(n_estimators=est.n_estimators, warm_start=True)
est2.fit(X, y)
assert_array_almost_equal(est2.predict(X), est.predict(X))
def test_warm_start_oob_switch():
# Test if oob can be turned on during warm start.
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
for Cls in [GradientBoostingRegressor, GradientBoostingClassifier]:
est = Cls(n_estimators=100, max_depth=1, warm_start=True)
est.fit(X, y)
est.set_params(n_estimators=110, subsample=0.5)
est.fit(X, y)
assert_array_equal(est.oob_improvement_[:100], np.zeros(100))
# the last 10 are not zeros
assert_array_equal(est.oob_improvement_[-10:] == 0.0,
np.zeros(10, dtype=np.bool))
def test_warm_start_oob():
# Test if warm start OOB equals fit.
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
for Cls in [GradientBoostingRegressor, GradientBoostingClassifier]:
est = Cls(n_estimators=200, max_depth=1, subsample=0.5,
random_state=1)
est.fit(X, y)
est_ws = Cls(n_estimators=100, max_depth=1, subsample=0.5,
random_state=1, warm_start=True)
est_ws.fit(X, y)
est_ws.set_params(n_estimators=200)
est_ws.fit(X, y)
assert_array_almost_equal(est_ws.oob_improvement_[:100],
est.oob_improvement_[:100])
def early_stopping_monitor(i, est, locals):
"""Returns True on the 10th iteration. """
if i == 9:
return True
else:
return False
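# Sketch of an alternative monitor (added for illustration; the tolerance
# value is hypothetical): stop once the training loss stops improving.
def _loss_plateau_monitor(i, est, locals_dict):
    if i == 0:
        return False
    return abs(est.train_score_[i] - est.train_score_[i - 1]) < 1e-6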
def test_monitor_early_stopping():
# Test if monitor return value works.
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
for Cls in [GradientBoostingRegressor, GradientBoostingClassifier]:
est = Cls(n_estimators=20, max_depth=1, random_state=1, subsample=0.5)
est.fit(X, y, monitor=early_stopping_monitor)
assert_equal(est.n_estimators, 20) # this is not altered
assert_equal(est.estimators_.shape[0], 10)
assert_equal(est.train_score_.shape[0], 10)
assert_equal(est.oob_improvement_.shape[0], 10)
# try refit
est.set_params(n_estimators=30)
est.fit(X, y)
assert_equal(est.n_estimators, 30)
assert_equal(est.estimators_.shape[0], 30)
assert_equal(est.train_score_.shape[0], 30)
est = Cls(n_estimators=20, max_depth=1, random_state=1, subsample=0.5,
warm_start=True)
est.fit(X, y, monitor=early_stopping_monitor)
assert_equal(est.n_estimators, 20)
assert_equal(est.estimators_.shape[0], 10)
assert_equal(est.train_score_.shape[0], 10)
assert_equal(est.oob_improvement_.shape[0], 10)
# try refit
est.set_params(n_estimators=30, warm_start=False)
est.fit(X, y)
assert_equal(est.n_estimators, 30)
assert_equal(est.train_score_.shape[0], 30)
assert_equal(est.estimators_.shape[0], 30)
assert_equal(est.oob_improvement_.shape[0], 30)
def test_complete_classification():
    # Test greedy trees with max_depth + 1 leaves.
from sklearn.tree._tree import TREE_LEAF
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
k = 4
est = GradientBoostingClassifier(n_estimators=20, max_depth=None,
random_state=1, max_leaf_nodes=k + 1)
est.fit(X, y)
tree = est.estimators_[0, 0].tree_
assert_equal(tree.max_depth, k)
assert_equal(tree.children_left[tree.children_left == TREE_LEAF].shape[0],
k + 1)
def test_complete_regression():
    # Test greedy trees with max_depth + 1 leaves.
from sklearn.tree._tree import TREE_LEAF
k = 4
est = GradientBoostingRegressor(n_estimators=20, max_depth=None,
random_state=1, max_leaf_nodes=k + 1)
est.fit(boston.data, boston.target)
tree = est.estimators_[-1, 0].tree_
assert_equal(tree.children_left[tree.children_left == TREE_LEAF].shape[0],
k + 1)
def test_zero_estimator_reg():
# Test if ZeroEstimator works for regression.
est = GradientBoostingRegressor(n_estimators=20, max_depth=1,
random_state=1, init=ZeroEstimator())
est.fit(boston.data, boston.target)
y_pred = est.predict(boston.data)
mse = mean_squared_error(boston.target, y_pred)
assert_almost_equal(mse, 33.0, decimal=0)
est = GradientBoostingRegressor(n_estimators=20, max_depth=1,
random_state=1, init='zero')
est.fit(boston.data, boston.target)
y_pred = est.predict(boston.data)
mse = mean_squared_error(boston.target, y_pred)
assert_almost_equal(mse, 33.0, decimal=0)
est = GradientBoostingRegressor(n_estimators=20, max_depth=1,
random_state=1, init='foobar')
assert_raises(ValueError, est.fit, boston.data, boston.target)
def test_zero_estimator_clf():
# Test if ZeroEstimator works for classification.
X = iris.data
y = np.array(iris.target)
est = GradientBoostingClassifier(n_estimators=20, max_depth=1,
random_state=1, init=ZeroEstimator())
est.fit(X, y)
assert_greater(est.score(X, y), 0.96)
est = GradientBoostingClassifier(n_estimators=20, max_depth=1,
random_state=1, init='zero')
est.fit(X, y)
assert_greater(est.score(X, y), 0.96)
# binary clf
mask = y != 0
y[mask] = 1
y[~mask] = 0
est = GradientBoostingClassifier(n_estimators=20, max_depth=1,
random_state=1, init='zero')
est.fit(X, y)
assert_greater(est.score(X, y), 0.96)
est = GradientBoostingClassifier(n_estimators=20, max_depth=1,
random_state=1, init='foobar')
assert_raises(ValueError, est.fit, X, y)
def test_max_leaf_nodes_max_depth():
# Test precedence of max_leaf_nodes over max_depth.
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
all_estimators = [GradientBoostingRegressor,
GradientBoostingClassifier]
k = 4
for GBEstimator in all_estimators:
est = GBEstimator(max_depth=1, max_leaf_nodes=k).fit(X, y)
tree = est.estimators_[0, 0].tree_
assert_greater(tree.max_depth, 1)
est = GBEstimator(max_depth=1).fit(X, y)
tree = est.estimators_[0, 0].tree_
assert_equal(tree.max_depth, 1)
def test_warm_start_wo_nestimators_change():
# Test if warm_start does nothing if n_estimators is not changed.
# Regression test for #3513.
clf = GradientBoostingClassifier(n_estimators=10, warm_start=True)
clf.fit([[0, 1], [2, 3]], [0, 1])
assert_equal(clf.estimators_.shape[0], 10)
clf.fit([[0, 1], [2, 3]], [0, 1])
assert_equal(clf.estimators_.shape[0], 10)
def test_probability_exponential():
# Predict probabilities.
clf = GradientBoostingClassifier(loss='exponential',
n_estimators=100, random_state=1)
assert_raises(ValueError, clf.predict_proba, T)
clf.fit(X, y)
assert_array_equal(clf.predict(T), true_result)
# check if probabilities are in [0, 1].
y_proba = clf.predict_proba(T)
assert_true(np.all(y_proba >= 0.0))
assert_true(np.all(y_proba <= 1.0))
score = clf.decision_function(T).ravel()
assert_array_almost_equal(y_proba[:, 1],
1.0 / (1.0 + np.exp(-2 * score)))
# derive predictions from probabilities
y_pred = clf.classes_.take(y_proba.argmax(axis=1), axis=0)
assert_array_equal(y_pred, true_result)
def test_non_uniform_weights_toy_edge_case_reg():
X = [[1, 0],
[1, 0],
[1, 0],
[0, 1]]
y = [0, 0, 1, 0]
# ignore the first 2 training samples by setting their weight to 0
sample_weight = [0, 0, 1, 1]
for loss in ('huber', 'ls', 'lad', 'quantile'):
gb = GradientBoostingRegressor(learning_rate=1.0, n_estimators=2,
loss=loss)
gb.fit(X, y, sample_weight=sample_weight)
assert_greater(gb.predict([[1, 0]])[0], 0.5)
def test_non_uniform_weights_toy_edge_case_clf():
X = [[1, 0],
[1, 0],
[1, 0],
[0, 1]]
y = [0, 0, 1, 0]
# ignore the first 2 training samples by setting their weight to 0
sample_weight = [0, 0, 1, 1]
for loss in ('deviance', 'exponential'):
gb = GradientBoostingClassifier(n_estimators=5)
gb.fit(X, y, sample_weight=sample_weight)
assert_array_equal(gb.predict([[1, 0]]), [1])
def check_sparse_input(EstimatorClass, X, X_sparse, y):
dense = EstimatorClass(n_estimators=10, random_state=0,
max_depth=2).fit(X, y)
sparse = EstimatorClass(n_estimators=10, random_state=0, max_depth=2,
presort=False).fit(X_sparse, y)
auto = EstimatorClass(n_estimators=10, random_state=0, max_depth=2,
presort='auto').fit(X_sparse, y)
assert_array_almost_equal(sparse.apply(X), dense.apply(X))
assert_array_almost_equal(sparse.predict(X), dense.predict(X))
assert_array_almost_equal(sparse.feature_importances_,
dense.feature_importances_)
assert_array_almost_equal(sparse.apply(X), auto.apply(X))
assert_array_almost_equal(sparse.predict(X), auto.predict(X))
assert_array_almost_equal(sparse.feature_importances_,
auto.feature_importances_)
    if issubclass(EstimatorClass, GradientBoostingClassifier):
assert_array_almost_equal(sparse.predict_proba(X),
dense.predict_proba(X))
assert_array_almost_equal(sparse.predict_log_proba(X),
dense.predict_log_proba(X))
assert_array_almost_equal(sparse.predict_proba(X),
auto.predict_proba(X))
assert_array_almost_equal(sparse.predict_log_proba(X),
auto.predict_log_proba(X))
@skip_if_32bit
def test_sparse_input():
ests = (GradientBoostingClassifier, GradientBoostingRegressor)
sparse_matrices = (csr_matrix, csc_matrix, coo_matrix)
y, X = datasets.make_multilabel_classification(random_state=0,
n_samples=50,
n_features=1,
n_classes=20)
y = y[:, 0]
for EstimatorClass, sparse_matrix in product(ests, sparse_matrices):
yield check_sparse_input, EstimatorClass, X, sparse_matrix(X), y
| bsd-3-clause |
ilyes14/scikit-learn | sklearn/ensemble/forest.py | 176 | 62555 | """Forest of trees-based ensemble methods
Those methods include random forests and extremely randomized trees.
The module structure is the following:
- The ``BaseForest`` base class implements a common ``fit`` method for all
the estimators in the module. The ``fit`` method of the base ``Forest``
class calls the ``fit`` method of each sub-estimator on random samples
(with replacement, a.k.a. bootstrap) of the training set.
The init of the sub-estimator is further delegated to the
``BaseEnsemble`` constructor.
- The ``ForestClassifier`` and ``ForestRegressor`` base classes further
implement the prediction logic by computing an average of the predicted
outcomes of the sub-estimators.
- The ``RandomForestClassifier`` and ``RandomForestRegressor`` derived
classes provide the user with concrete implementations of
the forest ensemble method using classical, deterministic
``DecisionTreeClassifier`` and ``DecisionTreeRegressor`` as
sub-estimator implementations.
- The ``ExtraTreesClassifier`` and ``ExtraTreesRegressor`` derived
classes provide the user with concrete implementations of the
forest ensemble method using the extremely randomized trees
``ExtraTreeClassifier`` and ``ExtraTreeRegressor`` as
sub-estimator implementations.
Single and multi-output problems are both handled.
"""
# Authors: Gilles Louppe <g.louppe@gmail.com>
# Brian Holt <bdholt1@gmail.com>
# Joly Arnaud <arnaud.v.joly@gmail.com>
# Fares Hedayati <fares.hedayati@gmail.com>
#
# License: BSD 3 clause
from __future__ import division
import warnings
from warnings import warn
from abc import ABCMeta, abstractmethod
import numpy as np
from scipy.sparse import issparse
from ..base import ClassifierMixin, RegressorMixin
from ..externals.joblib import Parallel, delayed
from ..externals import six
from ..feature_selection.from_model import _LearntSelectorMixin
from ..metrics import r2_score
from ..preprocessing import OneHotEncoder
from ..tree import (DecisionTreeClassifier, DecisionTreeRegressor,
ExtraTreeClassifier, ExtraTreeRegressor)
from ..tree._tree import DTYPE, DOUBLE
from ..utils import check_random_state, check_array, compute_sample_weight
from ..utils.validation import DataConversionWarning, NotFittedError
from .base import BaseEnsemble, _partition_estimators
from ..utils.fixes import bincount
__all__ = ["RandomForestClassifier",
"RandomForestRegressor",
"ExtraTreesClassifier",
"ExtraTreesRegressor",
"RandomTreesEmbedding"]
MAX_INT = np.iinfo(np.int32).max
def _generate_sample_indices(random_state, n_samples):
"""Private function used to _parallel_build_trees function."""
random_instance = check_random_state(random_state)
sample_indices = random_instance.randint(0, n_samples, n_samples)
return sample_indices
def _generate_unsampled_indices(random_state, n_samples):
"""Private function used to forest._set_oob_score fuction."""
sample_indices = _generate_sample_indices(random_state, n_samples)
sample_counts = bincount(sample_indices, minlength=n_samples)
unsampled_mask = sample_counts == 0
indices_range = np.arange(n_samples)
unsampled_indices = indices_range[unsampled_mask]
return unsampled_indices
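# Illustrative property of the two helpers above (comment-only sketch):
# the bootstrap draw and its out-of-bag complement are disjoint by
# construction, because the same random_state reproduces the same draw:
#     >>> sampled = _generate_sample_indices(0, 8)       # 8 draws w/ replacement
#     >>> unsampled = _generate_unsampled_indices(0, 8)  # indices never drawn
#     >>> set(sampled).isdisjoint(unsampled)
#     True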
def _parallel_build_trees(tree, forest, X, y, sample_weight, tree_idx, n_trees,
verbose=0, class_weight=None):
"""Private function used to fit a single tree in parallel."""
if verbose > 1:
print("building tree %d of %d" % (tree_idx + 1, n_trees))
if forest.bootstrap:
n_samples = X.shape[0]
if sample_weight is None:
curr_sample_weight = np.ones((n_samples,), dtype=np.float64)
else:
curr_sample_weight = sample_weight.copy()
indices = _generate_sample_indices(tree.random_state, n_samples)
sample_counts = bincount(indices, minlength=n_samples)
curr_sample_weight *= sample_counts
if class_weight == 'subsample':
with warnings.catch_warnings():
warnings.simplefilter('ignore', DeprecationWarning)
curr_sample_weight *= compute_sample_weight('auto', y, indices)
elif class_weight == 'balanced_subsample':
curr_sample_weight *= compute_sample_weight('balanced', y, indices)
tree.fit(X, y, sample_weight=curr_sample_weight, check_input=False)
else:
tree.fit(X, y, sample_weight=sample_weight, check_input=False)
return tree
def _parallel_helper(obj, methodname, *args, **kwargs):
"""Private helper to workaround Python 2 pickle limitations"""
return getattr(obj, methodname)(*args, **kwargs)
class BaseForest(six.with_metaclass(ABCMeta, BaseEnsemble,
_LearntSelectorMixin)):
"""Base class for forests of trees.
Warning: This class should not be used directly. Use derived classes
instead.
"""
@abstractmethod
def __init__(self,
base_estimator,
n_estimators=10,
estimator_params=tuple(),
bootstrap=False,
oob_score=False,
n_jobs=1,
random_state=None,
verbose=0,
warm_start=False,
class_weight=None):
super(BaseForest, self).__init__(
base_estimator=base_estimator,
n_estimators=n_estimators,
estimator_params=estimator_params)
self.bootstrap = bootstrap
self.oob_score = oob_score
self.n_jobs = n_jobs
self.random_state = random_state
self.verbose = verbose
self.warm_start = warm_start
self.class_weight = class_weight
def apply(self, X):
"""Apply trees in the forest to X, return leaf indices.
Parameters
----------
X : array-like or sparse matrix, shape = [n_samples, n_features]
The input samples. Internally, it will be converted to
``dtype=np.float32`` and if a sparse matrix is provided
to a sparse ``csr_matrix``.
Returns
-------
X_leaves : array_like, shape = [n_samples, n_estimators]
For each datapoint x in X and for each tree in the forest,
return the index of the leaf x ends up in.
"""
X = self._validate_X_predict(X)
results = Parallel(n_jobs=self.n_jobs, verbose=self.verbose,
backend="threading")(
delayed(_parallel_helper)(tree, 'apply', X, check_input=False)
for tree in self.estimators_)
return np.array(results).T
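    # Shape contract of ``apply`` (illustrative comment-only sketch; ``est``
    # and ``X`` are placeholder names for a fitted forest and its input):
    #     >>> leaves = est.apply(X)                          # doctest: +SKIP
    #     >>> leaves.shape == (X.shape[0], len(est.estimators_))
    #     True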
def fit(self, X, y, sample_weight=None):
"""Build a forest of trees from the training set (X, y).
Parameters
----------
X : array-like or sparse matrix of shape = [n_samples, n_features]
The training input samples. Internally, it will be converted to
``dtype=np.float32`` and if a sparse matrix is provided
to a sparse ``csc_matrix``.
y : array-like, shape = [n_samples] or [n_samples, n_outputs]
The target values (class labels in classification, real numbers in
regression).
sample_weight : array-like, shape = [n_samples] or None
Sample weights. If None, then samples are equally weighted. Splits
that would create child nodes with net zero or negative weight are
ignored while searching for a split in each node. In the case of
classification, splits are also ignored if they would result in any
single class carrying a negative weight in either child node.
Returns
-------
self : object
Returns self.
"""
# Validate or convert input data
X = check_array(X, dtype=DTYPE, accept_sparse="csc")
if issparse(X):
# Pre-sort indices to avoid that each individual tree of the
# ensemble sorts the indices.
X.sort_indices()
# Remap output
n_samples, self.n_features_ = X.shape
y = np.atleast_1d(y)
if y.ndim == 2 and y.shape[1] == 1:
warn("A column-vector y was passed when a 1d array was"
" expected. Please change the shape of y to "
"(n_samples,), for example using ravel().",
DataConversionWarning, stacklevel=2)
if y.ndim == 1:
            # reshape is necessary to preserve the data contiguity, unlike
            # [:, np.newaxis] which does not.
y = np.reshape(y, (-1, 1))
self.n_outputs_ = y.shape[1]
y, expanded_class_weight = self._validate_y_class_weight(y)
if getattr(y, "dtype", None) != DOUBLE or not y.flags.contiguous:
y = np.ascontiguousarray(y, dtype=DOUBLE)
if expanded_class_weight is not None:
if sample_weight is not None:
sample_weight = sample_weight * expanded_class_weight
else:
sample_weight = expanded_class_weight
# Check parameters
self._validate_estimator()
if not self.bootstrap and self.oob_score:
raise ValueError("Out of bag estimation only available"
" if bootstrap=True")
random_state = check_random_state(self.random_state)
if not self.warm_start:
# Free allocated memory, if any
self.estimators_ = []
n_more_estimators = self.n_estimators - len(self.estimators_)
if n_more_estimators < 0:
            raise ValueError('n_estimators=%d must be larger than or equal to '
'len(estimators_)=%d when warm_start==True'
% (self.n_estimators, len(self.estimators_)))
elif n_more_estimators == 0:
warn("Warm-start fitting without increasing n_estimators does not "
"fit new trees.")
else:
if self.warm_start and len(self.estimators_) > 0:
# We draw from the random state to get the random state we
# would have got if we hadn't used a warm_start.
random_state.randint(MAX_INT, size=len(self.estimators_))
trees = []
for i in range(n_more_estimators):
tree = self._make_estimator(append=False)
tree.set_params(random_state=random_state.randint(MAX_INT))
trees.append(tree)
# Parallel loop: we use the threading backend as the Cython code
# for fitting the trees is internally releasing the Python GIL
# making threading always more efficient than multiprocessing in
# that case.
trees = Parallel(n_jobs=self.n_jobs, verbose=self.verbose,
backend="threading")(
delayed(_parallel_build_trees)(
t, self, X, y, sample_weight, i, len(trees),
verbose=self.verbose, class_weight=self.class_weight)
for i, t in enumerate(trees))
# Collect newly grown trees
self.estimators_.extend(trees)
if self.oob_score:
self._set_oob_score(X, y)
# Decapsulate classes_ attributes
if hasattr(self, "classes_") and self.n_outputs_ == 1:
self.n_classes_ = self.n_classes_[0]
self.classes_ = self.classes_[0]
return self
@abstractmethod
def _set_oob_score(self, X, y):
"""Calculate out of bag predictions and score."""
def _validate_y_class_weight(self, y):
# Default implementation
return y, None
def _validate_X_predict(self, X):
"""Validate X whenever one tries to predict, apply, predict_proba"""
if self.estimators_ is None or len(self.estimators_) == 0:
raise NotFittedError("Estimator not fitted, "
"call `fit` before exploiting the model.")
return self.estimators_[0]._validate_X_predict(X, check_input=True)
@property
def feature_importances_(self):
"""Return the feature importances (the higher, the more important the
feature).
Returns
-------
feature_importances_ : array, shape = [n_features]
"""
if self.estimators_ is None or len(self.estimators_) == 0:
raise NotFittedError("Estimator not fitted, "
"call `fit` before `feature_importances_`.")
all_importances = Parallel(n_jobs=self.n_jobs,
backend="threading")(
delayed(getattr)(tree, 'feature_importances_')
for tree in self.estimators_)
return sum(all_importances) / len(self.estimators_)
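    # Illustrative use of the property above (comment-only sketch; ``est``
    # is a placeholder for an already fitted forest):
    #     >>> importances = est.feature_importances_        # doctest: +SKIP
    #     >>> importances.shape == (est.n_features_,)
    #     True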
class ForestClassifier(six.with_metaclass(ABCMeta, BaseForest,
ClassifierMixin)):
"""Base class for forest of trees-based classifiers.
Warning: This class should not be used directly. Use derived classes
instead.
"""
@abstractmethod
def __init__(self,
base_estimator,
n_estimators=10,
estimator_params=tuple(),
bootstrap=False,
oob_score=False,
n_jobs=1,
random_state=None,
verbose=0,
warm_start=False,
class_weight=None):
super(ForestClassifier, self).__init__(
base_estimator,
n_estimators=n_estimators,
estimator_params=estimator_params,
bootstrap=bootstrap,
oob_score=oob_score,
n_jobs=n_jobs,
random_state=random_state,
verbose=verbose,
warm_start=warm_start,
class_weight=class_weight)
def _set_oob_score(self, X, y):
"""Compute out-of-bag score"""
X = check_array(X, dtype=DTYPE, accept_sparse='csr')
n_classes_ = self.n_classes_
n_samples = y.shape[0]
oob_decision_function = []
oob_score = 0.0
predictions = []
for k in range(self.n_outputs_):
predictions.append(np.zeros((n_samples, n_classes_[k])))
for estimator in self.estimators_:
unsampled_indices = _generate_unsampled_indices(
estimator.random_state, n_samples)
p_estimator = estimator.predict_proba(X[unsampled_indices, :],
check_input=False)
if self.n_outputs_ == 1:
p_estimator = [p_estimator]
for k in range(self.n_outputs_):
predictions[k][unsampled_indices, :] += p_estimator[k]
for k in range(self.n_outputs_):
if (predictions[k].sum(axis=1) == 0).any():
warn("Some inputs do not have OOB scores. "
"This probably means too few trees were used "
"to compute any reliable oob estimates.")
decision = (predictions[k] /
predictions[k].sum(axis=1)[:, np.newaxis])
oob_decision_function.append(decision)
oob_score += np.mean(y[:, k] ==
np.argmax(predictions[k], axis=1), axis=0)
if self.n_outputs_ == 1:
self.oob_decision_function_ = oob_decision_function[0]
else:
self.oob_decision_function_ = oob_decision_function
self.oob_score_ = oob_score / self.n_outputs_
def _validate_y_class_weight(self, y):
y = np.copy(y)
expanded_class_weight = None
if self.class_weight is not None:
y_original = np.copy(y)
self.classes_ = []
self.n_classes_ = []
y_store_unique_indices = np.zeros(y.shape, dtype=np.int)
for k in range(self.n_outputs_):
classes_k, y_store_unique_indices[:, k] = np.unique(y[:, k], return_inverse=True)
self.classes_.append(classes_k)
self.n_classes_.append(classes_k.shape[0])
y = y_store_unique_indices
if self.class_weight is not None:
            valid_presets = ('auto', 'balanced', 'balanced_subsample', 'subsample')
if isinstance(self.class_weight, six.string_types):
if self.class_weight not in valid_presets:
raise ValueError('Valid presets for class_weight include '
'"balanced" and "balanced_subsample". Given "%s".'
% self.class_weight)
if self.class_weight == "subsample":
warn("class_weight='subsample' is deprecated and will be removed in 0.18."
" It was replaced by class_weight='balanced_subsample' "
"using the balanced strategy.", DeprecationWarning)
if self.warm_start:
warn('class_weight presets "balanced" or "balanced_subsample" are '
'not recommended for warm_start if the fitted data '
'differs from the full dataset. In order to use '
'"balanced" weights, use compute_class_weight("balanced", '
'classes, y). In place of y you can use a large '
'enough sample of the full training set target to '
'properly estimate the class frequency '
'distributions. Pass the resulting weights as the '
'class_weight parameter.')
if (self.class_weight not in ['subsample', 'balanced_subsample'] or
not self.bootstrap):
if self.class_weight == 'subsample':
class_weight = 'auto'
elif self.class_weight == "balanced_subsample":
class_weight = "balanced"
else:
class_weight = self.class_weight
with warnings.catch_warnings():
if class_weight == "auto":
warnings.simplefilter('ignore', DeprecationWarning)
expanded_class_weight = compute_sample_weight(class_weight,
y_original)
return y, expanded_class_weight
def predict(self, X):
"""Predict class for X.
The predicted class of an input sample is a vote by the trees in
the forest, weighted by their probability estimates. That is,
the predicted class is the one with highest mean probability
estimate across the trees.
Parameters
----------
X : array-like or sparse matrix of shape = [n_samples, n_features]
The input samples. Internally, it will be converted to
``dtype=np.float32`` and if a sparse matrix is provided
to a sparse ``csr_matrix``.
Returns
-------
y : array of shape = [n_samples] or [n_samples, n_outputs]
The predicted classes.
"""
proba = self.predict_proba(X)
if self.n_outputs_ == 1:
return self.classes_.take(np.argmax(proba, axis=1), axis=0)
else:
n_samples = proba[0].shape[0]
predictions = np.zeros((n_samples, self.n_outputs_))
for k in range(self.n_outputs_):
predictions[:, k] = self.classes_[k].take(np.argmax(proba[k],
axis=1),
axis=0)
return predictions
def predict_proba(self, X):
"""Predict class probabilities for X.
        The predicted class probabilities of an input sample are computed as
        the mean predicted class probabilities of the trees in the forest.
        The class probability of a single tree is the fraction of samples of
        the same class in a leaf.
Parameters
----------
X : array-like or sparse matrix of shape = [n_samples, n_features]
The input samples. Internally, it will be converted to
``dtype=np.float32`` and if a sparse matrix is provided
to a sparse ``csr_matrix``.
Returns
-------
p : array of shape = [n_samples, n_classes], or a list of n_outputs
such arrays if n_outputs > 1.
The class probabilities of the input samples. The order of the
classes corresponds to that in the attribute `classes_`.
"""
# Check data
X = self._validate_X_predict(X)
# Assign chunk of trees to jobs
n_jobs, _, _ = _partition_estimators(self.n_estimators, self.n_jobs)
# Parallel loop
all_proba = Parallel(n_jobs=n_jobs, verbose=self.verbose,
backend="threading")(
delayed(_parallel_helper)(e, 'predict_proba', X,
check_input=False)
for e in self.estimators_)
# Reduce
proba = all_proba[0]
if self.n_outputs_ == 1:
for j in range(1, len(all_proba)):
proba += all_proba[j]
proba /= len(self.estimators_)
else:
for j in range(1, len(all_proba)):
for k in range(self.n_outputs_):
proba[k] += all_proba[j][k]
for k in range(self.n_outputs_):
proba[k] /= self.n_estimators
return proba
def predict_log_proba(self, X):
"""Predict class log-probabilities for X.
        The predicted class log-probabilities of an input sample are computed as
the log of the mean predicted class probabilities of the trees in the
forest.
Parameters
----------
X : array-like or sparse matrix of shape = [n_samples, n_features]
The input samples. Internally, it will be converted to
``dtype=np.float32`` and if a sparse matrix is provided
to a sparse ``csr_matrix``.
Returns
-------
p : array of shape = [n_samples, n_classes], or a list of n_outputs
such arrays if n_outputs > 1.
The class probabilities of the input samples. The order of the
classes corresponds to that in the attribute `classes_`.
"""
proba = self.predict_proba(X)
if self.n_outputs_ == 1:
return np.log(proba)
else:
for k in range(self.n_outputs_):
proba[k] = np.log(proba[k])
return proba
class ForestRegressor(six.with_metaclass(ABCMeta, BaseForest, RegressorMixin)):
"""Base class for forest of trees-based regressors.
Warning: This class should not be used directly. Use derived classes
instead.
"""
@abstractmethod
def __init__(self,
base_estimator,
n_estimators=10,
estimator_params=tuple(),
bootstrap=False,
oob_score=False,
n_jobs=1,
random_state=None,
verbose=0,
warm_start=False):
super(ForestRegressor, self).__init__(
base_estimator,
n_estimators=n_estimators,
estimator_params=estimator_params,
bootstrap=bootstrap,
oob_score=oob_score,
n_jobs=n_jobs,
random_state=random_state,
verbose=verbose,
warm_start=warm_start)
def predict(self, X):
"""Predict regression target for X.
The predicted regression target of an input sample is computed as the
mean predicted regression targets of the trees in the forest.
Parameters
----------
X : array-like or sparse matrix of shape = [n_samples, n_features]
The input samples. Internally, it will be converted to
``dtype=np.float32`` and if a sparse matrix is provided
to a sparse ``csr_matrix``.
Returns
-------
y : array of shape = [n_samples] or [n_samples, n_outputs]
The predicted values.
"""
# Check data
X = self._validate_X_predict(X)
# Assign chunk of trees to jobs
n_jobs, _, _ = _partition_estimators(self.n_estimators, self.n_jobs)
# Parallel loop
all_y_hat = Parallel(n_jobs=n_jobs, verbose=self.verbose,
backend="threading")(
delayed(_parallel_helper)(e, 'predict', X, check_input=False)
for e in self.estimators_)
# Reduce
y_hat = sum(all_y_hat) / len(self.estimators_)
return y_hat
def _set_oob_score(self, X, y):
"""Compute out-of-bag scores"""
X = check_array(X, dtype=DTYPE, accept_sparse='csr')
n_samples = y.shape[0]
predictions = np.zeros((n_samples, self.n_outputs_))
n_predictions = np.zeros((n_samples, self.n_outputs_))
for estimator in self.estimators_:
unsampled_indices = _generate_unsampled_indices(
estimator.random_state, n_samples)
p_estimator = estimator.predict(
X[unsampled_indices, :], check_input=False)
if self.n_outputs_ == 1:
p_estimator = p_estimator[:, np.newaxis]
predictions[unsampled_indices, :] += p_estimator
n_predictions[unsampled_indices, :] += 1
if (n_predictions == 0).any():
warn("Some inputs do not have OOB scores. "
"This probably means too few trees were used "
"to compute any reliable oob estimates.")
n_predictions[n_predictions == 0] = 1
predictions /= n_predictions
self.oob_prediction_ = predictions
if self.n_outputs_ == 1:
self.oob_prediction_ = \
self.oob_prediction_.reshape((n_samples, ))
self.oob_score_ = 0.0
for k in range(self.n_outputs_):
self.oob_score_ += r2_score(y[:, k],
predictions[:, k])
self.oob_score_ /= self.n_outputs_
class RandomForestClassifier(ForestClassifier):
"""A random forest classifier.
A random forest is a meta estimator that fits a number of decision tree
    classifiers on various sub-samples of the dataset and uses averaging to
improve the predictive accuracy and control over-fitting.
The sub-sample size is always the same as the original
input sample size but the samples are drawn with replacement if
`bootstrap=True` (default).
Read more in the :ref:`User Guide <forest>`.
Parameters
----------
n_estimators : integer, optional (default=10)
The number of trees in the forest.
criterion : string, optional (default="gini")
The function to measure the quality of a split. Supported criteria are
"gini" for the Gini impurity and "entropy" for the information gain.
Note: this parameter is tree-specific.
max_features : int, float, string or None, optional (default="auto")
The number of features to consider when looking for the best split:
- If int, then consider `max_features` features at each split.
- If float, then `max_features` is a percentage and
`int(max_features * n_features)` features are considered at each
split.
- If "auto", then `max_features=sqrt(n_features)`.
- If "sqrt", then `max_features=sqrt(n_features)` (same as "auto").
- If "log2", then `max_features=log2(n_features)`.
- If None, then `max_features=n_features`.
Note: the search for a split does not stop until at least one
valid partition of the node samples is found, even if it requires to
effectively inspect more than ``max_features`` features.
Note: this parameter is tree-specific.
max_depth : integer or None, optional (default=None)
The maximum depth of the tree. If None, then nodes are expanded until
all leaves are pure or until all leaves contain less than
min_samples_split samples.
Ignored if ``max_leaf_nodes`` is not None.
Note: this parameter is tree-specific.
min_samples_split : integer, optional (default=2)
The minimum number of samples required to split an internal node.
Note: this parameter is tree-specific.
min_samples_leaf : integer, optional (default=1)
        The minimum number of samples in newly created leaves. A split is
        discarded if, after the split, one of the leaves would contain fewer
        than ``min_samples_leaf`` samples.
Note: this parameter is tree-specific.
min_weight_fraction_leaf : float, optional (default=0.)
The minimum weighted fraction of the input samples required to be at a
leaf node.
Note: this parameter is tree-specific.
max_leaf_nodes : int or None, optional (default=None)
Grow trees with ``max_leaf_nodes`` in best-first fashion.
Best nodes are defined as relative reduction in impurity.
If None then unlimited number of leaf nodes.
If not None then ``max_depth`` will be ignored.
Note: this parameter is tree-specific.
bootstrap : boolean, optional (default=True)
Whether bootstrap samples are used when building trees.
oob_score : bool
Whether to use out-of-bag samples to estimate
the generalization error.
n_jobs : integer, optional (default=1)
The number of jobs to run in parallel for both `fit` and `predict`.
If -1, then the number of jobs is set to the number of cores.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
verbose : int, optional (default=0)
Controls the verbosity of the tree building process.
warm_start : bool, optional (default=False)
When set to ``True``, reuse the solution of the previous call to fit
and add more estimators to the ensemble, otherwise, just fit a whole
new forest.
class_weight : dict, list of dicts, "balanced", "balanced_subsample" or None, optional
Weights associated with classes in the form ``{class_label: weight}``.
If not given, all classes are supposed to have weight one. For
multi-output problems, a list of dicts can be provided in the same
order as the columns of y.
The "balanced" mode uses the values of y to automatically adjust
weights inversely proportional to class frequencies in the input data
as ``n_samples / (n_classes * np.bincount(y))``
The "balanced_subsample" mode is the same as "balanced" except that weights are
computed based on the bootstrap sample for every tree grown.
For multi-output, the weights of each column of y will be multiplied.
Note that these weights will be multiplied with sample_weight (passed
through the fit method) if sample_weight is specified.
Attributes
----------
estimators_ : list of DecisionTreeClassifier
The collection of fitted sub-estimators.
classes_ : array of shape = [n_classes] or a list of such arrays
The classes labels (single output problem), or a list of arrays of
class labels (multi-output problem).
n_classes_ : int or list
The number of classes (single output problem), or a list containing the
number of classes for each output (multi-output problem).
n_features_ : int
The number of features when ``fit`` is performed.
n_outputs_ : int
The number of outputs when ``fit`` is performed.
feature_importances_ : array of shape = [n_features]
The feature importances (the higher, the more important the feature).
oob_score_ : float
Score of the training dataset obtained using an out-of-bag estimate.
oob_decision_function_ : array of shape = [n_samples, n_classes]
Decision function computed with out-of-bag estimate on the training
set. If n_estimators is small it might be possible that a data point
was never left out during the bootstrap. In this case,
`oob_decision_function_` might contain NaN.
References
----------
.. [1] L. Breiman, "Random Forests", Machine Learning, 45(1), 5-32, 2001.
See also
--------
DecisionTreeClassifier, ExtraTreesClassifier
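    Examples
    --------
    A minimal, illustrative fit/predict round trip (expected outputs are
    skipped rather than guessed):
    >>> from sklearn.ensemble import RandomForestClassifier
    >>> from sklearn.datasets import make_classification
    >>> X, y = make_classification(n_samples=100, random_state=0)
    >>> clf = RandomForestClassifier(n_estimators=10, random_state=0)
    >>> clf.fit(X, y)                                  # doctest: +SKIP
    >>> clf.predict(X[:5])                             # doctest: +SKIP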
"""
def __init__(self,
n_estimators=10,
criterion="gini",
max_depth=None,
min_samples_split=2,
min_samples_leaf=1,
min_weight_fraction_leaf=0.,
max_features="auto",
max_leaf_nodes=None,
bootstrap=True,
oob_score=False,
n_jobs=1,
random_state=None,
verbose=0,
warm_start=False,
class_weight=None):
super(RandomForestClassifier, self).__init__(
base_estimator=DecisionTreeClassifier(),
n_estimators=n_estimators,
estimator_params=("criterion", "max_depth", "min_samples_split",
"min_samples_leaf", "min_weight_fraction_leaf",
"max_features", "max_leaf_nodes",
"random_state"),
bootstrap=bootstrap,
oob_score=oob_score,
n_jobs=n_jobs,
random_state=random_state,
verbose=verbose,
warm_start=warm_start,
class_weight=class_weight)
self.criterion = criterion
self.max_depth = max_depth
self.min_samples_split = min_samples_split
self.min_samples_leaf = min_samples_leaf
self.min_weight_fraction_leaf = min_weight_fraction_leaf
self.max_features = max_features
self.max_leaf_nodes = max_leaf_nodes
class RandomForestRegressor(ForestRegressor):
"""A random forest regressor.
    A random forest is a meta estimator that fits a number of decision tree
    regressors on various sub-samples of the dataset and uses averaging
to improve the predictive accuracy and control over-fitting.
The sub-sample size is always the same as the original
input sample size but the samples are drawn with replacement if
`bootstrap=True` (default).
Read more in the :ref:`User Guide <forest>`.
Parameters
----------
n_estimators : integer, optional (default=10)
The number of trees in the forest.
criterion : string, optional (default="mse")
The function to measure the quality of a split. The only supported
criterion is "mse" for the mean squared error.
Note: this parameter is tree-specific.
max_features : int, float, string or None, optional (default="auto")
The number of features to consider when looking for the best split:
- If int, then consider `max_features` features at each split.
- If float, then `max_features` is a percentage and
`int(max_features * n_features)` features are considered at each
split.
- If "auto", then `max_features=n_features`.
- If "sqrt", then `max_features=sqrt(n_features)`.
- If "log2", then `max_features=log2(n_features)`.
- If None, then `max_features=n_features`.
Note: the search for a split does not stop until at least one
valid partition of the node samples is found, even if it requires to
effectively inspect more than ``max_features`` features.
Note: this parameter is tree-specific.
max_depth : integer or None, optional (default=None)
The maximum depth of the tree. If None, then nodes are expanded until
all leaves are pure or until all leaves contain less than
min_samples_split samples.
Ignored if ``max_leaf_nodes`` is not None.
Note: this parameter is tree-specific.
min_samples_split : integer, optional (default=2)
The minimum number of samples required to split an internal node.
Note: this parameter is tree-specific.
min_samples_leaf : integer, optional (default=1)
        The minimum number of samples in newly created leaves. A split is
        discarded if, after the split, one of the leaves would contain fewer
        than ``min_samples_leaf`` samples.
Note: this parameter is tree-specific.
min_weight_fraction_leaf : float, optional (default=0.)
The minimum weighted fraction of the input samples required to be at a
leaf node.
Note: this parameter is tree-specific.
max_leaf_nodes : int or None, optional (default=None)
Grow trees with ``max_leaf_nodes`` in best-first fashion.
Best nodes are defined as relative reduction in impurity.
If None then unlimited number of leaf nodes.
If not None then ``max_depth`` will be ignored.
Note: this parameter is tree-specific.
bootstrap : boolean, optional (default=True)
Whether bootstrap samples are used when building trees.
oob_score : bool
        Whether to use out-of-bag samples to estimate
the generalization error.
n_jobs : integer, optional (default=1)
The number of jobs to run in parallel for both `fit` and `predict`.
If -1, then the number of jobs is set to the number of cores.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
verbose : int, optional (default=0)
Controls the verbosity of the tree building process.
warm_start : bool, optional (default=False)
When set to ``True``, reuse the solution of the previous call to fit
and add more estimators to the ensemble, otherwise, just fit a whole
new forest.
Attributes
----------
estimators_ : list of DecisionTreeRegressor
The collection of fitted sub-estimators.
feature_importances_ : array of shape = [n_features]
The feature importances (the higher, the more important the feature).
n_features_ : int
The number of features when ``fit`` is performed.
n_outputs_ : int
The number of outputs when ``fit`` is performed.
oob_score_ : float
Score of the training dataset obtained using an out-of-bag estimate.
oob_prediction_ : array of shape = [n_samples]
Prediction computed with out-of-bag estimate on the training set.
References
----------
.. [1] L. Breiman, "Random Forests", Machine Learning, 45(1), 5-32, 2001.
See also
--------
DecisionTreeRegressor, ExtraTreesRegressor
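    Examples
    --------
    A minimal, illustrative sketch (expected outputs are skipped rather
    than guessed):
    >>> from sklearn.ensemble import RandomForestRegressor
    >>> from sklearn.datasets import make_regression
    >>> X, y = make_regression(n_samples=100, random_state=0)
    >>> reg = RandomForestRegressor(n_estimators=10, random_state=0)
    >>> reg.fit(X, y)                                  # doctest: +SKIP
    >>> reg.predict(X[:5])                             # doctest: +SKIP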
"""
def __init__(self,
n_estimators=10,
criterion="mse",
max_depth=None,
min_samples_split=2,
min_samples_leaf=1,
min_weight_fraction_leaf=0.,
max_features="auto",
max_leaf_nodes=None,
bootstrap=True,
oob_score=False,
n_jobs=1,
random_state=None,
verbose=0,
warm_start=False):
super(RandomForestRegressor, self).__init__(
base_estimator=DecisionTreeRegressor(),
n_estimators=n_estimators,
estimator_params=("criterion", "max_depth", "min_samples_split",
"min_samples_leaf", "min_weight_fraction_leaf",
"max_features", "max_leaf_nodes",
"random_state"),
bootstrap=bootstrap,
oob_score=oob_score,
n_jobs=n_jobs,
random_state=random_state,
verbose=verbose,
warm_start=warm_start)
self.criterion = criterion
self.max_depth = max_depth
self.min_samples_split = min_samples_split
self.min_samples_leaf = min_samples_leaf
self.min_weight_fraction_leaf = min_weight_fraction_leaf
self.max_features = max_features
self.max_leaf_nodes = max_leaf_nodes
class ExtraTreesClassifier(ForestClassifier):
"""An extra-trees classifier.
This class implements a meta estimator that fits a number of
randomized decision trees (a.k.a. extra-trees) on various sub-samples
    of the dataset and uses averaging to improve the predictive accuracy
and control over-fitting.
Read more in the :ref:`User Guide <forest>`.
Parameters
----------
n_estimators : integer, optional (default=10)
The number of trees in the forest.
criterion : string, optional (default="gini")
The function to measure the quality of a split. Supported criteria are
"gini" for the Gini impurity and "entropy" for the information gain.
Note: this parameter is tree-specific.
max_features : int, float, string or None, optional (default="auto")
The number of features to consider when looking for the best split:
- If int, then consider `max_features` features at each split.
- If float, then `max_features` is a percentage and
`int(max_features * n_features)` features are considered at each
split.
- If "auto", then `max_features=sqrt(n_features)`.
- If "sqrt", then `max_features=sqrt(n_features)`.
- If "log2", then `max_features=log2(n_features)`.
- If None, then `max_features=n_features`.
Note: the search for a split does not stop until at least one
valid partition of the node samples is found, even if it requires to
effectively inspect more than ``max_features`` features.
Note: this parameter is tree-specific.
max_depth : integer or None, optional (default=None)
The maximum depth of the tree. If None, then nodes are expanded until
all leaves are pure or until all leaves contain less than
min_samples_split samples.
Ignored if ``max_leaf_nodes`` is not None.
Note: this parameter is tree-specific.
min_samples_split : integer, optional (default=2)
The minimum number of samples required to split an internal node.
Note: this parameter is tree-specific.
min_samples_leaf : integer, optional (default=1)
        The minimum number of samples in newly created leaves. A split is
        discarded if, after the split, one of the leaves would contain fewer
        than ``min_samples_leaf`` samples.
Note: this parameter is tree-specific.
min_weight_fraction_leaf : float, optional (default=0.)
The minimum weighted fraction of the input samples required to be at a
leaf node.
Note: this parameter is tree-specific.
max_leaf_nodes : int or None, optional (default=None)
Grow trees with ``max_leaf_nodes`` in best-first fashion.
Best nodes are defined as relative reduction in impurity.
If None then unlimited number of leaf nodes.
If not None then ``max_depth`` will be ignored.
Note: this parameter is tree-specific.
bootstrap : boolean, optional (default=False)
Whether bootstrap samples are used when building trees.
oob_score : bool
Whether to use out-of-bag samples to estimate
the generalization error.
n_jobs : integer, optional (default=1)
The number of jobs to run in parallel for both `fit` and `predict`.
If -1, then the number of jobs is set to the number of cores.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
verbose : int, optional (default=0)
Controls the verbosity of the tree building process.
warm_start : bool, optional (default=False)
When set to ``True``, reuse the solution of the previous call to fit
and add more estimators to the ensemble, otherwise, just fit a whole
new forest.
class_weight : dict, list of dicts, "balanced", "balanced_subsample" or None, optional
Weights associated with classes in the form ``{class_label: weight}``.
If not given, all classes are supposed to have weight one. For
multi-output problems, a list of dicts can be provided in the same
order as the columns of y.
The "balanced" mode uses the values of y to automatically adjust
weights inversely proportional to class frequencies in the input data
as ``n_samples / (n_classes * np.bincount(y))``
The "balanced_subsample" mode is the same as "balanced" except that weights are
computed based on the bootstrap sample for every tree grown.
For multi-output, the weights of each column of y will be multiplied.
Note that these weights will be multiplied with sample_weight (passed
through the fit method) if sample_weight is specified.
Attributes
----------
estimators_ : list of DecisionTreeClassifier
The collection of fitted sub-estimators.
classes_ : array of shape = [n_classes] or a list of such arrays
The classes labels (single output problem), or a list of arrays of
class labels (multi-output problem).
n_classes_ : int or list
The number of classes (single output problem), or a list containing the
number of classes for each output (multi-output problem).
feature_importances_ : array of shape = [n_features]
The feature importances (the higher, the more important the feature).
n_features_ : int
The number of features when ``fit`` is performed.
n_outputs_ : int
The number of outputs when ``fit`` is performed.
oob_score_ : float
Score of the training dataset obtained using an out-of-bag estimate.
oob_decision_function_ : array of shape = [n_samples, n_classes]
Decision function computed with out-of-bag estimate on the training
set. If n_estimators is small it might be possible that a data point
was never left out during the bootstrap. In this case,
`oob_decision_function_` might contain NaN.
References
----------
.. [1] P. Geurts, D. Ernst., and L. Wehenkel, "Extremely randomized trees",
Machine Learning, 63(1), 3-42, 2006.
See also
--------
sklearn.tree.ExtraTreeClassifier : Base classifier for this ensemble.
RandomForestClassifier : Ensemble Classifier based on trees with optimal
splits.
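    Examples
    --------
    A minimal, illustrative sketch (expected outputs are skipped rather
    than guessed):
    >>> from sklearn.ensemble import ExtraTreesClassifier
    >>> from sklearn.datasets import make_classification
    >>> X, y = make_classification(n_samples=100, random_state=0)
    >>> clf = ExtraTreesClassifier(n_estimators=10, random_state=0)
    >>> clf.fit(X, y)                                  # doctest: +SKIP
    >>> clf.predict_proba(X[:2])                       # doctest: +SKIP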
"""
def __init__(self,
n_estimators=10,
criterion="gini",
max_depth=None,
min_samples_split=2,
min_samples_leaf=1,
min_weight_fraction_leaf=0.,
max_features="auto",
max_leaf_nodes=None,
bootstrap=False,
oob_score=False,
n_jobs=1,
random_state=None,
verbose=0,
warm_start=False,
class_weight=None):
super(ExtraTreesClassifier, self).__init__(
base_estimator=ExtraTreeClassifier(),
n_estimators=n_estimators,
estimator_params=("criterion", "max_depth", "min_samples_split",
"min_samples_leaf", "min_weight_fraction_leaf",
"max_features", "max_leaf_nodes", "random_state"),
bootstrap=bootstrap,
oob_score=oob_score,
n_jobs=n_jobs,
random_state=random_state,
verbose=verbose,
warm_start=warm_start,
class_weight=class_weight)
self.criterion = criterion
self.max_depth = max_depth
self.min_samples_split = min_samples_split
self.min_samples_leaf = min_samples_leaf
self.min_weight_fraction_leaf = min_weight_fraction_leaf
self.max_features = max_features
self.max_leaf_nodes = max_leaf_nodes
class ExtraTreesRegressor(ForestRegressor):
"""An extra-trees regressor.
This class implements a meta estimator that fits a number of
randomized decision trees (a.k.a. extra-trees) on various sub-samples
    of the dataset and uses averaging to improve the predictive accuracy
and control over-fitting.
Read more in the :ref:`User Guide <forest>`.
Parameters
----------
n_estimators : integer, optional (default=10)
The number of trees in the forest.
criterion : string, optional (default="mse")
The function to measure the quality of a split. The only supported
criterion is "mse" for the mean squared error.
Note: this parameter is tree-specific.
max_features : int, float, string or None, optional (default="auto")
The number of features to consider when looking for the best split:
- If int, then consider `max_features` features at each split.
- If float, then `max_features` is a percentage and
`int(max_features * n_features)` features are considered at each
split.
- If "auto", then `max_features=n_features`.
- If "sqrt", then `max_features=sqrt(n_features)`.
- If "log2", then `max_features=log2(n_features)`.
- If None, then `max_features=n_features`.
Note: the search for a split does not stop until at least one
valid partition of the node samples is found, even if it requires to
effectively inspect more than ``max_features`` features.
Note: this parameter is tree-specific.
max_depth : integer or None, optional (default=None)
The maximum depth of the tree. If None, then nodes are expanded until
all leaves are pure or until all leaves contain less than
min_samples_split samples.
Ignored if ``max_leaf_nodes`` is not None.
Note: this parameter is tree-specific.
min_samples_split : integer, optional (default=2)
The minimum number of samples required to split an internal node.
Note: this parameter is tree-specific.
min_samples_leaf : integer, optional (default=1)
        The minimum number of samples in newly created leaves. A split is
        discarded if, after the split, one of the leaves would contain fewer
        than ``min_samples_leaf`` samples.
Note: this parameter is tree-specific.
min_weight_fraction_leaf : float, optional (default=0.)
The minimum weighted fraction of the input samples required to be at a
leaf node.
Note: this parameter is tree-specific.
max_leaf_nodes : int or None, optional (default=None)
Grow trees with ``max_leaf_nodes`` in best-first fashion.
Best nodes are defined as relative reduction in impurity.
If None then unlimited number of leaf nodes.
If not None then ``max_depth`` will be ignored.
Note: this parameter is tree-specific.
bootstrap : boolean, optional (default=False)
Whether bootstrap samples are used when building trees.
Note: this parameter is tree-specific.
oob_score : bool
Whether to use out-of-bag samples to estimate
the generalization error.
n_jobs : integer, optional (default=1)
The number of jobs to run in parallel for both `fit` and `predict`.
If -1, then the number of jobs is set to the number of cores.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
verbose : int, optional (default=0)
Controls the verbosity of the tree building process.
warm_start : bool, optional (default=False)
When set to ``True``, reuse the solution of the previous call to fit
and add more estimators to the ensemble, otherwise, just fit a whole
new forest.
Attributes
----------
estimators_ : list of DecisionTreeRegressor
The collection of fitted sub-estimators.
feature_importances_ : array of shape = [n_features]
The feature importances (the higher, the more important the feature).
n_features_ : int
The number of features.
n_outputs_ : int
The number of outputs.
oob_score_ : float
Score of the training dataset obtained using an out-of-bag estimate.
oob_prediction_ : array of shape = [n_samples]
Prediction computed with out-of-bag estimate on the training set.
References
----------
.. [1] P. Geurts, D. Ernst., and L. Wehenkel, "Extremely randomized trees",
Machine Learning, 63(1), 3-42, 2006.
See also
--------
sklearn.tree.ExtraTreeRegressor: Base estimator for this ensemble.
RandomForestRegressor: Ensemble regressor using trees with optimal splits.
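    Examples
    --------
    A minimal, illustrative sketch (expected outputs are skipped rather
    than guessed):
    >>> from sklearn.ensemble import ExtraTreesRegressor
    >>> from sklearn.datasets import make_regression
    >>> X, y = make_regression(n_samples=100, random_state=0)
    >>> reg = ExtraTreesRegressor(n_estimators=10, random_state=0)
    >>> reg.fit(X, y)                                  # doctest: +SKIP
    >>> reg.predict(X[:5])                             # doctest: +SKIP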
"""
def __init__(self,
n_estimators=10,
criterion="mse",
max_depth=None,
min_samples_split=2,
min_samples_leaf=1,
min_weight_fraction_leaf=0.,
max_features="auto",
max_leaf_nodes=None,
bootstrap=False,
oob_score=False,
n_jobs=1,
random_state=None,
verbose=0,
warm_start=False):
super(ExtraTreesRegressor, self).__init__(
base_estimator=ExtraTreeRegressor(),
n_estimators=n_estimators,
estimator_params=("criterion", "max_depth", "min_samples_split",
"min_samples_leaf", "min_weight_fraction_leaf",
"max_features", "max_leaf_nodes",
"random_state"),
bootstrap=bootstrap,
oob_score=oob_score,
n_jobs=n_jobs,
random_state=random_state,
verbose=verbose,
warm_start=warm_start)
self.criterion = criterion
self.max_depth = max_depth
self.min_samples_split = min_samples_split
self.min_samples_leaf = min_samples_leaf
self.min_weight_fraction_leaf = min_weight_fraction_leaf
self.max_features = max_features
self.max_leaf_nodes = max_leaf_nodes
class RandomTreesEmbedding(BaseForest):
"""An ensemble of totally random trees.
An unsupervised transformation of a dataset to a high-dimensional
sparse representation. A datapoint is coded according to which leaf of
each tree it is sorted into. Using a one-hot encoding of the leaves,
this leads to a binary coding with as many ones as there are trees in
the forest.
The dimensionality of the resulting representation is
``n_out <= n_estimators * max_leaf_nodes``. If ``max_leaf_nodes == None``,
the number of leaf nodes is at most ``n_estimators * 2 ** max_depth``.
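    For example, with the defaults ``n_estimators=10`` and ``max_depth=5``,
    the representation has at most ``10 * 2 ** 5 = 320`` dimensions.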
Read more in the :ref:`User Guide <random_trees_embedding>`.
Parameters
----------
n_estimators : int
Number of trees in the forest.
max_depth : int
The maximum depth of each tree. If None, then nodes are expanded until
all leaves are pure or until all leaves contain less than
min_samples_split samples.
Ignored if ``max_leaf_nodes`` is not None.
min_samples_split : integer, optional (default=2)
The minimum number of samples required to split an internal node.
min_samples_leaf : integer, optional (default=1)
        The minimum number of samples in newly created leaves. A split is
        discarded if, after the split, one of the leaves would contain fewer
        than ``min_samples_leaf`` samples.
min_weight_fraction_leaf : float, optional (default=0.)
The minimum weighted fraction of the input samples required to be at a
leaf node.
max_leaf_nodes : int or None, optional (default=None)
Grow trees with ``max_leaf_nodes`` in best-first fashion.
Best nodes are defined as relative reduction in impurity.
If None then unlimited number of leaf nodes.
If not None then ``max_depth`` will be ignored.
sparse_output : bool, optional (default=True)
Whether or not to return a sparse CSR matrix, as default behavior,
or to return a dense array compatible with dense pipeline operators.
n_jobs : integer, optional (default=1)
The number of jobs to run in parallel for both `fit` and `predict`.
If -1, then the number of jobs is set to the number of cores.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
verbose : int, optional (default=0)
Controls the verbosity of the tree building process.
warm_start : bool, optional (default=False)
When set to ``True``, reuse the solution of the previous call to fit
and add more estimators to the ensemble, otherwise, just fit a whole
new forest.
Attributes
----------
estimators_ : list of DecisionTreeClassifier
The collection of fitted sub-estimators.
References
----------
.. [1] P. Geurts, D. Ernst., and L. Wehenkel, "Extremely randomized trees",
Machine Learning, 63(1), 3-42, 2006.
.. [2] Moosmann, F. and Triggs, B. and Jurie, F. "Fast discriminative
visual codebooks using randomized clustering forests"
NIPS 2007
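    Examples
    --------
    A minimal, illustrative sketch of the unsupervised transform (the exact
    output width depends on the trees grown, so expected outputs are skipped
    rather than guessed):
    >>> from sklearn.ensemble import RandomTreesEmbedding
    >>> from sklearn.datasets import make_blobs
    >>> X, _ = make_blobs(n_samples=50, random_state=0)
    >>> embedder = RandomTreesEmbedding(n_estimators=5, random_state=0)
    >>> X_sparse = embedder.fit_transform(X)           # doctest: +SKIP
    >>> X_sparse.shape[0]                              # doctest: +SKIP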
"""
def __init__(self,
n_estimators=10,
max_depth=5,
min_samples_split=2,
min_samples_leaf=1,
min_weight_fraction_leaf=0.,
max_leaf_nodes=None,
sparse_output=True,
n_jobs=1,
random_state=None,
verbose=0,
warm_start=False):
super(RandomTreesEmbedding, self).__init__(
base_estimator=ExtraTreeRegressor(),
n_estimators=n_estimators,
estimator_params=("criterion", "max_depth", "min_samples_split",
"min_samples_leaf", "min_weight_fraction_leaf",
"max_features", "max_leaf_nodes",
"random_state"),
bootstrap=False,
oob_score=False,
n_jobs=n_jobs,
random_state=random_state,
verbose=verbose,
warm_start=warm_start)
self.criterion = 'mse'
self.max_depth = max_depth
self.min_samples_split = min_samples_split
self.min_samples_leaf = min_samples_leaf
self.min_weight_fraction_leaf = min_weight_fraction_leaf
self.max_features = 1
self.max_leaf_nodes = max_leaf_nodes
self.sparse_output = sparse_output
def _set_oob_score(self, X, y):
raise NotImplementedError("OOB score not supported by tree embedding")
def fit(self, X, y=None, sample_weight=None):
"""Fit estimator.
Parameters
----------
X : array-like or sparse matrix, shape=(n_samples, n_features)
The input samples. Use ``dtype=np.float32`` for maximum
efficiency. Sparse matrices are also supported, use sparse
``csc_matrix`` for maximum efficiency.
Returns
-------
self : object
Returns self.
"""
self.fit_transform(X, y, sample_weight=sample_weight)
return self
def fit_transform(self, X, y=None, sample_weight=None):
"""Fit estimator and transform dataset.
Parameters
----------
X : array-like or sparse matrix, shape=(n_samples, n_features)
Input data used to build forests. Use ``dtype=np.float32`` for
maximum efficiency.
Returns
-------
X_transformed : sparse matrix, shape=(n_samples, n_out)
Transformed dataset.
"""
        # ensure_2d=False because there are actually unit tests checking that
        # we fail for 1d input.
X = check_array(X, accept_sparse=['csc'], ensure_2d=False)
if issparse(X):
# Pre-sort indices to avoid that each individual tree of the
# ensemble sorts the indices.
X.sort_indices()
rnd = check_random_state(self.random_state)
y = rnd.uniform(size=X.shape[0])
super(RandomTreesEmbedding, self).fit(X, y,
sample_weight=sample_weight)
self.one_hot_encoder_ = OneHotEncoder(sparse=self.sparse_output)
return self.one_hot_encoder_.fit_transform(self.apply(X))
def transform(self, X):
"""Transform dataset.
Parameters
----------
X : array-like or sparse matrix, shape=(n_samples, n_features)
Input data to be transformed. Use ``dtype=np.float32`` for maximum
efficiency. Sparse matrices are also supported, use sparse
``csr_matrix`` for maximum efficiency.
Returns
-------
X_transformed : sparse matrix, shape=(n_samples, n_out)
Transformed dataset.
"""
return self.one_hot_encoder_.transform(self.apply(X))
| bsd-3-clause |
belltailjp/scikit-learn | sklearn/svm/tests/test_sparse.py | 95 | 12156 | from nose.tools import assert_raises, assert_true, assert_false
import numpy as np
from scipy import sparse
from numpy.testing import (assert_array_almost_equal, assert_array_equal,
assert_equal)
from sklearn import datasets, svm, linear_model, base
from sklearn.datasets import make_classification, load_digits, make_blobs
from sklearn.svm.tests import test_svm
from sklearn.utils import ConvergenceWarning
from sklearn.utils.extmath import safe_sparse_dot
from sklearn.utils.testing import assert_warns, assert_raise_message
# test sample 1
X = np.array([[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]])
X_sp = sparse.lil_matrix(X)
Y = [1, 1, 1, 2, 2, 2]
T = np.array([[-1, -1], [2, 2], [3, 2]])
true_result = [1, 2, 2]
# test sample 2
X2 = np.array([[0, 0, 0], [1, 1, 1], [2, 0, 0],
               [0, 0, 2], [3, 3, 3]])
X2_sp = sparse.dok_matrix(X2)
Y2 = [1, 2, 2, 2, 3]
T2 = np.array([[-1, -1, -1], [1, 1, 1], [2, 2, 2]])
true_result2 = [1, 2, 3]
iris = datasets.load_iris()
# permute
rng = np.random.RandomState(0)
perm = rng.permutation(iris.target.size)
iris.data = iris.data[perm]
iris.target = iris.target[perm]
# sparsify
iris.data = sparse.csr_matrix(iris.data)
def check_svm_model_equal(dense_svm, sparse_svm, X_train, y_train, X_test):
dense_svm.fit(X_train.toarray(), y_train)
if sparse.isspmatrix(X_test):
X_test_dense = X_test.toarray()
else:
X_test_dense = X_test
sparse_svm.fit(X_train, y_train)
assert_true(sparse.issparse(sparse_svm.support_vectors_))
assert_true(sparse.issparse(sparse_svm.dual_coef_))
assert_array_almost_equal(dense_svm.support_vectors_,
sparse_svm.support_vectors_.toarray())
assert_array_almost_equal(dense_svm.dual_coef_, sparse_svm.dual_coef_.toarray())
if dense_svm.kernel == "linear":
assert_true(sparse.issparse(sparse_svm.coef_))
assert_array_almost_equal(dense_svm.coef_, sparse_svm.coef_.toarray())
assert_array_almost_equal(dense_svm.support_, sparse_svm.support_)
assert_array_almost_equal(dense_svm.predict(X_test_dense), sparse_svm.predict(X_test))
assert_array_almost_equal(dense_svm.decision_function(X_test_dense),
sparse_svm.decision_function(X_test))
assert_array_almost_equal(dense_svm.decision_function(X_test_dense),
sparse_svm.decision_function(X_test_dense))
assert_array_almost_equal(dense_svm.predict_proba(X_test_dense),
sparse_svm.predict_proba(X_test), 4)
msg = "cannot use sparse input in 'SVC' trained on dense data"
if sparse.isspmatrix(X_test):
assert_raise_message(ValueError, msg, dense_svm.predict, X_test)
def test_svc():
"""Check that sparse SVC gives the same result as SVC"""
# many class dataset:
X_blobs, y_blobs = make_blobs(n_samples=100, centers=10, random_state=0)
X_blobs = sparse.csr_matrix(X_blobs)
datasets = [[X_sp, Y, T], [X2_sp, Y2, T2],
[X_blobs[:80], y_blobs[:80], X_blobs[80:]],
[iris.data, iris.target, iris.data]]
kernels = ["linear", "poly", "rbf", "sigmoid"]
for dataset in datasets:
for kernel in kernels:
clf = svm.SVC(kernel=kernel, probability=True, random_state=0)
sp_clf = svm.SVC(kernel=kernel, probability=True, random_state=0)
check_svm_model_equal(clf, sp_clf, *dataset)
def test_unsorted_indices():
# test that the result with sorted and unsorted indices in csr is the same
    # we use a subset of digits because iris, blobs and make_classification
    # did not expose the problem
digits = load_digits()
X, y = digits.data[:50], digits.target[:50]
X_test = sparse.csr_matrix(digits.data[50:100])
X_sparse = sparse.csr_matrix(X)
coef_dense = svm.SVC(kernel='linear', probability=True,
random_state=0).fit(X, y).coef_
sparse_svc = svm.SVC(kernel='linear', probability=True,
random_state=0).fit(X_sparse, y)
coef_sorted = sparse_svc.coef_
# make sure dense and sparse SVM give the same result
assert_array_almost_equal(coef_dense, coef_sorted.toarray())
X_sparse_unsorted = X_sparse[np.arange(X.shape[0])]
X_test_unsorted = X_test[np.arange(X_test.shape[0])]
# make sure we scramble the indices
assert_false(X_sparse_unsorted.has_sorted_indices)
assert_false(X_test_unsorted.has_sorted_indices)
unsorted_svc = svm.SVC(kernel='linear', probability=True,
random_state=0).fit(X_sparse_unsorted, y)
coef_unsorted = unsorted_svc.coef_
# make sure unsorted indices give same result
assert_array_almost_equal(coef_unsorted.toarray(), coef_sorted.toarray())
assert_array_almost_equal(sparse_svc.predict_proba(X_test_unsorted),
sparse_svc.predict_proba(X_test))
def test_svc_with_custom_kernel():
kfunc = lambda x, y: safe_sparse_dot(x, y.T)
clf_lin = svm.SVC(kernel='linear').fit(X_sp, Y)
clf_mylin = svm.SVC(kernel=kfunc).fit(X_sp, Y)
assert_array_equal(clf_lin.predict(X_sp), clf_mylin.predict(X_sp))
def test_svc_iris():
# Test the sparse SVC with the iris dataset
for k in ('linear', 'poly', 'rbf'):
sp_clf = svm.SVC(kernel=k).fit(iris.data, iris.target)
clf = svm.SVC(kernel=k).fit(iris.data.toarray(), iris.target)
assert_array_almost_equal(clf.support_vectors_,
sp_clf.support_vectors_.toarray())
assert_array_almost_equal(clf.dual_coef_, sp_clf.dual_coef_.toarray())
assert_array_almost_equal(
clf.predict(iris.data.toarray()), sp_clf.predict(iris.data))
if k == 'linear':
assert_array_almost_equal(clf.coef_, sp_clf.coef_.toarray())
def test_sparse_decision_function():
    # Test decision_function
    # Sanity check: test that the decision_function implemented in Python
    # returns the same values as the one in libsvm
# multi class:
clf = svm.SVC(kernel='linear', C=0.1).fit(iris.data, iris.target)
dec = safe_sparse_dot(iris.data, clf.coef_.T) + clf.intercept_
assert_array_almost_equal(dec, clf.decision_function(iris.data))
# binary:
clf.fit(X, Y)
dec = np.dot(X, clf.coef_.T) + clf.intercept_
prediction = clf.predict(X)
assert_array_almost_equal(dec.ravel(), clf.decision_function(X))
assert_array_almost_equal(
prediction,
clf.classes_[(clf.decision_function(X) > 0).astype(np.int).ravel()])
expected = np.array([-1., -0.66, -1., 0.66, 1., 1.])
assert_array_almost_equal(clf.decision_function(X), expected, 2)
def test_error():
# Test that it gives proper exception on deficient input
# impossible value of C
assert_raises(ValueError, svm.SVC(C=-1).fit, X, Y)
# impossible value of nu
clf = svm.NuSVC(nu=0.0)
assert_raises(ValueError, clf.fit, X_sp, Y)
Y2 = Y[:-1] # wrong dimensions for labels
assert_raises(ValueError, clf.fit, X_sp, Y2)
clf = svm.SVC()
clf.fit(X_sp, Y)
assert_array_equal(clf.predict(T), true_result)
def test_linearsvc():
# Similar to test_SVC
clf = svm.LinearSVC(random_state=0).fit(X, Y)
sp_clf = svm.LinearSVC(random_state=0).fit(X_sp, Y)
assert_true(sp_clf.fit_intercept)
assert_array_almost_equal(clf.coef_, sp_clf.coef_, decimal=4)
assert_array_almost_equal(clf.intercept_, sp_clf.intercept_, decimal=4)
assert_array_almost_equal(clf.predict(X), sp_clf.predict(X_sp))
clf.fit(X2, Y2)
sp_clf.fit(X2_sp, Y2)
assert_array_almost_equal(clf.coef_, sp_clf.coef_, decimal=4)
assert_array_almost_equal(clf.intercept_, sp_clf.intercept_, decimal=4)
def test_linearsvc_iris():
# Test the sparse LinearSVC with the iris dataset
sp_clf = svm.LinearSVC(random_state=0).fit(iris.data, iris.target)
clf = svm.LinearSVC(random_state=0).fit(iris.data.toarray(), iris.target)
assert_equal(clf.fit_intercept, sp_clf.fit_intercept)
assert_array_almost_equal(clf.coef_, sp_clf.coef_, decimal=1)
assert_array_almost_equal(clf.intercept_, sp_clf.intercept_, decimal=1)
assert_array_almost_equal(
clf.predict(iris.data.toarray()), sp_clf.predict(iris.data))
# check decision_function
pred = np.argmax(sp_clf.decision_function(iris.data), 1)
assert_array_almost_equal(pred, clf.predict(iris.data.toarray()))
# sparsify the coefficients on both models and check that they still
# produce the same results
clf.sparsify()
assert_array_equal(pred, clf.predict(iris.data))
sp_clf.sparsify()
assert_array_equal(pred, sp_clf.predict(iris.data))
def test_weight():
# Test class weights
X_, y_ = make_classification(n_samples=200, n_features=100,
weights=[0.833, 0.167], random_state=0)
X_ = sparse.csr_matrix(X_)
for clf in (linear_model.LogisticRegression(),
svm.LinearSVC(random_state=0),
svm.SVC()):
clf.set_params(class_weight={0: 5})
clf.fit(X_[:180], y_[:180])
y_pred = clf.predict(X_[180:])
assert_true(np.sum(y_pred == y_[180:]) >= 11)
def test_sample_weights():
# Test weights on individual samples
clf = svm.SVC()
clf.fit(X_sp, Y)
assert_array_equal(clf.predict(X[2]), [1.])
sample_weight = [.1] * 3 + [10] * 3
clf.fit(X_sp, Y, sample_weight=sample_weight)
assert_array_equal(clf.predict(X[2]), [2.])
def test_sparse_liblinear_intercept_handling():
# Test that sparse liblinear honours intercept_scaling param
test_svm.test_dense_liblinear_intercept_handling(svm.LinearSVC)
def test_sparse_realdata():
# Test on a subset from the 20newsgroups dataset.
# This catches some bugs if input is not correctly converted into
# sparse format or weights are not correctly initialized.
data = np.array([0.03771744, 0.1003567, 0.01174647, 0.027069])
indices = np.array([6, 5, 35, 31])
indptr = np.array(
[0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2,
2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
2, 2, 2, 2, 2, 2, 2, 2, 2, 4, 4, 4])
X = sparse.csr_matrix((data, indices, indptr))
y = np.array(
[1., 0., 2., 2., 1., 1., 1., 2., 2., 0., 1., 2., 2.,
0., 2., 0., 3., 0., 3., 0., 1., 1., 3., 2., 3., 2.,
0., 3., 1., 0., 2., 1., 2., 0., 1., 0., 2., 3., 1.,
3., 0., 1., 0., 0., 2., 0., 1., 2., 2., 2., 3., 2.,
0., 3., 2., 1., 2., 3., 2., 2., 0., 1., 0., 1., 2.,
3., 0., 0., 2., 2., 1., 3., 1., 1., 0., 1., 2., 1.,
1., 3.])
clf = svm.SVC(kernel='linear').fit(X.toarray(), y)
sp_clf = svm.SVC(kernel='linear').fit(sparse.coo_matrix(X), y)
assert_array_equal(clf.support_vectors_, sp_clf.support_vectors_.toarray())
assert_array_equal(clf.dual_coef_, sp_clf.dual_coef_.toarray())
def test_sparse_svc_clone_with_callable_kernel():
# Test that the "dense_fit" is called even though we use sparse input
# meaning that everything works fine.
a = svm.SVC(C=1, kernel=lambda x, y: x * y.T, probability=True,
random_state=0)
b = base.clone(a)
b.fit(X_sp, Y)
pred = b.predict(X_sp)
b.predict_proba(X_sp)
dense_svm = svm.SVC(C=1, kernel=lambda x, y: np.dot(x, y.T),
probability=True, random_state=0)
pred_dense = dense_svm.fit(X, Y).predict(X)
assert_array_equal(pred_dense, pred)
# b.decision_function(X_sp) # XXX : should be supported
def test_timeout():
sp = svm.SVC(C=1, kernel=lambda x, y: x * y.T, probability=True,
random_state=0, max_iter=1)
assert_warns(ConvergenceWarning, sp.fit, X_sp, Y)
def test_consistent_proba():
a = svm.SVC(probability=True, max_iter=1, random_state=0)
proba_1 = a.fit(X, Y).predict_proba(X)
a = svm.SVC(probability=True, max_iter=1, random_state=0)
proba_2 = a.fit(X, Y).predict_proba(X)
assert_array_almost_equal(proba_1, proba_2)
| bsd-3-clause |
KarlTDebiec/myplotspec | manage_output.py | 1 | 4072 | # -*- coding: utf-8 -*-
# myplotspec.manage_output.py
#
# Copyright (C) 2015-2017 Karl T Debiec
# All rights reserved.
#
# This software may be modified and distributed under the terms of the
# BSD license. See the LICENSE file for details.
"""
Decorator to manage the output of matplotlib figures.
"""
################################### MODULES ###################################
from __future__ import (absolute_import, division, print_function,
unicode_literals)
################################### CLASSES ###################################
class manage_output(object):
"""
Decorator to manage the output of matplotlib figures.
Saves the figure returned by the wrapped function to a file named
``outfile``, passing additional keyword arguments ``savefig_kw`` to
``Figure.savefig()``. For pdf output, an additional argument
``outfiles`` may be provided, containing a dictionary whose keys are
the paths to output pdf files, and whose values are open PdfPages
objects representing those files. This allows figures output from
multiple calls to the wrapped function to be written to sequential
pages of the same pdf file. Typically ``outfiles`` is initialized
before calling the wrapped function; once all calls to the function
are complete, the ``PdfPages.close()`` method of each outfile in
``outfiles`` is called.
.. todo:
- Support show()
"""
def __call__(self, function):
"""
Wraps function or method.
Arguments:
function (function): Function or method to wrap
Returns:
(function): Wrapped function or method
"""
from functools import wraps
decorator = self
self.function = function
@wraps(function)
def wrapped_function(*args, **kwargs):
"""
Wrapped version of function or method
Arguments:
outfile (str, PdfPages): outfile path or PdfPages
object
outfiles (dict): Nascent dict of [outfile path]: PdfPages
savefig_kw (dict): Keyword arguments passed to savefig()
args (tuple): Arguments passed to function
kwargs (dict): Keyword arguments passed to function
Returns:
Return value of wrapped function
"""
from os.path import abspath, expandvars
import six
import matplotlib
from matplotlib.backends.backend_pdf import PdfPages
verbose = kwargs.get("verbose", 1)
debug = kwargs.get("debug", 0)
figure = function(*args, **kwargs)
outfile = kwargs.pop("outfile", "outfile.pdf")
outfiles = kwargs.pop("outfiles", None)
savefig_kw = kwargs.pop("savefig_kw", {})
if not isinstance(outfile, list):
outfile = [outfile]
for of in outfile:
sf_kw = savefig_kw.copy()
if isinstance(of, matplotlib.backends.backend_pdf.PdfPages):
of_path = abspath(of._file.fh.name)
elif isinstance(of, six.string_types):
of_path = abspath(expandvars(of))
if of_path.endswith("pdf"):
sf_kw["format"] = "pdf"
if outfiles is None:
of_pdf = PdfPages(of_path)
figure.savefig(of_pdf, **sf_kw)
of_pdf.close()
elif of_path in outfiles:
of_pdf = outfiles[of_path]
figure.savefig(of_pdf, **sf_kw)
else:
of_pdf = outfiles[of_path] = PdfPages(of_path)
figure.savefig(of_pdf, **sf_kw)
else:
figure.savefig(of_path, **sf_kw)
if verbose:
print("Figure saved to '{0}'.".format(of_path))
return figure
return wrapped_function
| bsd-3-clause |
cloudera/ibis | ibis/tests/expr/test_window_functions.py | 2 | 9929 | # Copyright 2014 Cloudera Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import pandas as pd
import pytest
import ibis
import ibis.common.exceptions as com
from ibis.expr.window import _determine_how, rows_with_max_lookback
from ibis.tests.util import assert_equal
def test_compose_group_by_apis(alltypes):
t = alltypes
w = ibis.window(group_by=t.g, order_by=t.f)
diff = t.d - t.d.lag()
grouped = t.group_by('g').order_by('f')
expr = grouped[t, diff.name('diff')]
expr2 = grouped.mutate(diff=diff)
expr3 = grouped.mutate([diff.name('diff')])
window_expr = (t.d - t.d.lag().over(w)).name('diff')
expected = t.projection([t, window_expr])
assert_equal(expr, expected)
assert_equal(expr, expr2)
assert_equal(expr, expr3)
def test_combine_windows(alltypes):
t = alltypes
w1 = ibis.window(group_by=t.g, order_by=t.f)
w2 = ibis.window(preceding=5, following=5)
w3 = w1.combine(w2)
expected = ibis.window(
group_by=t.g, order_by=t.f, preceding=5, following=5
)
assert_equal(w3, expected)
w4 = ibis.window(group_by=t.a, order_by=t.e)
w5 = w3.combine(w4)
expected = ibis.window(
group_by=[t.g, t.a], order_by=[t.f, t.e], preceding=5, following=5
)
assert_equal(w5, expected)
# Cannot combine windows of varying types.
w6 = ibis.range_window(preceding=5, following=5)
with pytest.raises(ibis.common.exceptions.IbisInputError):
w1.combine(w6)
def test_combine_windows_with_zero_offset():
w1 = ibis.window(preceding=0, following=5)
w2 = ibis.window(preceding=7, following=10)
w3 = w1.combine(w2)
expected = ibis.window(preceding=7, following=5)
assert_equal(w3, expected)
w4 = ibis.window(preceding=3, following=0)
w5 = w4.combine(w2)
expected = ibis.window(preceding=3, following=10)
assert_equal(w5, expected)
def test_combine_window_with_interval_offset(alltypes):
t = alltypes
w1 = ibis.trailing_range_window(
preceding=ibis.interval(days=3), order_by=t.e
)
w2 = ibis.trailing_range_window(
preceding=ibis.interval(days=4), order_by=t.f
)
w3 = w1.combine(w2)
expected = ibis.trailing_range_window(
preceding=ibis.interval(days=3), order_by=[t.e, t.f]
)
assert_equal(w3, expected)
w4 = ibis.range_window(following=ibis.interval(days=5), order_by=t.e)
w5 = ibis.range_window(following=ibis.interval(days=7), order_by=t.f)
expected = ibis.range_window(
following=ibis.interval(days=5), order_by=[t.e, t.f]
)
w6 = w4.combine(w5)
assert_equal(w6, expected)
def test_combine_window_with_max_lookback():
w1 = ibis.trailing_window(rows_with_max_lookback(3, ibis.interval(days=5)))
w2 = ibis.trailing_window(rows_with_max_lookback(5, ibis.interval(days=7)))
w3 = w1.combine(w2)
expected = ibis.trailing_window(
rows_with_max_lookback(3, ibis.interval(days=5))
)
assert_equal(w3, expected)
def test_replace_window(alltypes):
t = alltypes
w1 = ibis.window(preceding=5, following=1, group_by=t.a, order_by=t.b)
w2 = w1.group_by(t.c)
expected = ibis.window(
preceding=5, following=1, group_by=[t.a, t.c], order_by=t.b
)
assert_equal(w2, expected)
w3 = w1.order_by(t.d)
expected = ibis.window(
preceding=5, following=1, group_by=t.a, order_by=[t.b, t.d]
)
assert_equal(w3, expected)
w4 = ibis.trailing_window(
rows_with_max_lookback(3, ibis.interval(months=3))
)
w5 = w4.group_by(t.a)
expected = ibis.trailing_window(
rows_with_max_lookback(3, ibis.interval(months=3)), group_by=t.a
)
assert_equal(w5, expected)
def test_over_auto_bind(alltypes):
# GH #542
t = alltypes
w = ibis.window(group_by='g', order_by='f')
expr = t.f.lag().over(w)
actual_window = expr.op().args[1]
expected = ibis.window(group_by=t.g, order_by=t.f)
assert_equal(actual_window, expected)
def test_window_function_bind(alltypes):
# GH #532
t = alltypes
w = ibis.window(group_by=lambda x: x.g, order_by=lambda x: x.f)
expr = t.f.lag().over(w)
actual_window = expr.op().args[1]
expected = ibis.window(group_by=t.g, order_by=t.f)
assert_equal(actual_window, expected)
def test_auto_windowize_analysis_bug(con):
# GH #544
t = con.table('airlines')
def metric(x):
return x.arrdelay.mean().name('avg_delay')
annual_delay = (
t[t.dest.isin(['JFK', 'SFO'])]
.group_by(['dest', 'year'])
.aggregate(metric)
)
what = annual_delay.group_by('dest')
enriched = what.mutate(grand_avg=annual_delay.avg_delay.mean())
expr = (
annual_delay.avg_delay.mean()
.name('grand_avg')
.over(ibis.window(group_by=annual_delay.dest))
)
expected = annual_delay[annual_delay, expr]
assert_equal(enriched, expected)
def test_mutate_sorts_keys(con):
t = con.table('airlines')
m = t.arrdelay.mean()
g = t.group_by('dest')
result = g.mutate(zzz=m, yyy=m, ddd=m, ccc=m, bbb=m, aaa=m)
expected = g.mutate(
[
m.name('aaa'),
m.name('bbb'),
m.name('ccc'),
m.name('ddd'),
m.name('yyy'),
m.name('zzz'),
]
)
assert_equal(result, expected)
def test_window_bind_to_table(alltypes):
t = alltypes
w = ibis.window(group_by='g', order_by=ibis.desc('f'))
w2 = w.bind(alltypes)
expected = ibis.window(group_by=t.g, order_by=ibis.desc(t.f))
assert_equal(w2, expected)
def test_preceding_following_validate(alltypes):
# these all work
[
ibis.window(preceding=0),
ibis.window(following=0),
ibis.window(preceding=0, following=0),
ibis.window(preceding=(None, 4)),
ibis.window(preceding=(10, 4)),
ibis.window(following=(4, None)),
ibis.window(following=(4, 10)),
]
# these are ill-specified
error_cases = [
lambda: ibis.window(preceding=(1, 3)),
lambda: ibis.window(preceding=(3, 1), following=2),
lambda: ibis.window(preceding=(3, 1), following=(2, 4)),
lambda: ibis.window(preceding=-1),
lambda: ibis.window(following=-1),
lambda: ibis.window(preceding=(-1, 2)),
lambda: ibis.window(following=(2, -1)),
]
for i, case in enumerate(error_cases):
with pytest.raises(Exception):
case()
def test_max_rows_with_lookback_validate(alltypes):
t = alltypes
mlb = rows_with_max_lookback(3, ibis.interval(days=5))
window = ibis.trailing_window(mlb, order_by=t.i)
t.f.lag().over(window)
window = ibis.trailing_window(mlb)
with pytest.raises(com.IbisInputError):
t.f.lag().over(window)
window = ibis.trailing_window(mlb, order_by=t.a)
with pytest.raises(com.IbisInputError):
t.f.lag().over(window)
window = ibis.trailing_window(mlb, order_by=[t.i, t.a])
with pytest.raises(com.IbisInputError):
t.f.lag().over(window)
def test_window_equals(alltypes):
t = alltypes
w1 = ibis.window(preceding=1, following=2, group_by=t.a, order_by=t.b)
w2 = ibis.window(preceding=1, following=2, group_by=t.a, order_by=t.b)
assert w1.equals(w2)
w3 = ibis.window(preceding=1, following=2, group_by=t.a, order_by=t.c)
assert not w1.equals(w3)
w4 = ibis.range_window(preceding=ibis.interval(hours=3), group_by=t.d)
w5 = ibis.range_window(preceding=ibis.interval(hours=3), group_by=t.d)
assert w4.equals(w5)
w6 = ibis.range_window(preceding=ibis.interval(hours=1), group_by=t.d)
assert not w4.equals(w6)
w7 = ibis.trailing_window(
rows_with_max_lookback(3, ibis.interval(days=5)),
group_by=t.a,
order_by=t.b,
)
w8 = ibis.trailing_window(
rows_with_max_lookback(3, ibis.interval(days=5)),
group_by=t.a,
order_by=t.b,
)
assert w7.equals(w8)
w9 = ibis.trailing_window(
rows_with_max_lookback(3, ibis.interval(months=5)),
group_by=t.a,
order_by=t.b,
)
assert not w7.equals(w9)
def test_determine_how():
how = _determine_how((None, 5))
assert how == 'rows'
how = _determine_how((3, 1))
assert how == 'rows'
how = _determine_how(5)
assert how == 'rows'
how = _determine_how(np.int64(7))
assert how == 'rows'
how = _determine_how(ibis.interval(days=3))
assert how == 'range'
how = _determine_how(ibis.interval(months=5) + ibis.interval(days=10))
assert how == 'range'
how = _determine_how(rows_with_max_lookback(3, ibis.interval(months=3)))
assert how == 'rows'
how = _determine_how(rows_with_max_lookback(3, pd.Timedelta(days=3)))
assert how == 'rows'
how = _determine_how(
rows_with_max_lookback(np.int64(7), ibis.interval(months=3))
)
assert how == 'rows'
with pytest.raises(TypeError):
_determine_how(8.9)
with pytest.raises(TypeError):
_determine_how('invalid preceding')
with pytest.raises(TypeError):
_determine_how({'rows': 1, 'max_lookback': 2})
with pytest.raises(TypeError):
_determine_how(
rows_with_max_lookback(
ibis.interval(days=3), ibis.interval(months=1)
)
)
with pytest.raises(TypeError):
_determine_how([3, 5])
| apache-2.0 |
0x0all/scikit-learn | examples/datasets/plot_iris_dataset.py | 283 | 1928 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=========================================================
The Iris Dataset
=========================================================
This data set consists of 3 different types of irises'
(Setosa, Versicolour, and Virginica) petal and sepal
measurements, stored in a 150x4 numpy.ndarray
The rows being the samples and the columns being:
Sepal Length, Sepal Width, Petal Length and Petal Width.
The below plot uses the first two features.
See `here <http://en.wikipedia.org/wiki/Iris_flower_data_set>`_ for more
information on this dataset.
"""
print(__doc__)
# Code source: Gaël Varoquaux
# Modified for documentation by Jaques Grobler
# License: BSD 3 clause
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from sklearn import datasets
from sklearn.decomposition import PCA
# import some data to play with
iris = datasets.load_iris()
X = iris.data[:, :2] # we only take the first two features.
Y = iris.target
x_min, x_max = X[:, 0].min() - .5, X[:, 0].max() + .5
y_min, y_max = X[:, 1].min() - .5, X[:, 1].max() + .5
plt.figure(2, figsize=(8, 6))
plt.clf()
# Plot the training points
plt.scatter(X[:, 0], X[:, 1], c=Y, cmap=plt.cm.Paired)
plt.xlabel('Sepal length')
plt.ylabel('Sepal width')
plt.xlim(x_min, x_max)
plt.ylim(y_min, y_max)
plt.xticks(())
plt.yticks(())
# To get a better understanding of the interaction of the dimensions
# plot the first three PCA dimensions
fig = plt.figure(1, figsize=(8, 6))
ax = Axes3D(fig, elev=-150, azim=110)
X_reduced = PCA(n_components=3).fit_transform(iris.data)
ax.scatter(X_reduced[:, 0], X_reduced[:, 1], X_reduced[:, 2], c=Y,
cmap=plt.cm.Paired)
ax.set_title("First three PCA directions")
ax.set_xlabel("1st eigenvector")
ax.w_xaxis.set_ticklabels([])
ax.set_ylabel("2nd eigenvector")
ax.w_yaxis.set_ticklabels([])
ax.set_zlabel("3rd eigenvector")
ax.w_zaxis.set_ticklabels([])
plt.show()
| bsd-3-clause |
zuku1985/scikit-learn | sklearn/utils/extmath.py | 19 | 27505 | """
Extended math utilities.
"""
# Authors: Gael Varoquaux
# Alexandre Gramfort
# Alexandre T. Passos
# Olivier Grisel
# Lars Buitinck
# Stefan van der Walt
# Kyle Kastner
# Giorgio Patrini
# License: BSD 3 clause
from __future__ import division
from functools import partial
import warnings
import numpy as np
from scipy import linalg
from scipy.sparse import issparse, csr_matrix
from . import check_random_state
from .fixes import np_version
from ._logistic_sigmoid import _log_logistic_sigmoid
from ..externals.six.moves import xrange
from .sparsefuncs_fast import csr_row_norms
from .validation import check_array
from ..exceptions import NonBLASDotWarning
def norm(x):
"""Compute the Euclidean or Frobenius norm of x.
Returns the Euclidean norm when x is a vector, the Frobenius norm when x
is a matrix (2-d array). More precise than sqrt(squared_norm(x)).
"""
x = np.asarray(x)
nrm2, = linalg.get_blas_funcs(['nrm2'], [x])
return nrm2(x)
# Newer NumPy has a ravel that needs less copying.
if np_version < (1, 7, 1):
_ravel = np.ravel
else:
_ravel = partial(np.ravel, order='K')
def squared_norm(x):
"""Squared Euclidean or Frobenius norm of x.
Returns the Euclidean norm when x is a vector, the Frobenius norm when x
is a matrix (2-d array). Faster than norm(x) ** 2.
"""
x = _ravel(x)
return np.dot(x, x)
def row_norms(X, squared=False):
"""Row-wise (squared) Euclidean norm of X.
Equivalent to np.sqrt((X * X).sum(axis=1)), but also supports sparse
matrices and does not create an X.shape-sized temporary.
Performs no input validation.
"""
if issparse(X):
if not isinstance(X, csr_matrix):
X = csr_matrix(X)
norms = csr_row_norms(X)
else:
norms = np.einsum('ij,ij->i', X, X)
if not squared:
np.sqrt(norms, norms)
return norms
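# Quick sanity sketch (illustrative values, not a doctest):
# row_norms(np.array([[3., 4.]])) gives array([5.]), and with
# squared=True it gives array([25.]).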
def fast_logdet(A):
"""Compute log(det(A)) for A symmetric
Equivalent to : np.log(nl.det(A)) but more robust.
It returns -Inf if det(A) is non positive or is not defined.
"""
sign, ld = np.linalg.slogdet(A)
if not sign > 0:
return -np.inf
return ld
def _impose_f_order(X):
"""Helper Function"""
# important to access flags instead of calling np.isfortran,
# this catches corner cases.
if X.flags.c_contiguous:
return check_array(X.T, copy=False, order='F'), True
else:
return check_array(X, copy=False, order='F'), False
def _fast_dot(A, B):
if B.shape[0] != A.shape[A.ndim - 1]: # check adopted from '_dotblas.c'
raise ValueError
if A.dtype != B.dtype or any(x.dtype not in (np.float32, np.float64)
for x in [A, B]):
warnings.warn('Falling back to np.dot. '
'Data must be of same type of either '
'32 or 64 bit float for the BLAS function, gemm, to be '
'used for an efficient dot operation. ',
NonBLASDotWarning)
raise ValueError
if min(A.shape) == 1 or min(B.shape) == 1 or A.ndim != 2 or B.ndim != 2:
raise ValueError
# scipy 0.9 compliant API
dot = linalg.get_blas_funcs(['gemm'], (A, B))[0]
A, trans_a = _impose_f_order(A)
B, trans_b = _impose_f_order(B)
return dot(alpha=1.0, a=A, b=B, trans_a=trans_a, trans_b=trans_b)
def _have_blas_gemm():
try:
linalg.get_blas_funcs(['gemm'])
return True
except (AttributeError, ValueError):
warnings.warn('Could not import BLAS, falling back to np.dot')
return False
# Only use fast_dot for older NumPy; newer ones have tackled the speed issue.
if np_version < (1, 7, 2) and _have_blas_gemm():
def fast_dot(A, B):
"""Compute fast dot products directly calling BLAS.
This function calls BLAS directly while ensuring Fortran contiguity.
This helps avoid extra copies that `np.dot` would have created.
For details see section `Linear Algebra on large Arrays`:
http://wiki.scipy.org/PerformanceTips
Parameters
----------
A, B: instance of np.ndarray
Input arrays. Arrays are supposed to be of the same dtype and to
have exactly 2 dimensions. Currently only floats are supported.
In case these requirements aren't met np.dot(A, B) is returned
instead. To activate the related warning issued in this case
execute the following lines of code:
>> import warnings
>> from sklearn.exceptions import NonBLASDotWarning
>> warnings.simplefilter('always', NonBLASDotWarning)
"""
try:
return _fast_dot(A, B)
except ValueError:
# Maltyped or malformed data.
return np.dot(A, B)
else:
fast_dot = np.dot
def density(w, **kwargs):
"""Compute density of a sparse vector
Return a value between 0 and 1
"""
if hasattr(w, "toarray"):
d = float(w.nnz) / (w.shape[0] * w.shape[1])
else:
d = 0 if w is None else float((w != 0).sum()) / w.size
return d
def safe_sparse_dot(a, b, dense_output=False):
"""Dot product that handle the sparse matrix case correctly
Uses BLAS GEMM as replacement for numpy.dot where possible
to avoid unnecessary copies.
"""
if issparse(a) or issparse(b):
ret = a * b
if dense_output and hasattr(ret, "toarray"):
ret = ret.toarray()
return ret
else:
return fast_dot(a, b)
def randomized_range_finder(A, size, n_iter,
power_iteration_normalizer='auto',
random_state=None):
"""Computes an orthonormal matrix whose range approximates the range of A.
Parameters
----------
A : 2D array
The input data matrix
size : integer
Size of the return array
n_iter : integer
Number of power iterations used to stabilize the result
power_iteration_normalizer : 'auto' (default), 'QR', 'LU', 'none'
Whether the power iterations are normalized with step-by-step
QR factorization (the slowest but most accurate), 'none'
(the fastest but numerically unstable when `n_iter` is large, e.g.
typically 5 or larger), or 'LU' factorization (numerically stable
but can lose slightly in accuracy). The 'auto' mode applies no
normalization if `n_iter`<=2 and switches to LU otherwise.
.. versionadded:: 0.18
random_state : RandomState or an int seed (0 by default)
A random number generator instance
Returns
-------
Q : 2D array
A (A.shape[0] x size) projection matrix, the range of which
approximates well the range of the input matrix A.
Notes
-----
Follows Algorithm 4.3 of
Finding structure with randomness: Stochastic algorithms for constructing
approximate matrix decompositions
Halko, et al., 2009 (arXiv:0909.4061) http://arxiv.org/pdf/0909.4061
An implementation of a randomized algorithm for principal component
analysis
A. Szlam et al. 2014
"""
random_state = check_random_state(random_state)
# Generating normal random vectors with shape: (A.shape[1], size)
Q = random_state.normal(size=(A.shape[1], size))
# Deal with "auto" mode
if power_iteration_normalizer == 'auto':
if n_iter <= 2:
power_iteration_normalizer = 'none'
else:
power_iteration_normalizer = 'LU'
# Perform power iterations with Q to further 'imprint' the top
# singular vectors of A in Q
for i in range(n_iter):
if power_iteration_normalizer == 'none':
Q = safe_sparse_dot(A, Q)
Q = safe_sparse_dot(A.T, Q)
elif power_iteration_normalizer == 'LU':
Q, _ = linalg.lu(safe_sparse_dot(A, Q), permute_l=True)
Q, _ = linalg.lu(safe_sparse_dot(A.T, Q), permute_l=True)
elif power_iteration_normalizer == 'QR':
Q, _ = linalg.qr(safe_sparse_dot(A, Q), mode='economic')
Q, _ = linalg.qr(safe_sparse_dot(A.T, Q), mode='economic')
# Sample the range of A by linear projection of Q
# and extract an orthonormal basis
Q, _ = linalg.qr(safe_sparse_dot(A, Q), mode='economic')
return Q
def randomized_svd(M, n_components, n_oversamples=10, n_iter='auto',
power_iteration_normalizer='auto', transpose='auto',
flip_sign=True, random_state=0):
"""Computes a truncated randomized SVD
Parameters
----------
M : ndarray or sparse matrix
Matrix to decompose
n_components : int
Number of singular values and vectors to extract.
n_oversamples : int (default is 10)
Additional number of random vectors to sample the range of M so as
to ensure proper conditioning. The total number of random vectors
used to find the range of M is n_components + n_oversamples. Smaller
number can improve speed but can negatively impact the quality of
approximation of singular vectors and singular values.
n_iter : int or 'auto' (default is 'auto')
Number of power iterations. It can be used to deal with very noisy
problems. When 'auto', it is set to 4, unless `n_components` is small
(< .1 * min(X.shape)), in which case `n_iter` is set to 7.
This improves precision with few components.
.. versionchanged:: 0.18
power_iteration_normalizer : 'auto' (default), 'QR', 'LU', 'none'
Whether the power iterations are normalized with step-by-step
QR factorization (the slowest but most accurate), 'none'
(the fastest but numerically unstable when `n_iter` is large, e.g.
typically 5 or larger), or 'LU' factorization (numerically stable
but can lose slightly in accuracy). The 'auto' mode applies no
normalization if `n_iter`<=2 and switches to LU otherwise.
.. versionadded:: 0.18
transpose : True, False or 'auto' (default)
Whether the algorithm should be applied to M.T instead of M. The
result should approximately be the same. The 'auto' mode will
trigger the transposition if M.shape[1] > M.shape[0] since this
implementation of randomized SVD tend to be a little faster in that
case.
.. versionchanged:: 0.18
flip_sign : boolean, (True by default)
The output of a singular value decomposition is only unique up to a
permutation of the signs of the singular vectors. If `flip_sign` is
set to `True`, the sign ambiguity is resolved by making the largest
loadings for each component in the left singular vectors positive.
random_state : RandomState or an int seed (0 by default)
A random number generator instance to make behavior deterministic.
Notes
-----
This algorithm finds a (usually very good) approximate truncated
singular value decomposition using randomization to speed up the
computations. It is particularly fast on large matrices on which
you wish to extract only a small number of components. In order to
obtain further speed up, `n_iter` can be set <=2 (at the cost of
loss of precision).
References
----------
* Finding structure with randomness: Stochastic algorithms for constructing
approximate matrix decompositions
Halko, et al., 2009 http://arxiv.org/abs/arXiv:0909.4061
* A randomized algorithm for the decomposition of matrices
Per-Gunnar Martinsson, Vladimir Rokhlin and Mark Tygert
* An implementation of a randomized algorithm for principal component
analysis
A. Szlam et al. 2014
"""
random_state = check_random_state(random_state)
n_random = n_components + n_oversamples
n_samples, n_features = M.shape
if n_iter == 'auto':
# Checks if the number of iterations is explicitly specified
# Adjust n_iter. 7 was found to be a good compromise for PCA. See #5299
n_iter = 7 if n_components < .1 * min(M.shape) else 4
if transpose == 'auto':
transpose = n_samples < n_features
if transpose:
# this implementation is a bit faster with smaller shape[1]
M = M.T
Q = randomized_range_finder(M, n_random, n_iter,
power_iteration_normalizer, random_state)
# project M to the (k + p) dimensional space using the basis vectors
B = safe_sparse_dot(Q.T, M)
# compute the SVD on the thin matrix: (k + p) wide
Uhat, s, V = linalg.svd(B, full_matrices=False)
del B
U = np.dot(Q, Uhat)
if flip_sign:
if not transpose:
U, V = svd_flip(U, V)
else:
# In the transposed case we pass u_based_decision=False
# so that the flip is actually based on u and not v.
U, V = svd_flip(U, V, u_based_decision=False)
if transpose:
# transpose back the results according to the input convention
return V[:n_components, :].T, s[:n_components], U[:, :n_components].T
else:
return U[:, :n_components], s[:n_components], V[:n_components, :]
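# Illustrative call (assumed shapes, not a doctest): approximate the top
# 5 singular triplets of a tall random matrix.
#
#     M = np.random.RandomState(0).randn(1000, 50)
#     U, s, V = randomized_svd(M, n_components=5, random_state=0)
#     # U.shape == (1000, 5), s.shape == (5,), V.shape == (5, 50)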
def logsumexp(arr, axis=0):
"""Computes the sum of arr assuming arr is in the log domain.
Returns log(sum(exp(arr))) while minimizing the possibility of
over/underflow.
Examples
--------
>>> import numpy as np
>>> from sklearn.utils.extmath import logsumexp
>>> a = np.arange(10)
>>> np.log(np.sum(np.exp(a)))
9.4586297444267107
>>> logsumexp(a)
9.4586297444267107
"""
arr = np.rollaxis(arr, axis)
# Use the max to normalize, as with the log this is what accumulates
# the less errors
vmax = arr.max(axis=0)
out = np.log(np.sum(np.exp(arr - vmax), axis=0))
out += vmax
return out
def weighted_mode(a, w, axis=0):
"""Returns an array of the weighted modal (most common) value in a
If there is more than one such value, only the first is returned.
The bin-count for the modal bins is also returned.
This is an extension of the algorithm in scipy.stats.mode.
Parameters
----------
a : array_like
n-dimensional array of which to find mode(s).
w : array_like
n-dimensional array of weights for each value
axis : int, optional
Axis along which to operate. Default is 0, i.e. the first axis.
Returns
-------
vals : ndarray
Array of modal values.
score : ndarray
Array of weighted counts for each mode.
Examples
--------
>>> from sklearn.utils.extmath import weighted_mode
>>> x = [4, 1, 4, 2, 4, 2]
>>> weights = [1, 1, 1, 1, 1, 1]
>>> weighted_mode(x, weights)
(array([ 4.]), array([ 3.]))
The value 4 appears three times: with uniform weights, the result is
simply the mode of the distribution.
>>> weights = [1, 3, 0.5, 1.5, 1, 2] # deweight the 4's
>>> weighted_mode(x, weights)
(array([ 2.]), array([ 3.5]))
The value 2 has the highest score: it appears twice with weights of
1.5 and 2: the sum of these is 3.5.
See Also
--------
scipy.stats.mode
"""
if axis is None:
a = np.ravel(a)
w = np.ravel(w)
axis = 0
else:
a = np.asarray(a)
w = np.asarray(w)
axis = axis
if a.shape != w.shape:
w = np.zeros(a.shape, dtype=w.dtype) + w
scores = np.unique(np.ravel(a)) # get ALL unique values
testshape = list(a.shape)
testshape[axis] = 1
oldmostfreq = np.zeros(testshape)
oldcounts = np.zeros(testshape)
for score in scores:
template = np.zeros(a.shape)
ind = (a == score)
template[ind] = w[ind]
counts = np.expand_dims(np.sum(template, axis), axis)
mostfrequent = np.where(counts > oldcounts, score, oldmostfreq)
oldcounts = np.maximum(counts, oldcounts)
oldmostfreq = mostfrequent
return mostfrequent, oldcounts
def pinvh(a, cond=None, rcond=None, lower=True):
"""Compute the (Moore-Penrose) pseudo-inverse of a hermetian matrix.
Calculate a generalized inverse of a symmetric matrix using its
eigenvalue decomposition and including all 'large' eigenvalues.
Parameters
----------
a : array, shape (N, N)
Real symmetric or complex hermitian matrix to be pseudo-inverted
cond : float or None, default None
Cutoff for 'small' eigenvalues.
Singular values smaller than cond * largest_eigenvalue are considered
zero.
If None or -1, suitable machine precision is used.
rcond : float or None, default None (deprecated)
Cutoff for 'small' eigenvalues.
Singular values smaller than rcond * largest_eigenvalue are considered
zero.
If None or -1, suitable machine precision is used.
lower : boolean
Whether the pertinent array data is taken from the lower or upper
triangle of a. (Default: lower)
Returns
-------
B : array, shape (N, N)
Raises
------
LinAlgError
If eigenvalue does not converge
Examples
--------
>>> import numpy as np
>>> a = np.random.randn(9, 6)
>>> a = np.dot(a, a.T)
>>> B = pinvh(a)
>>> np.allclose(a, np.dot(a, np.dot(B, a)))
True
>>> np.allclose(B, np.dot(B, np.dot(a, B)))
True
"""
a = np.asarray_chkfinite(a)
s, u = linalg.eigh(a, lower=lower)
if rcond is not None:
cond = rcond
if cond in [None, -1]:
t = u.dtype.char.lower()
factor = {'f': 1E3, 'd': 1E6}
cond = factor[t] * np.finfo(t).eps
# unlike svd case, eigh can lead to negative eigenvalues
above_cutoff = (abs(s) > cond * np.max(abs(s)))
psigma_diag = np.zeros_like(s)
psigma_diag[above_cutoff] = 1.0 / s[above_cutoff]
return np.dot(u * psigma_diag, np.conjugate(u).T)
def cartesian(arrays, out=None):
"""Generate a cartesian product of input arrays.
Parameters
----------
arrays : list of array-like
1-D arrays to form the cartesian product of.
out : ndarray
Array to place the cartesian product in.
Returns
-------
out : ndarray
2-D array of shape (M, len(arrays)) containing cartesian products
formed of input arrays.
Examples
--------
>>> cartesian(([1, 2, 3], [4, 5], [6, 7]))
array([[1, 4, 6],
[1, 4, 7],
[1, 5, 6],
[1, 5, 7],
[2, 4, 6],
[2, 4, 7],
[2, 5, 6],
[2, 5, 7],
[3, 4, 6],
[3, 4, 7],
[3, 5, 6],
[3, 5, 7]])
"""
arrays = [np.asarray(x) for x in arrays]
shape = (len(x) for x in arrays)
dtype = arrays[0].dtype
ix = np.indices(shape)
ix = ix.reshape(len(arrays), -1).T
if out is None:
out = np.empty_like(ix, dtype=dtype)
for n, arr in enumerate(arrays):
out[:, n] = arr[ix[:, n]]
return out
def svd_flip(u, v, u_based_decision=True):
"""Sign correction to ensure deterministic output from SVD.
Adjusts the columns of u and the rows of v such that the loadings in the
columns in u that are largest in absolute value are always positive.
Parameters
----------
u, v : ndarray
u and v are the output of `linalg.svd` or
`sklearn.utils.extmath.randomized_svd`, with matching inner dimensions
so one can compute `np.dot(u * s, v)`.
u_based_decision : boolean, (default=True)
If True, use the columns of u as the basis for sign flipping.
Otherwise, use the rows of v. The choice of which variable to base the
decision on is generally algorithm dependent.
Returns
-------
u_adjusted, v_adjusted : arrays with the same dimensions as the input.
"""
if u_based_decision:
# columns of u, rows of v
max_abs_cols = np.argmax(np.abs(u), axis=0)
signs = np.sign(u[max_abs_cols, xrange(u.shape[1])])
u *= signs
v *= signs[:, np.newaxis]
else:
# rows of v, columns of u
max_abs_rows = np.argmax(np.abs(v), axis=1)
signs = np.sign(v[xrange(v.shape[0]), max_abs_rows])
u *= signs
v *= signs[:, np.newaxis]
return u, v
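# Illustrative invariance check (assumed inputs, not a doctest): for any
# U, s, V from linalg.svd, flipping the signs leaves the reconstruction
# unchanged (note svd_flip modifies its arguments in place, hence copy()):
#
#     U2, V2 = svd_flip(U.copy(), V.copy())
#     np.allclose(np.dot(U * s, V), np.dot(U2 * s, V2))  # -> True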
def log_logistic(X, out=None):
"""Compute the log of the logistic function, ``log(1 / (1 + e ** -x))``.
This implementation is numerically stable because it splits positive and
negative values::
-log(1 + exp(-x_i)) if x_i > 0
x_i - log(1 + exp(x_i)) if x_i <= 0
For the ordinary logistic function, use ``sklearn.utils.fixes.expit``.
Parameters
----------
X : array-like, shape (M, N) or (M, )
Argument to the logistic function
out : array-like, shape: (M, N) or (M, ), optional:
Preallocated output array.
Returns
-------
out : array, shape (M, N) or (M, )
Log of the logistic function evaluated at every point in x
Notes
-----
See the blog post describing this implementation:
http://fa.bianp.net/blog/2013/numerical-optimizers-for-logistic-regression/
"""
is_1d = X.ndim == 1
X = np.atleast_2d(X)
X = check_array(X, dtype=np.float64)
n_samples, n_features = X.shape
if out is None:
out = np.empty_like(X)
_log_logistic_sigmoid(n_samples, n_features, X, out)
if is_1d:
return np.squeeze(out)
return out
def softmax(X, copy=True):
"""
Calculate the softmax function.
The softmax function is calculated by
np.exp(X) / np.sum(np.exp(X), axis=1)
This will cause overflow when large values are exponentiated.
Hence the largest value in each row is subtracted from each data
point to prevent this.
Parameters
----------
X : array-like, shape (M, N)
Argument to the logistic function
copy : bool, optional
Copy X or not.
Returns
-------
out : array, shape (M, N)
Softmax function evaluated at every point in x
"""
if copy:
X = np.copy(X)
max_prob = np.max(X, axis=1).reshape((-1, 1))
X -= max_prob
np.exp(X, X)
sum_prob = np.sum(X, axis=1).reshape((-1, 1))
X /= sum_prob
return X
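# Quick sanity sketch (illustrative values, not a doctest): each row of
# the result sums to 1, e.g. softmax(np.array([[1., 2., 3.]])) is
# approximately [[0.0900, 0.2447, 0.6652]].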
def safe_min(X):
"""Returns the minimum value of a dense or a CSR/CSC matrix.
Adapted from http://stackoverflow.com/q/13426580
"""
if issparse(X):
if len(X.data) == 0:
return 0
m = X.data.min()
return m if X.getnnz() == X.size else min(m, 0)
else:
return X.min()
def make_nonnegative(X, min_value=0):
"""Ensure `X.min()` >= `min_value`."""
min_ = safe_min(X)
if min_ < min_value:
if issparse(X):
raise ValueError("Cannot make the data matrix"
" nonnegative because it is sparse."
" Adding a value to every entry would"
" make it no longer sparse.")
X = X + (min_value - min_)
return X
def _incremental_mean_and_var(X, last_mean=.0, last_variance=None,
last_sample_count=0):
"""Calculate mean update and a Youngs and Cramer variance update.
last_mean and last_variance are statistics computed at the last step by the
function. Both must be initialized to 0.0. In case no scaling is required
last_variance can be None. The mean is always required and returned because
it is necessary for the calculation of the variance. last_sample_count is
the number of samples encountered until now.
From the paper "Algorithms for computing the sample variance: analysis and
recommendations", by Chan, Golub, and LeVeque.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Data to use for variance update
last_mean : array-like, shape: (n_features,)
last_variance : array-like, shape: (n_features,)
last_sample_count : int
Returns
-------
updated_mean : array, shape (n_features,)
updated_variance : array, shape (n_features,)
If None, only mean is computed
updated_sample_count : int
References
----------
T. Chan, G. Golub, R. LeVeque. Algorithms for computing the sample
variance: analysis and recommendations, The American Statistician, Vol. 37, No. 3,
pp. 242-247
Also, see the sparse implementation of this in
`utils.sparsefuncs.incr_mean_variance_axis` and
`utils.sparsefuncs_fast.incr_mean_variance_axis0`
"""
# old = stats until now
# new = the current increment
# updated = the aggregated stats
last_sum = last_mean * last_sample_count
new_sum = X.sum(axis=0)
new_sample_count = X.shape[0]
updated_sample_count = last_sample_count + new_sample_count
updated_mean = (last_sum + new_sum) / updated_sample_count
if last_variance is None:
updated_variance = None
else:
new_unnormalized_variance = X.var(axis=0) * new_sample_count
if last_sample_count == 0: # Avoid division by 0
updated_unnormalized_variance = new_unnormalized_variance
else:
last_over_new_count = last_sample_count / new_sample_count
last_unnormalized_variance = last_variance * last_sample_count
updated_unnormalized_variance = (
last_unnormalized_variance +
new_unnormalized_variance +
last_over_new_count / updated_sample_count *
(last_sum / last_over_new_count - new_sum) ** 2)
updated_variance = updated_unnormalized_variance / updated_sample_count
return updated_mean, updated_variance, updated_sample_count
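# Illustrative two-batch sketch (assumed arrays X1 and X2, not a doctest):
# feeding the data in chunks reproduces the statistics of the
# concatenated data.
#
#     m, v, n = _incremental_mean_and_var(X1, 0., np.zeros(X1.shape[1]), 0)
#     m, v, n = _incremental_mean_and_var(X2, m, v, n)
#     # m, v, n now match np.mean / np.var over np.vstack([X1, X2])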
def _deterministic_vector_sign_flip(u):
"""Modify the sign of vectors for reproducibility
Flips the sign of elements of all the vectors (rows of u) such that
the absolute maximum element of each vector is positive.
Parameters
----------
u : ndarray
Array with vectors as its rows.
Returns
-------
u_flipped : ndarray with same shape as u
Array with the sign flipped vectors as its rows.
"""
max_abs_rows = np.argmax(np.abs(u), axis=1)
signs = np.sign(u[range(u.shape[0]), max_abs_rows])
u *= signs[:, np.newaxis]
return u
def stable_cumsum(arr, axis=None, rtol=1e-05, atol=1e-08):
"""Use high precision for cumsum and check that final value matches sum
Parameters
----------
arr : array-like
To be cumulatively summed as flat
axis : int, optional
Axis along which the cumulative sum is computed.
The default (None) is to compute the cumsum over the flattened array.
rtol : float
Relative tolerance, see ``np.allclose``
atol : float
Absolute tolerance, see ``np.allclose``
"""
# sum is as unstable as cumsum for numpy < 1.9
if np_version < (1, 9):
return np.cumsum(arr, axis=axis, dtype=np.float64)
out = np.cumsum(arr, axis=axis, dtype=np.float64)
expected = np.sum(arr, axis=axis, dtype=np.float64)
if not np.all(np.isclose(out.take(-1, axis=axis), expected, rtol=rtol,
atol=atol, equal_nan=True)):
warnings.warn('cumsum was found to be unstable: '
'its last element does not correspond to sum',
RuntimeWarning)
return out
| bsd-3-clause |
hesam-setareh/nest-simulator | examples/nest/Potjans_2014/spike_analysis.py | 15 | 6288 | # -*- coding: utf-8 -*-
#
# spike_analysis.py
#
# This file is part of NEST.
#
# Copyright (C) 2004 The NEST Initiative
#
# NEST is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# NEST is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with NEST. If not, see <http://www.gnu.org/licenses/>.
# Merges spike files, produces raster plots, calculates and plots firing rates
import numpy as np
import glob
import matplotlib.pyplot as plt
import os
datapath = '../data'
# get simulation time and numbers of neurons recorded from sim_params.sli
f = open(os.path.join(datapath, 'sim_params.sli'), 'r')
for line in f:
if 't_sim' in line:
T = float(line.split()[1])
if '/record_fraction_neurons_spikes' in line:
record_frac = line.split()[1]
f.close()
f = open(os.path.join(datapath, 'sim_params.sli'), 'r')
for line in f:
if record_frac == 'true':
if 'frac_rec_spikes' in line:
frac_rec = float(line.split()[1])
else:
if 'n_rec_spikes' in line:
n_rec = int(line.split()[1])
f.close()
T_start = 200. # starting point of analysis (to avoid transients)
# load GIDs
gidfile = open(os.path.join(datapath, 'population_GIDs.dat'), 'r')
gids = []
for l in gidfile:
a = l.split()
gids.append([int(a[0]), int(a[1])])
print('Global IDs:')
print(gids)
print()
# number of populations
num_pops = len(gids)
print('Number of populations:')
print(num_pops)
print()
# first GID in each population
raw_first_gids = [gids[i][0] for i in np.arange(len(gids))]
# population sizes
pop_sizes = [gids[i][1] - gids[i][0] + 1 for i in np.arange(len(gids))]
# numbers of neurons for which spikes were recorded
if record_frac == 'true':
rec_sizes = [int(pop_sizes[i] * frac_rec)
for i in range(len(pop_sizes))]
else:
rec_sizes = [n_rec] * len(pop_sizes)
# first GID of each population once device GIDs are dropped
first_gids = [int(1 + np.sum(pop_sizes[:i]))
for i in np.arange(len(pop_sizes))]
# last GID of each population once device GIDs are dropped
last_gids = [int(np.sum(pop_sizes[:i + 1]))
for i in np.arange(len(pop_sizes))]
# convert lists to a nicer format, i.e. [[2/3e, 2/3i], []....]
Pop_sizes = [pop_sizes[i:i + 2]
for i in range(0, len(pop_sizes), 2)]
print('Population sizes:')
print(Pop_sizes)
print()
Raw_first_gids = [raw_first_gids[i:i + 2] for i in
range(0, len(raw_first_gids), 2)]
First_gids = [first_gids[i:i + 2] for i in range(0, len(first_gids), 2)]
Last_gids = [last_gids[i:i + 2] for i in range(0, len(last_gids), 2)]
# total number of neurons in the simulation
num_neurons = last_gids[len(last_gids) - 1]
print('Total number of neurons:')
print(num_neurons)
print()
# load spikes from gdf files, correct GIDs and merge them in population files,
# and store spike trains
# will contain neuron id resolved spike trains
neuron_spikes = [[] for i in np.arange(num_neurons + 1)]
# container for population-resolved spike data
spike_data = [[[], []], [[], []], [[], []], [[], []], [[], []], [[], []],
[[], []], [[], []]]
counter = 0
for layer in ['0', '1', '2', '3']:
for population in ['0', '1']:
output = os.path.join(datapath,
'population_spikes-{}-{}.gdf'.format(layer,
population))
file_pattern = os.path.join(datapath,
'spikes_{}_{}*'.format(layer, population))
files = glob.glob(file_pattern)
print('Merge ' + str(
len(files)) + ' spike files from L' + layer + 'P' + population)
if files:
merged_file = open(output, 'w')
for f in files:
data = open(f, 'r')
for l in data:
a = l.split()
a[0] = int(a[0])
a[1] = float(a[1])
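# shift the raw GID so that population GIDs become contiguous
# once the recording-device GIDs are dropped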
raw_first_gid = Raw_first_gids[int(layer)][int(population)]
first_gid = First_gids[int(layer)][int(population)]
a[0] = a[0] - raw_first_gid + first_gid
if (a[1] > T_start): # discard data in the start-up phase
spike_data[counter][0].append(num_neurons - a[0])
spike_data[counter][1].append(a[1] - T_start)
neuron_spikes[a[0]].append(a[1] - T_start)
converted_line = str(a[0]) + '\t' + str(a[1]) + '\n'
merged_file.write(converted_line)
data.close()
merged_file.close()
counter += 1
clrs = ['0', '0.5', '0', '0.5', '0', '0.5', '0', '0.5']
plt.ion()
# raster plot
plt.figure(1)
counter = 1
for j in np.arange(num_pops):
for i in np.arange(first_gids[j], first_gids[j] + rec_sizes[j]):
plt.plot(neuron_spikes[i],
np.ones_like(neuron_spikes[i]) + sum(rec_sizes) - counter,
'k o', ms=1, mfc=clrs[j], mec=clrs[j])
counter += 1
plt.xlim(0, T - T_start)
plt.ylim(0, sum(rec_sizes))
plt.xlabel(r'time (ms)')
plt.ylabel(r'neuron id')
plt.savefig(os.path.join(datapath, 'rasterplot.png'))
# firing rates
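# mean rate per population: spike count / (recorded neurons * duration);
# the factor 1e3 converts from spikes/ms to spikes/s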
rates = []
temp = 0
for i in np.arange(num_pops):
for j in np.arange(first_gids[i], last_gids[i]):
temp += len(neuron_spikes[j])
rates.append(temp / (rec_sizes[i] * (T - T_start)) * 1e3)
temp = 0
print()
print('Firing rates:')
print(rates)
plt.figure(2)
ticks = np.arange(num_pops)
plt.bar(ticks, rates, width=0.9, color='k')
xticklabels = ['L2/3e', 'L2/3i', 'L4e', 'L4i', 'L5e', 'L5i', 'L6e', 'L6i']
plt.setp(plt.gca(), xticks=ticks + 0.5, xticklabels=xticklabels)
plt.xlabel(r'subpopulation')
plt.ylabel(r'firing rate (spikes/s)')
plt.savefig(os.path.join(datapath, 'firing_rates.png'))
plt.show()
| gpl-2.0 |
theoryno3/scikit-learn | benchmarks/bench_plot_neighbors.py | 287 | 6433 | """
Plot the scaling of the nearest neighbors algorithms with k, D, and N
"""
from time import time
import numpy as np
import pylab as pl
from matplotlib import ticker
from sklearn import neighbors, datasets
def get_data(N, D, dataset='dense'):
if dataset == 'dense':
np.random.seed(0)
return np.random.random((N, D))
elif dataset == 'digits':
X = datasets.load_digits().data
i = np.argsort(X[0])[::-1]
X = X[:, i]
return X[:N, :D]
else:
raise ValueError("invalid dataset: %s" % dataset)
def barplot_neighbors(Nrange=2 ** np.arange(1, 11),
Drange=2 ** np.arange(7),
krange=2 ** np.arange(10),
N=1000,
D=64,
k=5,
leaf_size=30,
dataset='digits'):
algorithms = ('kd_tree', 'brute', 'ball_tree')
fiducial_values = {'N': N,
'D': D,
'k': k}
#------------------------------------------------------------
# varying N
N_results_build = dict([(alg, np.zeros(len(Nrange)))
for alg in algorithms])
N_results_query = dict([(alg, np.zeros(len(Nrange)))
for alg in algorithms])
for i, NN in enumerate(Nrange):
print("N = %i (%i out of %i)" % (NN, i + 1, len(Nrange)))
X = get_data(NN, D, dataset)
for algorithm in algorithms:
nbrs = neighbors.NearestNeighbors(n_neighbors=min(NN, k),
algorithm=algorithm,
leaf_size=leaf_size)
t0 = time()
nbrs.fit(X)
t1 = time()
nbrs.kneighbors(X)
t2 = time()
N_results_build[algorithm][i] = (t1 - t0)
N_results_query[algorithm][i] = (t2 - t1)
#------------------------------------------------------------
# varying D
D_results_build = dict([(alg, np.zeros(len(Drange)))
for alg in algorithms])
D_results_query = dict([(alg, np.zeros(len(Drange)))
for alg in algorithms])
for i, DD in enumerate(Drange):
print("D = %i (%i out of %i)" % (DD, i + 1, len(Drange)))
X = get_data(N, DD, dataset)
for algorithm in algorithms:
nbrs = neighbors.NearestNeighbors(n_neighbors=k,
algorithm=algorithm,
leaf_size=leaf_size)
t0 = time()
nbrs.fit(X)
t1 = time()
nbrs.kneighbors(X)
t2 = time()
D_results_build[algorithm][i] = (t1 - t0)
D_results_query[algorithm][i] = (t2 - t1)
#------------------------------------------------------------
# varying k
k_results_build = dict([(alg, np.zeros(len(krange)))
for alg in algorithms])
k_results_query = dict([(alg, np.zeros(len(krange)))
for alg in algorithms])
X = get_data(N, D, dataset)  # use the fiducial D, not the leftover loop variable
for i, kk in enumerate(krange):
print("k = %i (%i out of %i)" % (kk, i + 1, len(krange)))
for algorithm in algorithms:
nbrs = neighbors.NearestNeighbors(n_neighbors=kk,
algorithm=algorithm,
leaf_size=leaf_size)
t0 = time()
nbrs.fit(X)
t1 = time()
nbrs.kneighbors(X)
t2 = time()
k_results_build[algorithm][i] = (t1 - t0)
k_results_query[algorithm][i] = (t2 - t1)
pl.figure(figsize=(8, 11))
for (sbplt, vals, quantity,
build_time, query_time) in [(311, Nrange, 'N',
N_results_build,
N_results_query),
(312, Drange, 'D',
D_results_build,
D_results_query),
(313, krange, 'k',
k_results_build,
k_results_query)]:
ax = pl.subplot(sbplt, yscale='log')
pl.grid(True)
tick_vals = []
tick_labels = []
bottom = 10 ** np.min([min(np.floor(np.log10(build_time[alg])))
for alg in algorithms])
for i, alg in enumerate(algorithms):
xvals = 0.1 + i * (1 + len(vals)) + np.arange(len(vals))
width = 0.8
c_bar = pl.bar(xvals, build_time[alg] - bottom,
width, bottom, color='r')
q_bar = pl.bar(xvals, query_time[alg],
width, build_time[alg], color='b')
tick_vals += list(xvals + 0.5 * width)
tick_labels += ['%i' % val for val in vals]
pl.text((i + 0.02) / len(algorithms), 0.98, alg,
transform=ax.transAxes,
ha='left',
va='top',
bbox=dict(facecolor='w', edgecolor='w', alpha=0.5))
pl.ylabel('Time (s)')
ax.xaxis.set_major_locator(ticker.FixedLocator(tick_vals))
ax.xaxis.set_major_formatter(ticker.FixedFormatter(tick_labels))
for label in ax.get_xticklabels():
label.set_rotation(-90)
label.set_fontsize(10)
title_string = 'Varying %s' % quantity
descr_string = ''
for s in 'NDk':
if s == quantity:
pass
else:
descr_string += '%s = %i, ' % (s, fiducial_values[s])
descr_string = descr_string[:-2]
pl.text(1.01, 0.5, title_string,
transform=ax.transAxes, rotation=-90,
ha='left', va='center', fontsize=20)
pl.text(0.99, 0.5, descr_string,
transform=ax.transAxes, rotation=-90,
ha='right', va='center')
pl.gcf().suptitle("%s data set" % dataset.capitalize(), fontsize=16)
pl.figlegend((c_bar, q_bar), ('construction', 'N-point query'),
'upper right')
if __name__ == '__main__':
barplot_neighbors(dataset='digits')
barplot_neighbors(dataset='dense')
pl.show()
| bsd-3-clause |
ndingwall/scikit-learn | benchmarks/bench_mnist.py | 23 | 6874 | """
=======================
MNIST dataset benchmark
=======================
Benchmark on the MNIST dataset. The dataset comprises 70,000 samples
and 784 features. Here, we consider the task of predicting
10 classes - digits from 0 to 9 from their raw images. By contrast to the
covertype dataset, the feature space is homogeneous.
Example of output :
[..]
Classification performance:
===========================
Classifier train-time test-time error-rate
------------------------------------------------------------
MLP_adam 53.46s 0.11s 0.0224
Nystroem-SVM 112.97s 0.92s 0.0228
MultilayerPerceptron 24.33s 0.14s 0.0287
ExtraTrees 42.99s 0.57s 0.0294
RandomForest 42.70s 0.49s 0.0318
SampledRBF-SVM 135.81s 0.56s 0.0486
LogisticRegression-SAG 16.67s 0.06s 0.0824
CART 20.69s 0.02s 0.1219
dummy 0.00s 0.01s 0.8973
"""
# Author: Issam H. Laradji
# Arnaud Joly <arnaud.v.joly@gmail.com>
# License: BSD 3 clause
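# Example invocation (assumed to be run from the benchmarks directory;
# the flags match the argparse options defined below):
#
#     python bench_mnist.py --classifiers ExtraTrees Nystroem-SVM --n-jobs 4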
import os
from time import time
import argparse
import numpy as np
from joblib import Memory
from sklearn.datasets import fetch_openml
from sklearn.datasets import get_data_home
from sklearn.ensemble import ExtraTreesClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.dummy import DummyClassifier
from sklearn.kernel_approximation import Nystroem
from sklearn.kernel_approximation import RBFSampler
from sklearn.metrics import zero_one_loss
from sklearn.pipeline import make_pipeline
from sklearn.svm import LinearSVC
from sklearn.tree import DecisionTreeClassifier
from sklearn.utils import check_array
from sklearn.linear_model import LogisticRegression
from sklearn.neural_network import MLPClassifier
# Memoize the data extraction and memory map the resulting
# train / test splits in readonly mode
memory = Memory(os.path.join(get_data_home(), 'mnist_benchmark_data'),
mmap_mode='r')
@memory.cache
def load_data(dtype=np.float32, order='F'):
"""Load the data, then cache and memmap the train/test split"""
######################################################################
# Load dataset
print("Loading dataset...")
data = fetch_openml('mnist_784')
X = check_array(data['data'], dtype=dtype, order=order)
y = data["target"]
# Normalize features
X = X / 255
# Create train-test split (as [Joachims, 2006])
print("Creating train-test split...")
n_train = 60000
X_train = X[:n_train]
y_train = y[:n_train]
X_test = X[n_train:]
y_test = y[n_train:]
return X_train, X_test, y_train, y_test
ESTIMATORS = {
"dummy": DummyClassifier(),
'CART': DecisionTreeClassifier(),
'ExtraTrees': ExtraTreesClassifier(),
'RandomForest': RandomForestClassifier(),
'Nystroem-SVM': make_pipeline(
Nystroem(gamma=0.015, n_components=1000), LinearSVC(C=100)),
'SampledRBF-SVM': make_pipeline(
RBFSampler(gamma=0.015, n_components=1000), LinearSVC(C=100)),
'LogisticRegression-SAG': LogisticRegression(solver='sag', tol=1e-1,
C=1e4),
'LogisticRegression-SAGA': LogisticRegression(solver='saga', tol=1e-1,
C=1e4),
'MultilayerPerceptron': MLPClassifier(
hidden_layer_sizes=(100, 100), max_iter=400, alpha=1e-4,
solver='sgd', learning_rate_init=0.2, momentum=0.9, verbose=1,
tol=1e-4, random_state=1),
'MLP-adam': MLPClassifier(
hidden_layer_sizes=(100, 100), max_iter=400, alpha=1e-4,
solver='adam', learning_rate_init=0.001, verbose=1,
tol=1e-4, random_state=1)
}
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('--classifiers', nargs="+",
choices=ESTIMATORS, type=str,
default=['ExtraTrees', 'Nystroem-SVM'],
help="list of classifiers to benchmark.")
parser.add_argument('--n-jobs', nargs="?", default=1, type=int,
help="Number of concurrently running workers for "
"models that support parallelism.")
parser.add_argument('--order', nargs="?", default="C", type=str,
choices=["F", "C"],
help="Allow to choose between fortran and C ordered "
"data")
parser.add_argument('--random-seed', nargs="?", default=0, type=int,
help="Common seed used by random number generator.")
args = vars(parser.parse_args())
print(__doc__)
X_train, X_test, y_train, y_test = load_data(order=args["order"])
print("")
print("Dataset statistics:")
print("===================")
print("%s %d" % ("number of features:".ljust(25), X_train.shape[1]))
print("%s %d" % ("number of classes:".ljust(25), np.unique(y_train).size))
print("%s %s" % ("data type:".ljust(25), X_train.dtype))
print("%s %d (size=%dMB)" % ("number of train samples:".ljust(25),
X_train.shape[0], int(X_train.nbytes / 1e6)))
print("%s %d (size=%dMB)" % ("number of test samples:".ljust(25),
X_test.shape[0], int(X_test.nbytes / 1e6)))
print()
print("Training Classifiers")
print("====================")
error, train_time, test_time = {}, {}, {}
for name in sorted(args["classifiers"]):
print("Training %s ... " % name, end="")
estimator = ESTIMATORS[name]
estimator_params = estimator.get_params()
estimator.set_params(**{p: args["random_seed"]
for p in estimator_params
if p.endswith("random_state")})
if "n_jobs" in estimator_params:
estimator.set_params(n_jobs=args["n_jobs"])
time_start = time()
estimator.fit(X_train, y_train)
train_time[name] = time() - time_start
time_start = time()
y_pred = estimator.predict(X_test)
test_time[name] = time() - time_start
error[name] = zero_one_loss(y_test, y_pred)
print("done")
print()
print("Classification performance:")
print("===========================")
print("{0: <24} {1: >10} {2: >11} {3: >12}"
"".format("Classifier ", "train-time", "test-time", "error-rate"))
print("-" * 60)
for name in sorted(args["classifiers"], key=error.get):
print("{0: <23} {1: >10.2f}s {2: >10.2f}s {3: >12.4f}"
"".format(name, train_time[name], test_time[name], error[name]))
print()
| bsd-3-clause |
maciekcc/tensorflow | tensorflow/contrib/learn/__init__.py | 25 | 2458 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# TODO(ptucker,ipolosukhin): Improve descriptions.
"""High level API for learning.
See the @{$python/contrib.learn} guide.
@@BaseEstimator
@@Estimator
@@Trainable
@@Evaluable
@@KMeansClustering
@@ModeKeys
@@ModelFnOps
@@MetricSpec
@@PredictionKey
@@DNNClassifier
@@DNNEstimator
@@DNNRegressor
@@DNNLinearCombinedRegressor
@@DNNLinearCombinedEstimator
@@DNNLinearCombinedClassifier
@@DynamicRnnEstimator
@@LinearClassifier
@@LinearEstimator
@@LinearRegressor
@@LogisticRegressor
@@StateSavingRnnEstimator
@@SVM
@@SKCompat
@@Head
@@multi_class_head
@@multi_label_head
@@binary_svm_head
@@regression_head
@@poisson_regression_head
@@multi_head
@@no_op_train_fn
@@Experiment
@@ExportStrategy
@@TaskType
@@NanLossDuringTrainingError
@@RunConfig
@@evaluate
@@infer
@@run_feeds
@@run_n
@@train
@@extract_dask_data
@@extract_dask_labels
@@extract_pandas_data
@@extract_pandas_labels
@@extract_pandas_matrix
@@infer_real_valued_columns_from_input
@@infer_real_valued_columns_from_input_fn
@@read_batch_examples
@@read_batch_features
@@read_batch_record_features
@@InputFnOps
@@ProblemType
@@build_parsing_serving_input_fn
@@make_export_strategy
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# pylint: disable=wildcard-import
from tensorflow.contrib.learn.python.learn import *
# pylint: enable=wildcard-import
from tensorflow.contrib.learn.python.learn import learn_runner_lib as learn_runner
from tensorflow.python.util.all_util import remove_undocumented
_allowed_symbols = ['datasets', 'head', 'io', 'learn_runner', 'models',
'monitors', 'NotFittedError', 'ops', 'preprocessing',
'utils', 'graph_actions']
remove_undocumented(__name__, _allowed_symbols)
| apache-2.0 |
zhuolinho/linphone | submodules/mswebrtc/webrtc/webrtc/modules/remote_bitrate_estimator/test/plot_dynamics.py | 11 | 4681 | #!/usr/bin/env python
# Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
#
# Use of this source code is governed by a BSD-style license
# that can be found in the LICENSE file in the root of the source
# tree. An additional intellectual property rights grant can be found
# in the file PATENTS. All contributing project authors may
# be found in the AUTHORS file in the root of the source tree.
# This script is used to plot simulation dynamics.
# It can plot each flow separately. Other plot boxes can be added;
# currently there is one for Throughput, one for Latency and one for
# Packet Loss.
import matplotlib
import matplotlib.pyplot as plt
import numpy
import re
import sys
# Change this to True to save the figure to a file. Look below for details.
save_figure = False
class Variable(object):
def __init__(self, variable):
self._ID = variable[0]
self._xlabel = variable[1]
self._ylabel = variable[2]
self._subplot = variable[3]
self._y_max = variable[4]
self.samples = dict()
def getID(self):
return self._ID
def getXLabel(self):
return self._xlabel
def getYLabel(self):
return self._ylabel
def getSubplot(self):
return self._subplot
def getYMax(self):
return self._y_max
def getNumberOfFlows(self):
return len(self.samples)
def addSample(self, line):
groups = re.search(r'_(((\d)+((,(\d)+)*))_(\D+))#\d@(\S+)', line)
    # Each variable will be plotted in a separate box.
var_name = groups.group(1)
alg_name = groups.group(8)
alg_name = alg_name.replace('_', ' ')
if alg_name not in self.samples.keys():
self.samples[alg_name] = {}
if var_name not in self.samples[alg_name].keys():
self.samples[alg_name][var_name] = []
sample = re.search(r'(\d+\.\d+)\t([-]?\d+\.\d+)', line)
s = (sample.group(1), sample.group(2))
self.samples[alg_name][var_name].append(s)
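# Illustrative input line for Variable.addSample (inferred from the regexes
# above, not verified against real test output; <TAB> marks a tab character):
#
#     PLOT<TAB>1<TAB>Throughput_kbps_0_flow#0@GCC<TAB>12.50<TAB>350.00
#
# yields var_name '0_flow', alg_name 'GCC' and the sample (12.50, 350.00).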
def plotVar(v, ax, show_legend, show_x_label):
if show_x_label:
ax.set_xlabel(v.getXLabel(), fontsize='large')
ax.set_ylabel(v.getYLabel(), fontsize='large')
for alg in v.samples.keys():
for series in v.samples[alg].keys():
x = [sample[0] for sample in v.samples[alg][series]]
y = [sample[1] for sample in v.samples[alg][series]]
x = numpy.array(x)
y = numpy.array(y)
line = plt.plot(x, y, label=alg, linewidth=4.0)
colormap = {'Available0':'#AAAAAA',
'Available1':'#AAAAAA',
'GCC0':'#80D000',
'GCC1':'#008000',
'GCC2':'#00F000',
'GCC3':'#00B000',
'GCC4':'#70B020',
'NADA0':'#0000AA',
'NADA1':'#A0A0FF',
'NADA2':'#0000FF',
'NADA3':'#C0A0FF',
'NADA4':'#9060B0',}
flow_id = re.search(r'(\d+(,\d+)*)', series) # One or multiple ids.
key = alg + flow_id.group(1)
if key in colormap:
plt.setp(line, color=colormap[key])
elif alg == 'TCP':
plt.setp(line, color='#AAAAAA')
else:
plt.setp(line, color='#654321')
if alg.startswith('Available'):
plt.setp(line, linestyle='--')
plt.grid(True)
# x1, x2, y1, y2
_, x2, _, y2 = plt.axis()
if v.getYMax() >= 0:
y2 = v.getYMax()
plt.axis((0, x2, 0, y2))
if show_legend:
plt.legend(loc='upper center', bbox_to_anchor=(0.5, 1.40),
shadow=True, fontsize='large', ncol=len(v.samples))
def main():
variables = [
('Throughput_kbps', "Time (s)", "Throughput (kbps)", 1, 4000),
('Delay_ms', "Time (s)", "One-way Delay (ms)", 2, 500),
('Packet_Loss', "Time (s)", "Packet Loss Ratio", 3, 1.0),
# ('Sending_Estimate_kbps', "Time (s)", "Sending Estimate (kbps)",
# 4, 4000),
]
var = []
# Create objects.
for variable in variables:
var.append(Variable(variable))
# Add samples to the objects.
for line in sys.stdin:
if line.startswith("[ RUN ]"):
test_name = re.search(r'\.(\w+)', line).group(1)
if line.startswith("PLOT"):
for v in var:
if v.getID() in line:
v.addSample(line)
matplotlib.rcParams.update({'font.size': 48/len(variables)})
# Plot variables.
fig = plt.figure()
  # Offset and threshold on the same plot.
n = var[-1].getSubplot()
i = 0
for v in var:
ax = fig.add_subplot(n, 1, v.getSubplot())
plotVar(v, ax, i == 0, i == n - 1)
i += 1
if save_figure:
fig.savefig(test_name + ".png")
plt.show()
if __name__ == '__main__':
main()
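# Illustrative usage (an assumption; the producing binary name is
# hypothetical): pipe the textual output of the bandwidth-estimator tests
# into this script, e.g.
#
#     ./out/Release/bwe_simulations_tests | python plot_dynamics.py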
| gpl-2.0 |
kushalbhola/MyStuff | Practice/PythonApplication/env/Lib/site-packages/numpy/linalg/linalg.py | 1 | 86449 | """Lite version of scipy.linalg.
Notes
-----
This module is a lite version of the linalg.py module in SciPy which
contains a high-level Python interface to the LAPACK library. The lite
version only accesses the following LAPACK functions: dgesv, zgesv,
dgeev, zgeev, dgesdd, zgesdd, dgelsd, zgelsd, dsyevd, zheevd, dgetrf,
zgetrf, dpotrf, zpotrf, dgeqrf, zgeqrf, zungqr, dorgqr.
"""
from __future__ import division, absolute_import, print_function
__all__ = ['matrix_power', 'solve', 'tensorsolve', 'tensorinv', 'inv',
'cholesky', 'eigvals', 'eigvalsh', 'pinv', 'slogdet', 'det',
'svd', 'eig', 'eigh', 'lstsq', 'norm', 'qr', 'cond', 'matrix_rank',
'LinAlgError', 'multi_dot']
import functools
import operator
import warnings
from numpy.core import (
array, asarray, zeros, empty, empty_like, intc, single, double,
csingle, cdouble, inexact, complexfloating, newaxis, all, Inf, dot,
add, multiply, sqrt, fastCopyAndTranspose, sum, isfinite,
finfo, errstate, geterrobj, moveaxis, amin, amax, product, abs,
atleast_2d, intp, asanyarray, object_, matmul,
swapaxes, divide, count_nonzero, isnan, sign
)
from numpy.core.multiarray import normalize_axis_index
from numpy.core.overrides import set_module
from numpy.core import overrides
from numpy.lib.twodim_base import triu, eye
from numpy.linalg import lapack_lite, _umath_linalg
array_function_dispatch = functools.partial(
overrides.array_function_dispatch, module='numpy.linalg')
# For Python2/3 compatibility
_N = b'N'
_V = b'V'
_A = b'A'
_S = b'S'
_L = b'L'
fortran_int = intc
@set_module('numpy.linalg')
class LinAlgError(Exception):
"""
Generic Python-exception-derived object raised by linalg functions.
General purpose exception class, derived from Python's exception.Exception
class, programmatically raised in linalg functions when a Linear
Algebra-related condition would prevent further correct execution of the
function.
Parameters
----------
None
Examples
--------
>>> from numpy import linalg as LA
>>> LA.inv(np.zeros((2,2)))
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "...linalg.py", line 350,
in inv return wrap(solve(a, identity(a.shape[0], dtype=a.dtype)))
File "...linalg.py", line 249,
in solve
raise LinAlgError('Singular matrix')
numpy.linalg.LinAlgError: Singular matrix
"""
def _determine_error_states():
errobj = geterrobj()
bufsize = errobj[0]
with errstate(invalid='call', over='ignore',
divide='ignore', under='ignore'):
invalid_call_errmask = geterrobj()[1]
return [bufsize, invalid_call_errmask, None]
# Dealing with errors in _umath_linalg
_linalg_error_extobj = _determine_error_states()
del _determine_error_states
def _raise_linalgerror_singular(err, flag):
raise LinAlgError("Singular matrix")
def _raise_linalgerror_nonposdef(err, flag):
raise LinAlgError("Matrix is not positive definite")
def _raise_linalgerror_eigenvalues_nonconvergence(err, flag):
raise LinAlgError("Eigenvalues did not converge")
def _raise_linalgerror_svd_nonconvergence(err, flag):
raise LinAlgError("SVD did not converge")
def _raise_linalgerror_lstsq(err, flag):
raise LinAlgError("SVD did not converge in Linear Least Squares")
def get_linalg_error_extobj(callback):
extobj = list(_linalg_error_extobj) # make a copy
extobj[2] = callback
return extobj
def _makearray(a):
new = asarray(a)
wrap = getattr(a, "__array_prepare__", new.__array_wrap__)
return new, wrap
def isComplexType(t):
return issubclass(t, complexfloating)
_real_types_map = {single : single,
double : double,
csingle : single,
cdouble : double}
_complex_types_map = {single : csingle,
double : cdouble,
csingle : csingle,
cdouble : cdouble}
def _realType(t, default=double):
return _real_types_map.get(t, default)
def _complexType(t, default=cdouble):
return _complex_types_map.get(t, default)
def _linalgRealType(t):
"""Cast the type t to either double or cdouble."""
return double
def _commonType(*arrays):
# in lite version, use higher precision (always double or cdouble)
result_type = single
is_complex = False
for a in arrays:
if issubclass(a.dtype.type, inexact):
if isComplexType(a.dtype.type):
is_complex = True
rt = _realType(a.dtype.type, default=None)
if rt is None:
# unsupported inexact scalar
raise TypeError("array type %s is unsupported in linalg" %
(a.dtype.name,))
else:
rt = double
if rt is double:
result_type = double
if is_complex:
t = cdouble
result_type = _complex_types_map[result_type]
else:
t = double
return t, result_type
# _fastCopyAndTranspose assumes the input is 2D (as all the calls in here are).
_fastCT = fastCopyAndTranspose
def _to_native_byte_order(*arrays):
ret = []
for arr in arrays:
if arr.dtype.byteorder not in ('=', '|'):
ret.append(asarray(arr, dtype=arr.dtype.newbyteorder('=')))
else:
ret.append(arr)
if len(ret) == 1:
return ret[0]
else:
return ret
def _fastCopyAndTranspose(type, *arrays):
cast_arrays = ()
for a in arrays:
if a.dtype.type is type:
cast_arrays = cast_arrays + (_fastCT(a),)
else:
cast_arrays = cast_arrays + (_fastCT(a.astype(type)),)
if len(cast_arrays) == 1:
return cast_arrays[0]
else:
return cast_arrays
def _assertRank2(*arrays):
for a in arrays:
if a.ndim != 2:
raise LinAlgError('%d-dimensional array given. Array must be '
'two-dimensional' % a.ndim)
def _assertRankAtLeast2(*arrays):
for a in arrays:
if a.ndim < 2:
raise LinAlgError('%d-dimensional array given. Array must be '
'at least two-dimensional' % a.ndim)
def _assertNdSquareness(*arrays):
for a in arrays:
m, n = a.shape[-2:]
if m != n:
raise LinAlgError('Last 2 dimensions of the array must be square')
def _assertFinite(*arrays):
for a in arrays:
if not (isfinite(a).all()):
raise LinAlgError("Array must not contain infs or NaNs")
def _isEmpty2d(arr):
# check size first for efficiency
return arr.size == 0 and product(arr.shape[-2:]) == 0
def _assertNoEmpty2d(*arrays):
for a in arrays:
if _isEmpty2d(a):
raise LinAlgError("Arrays cannot be empty")
def transpose(a):
"""
Transpose each matrix in a stack of matrices.
Unlike np.transpose, this only swaps the last two axes, rather than all of
them
Parameters
----------
a : (...,M,N) array_like
Returns
-------
aT : (...,N,M) ndarray
"""
return swapaxes(a, -1, -2)
# Linear equations
def _tensorsolve_dispatcher(a, b, axes=None):
return (a, b)
@array_function_dispatch(_tensorsolve_dispatcher)
def tensorsolve(a, b, axes=None):
"""
Solve the tensor equation ``a x = b`` for x.
It is assumed that all indices of `x` are summed over in the product,
together with the rightmost indices of `a`, as is done in, for example,
``tensordot(a, x, axes=b.ndim)``.
Parameters
----------
a : array_like
Coefficient tensor, of shape ``b.shape + Q``. `Q`, a tuple, equals
the shape of that sub-tensor of `a` consisting of the appropriate
number of its rightmost indices, and must be such that
``prod(Q) == prod(b.shape)`` (in which sense `a` is said to be
'square').
b : array_like
Right-hand tensor, which can be of any shape.
axes : tuple of ints, optional
Axes in `a` to reorder to the right, before inversion.
If None (default), no reordering is done.
Returns
-------
x : ndarray, shape Q
Raises
------
LinAlgError
If `a` is singular or not 'square' (in the above sense).
See Also
--------
numpy.tensordot, tensorinv, numpy.einsum
Examples
--------
>>> a = np.eye(2*3*4)
>>> a.shape = (2*3, 4, 2, 3, 4)
>>> b = np.random.randn(2*3, 4)
>>> x = np.linalg.tensorsolve(a, b)
>>> x.shape
(2, 3, 4)
>>> np.allclose(np.tensordot(a, x, axes=3), b)
True
"""
a, wrap = _makearray(a)
b = asarray(b)
an = a.ndim
if axes is not None:
allaxes = list(range(0, an))
for k in axes:
allaxes.remove(k)
allaxes.insert(an, k)
a = a.transpose(allaxes)
oldshape = a.shape[-(an-b.ndim):]
prod = 1
for k in oldshape:
prod *= k
a = a.reshape(-1, prod)
b = b.ravel()
res = wrap(solve(a, b))
res.shape = oldshape
return res
def _solve_dispatcher(a, b):
return (a, b)
@array_function_dispatch(_solve_dispatcher)
def solve(a, b):
"""
Solve a linear matrix equation, or system of linear scalar equations.
Computes the "exact" solution, `x`, of the well-determined, i.e., full
rank, linear matrix equation `ax = b`.
Parameters
----------
a : (..., M, M) array_like
Coefficient matrix.
b : {(..., M,), (..., M, K)}, array_like
Ordinate or "dependent variable" values.
Returns
-------
x : {(..., M,), (..., M, K)} ndarray
Solution to the system a x = b. Returned shape is identical to `b`.
Raises
------
LinAlgError
If `a` is singular or not square.
Notes
-----
.. versionadded:: 1.8.0
Broadcasting rules apply, see the `numpy.linalg` documentation for
details.
The solutions are computed using LAPACK routine ``_gesv``.
`a` must be square and of full-rank, i.e., all rows (or, equivalently,
columns) must be linearly independent; if either is not true, use
`lstsq` for the least-squares best "solution" of the
system/equation.
References
----------
.. [1] G. Strang, *Linear Algebra and Its Applications*, 2nd Ed., Orlando,
FL, Academic Press, Inc., 1980, pg. 22.
Examples
--------
Solve the system of equations ``3 * x0 + x1 = 9`` and ``x0 + 2 * x1 = 8``:
>>> a = np.array([[3,1], [1,2]])
>>> b = np.array([9,8])
>>> x = np.linalg.solve(a, b)
>>> x
array([2., 3.])
Check that the solution is correct:
>>> np.allclose(np.dot(a, x), b)
True
"""
a, _ = _makearray(a)
_assertRankAtLeast2(a)
_assertNdSquareness(a)
b, wrap = _makearray(b)
t, result_t = _commonType(a, b)
# We use the b = (..., M,) logic, only if the number of extra dimensions
# match exactly
if b.ndim == a.ndim - 1:
gufunc = _umath_linalg.solve1
else:
gufunc = _umath_linalg.solve
signature = 'DD->D' if isComplexType(t) else 'dd->d'
extobj = get_linalg_error_extobj(_raise_linalgerror_singular)
r = gufunc(a, b, signature=signature, extobj=extobj)
return wrap(r.astype(result_t, copy=False))
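# Illustrative shapes for the dispatch above (a sketch, not library code):
# with a.shape == (2, 3, 3) and b.shape == (2, 3), solve1 treats b as a
# stack of vectors; with b.shape == (2, 3, 4), solve treats b as a stack
# of matrices. The returned x matches b's shape in either case.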
def _tensorinv_dispatcher(a, ind=None):
return (a,)
@array_function_dispatch(_tensorinv_dispatcher)
def tensorinv(a, ind=2):
"""
Compute the 'inverse' of an N-dimensional array.
The result is an inverse for `a` relative to the tensordot operation
``tensordot(a, b, ind)``, i. e., up to floating-point accuracy,
``tensordot(tensorinv(a), a, ind)`` is the "identity" tensor for the
tensordot operation.
Parameters
----------
a : array_like
Tensor to 'invert'. Its shape must be 'square', i. e.,
``prod(a.shape[:ind]) == prod(a.shape[ind:])``.
ind : int, optional
Number of first indices that are involved in the inverse sum.
Must be a positive integer, default is 2.
Returns
-------
b : ndarray
`a`'s tensordot inverse, shape ``a.shape[ind:] + a.shape[:ind]``.
Raises
------
LinAlgError
If `a` is singular or not 'square' (in the above sense).
See Also
--------
numpy.tensordot, tensorsolve
Examples
--------
>>> a = np.eye(4*6)
>>> a.shape = (4, 6, 8, 3)
>>> ainv = np.linalg.tensorinv(a, ind=2)
>>> ainv.shape
(8, 3, 4, 6)
>>> b = np.random.randn(4, 6)
>>> np.allclose(np.tensordot(ainv, b), np.linalg.tensorsolve(a, b))
True
>>> a = np.eye(4*6)
>>> a.shape = (24, 8, 3)
>>> ainv = np.linalg.tensorinv(a, ind=1)
>>> ainv.shape
(8, 3, 24)
>>> b = np.random.randn(24)
>>> np.allclose(np.tensordot(ainv, b, 1), np.linalg.tensorsolve(a, b))
True
"""
a = asarray(a)
oldshape = a.shape
prod = 1
if ind > 0:
invshape = oldshape[ind:] + oldshape[:ind]
for k in oldshape[ind:]:
prod *= k
else:
raise ValueError("Invalid ind argument.")
a = a.reshape(prod, -1)
ia = inv(a)
return ia.reshape(*invshape)
# Matrix inversion
def _unary_dispatcher(a):
return (a,)
@array_function_dispatch(_unary_dispatcher)
def inv(a):
"""
Compute the (multiplicative) inverse of a matrix.
Given a square matrix `a`, return the matrix `ainv` satisfying
``dot(a, ainv) = dot(ainv, a) = eye(a.shape[0])``.
Parameters
----------
a : (..., M, M) array_like
Matrix to be inverted.
Returns
-------
ainv : (..., M, M) ndarray or matrix
(Multiplicative) inverse of the matrix `a`.
Raises
------
LinAlgError
If `a` is not square or inversion fails.
Notes
-----
.. versionadded:: 1.8.0
Broadcasting rules apply, see the `numpy.linalg` documentation for
details.
Examples
--------
>>> from numpy.linalg import inv
>>> a = np.array([[1., 2.], [3., 4.]])
>>> ainv = inv(a)
>>> np.allclose(np.dot(a, ainv), np.eye(2))
True
>>> np.allclose(np.dot(ainv, a), np.eye(2))
True
If a is a matrix object, then the return value is a matrix as well:
>>> ainv = inv(np.matrix(a))
>>> ainv
matrix([[-2. , 1. ],
[ 1.5, -0.5]])
Inverses of several matrices can be computed at once:
>>> a = np.array([[[1., 2.], [3., 4.]], [[1, 3], [3, 5]]])
>>> inv(a)
array([[[-2. , 1. ],
[ 1.5 , -0.5 ]],
[[-1.25, 0.75],
[ 0.75, -0.25]]])
"""
a, wrap = _makearray(a)
_assertRankAtLeast2(a)
_assertNdSquareness(a)
t, result_t = _commonType(a)
signature = 'D->D' if isComplexType(t) else 'd->d'
extobj = get_linalg_error_extobj(_raise_linalgerror_singular)
ainv = _umath_linalg.inv(a, signature=signature, extobj=extobj)
return wrap(ainv.astype(result_t, copy=False))
def _matrix_power_dispatcher(a, n):
return (a,)
@array_function_dispatch(_matrix_power_dispatcher)
def matrix_power(a, n):
"""
Raise a square matrix to the (integer) power `n`.
For positive integers `n`, the power is computed by repeated matrix
    squarings and matrix multiplications. If ``n == 0``, the identity matrix
    of the same shape as `a` is returned. If ``n < 0``, the inverse
    is computed and then raised to the power ``abs(n)``.
.. note:: Stacks of object matrices are not currently supported.
Parameters
----------
a : (..., M, M) array_like
Matrix to be "powered".
n : int
The exponent can be any integer or long integer, positive,
negative, or zero.
Returns
-------
a**n : (..., M, M) ndarray or matrix object
        The return value is the same shape and type as `a`;
        if the exponent is positive or zero then the type of the
        elements is the same as those of `a`. If the exponent is
        negative the elements are floating-point.
Raises
------
LinAlgError
For matrices that are not square or that (for negative powers) cannot
be inverted numerically.
Examples
--------
>>> from numpy.linalg import matrix_power
>>> i = np.array([[0, 1], [-1, 0]]) # matrix equiv. of the imaginary unit
>>> matrix_power(i, 3) # should = -i
array([[ 0, -1],
[ 1, 0]])
>>> matrix_power(i, 0)
array([[1, 0],
[0, 1]])
>>> matrix_power(i, -3) # should = 1/(-i) = i, but w/ f.p. elements
array([[ 0., 1.],
[-1., 0.]])
Somewhat more sophisticated example
>>> q = np.zeros((4, 4))
>>> q[0:2, 0:2] = -i
>>> q[2:4, 2:4] = i
>>> q # one of the three quaternion units not equal to 1
array([[ 0., -1., 0., 0.],
[ 1., 0., 0., 0.],
[ 0., 0., 0., 1.],
[ 0., 0., -1., 0.]])
>>> matrix_power(q, 2) # = -np.eye(4)
array([[-1., 0., 0., 0.],
[ 0., -1., 0., 0.],
[ 0., 0., -1., 0.],
[ 0., 0., 0., -1.]])
"""
a = asanyarray(a)
_assertRankAtLeast2(a)
_assertNdSquareness(a)
try:
n = operator.index(n)
except TypeError:
raise TypeError("exponent must be an integer")
# Fall back on dot for object arrays. Object arrays are not supported by
# the current implementation of matmul using einsum
if a.dtype != object:
fmatmul = matmul
elif a.ndim == 2:
fmatmul = dot
else:
raise NotImplementedError(
"matrix_power not supported for stacks of object arrays")
if n == 0:
a = empty_like(a)
a[...] = eye(a.shape[-2], dtype=a.dtype)
return a
elif n < 0:
a = inv(a)
n = abs(n)
# short-cuts.
if n == 1:
return a
elif n == 2:
return fmatmul(a, a)
elif n == 3:
return fmatmul(fmatmul(a, a), a)
# Use binary decomposition to reduce the number of matrix multiplications.
# Here, we iterate over the bits of n, from LSB to MSB, raise `a` to
# increasing powers of 2, and multiply into the result as needed.
z = result = None
while n > 0:
z = a if z is None else fmatmul(z, z)
n, bit = divmod(n, 2)
if bit:
result = z if result is None else fmatmul(result, z)
return result
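# Illustrative trace of the loop above (not part of the library): for
# n = 5 (binary 101) the iterations compute
#     z:      a       a @ a      (a @ a) @ (a @ a)   # a**1, a**2, a**4
#     result: a       unchanged  a @ a**4            # bits 1, 0, 1
# giving a**5 in O(log n) matrix products instead of n - 1.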
# Cholesky decomposition
@array_function_dispatch(_unary_dispatcher)
def cholesky(a):
"""
Cholesky decomposition.
Return the Cholesky decomposition, `L * L.H`, of the square matrix `a`,
where `L` is lower-triangular and .H is the conjugate transpose operator
(which is the ordinary transpose if `a` is real-valued). `a` must be
Hermitian (symmetric if real-valued) and positive-definite. Only `L` is
actually returned.
Parameters
----------
a : (..., M, M) array_like
Hermitian (symmetric if all elements are real), positive-definite
input matrix.
Returns
-------
L : (..., M, M) array_like
        Lower-triangular Cholesky factor of `a`. Returns a
matrix object if `a` is a matrix object.
Raises
------
LinAlgError
If the decomposition fails, for example, if `a` is not
positive-definite.
Notes
-----
.. versionadded:: 1.8.0
Broadcasting rules apply, see the `numpy.linalg` documentation for
details.
The Cholesky decomposition is often used as a fast way of solving
.. math:: A \\mathbf{x} = \\mathbf{b}
(when `A` is both Hermitian/symmetric and positive-definite).
First, we solve for :math:`\\mathbf{y}` in
.. math:: L \\mathbf{y} = \\mathbf{b},
and then for :math:`\\mathbf{x}` in
.. math:: L.H \\mathbf{x} = \\mathbf{y}.
Examples
--------
>>> A = np.array([[1,-2j],[2j,5]])
>>> A
array([[ 1.+0.j, -0.-2.j],
[ 0.+2.j, 5.+0.j]])
>>> L = np.linalg.cholesky(A)
>>> L
array([[1.+0.j, 0.+0.j],
[0.+2.j, 1.+0.j]])
>>> np.dot(L, L.T.conj()) # verify that L * L.H = A
array([[1.+0.j, 0.-2.j],
[0.+2.j, 5.+0.j]])
>>> A = [[1,-2j],[2j,5]] # what happens if A is only array_like?
>>> np.linalg.cholesky(A) # an ndarray object is returned
array([[1.+0.j, 0.+0.j],
[0.+2.j, 1.+0.j]])
>>> # But a matrix object is returned if A is a matrix object
>>> np.linalg.cholesky(np.matrix(A))
matrix([[ 1.+0.j, 0.+0.j],
[ 0.+2.j, 1.+0.j]])
"""
extobj = get_linalg_error_extobj(_raise_linalgerror_nonposdef)
gufunc = _umath_linalg.cholesky_lo
a, wrap = _makearray(a)
_assertRankAtLeast2(a)
_assertNdSquareness(a)
t, result_t = _commonType(a)
signature = 'D->D' if isComplexType(t) else 'd->d'
r = gufunc(a, signature=signature, extobj=extobj)
return wrap(r.astype(result_t, copy=False))
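# Illustrative sketch (not part of the library): using the factor returned
# above to solve A x = b via the two triangular solves described in the
# Notes section:
#
#     L = np.linalg.cholesky(A)
#     y = np.linalg.solve(L, b)             # forward solve  L y = b
#     x = np.linalg.solve(L.conj().T, y)    # back solve     L.H x = y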
# QR decomposition
def _qr_dispatcher(a, mode=None):
return (a,)
@array_function_dispatch(_qr_dispatcher)
def qr(a, mode='reduced'):
"""
Compute the qr factorization of a matrix.
Factor the matrix `a` as *qr*, where `q` is orthonormal and `r` is
upper-triangular.
Parameters
----------
a : array_like, shape (M, N)
Matrix to be factored.
mode : {'reduced', 'complete', 'r', 'raw', 'full', 'economic'}, optional
If K = min(M, N), then
* 'reduced' : returns q, r with dimensions (M, K), (K, N) (default)
* 'complete' : returns q, r with dimensions (M, M), (M, N)
* 'r' : returns r only with dimensions (K, N)
* 'raw' : returns h, tau with dimensions (N, M), (K,)
* 'full' : alias of 'reduced', deprecated
* 'economic' : returns h from 'raw', deprecated.
    The options 'reduced', 'complete', and 'raw' are new in numpy 1.8,
see the notes for more information. The default is 'reduced', and to
maintain backward compatibility with earlier versions of numpy both
it and the old default 'full' can be omitted. Note that array h
returned in 'raw' mode is transposed for calling Fortran. The
'economic' mode is deprecated. The modes 'full' and 'economic' may
be passed using only the first letter for backwards compatibility,
but all others must be spelled out. See the Notes for more
explanation.
Returns
-------
q : ndarray of float or complex, optional
A matrix with orthonormal columns. When mode = 'complete' the
result is an orthogonal/unitary matrix depending on whether or not
a is real/complex. The determinant may be either +/- 1 in that
case.
r : ndarray of float or complex, optional
The upper-triangular matrix.
(h, tau) : ndarrays of np.double or np.cdouble, optional
The array h contains the Householder reflectors that generate q
along with r. The tau array contains scaling factors for the
reflectors. In the deprecated 'economic' mode only h is returned.
Raises
------
LinAlgError
If factoring fails.
Notes
-----
This is an interface to the LAPACK routines ``dgeqrf``, ``zgeqrf``,
``dorgqr``, and ``zungqr``.
For more information on the qr factorization, see for example:
https://en.wikipedia.org/wiki/QR_factorization
Subclasses of `ndarray` are preserved except for the 'raw' mode. So if
`a` is of type `matrix`, all the return values will be matrices too.
New 'reduced', 'complete', and 'raw' options for mode were added in
NumPy 1.8.0 and the old option 'full' was made an alias of 'reduced'. In
addition the options 'full' and 'economic' were deprecated. Because
'full' was the previous default and 'reduced' is the new default,
backward compatibility can be maintained by letting `mode` default.
The 'raw' option was added so that LAPACK routines that can multiply
arrays by q using the Householder reflectors can be used. Note that in
this case the returned arrays are of type np.double or np.cdouble and
the h array is transposed to be FORTRAN compatible. No routines using
the 'raw' return are currently exposed by numpy, but some are available
in lapack_lite and just await the necessary work.
Examples
--------
>>> a = np.random.randn(9, 6)
>>> q, r = np.linalg.qr(a)
>>> np.allclose(a, np.dot(q, r)) # a does equal qr
True
>>> r2 = np.linalg.qr(a, mode='r')
>>> r3 = np.linalg.qr(a, mode='economic')
>>> np.allclose(r, r2) # mode='r' returns the same r as mode='full'
True
>>> # But only triu parts are guaranteed equal when mode='economic'
>>> np.allclose(r, np.triu(r3[:6,:6], k=0))
True
Example illustrating a common use of `qr`: solving of least squares
problems
What are the least-squares-best `m` and `y0` in ``y = y0 + mx`` for
the following data: {(0,1), (1,0), (1,2), (2,1)}. (Graph the points
and you'll see that it should be y0 = 0, m = 1.) The answer is provided
by solving the over-determined matrix equation ``Ax = b``, where::
A = array([[0, 1], [1, 1], [1, 1], [2, 1]])
x = array([[y0], [m]])
b = array([[1], [0], [2], [1]])
If A = qr such that q is orthonormal (which is always possible via
Gram-Schmidt), then ``x = inv(r) * (q.T) * b``. (In numpy practice,
however, we simply use `lstsq`.)
>>> A = np.array([[0, 1], [1, 1], [1, 1], [2, 1]])
>>> A
array([[0, 1],
[1, 1],
[1, 1],
[2, 1]])
>>> b = np.array([1, 0, 2, 1])
>>> q, r = np.linalg.qr(A)
>>> p = np.dot(q.T, b)
>>> np.dot(np.linalg.inv(r), p)
array([ 1.1e-16, 1.0e+00])
"""
if mode not in ('reduced', 'complete', 'r', 'raw'):
if mode in ('f', 'full'):
# 2013-04-01, 1.8
msg = "".join((
"The 'full' option is deprecated in favor of 'reduced'.\n",
"For backward compatibility let mode default."))
warnings.warn(msg, DeprecationWarning, stacklevel=3)
mode = 'reduced'
elif mode in ('e', 'economic'):
# 2013-04-01, 1.8
msg = "The 'economic' option is deprecated."
warnings.warn(msg, DeprecationWarning, stacklevel=3)
mode = 'economic'
else:
raise ValueError("Unrecognized mode '%s'" % mode)
a, wrap = _makearray(a)
_assertRank2(a)
m, n = a.shape
t, result_t = _commonType(a)
a = _fastCopyAndTranspose(t, a)
a = _to_native_byte_order(a)
mn = min(m, n)
tau = zeros((mn,), t)
if isComplexType(t):
lapack_routine = lapack_lite.zgeqrf
routine_name = 'zgeqrf'
else:
lapack_routine = lapack_lite.dgeqrf
routine_name = 'dgeqrf'
# calculate optimal size of work data 'work'
lwork = 1
work = zeros((lwork,), t)
results = lapack_routine(m, n, a, max(1, m), tau, work, -1, 0)
if results['info'] != 0:
raise LinAlgError('%s returns %d' % (routine_name, results['info']))
# do qr decomposition
lwork = max(1, n, int(abs(work[0])))
work = zeros((lwork,), t)
results = lapack_routine(m, n, a, max(1, m), tau, work, lwork, 0)
if results['info'] != 0:
raise LinAlgError('%s returns %d' % (routine_name, results['info']))
# handle modes that don't return q
if mode == 'r':
r = _fastCopyAndTranspose(result_t, a[:, :mn])
return wrap(triu(r))
if mode == 'raw':
return a, tau
if mode == 'economic':
        if t != result_t:
a = a.astype(result_t, copy=False)
return wrap(a.T)
# generate q from a
if mode == 'complete' and m > n:
mc = m
q = empty((m, m), t)
else:
mc = mn
q = empty((n, m), t)
q[:n] = a
if isComplexType(t):
lapack_routine = lapack_lite.zungqr
routine_name = 'zungqr'
else:
lapack_routine = lapack_lite.dorgqr
routine_name = 'dorgqr'
# determine optimal lwork
lwork = 1
work = zeros((lwork,), t)
results = lapack_routine(m, mc, mn, q, max(1, m), tau, work, -1, 0)
if results['info'] != 0:
raise LinAlgError('%s returns %d' % (routine_name, results['info']))
# compute q
lwork = max(1, n, int(abs(work[0])))
work = zeros((lwork,), t)
results = lapack_routine(m, mc, mn, q, max(1, m), tau, work, lwork, 0)
if results['info'] != 0:
raise LinAlgError('%s returns %d' % (routine_name, results['info']))
q = _fastCopyAndTranspose(result_t, q[:mc])
r = _fastCopyAndTranspose(result_t, a[:, :mc])
return wrap(q), wrap(triu(r))
# Eigenvalues
@array_function_dispatch(_unary_dispatcher)
def eigvals(a):
"""
Compute the eigenvalues of a general matrix.
Main difference between `eigvals` and `eig`: the eigenvectors aren't
returned.
Parameters
----------
a : (..., M, M) array_like
A complex- or real-valued matrix whose eigenvalues will be computed.
Returns
-------
w : (..., M,) ndarray
The eigenvalues, each repeated according to its multiplicity.
They are not necessarily ordered, nor are they necessarily
real for real matrices.
Raises
------
LinAlgError
If the eigenvalue computation does not converge.
See Also
--------
eig : eigenvalues and right eigenvectors of general arrays
eigvalsh : eigenvalues of real symmetric or complex Hermitian
(conjugate symmetric) arrays.
eigh : eigenvalues and eigenvectors of real symmetric or complex
Hermitian (conjugate symmetric) arrays.
Notes
-----
.. versionadded:: 1.8.0
Broadcasting rules apply, see the `numpy.linalg` documentation for
details.
This is implemented using the ``_geev`` LAPACK routines which compute
the eigenvalues and eigenvectors of general square arrays.
Examples
--------
Illustration, using the fact that the eigenvalues of a diagonal matrix
are its diagonal elements, that multiplying a matrix on the left
by an orthogonal matrix, `Q`, and on the right by `Q.T` (the transpose
of `Q`), preserves the eigenvalues of the "middle" matrix. In other words,
if `Q` is orthogonal, then ``Q * A * Q.T`` has the same eigenvalues as
``A``:
>>> from numpy import linalg as LA
>>> x = np.random.random()
>>> Q = np.array([[np.cos(x), -np.sin(x)], [np.sin(x), np.cos(x)]])
>>> LA.norm(Q[0, :]), LA.norm(Q[1, :]), np.dot(Q[0, :],Q[1, :])
(1.0, 1.0, 0.0)
Now multiply a diagonal matrix by ``Q`` on one side and by ``Q.T`` on the other:
>>> D = np.diag((-1,1))
>>> LA.eigvals(D)
array([-1., 1.])
>>> A = np.dot(Q, D)
>>> A = np.dot(A, Q.T)
>>> LA.eigvals(A)
array([ 1., -1.]) # random
"""
a, wrap = _makearray(a)
_assertRankAtLeast2(a)
_assertNdSquareness(a)
_assertFinite(a)
t, result_t = _commonType(a)
extobj = get_linalg_error_extobj(
_raise_linalgerror_eigenvalues_nonconvergence)
signature = 'D->D' if isComplexType(t) else 'd->D'
w = _umath_linalg.eigvals(a, signature=signature, extobj=extobj)
if not isComplexType(t):
if all(w.imag == 0):
w = w.real
result_t = _realType(result_t)
else:
result_t = _complexType(result_t)
return w.astype(result_t, copy=False)
def _eigvalsh_dispatcher(a, UPLO=None):
return (a,)
@array_function_dispatch(_eigvalsh_dispatcher)
def eigvalsh(a, UPLO='L'):
"""
Compute the eigenvalues of a complex Hermitian or real symmetric matrix.
Main difference from eigh: the eigenvectors are not computed.
Parameters
----------
a : (..., M, M) array_like
A complex- or real-valued matrix whose eigenvalues are to be
computed.
UPLO : {'L', 'U'}, optional
Specifies whether the calculation is done with the lower triangular
part of `a` ('L', default) or the upper triangular part ('U').
Irrespective of this value only the real parts of the diagonal will
be considered in the computation to preserve the notion of a Hermitian
matrix. It therefore follows that the imaginary part of the diagonal
will always be treated as zero.
Returns
-------
w : (..., M,) ndarray
The eigenvalues in ascending order, each repeated according to
its multiplicity.
Raises
------
LinAlgError
If the eigenvalue computation does not converge.
See Also
--------
eigh : eigenvalues and eigenvectors of real symmetric or complex Hermitian
(conjugate symmetric) arrays.
eigvals : eigenvalues of general real or complex arrays.
eig : eigenvalues and right eigenvectors of general real or complex
arrays.
Notes
-----
.. versionadded:: 1.8.0
Broadcasting rules apply, see the `numpy.linalg` documentation for
details.
The eigenvalues are computed using LAPACK routines ``_syevd``, ``_heevd``.
Examples
--------
>>> from numpy import linalg as LA
>>> a = np.array([[1, -2j], [2j, 5]])
>>> LA.eigvalsh(a)
array([ 0.17157288, 5.82842712]) # may vary
>>> # demonstrate the treatment of the imaginary part of the diagonal
>>> a = np.array([[5+2j, 9-2j], [0+2j, 2-1j]])
>>> a
array([[5.+2.j, 9.-2.j],
[0.+2.j, 2.-1.j]])
>>> # with UPLO='L' this is numerically equivalent to using LA.eigvals()
>>> # with:
>>> b = np.array([[5.+0.j, 0.-2.j], [0.+2.j, 2.-0.j]])
>>> b
array([[5.+0.j, 0.-2.j],
[0.+2.j, 2.+0.j]])
>>> wa = LA.eigvalsh(a)
>>> wb = LA.eigvals(b)
>>> wa; wb
array([1., 6.])
array([6.+0.j, 1.+0.j])
"""
UPLO = UPLO.upper()
if UPLO not in ('L', 'U'):
raise ValueError("UPLO argument must be 'L' or 'U'")
extobj = get_linalg_error_extobj(
_raise_linalgerror_eigenvalues_nonconvergence)
if UPLO == 'L':
gufunc = _umath_linalg.eigvalsh_lo
else:
gufunc = _umath_linalg.eigvalsh_up
a, wrap = _makearray(a)
_assertRankAtLeast2(a)
_assertNdSquareness(a)
t, result_t = _commonType(a)
signature = 'D->d' if isComplexType(t) else 'd->d'
w = gufunc(a, signature=signature, extobj=extobj)
return w.astype(_realType(result_t), copy=False)
def _convertarray(a):
t, result_t = _commonType(a)
a = _fastCT(a.astype(t))
return a, t, result_t
# Eigenvectors
@array_function_dispatch(_unary_dispatcher)
def eig(a):
"""
Compute the eigenvalues and right eigenvectors of a square array.
Parameters
----------
a : (..., M, M) array
Matrices for which the eigenvalues and right eigenvectors will
be computed
Returns
-------
w : (..., M) array
The eigenvalues, each repeated according to its multiplicity.
The eigenvalues are not necessarily ordered. The resulting
array will be of complex type, unless the imaginary part is
zero in which case it will be cast to a real type. When `a`
is real the resulting eigenvalues will be real (0 imaginary
part) or occur in conjugate pairs
v : (..., M, M) array
The normalized (unit "length") eigenvectors, such that the
column ``v[:,i]`` is the eigenvector corresponding to the
eigenvalue ``w[i]``.
Raises
------
LinAlgError
If the eigenvalue computation does not converge.
See Also
--------
eigvals : eigenvalues of a non-symmetric array.
eigh : eigenvalues and eigenvectors of a real symmetric or complex
Hermitian (conjugate symmetric) array.
eigvalsh : eigenvalues of a real symmetric or complex Hermitian
(conjugate symmetric) array.
Notes
-----
.. versionadded:: 1.8.0
Broadcasting rules apply, see the `numpy.linalg` documentation for
details.
This is implemented using the ``_geev`` LAPACK routines which compute
the eigenvalues and eigenvectors of general square arrays.
The number `w` is an eigenvalue of `a` if there exists a vector
`v` such that ``dot(a,v) = w * v``. Thus, the arrays `a`, `w`, and
`v` satisfy the equations ``dot(a[:,:], v[:,i]) = w[i] * v[:,i]``
for :math:`i \\in \\{0,...,M-1\\}`.
The array `v` of eigenvectors may not be of maximum rank, that is, some
of the columns may be linearly dependent, although round-off error may
obscure that fact. If the eigenvalues are all different, then theoretically
the eigenvectors are linearly independent. Likewise, the (complex-valued)
matrix of eigenvectors `v` is unitary if the matrix `a` is normal, i.e.,
if ``dot(a, a.H) = dot(a.H, a)``, where `a.H` denotes the conjugate
transpose of `a`.
Finally, it is emphasized that `v` consists of the *right* (as in
right-hand side) eigenvectors of `a`. A vector `y` satisfying
``dot(y.T, a) = z * y.T`` for some number `z` is called a *left*
eigenvector of `a`, and, in general, the left and right eigenvectors
of a matrix are not necessarily the (perhaps conjugate) transposes
of each other.
References
----------
G. Strang, *Linear Algebra and Its Applications*, 2nd Ed., Orlando, FL,
Academic Press, Inc., 1980, Various pp.
Examples
--------
>>> from numpy import linalg as LA
(Almost) trivial example with real e-values and e-vectors.
>>> w, v = LA.eig(np.diag((1, 2, 3)))
>>> w; v
array([1., 2., 3.])
array([[1., 0., 0.],
[0., 1., 0.],
[0., 0., 1.]])
Real matrix possessing complex e-values and e-vectors; note that the
e-values are complex conjugates of each other.
>>> w, v = LA.eig(np.array([[1, -1], [1, 1]]))
>>> w; v
array([1.+1.j, 1.-1.j])
array([[0.70710678+0.j , 0.70710678-0.j ],
[0. -0.70710678j, 0. +0.70710678j]])
Complex-valued matrix with real e-values (but complex-valued e-vectors);
note that ``a.conj().T == a``, i.e., `a` is Hermitian.
>>> a = np.array([[1, 1j], [-1j, 1]])
>>> w, v = LA.eig(a)
>>> w; v
array([2.+0.j, 0.+0.j])
array([[ 0. +0.70710678j, 0.70710678+0.j ], # may vary
[ 0.70710678+0.j , -0. +0.70710678j]])
Be careful about round-off error!
>>> a = np.array([[1 + 1e-9, 0], [0, 1 - 1e-9]])
>>> # Theor. e-values are 1 +/- 1e-9
>>> w, v = LA.eig(a)
>>> w; v
array([1., 1.])
array([[1., 0.],
[0., 1.]])
"""
a, wrap = _makearray(a)
_assertRankAtLeast2(a)
_assertNdSquareness(a)
_assertFinite(a)
t, result_t = _commonType(a)
extobj = get_linalg_error_extobj(
_raise_linalgerror_eigenvalues_nonconvergence)
signature = 'D->DD' if isComplexType(t) else 'd->DD'
w, vt = _umath_linalg.eig(a, signature=signature, extobj=extobj)
if not isComplexType(t) and all(w.imag == 0.0):
w = w.real
vt = vt.real
result_t = _realType(result_t)
else:
result_t = _complexType(result_t)
vt = vt.astype(result_t, copy=False)
return w.astype(result_t, copy=False), wrap(vt)
@array_function_dispatch(_eigvalsh_dispatcher)
def eigh(a, UPLO='L'):
"""
Return the eigenvalues and eigenvectors of a complex Hermitian
(conjugate symmetric) or a real symmetric matrix.
Returns two objects, a 1-D array containing the eigenvalues of `a`, and
a 2-D square array or matrix (depending on the input type) of the
corresponding eigenvectors (in columns).
Parameters
----------
a : (..., M, M) array
Hermitian or real symmetric matrices whose eigenvalues and
eigenvectors are to be computed.
UPLO : {'L', 'U'}, optional
Specifies whether the calculation is done with the lower triangular
part of `a` ('L', default) or the upper triangular part ('U').
Irrespective of this value only the real parts of the diagonal will
be considered in the computation to preserve the notion of a Hermitian
matrix. It therefore follows that the imaginary part of the diagonal
will always be treated as zero.
Returns
-------
w : (..., M) ndarray
The eigenvalues in ascending order, each repeated according to
its multiplicity.
v : {(..., M, M) ndarray, (..., M, M) matrix}
The column ``v[:, i]`` is the normalized eigenvector corresponding
to the eigenvalue ``w[i]``. Will return a matrix object if `a` is
a matrix object.
Raises
------
LinAlgError
If the eigenvalue computation does not converge.
See Also
--------
eigvalsh : eigenvalues of real symmetric or complex Hermitian
(conjugate symmetric) arrays.
eig : eigenvalues and right eigenvectors for non-symmetric arrays.
eigvals : eigenvalues of non-symmetric arrays.
Notes
-----
.. versionadded:: 1.8.0
Broadcasting rules apply, see the `numpy.linalg` documentation for
details.
The eigenvalues/eigenvectors are computed using LAPACK routines ``_syevd``,
``_heevd``.
The eigenvalues of real symmetric or complex Hermitian matrices are
always real. [1]_ The array `v` of (column) eigenvectors is unitary
and `a`, `w`, and `v` satisfy the equations
``dot(a, v[:, i]) = w[i] * v[:, i]``.
References
----------
.. [1] G. Strang, *Linear Algebra and Its Applications*, 2nd Ed., Orlando,
FL, Academic Press, Inc., 1980, pg. 222.
Examples
--------
>>> from numpy import linalg as LA
>>> a = np.array([[1, -2j], [2j, 5]])
>>> a
array([[ 1.+0.j, -0.-2.j],
[ 0.+2.j, 5.+0.j]])
>>> w, v = LA.eigh(a)
>>> w; v
array([0.17157288, 5.82842712])
array([[-0.92387953+0.j , -0.38268343+0.j ], # may vary
[ 0. +0.38268343j, 0. -0.92387953j]])
>>> np.dot(a, v[:, 0]) - w[0] * v[:, 0] # verify 1st e-val/vec pair
array([5.55111512e-17+0.0000000e+00j, 0.00000000e+00+1.2490009e-16j])
>>> np.dot(a, v[:, 1]) - w[1] * v[:, 1] # verify 2nd e-val/vec pair
array([0.+0.j, 0.+0.j])
>>> A = np.matrix(a) # what happens if input is a matrix object
>>> A
matrix([[ 1.+0.j, -0.-2.j],
[ 0.+2.j, 5.+0.j]])
>>> w, v = LA.eigh(A)
>>> w; v
array([0.17157288, 5.82842712])
matrix([[-0.92387953+0.j , -0.38268343+0.j ], # may vary
[ 0. +0.38268343j, 0. -0.92387953j]])
>>> # demonstrate the treatment of the imaginary part of the diagonal
>>> a = np.array([[5+2j, 9-2j], [0+2j, 2-1j]])
>>> a
array([[5.+2.j, 9.-2.j],
[0.+2.j, 2.-1.j]])
>>> # with UPLO='L' this is numerically equivalent to using LA.eig() with:
>>> b = np.array([[5.+0.j, 0.-2.j], [0.+2.j, 2.-0.j]])
>>> b
array([[5.+0.j, 0.-2.j],
[0.+2.j, 2.+0.j]])
>>> wa, va = LA.eigh(a)
>>> wb, vb = LA.eig(b)
>>> wa; wb
array([1., 6.])
array([6.+0.j, 1.+0.j])
>>> va; vb
array([[-0.4472136 +0.j , -0.89442719+0.j ], # may vary
[ 0. +0.89442719j, 0. -0.4472136j ]])
array([[ 0.89442719+0.j , -0. +0.4472136j],
[-0. +0.4472136j, 0.89442719+0.j ]])
"""
UPLO = UPLO.upper()
if UPLO not in ('L', 'U'):
raise ValueError("UPLO argument must be 'L' or 'U'")
a, wrap = _makearray(a)
_assertRankAtLeast2(a)
_assertNdSquareness(a)
t, result_t = _commonType(a)
extobj = get_linalg_error_extobj(
_raise_linalgerror_eigenvalues_nonconvergence)
if UPLO == 'L':
gufunc = _umath_linalg.eigh_lo
else:
gufunc = _umath_linalg.eigh_up
signature = 'D->dD' if isComplexType(t) else 'd->dd'
w, vt = gufunc(a, signature=signature, extobj=extobj)
w = w.astype(_realType(result_t), copy=False)
vt = vt.astype(result_t, copy=False)
return w, wrap(vt)
# Singular value decomposition
def _svd_dispatcher(a, full_matrices=None, compute_uv=None, hermitian=None):
return (a,)
@array_function_dispatch(_svd_dispatcher)
def svd(a, full_matrices=True, compute_uv=True, hermitian=False):
"""
Singular Value Decomposition.
When `a` is a 2D array, it is factorized as ``u @ np.diag(s) @ vh
= (u * s) @ vh``, where `u` and `vh` are 2D unitary arrays and `s` is a 1D
array of `a`'s singular values. When `a` is higher-dimensional, SVD is
applied in stacked mode as explained below.
Parameters
----------
a : (..., M, N) array_like
A real or complex array with ``a.ndim >= 2``.
full_matrices : bool, optional
If True (default), `u` and `vh` have the shapes ``(..., M, M)`` and
``(..., N, N)``, respectively. Otherwise, the shapes are
``(..., M, K)`` and ``(..., K, N)``, respectively, where
``K = min(M, N)``.
compute_uv : bool, optional
Whether or not to compute `u` and `vh` in addition to `s`. True
by default.
hermitian : bool, optional
If True, `a` is assumed to be Hermitian (symmetric if real-valued),
enabling a more efficient method for finding singular values.
Defaults to False.
.. versionadded:: 1.17.0
Returns
-------
u : { (..., M, M), (..., M, K) } array
Unitary array(s). The first ``a.ndim - 2`` dimensions have the same
size as those of the input `a`. The size of the last two dimensions
depends on the value of `full_matrices`. Only returned when
`compute_uv` is True.
s : (..., K) array
Vector(s) with the singular values, within each vector sorted in
descending order. The first ``a.ndim - 2`` dimensions have the same
size as those of the input `a`.
vh : { (..., N, N), (..., K, N) } array
Unitary array(s). The first ``a.ndim - 2`` dimensions have the same
size as those of the input `a`. The size of the last two dimensions
depends on the value of `full_matrices`. Only returned when
`compute_uv` is True.
Raises
------
LinAlgError
If SVD computation does not converge.
Notes
-----
.. versionchanged:: 1.8.0
Broadcasting rules apply, see the `numpy.linalg` documentation for
details.
The decomposition is performed using LAPACK routine ``_gesdd``.
SVD is usually described for the factorization of a 2D matrix :math:`A`.
The higher-dimensional case will be discussed below. In the 2D case, SVD is
written as :math:`A = U S V^H`, where :math:`A = a`, :math:`U= u`,
:math:`S= \\mathtt{np.diag}(s)` and :math:`V^H = vh`. The 1D array `s`
contains the singular values of `a` and `u` and `vh` are unitary. The rows
of `vh` are the eigenvectors of :math:`A^H A` and the columns of `u` are
the eigenvectors of :math:`A A^H`. In both cases the corresponding
(possibly non-zero) eigenvalues are given by ``s**2``.
If `a` has more than two dimensions, then broadcasting rules apply, as
explained in :ref:`routines.linalg-broadcasting`. This means that SVD is
working in "stacked" mode: it iterates over all indices of the first
``a.ndim - 2`` dimensions and for each combination SVD is applied to the
last two indices. The matrix `a` can be reconstructed from the
decomposition with either ``(u * s[..., None, :]) @ vh`` or
``u @ (s[..., None] * vh)``. (The ``@`` operator can be replaced by the
function ``np.matmul`` for python versions below 3.5.)
If `a` is a ``matrix`` object (as opposed to an ``ndarray``), then so are
all the return values.
Examples
--------
>>> a = np.random.randn(9, 6) + 1j*np.random.randn(9, 6)
>>> b = np.random.randn(2, 7, 8, 3) + 1j*np.random.randn(2, 7, 8, 3)
Reconstruction based on full SVD, 2D case:
>>> u, s, vh = np.linalg.svd(a, full_matrices=True)
>>> u.shape, s.shape, vh.shape
((9, 9), (6,), (6, 6))
>>> np.allclose(a, np.dot(u[:, :6] * s, vh))
True
>>> smat = np.zeros((9, 6), dtype=complex)
>>> smat[:6, :6] = np.diag(s)
>>> np.allclose(a, np.dot(u, np.dot(smat, vh)))
True
Reconstruction based on reduced SVD, 2D case:
>>> u, s, vh = np.linalg.svd(a, full_matrices=False)
>>> u.shape, s.shape, vh.shape
((9, 6), (6,), (6, 6))
>>> np.allclose(a, np.dot(u * s, vh))
True
>>> smat = np.diag(s)
>>> np.allclose(a, np.dot(u, np.dot(smat, vh)))
True
Reconstruction based on full SVD, 4D case:
>>> u, s, vh = np.linalg.svd(b, full_matrices=True)
>>> u.shape, s.shape, vh.shape
((2, 7, 8, 8), (2, 7, 3), (2, 7, 3, 3))
>>> np.allclose(b, np.matmul(u[..., :3] * s[..., None, :], vh))
True
>>> np.allclose(b, np.matmul(u[..., :3], s[..., None] * vh))
True
Reconstruction based on reduced SVD, 4D case:
>>> u, s, vh = np.linalg.svd(b, full_matrices=False)
>>> u.shape, s.shape, vh.shape
((2, 7, 8, 3), (2, 7, 3), (2, 7, 3, 3))
>>> np.allclose(b, np.matmul(u * s[..., None, :], vh))
True
>>> np.allclose(b, np.matmul(u, s[..., None] * vh))
True
"""
a, wrap = _makearray(a)
if hermitian:
# note: lapack returns eigenvalues in reverse order to our contract.
# reversing is cheap by design in numpy, so we do so to be consistent
if compute_uv:
s, u = eigh(a)
s = s[..., ::-1]
u = u[..., ::-1]
# singular values are unsigned, move the sign into v
vt = transpose(u * sign(s)[..., None, :]).conjugate()
s = abs(s)
return wrap(u), s, wrap(vt)
else:
s = eigvalsh(a)
s = s[..., ::-1]
s = abs(s)
return s
_assertRankAtLeast2(a)
t, result_t = _commonType(a)
extobj = get_linalg_error_extobj(_raise_linalgerror_svd_nonconvergence)
m, n = a.shape[-2:]
if compute_uv:
if full_matrices:
if m < n:
gufunc = _umath_linalg.svd_m_f
else:
gufunc = _umath_linalg.svd_n_f
else:
if m < n:
gufunc = _umath_linalg.svd_m_s
else:
gufunc = _umath_linalg.svd_n_s
signature = 'D->DdD' if isComplexType(t) else 'd->ddd'
u, s, vh = gufunc(a, signature=signature, extobj=extobj)
u = u.astype(result_t, copy=False)
s = s.astype(_realType(result_t), copy=False)
vh = vh.astype(result_t, copy=False)
return wrap(u), s, wrap(vh)
else:
if m < n:
gufunc = _umath_linalg.svd_m
else:
gufunc = _umath_linalg.svd_n
signature = 'D->d' if isComplexType(t) else 'd->d'
s = gufunc(a, signature=signature, extobj=extobj)
s = s.astype(_realType(result_t), copy=False)
return s
def _cond_dispatcher(x, p=None):
return (x,)
@array_function_dispatch(_cond_dispatcher)
def cond(x, p=None):
"""
Compute the condition number of a matrix.
This function is capable of returning the condition number using
one of seven different norms, depending on the value of `p` (see
Parameters below).
Parameters
----------
x : (..., M, N) array_like
The matrix whose condition number is sought.
p : {None, 1, -1, 2, -2, inf, -inf, 'fro'}, optional
Order of the norm:
===== ============================
p norm for matrices
===== ============================
None 2-norm, computed directly using the ``SVD``
'fro' Frobenius norm
inf max(sum(abs(x), axis=1))
-inf min(sum(abs(x), axis=1))
1 max(sum(abs(x), axis=0))
-1 min(sum(abs(x), axis=0))
2 2-norm (largest sing. value)
-2 smallest singular value
===== ============================
inf means the numpy.inf object, and the Frobenius norm is
the root-of-sum-of-squares norm.
Returns
-------
c : {float, inf}
The condition number of the matrix. May be infinite.
See Also
--------
numpy.linalg.norm
Notes
-----
The condition number of `x` is defined as the norm of `x` times the
norm of the inverse of `x` [1]_; the norm can be the usual L2-norm
(root-of-sum-of-squares) or one of a number of other matrix norms.
References
----------
.. [1] G. Strang, *Linear Algebra and Its Applications*, Orlando, FL,
Academic Press, Inc., 1980, pg. 285.
Examples
--------
>>> from numpy import linalg as LA
>>> a = np.array([[1, 0, -1], [0, 1, 0], [1, 0, 1]])
>>> a
array([[ 1, 0, -1],
[ 0, 1, 0],
[ 1, 0, 1]])
>>> LA.cond(a)
1.4142135623730951
>>> LA.cond(a, 'fro')
3.1622776601683795
>>> LA.cond(a, np.inf)
2.0
>>> LA.cond(a, -np.inf)
1.0
>>> LA.cond(a, 1)
2.0
>>> LA.cond(a, -1)
1.0
>>> LA.cond(a, 2)
1.4142135623730951
>>> LA.cond(a, -2)
0.70710678118654746 # may vary
>>> min(LA.svd(a, compute_uv=0))*min(LA.svd(LA.inv(a), compute_uv=0))
0.70710678118654746 # may vary
"""
x = asarray(x) # in case we have a matrix
_assertNoEmpty2d(x)
if p is None or p == 2 or p == -2:
s = svd(x, compute_uv=False)
with errstate(all='ignore'):
if p == -2:
r = s[..., -1] / s[..., 0]
else:
r = s[..., 0] / s[..., -1]
else:
# Call inv(x) ignoring errors. The result array will
# contain nans in the entries where inversion failed.
_assertRankAtLeast2(x)
_assertNdSquareness(x)
t, result_t = _commonType(x)
signature = 'D->D' if isComplexType(t) else 'd->d'
with errstate(all='ignore'):
invx = _umath_linalg.inv(x, signature=signature)
r = norm(x, p, axis=(-2, -1)) * norm(invx, p, axis=(-2, -1))
r = r.astype(result_t, copy=False)
# Convert nans to infs unless the original array had nan entries
r = asarray(r)
nan_mask = isnan(r)
if nan_mask.any():
nan_mask &= ~isnan(x).any(axis=(-2, -1))
if r.ndim > 0:
r[nan_mask] = Inf
elif nan_mask:
r[()] = Inf
# Convention is to return scalars instead of 0d arrays
if r.ndim == 0:
r = r[()]
return r
def _matrix_rank_dispatcher(M, tol=None, hermitian=None):
return (M,)
@array_function_dispatch(_matrix_rank_dispatcher)
def matrix_rank(M, tol=None, hermitian=False):
"""
    Return the matrix rank of an array using the SVD method.
Rank of the array is the number of singular values of the array that are
greater than `tol`.
.. versionchanged:: 1.14
Can now operate on stacks of matrices
Parameters
----------
M : {(M,), (..., M, N)} array_like
Input vector or stack of matrices.
tol : (...) array_like, float, optional
Threshold below which SVD values are considered zero. If `tol` is
None, and ``S`` is an array with singular values for `M`, and
``eps`` is the epsilon value for datatype of ``S``, then `tol` is
set to ``S.max() * max(M.shape) * eps``.
.. versionchanged:: 1.14
Broadcasted against the stack of matrices
hermitian : bool, optional
If True, `M` is assumed to be Hermitian (symmetric if real-valued),
enabling a more efficient method for finding singular values.
Defaults to False.
.. versionadded:: 1.14
Returns
-------
rank : (...) array_like
Rank of M.
Notes
-----
The default threshold to detect rank deficiency is a test on the magnitude
of the singular values of `M`. By default, we identify singular values less
than ``S.max() * max(M.shape) * eps`` as indicating rank deficiency (with
the symbols defined above). This is the algorithm MATLAB uses [1]. It also
appears in *Numerical recipes* in the discussion of SVD solutions for linear
least squares [2].
This default threshold is designed to detect rank deficiency accounting for
the numerical errors of the SVD computation. Imagine that there is a column
in `M` that is an exact (in floating point) linear combination of other
columns in `M`. Computing the SVD on `M` will not produce a singular value
exactly equal to 0 in general: any difference of the smallest SVD value from
0 will be caused by numerical imprecision in the calculation of the SVD.
Our threshold for small SVD values takes this numerical imprecision into
account, and the default threshold will detect such numerical rank
deficiency. The threshold may declare a matrix `M` rank deficient even if
the linear combination of some columns of `M` is not exactly equal to
another column of `M` but only numerically very close to another column of
`M`.
We chose our default threshold because it is in wide use. Other thresholds
are possible. For example, elsewhere in the 2007 edition of *Numerical
recipes* there is an alternative threshold of ``S.max() *
np.finfo(M.dtype).eps / 2. * np.sqrt(m + n + 1.)``. The authors describe
this threshold as being based on "expected roundoff error" (p 71).
The thresholds above deal with floating point roundoff error in the
calculation of the SVD. However, you may have more information about the
sources of error in `M` that would make you consider other tolerance values
to detect *effective* rank deficiency. The most useful measure of the
tolerance depends on the operations you intend to use on your matrix. For
example, if your data come from uncertain measurements with uncertainties
greater than floating point epsilon, choosing a tolerance near that
uncertainty may be preferable. The tolerance may be absolute if the
uncertainties are absolute rather than relative.
References
----------
    .. [1] MATLAB reference documentation, "Rank"
https://www.mathworks.com/help/techdoc/ref/rank.html
.. [2] W. H. Press, S. A. Teukolsky, W. T. Vetterling and B. P. Flannery,
"Numerical Recipes (3rd edition)", Cambridge University Press, 2007,
page 795.
Examples
--------
>>> from numpy.linalg import matrix_rank
>>> matrix_rank(np.eye(4)) # Full rank matrix
4
>>> I=np.eye(4); I[-1,-1] = 0. # rank deficient matrix
>>> matrix_rank(I)
3
>>> matrix_rank(np.ones((4,))) # 1 dimension - rank 1 unless all 0
1
>>> matrix_rank(np.zeros((4,)))
0
"""
M = asarray(M)
if M.ndim < 2:
return int(not all(M==0))
S = svd(M, compute_uv=False, hermitian=hermitian)
if tol is None:
tol = S.max(axis=-1, keepdims=True) * max(M.shape[-2:]) * finfo(S.dtype).eps
else:
tol = asarray(tol)[..., newaxis]
return count_nonzero(S > tol, axis=-1)
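# Illustrative sketch (not part of the NumPy API): how an explicit `tol`
# changes the rank reported by matrix_rank. `_demo_matrix_rank_tol` is a
# hypothetical helper added here for exposition only.
def _demo_matrix_rank_tol():
    import numpy as np
    A = np.diag([1.0, 1.0, 1.0, 1e-10])
    # The default threshold (S.max() * max(M.shape) * eps, roughly 9e-16 here)
    # keeps the tiny singular value, so the matrix counts as full rank.
    assert np.linalg.matrix_rank(A) == 4
    # A looser absolute tolerance treats 1e-10 as numerically zero.
    assert np.linalg.matrix_rank(A, tol=1e-8) == 3
    return np.linalg.matrix_rank(A), np.linalg.matrix_rank(A, tol=1e-8)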
# Generalized inverse
def _pinv_dispatcher(a, rcond=None, hermitian=None):
return (a,)
@array_function_dispatch(_pinv_dispatcher)
def pinv(a, rcond=1e-15, hermitian=False):
"""
Compute the (Moore-Penrose) pseudo-inverse of a matrix.
Calculate the generalized inverse of a matrix using its
singular-value decomposition (SVD) and including all
*large* singular values.
.. versionchanged:: 1.14
Can now operate on stacks of matrices
Parameters
----------
a : (..., M, N) array_like
Matrix or stack of matrices to be pseudo-inverted.
rcond : (...) array_like of float
Cutoff for small singular values.
Singular values less than or equal to
``rcond * largest_singular_value`` are set to zero.
Broadcasts against the stack of matrices.
hermitian : bool, optional
If True, `a` is assumed to be Hermitian (symmetric if real-valued),
enabling a more efficient method for finding singular values.
Defaults to False.
.. versionadded:: 1.17.0
Returns
-------
B : (..., N, M) ndarray
The pseudo-inverse of `a`. If `a` is a `matrix` instance, then so
is `B`.
Raises
------
LinAlgError
If the SVD computation does not converge.
Notes
-----
The pseudo-inverse of a matrix A, denoted :math:`A^+`, is
defined as: "the matrix that 'solves' [the least-squares problem]
:math:`Ax = b`," i.e., if :math:`\\bar{x}` is said solution, then
:math:`A^+` is that matrix such that :math:`\\bar{x} = A^+b`.
It can be shown that if :math:`Q_1 \\Sigma Q_2^T = A` is the singular
value decomposition of A, then
:math:`A^+ = Q_2 \\Sigma^+ Q_1^T`, where :math:`Q_{1,2}` are
orthogonal matrices, :math:`\\Sigma` is a diagonal matrix consisting
of A's so-called singular values, (followed, typically, by
zeros), and then :math:`\\Sigma^+` is simply the diagonal matrix
consisting of the reciprocals of A's singular values
(again, followed by zeros). [1]_
References
----------
.. [1] G. Strang, *Linear Algebra and Its Applications*, 2nd Ed., Orlando,
FL, Academic Press, Inc., 1980, pp. 139-142.
Examples
--------
The following example checks that ``a * a+ * a == a`` and
``a+ * a * a+ == a+``:
>>> a = np.random.randn(9, 6)
>>> B = np.linalg.pinv(a)
>>> np.allclose(a, np.dot(a, np.dot(B, a)))
True
>>> np.allclose(B, np.dot(B, np.dot(a, B)))
True
"""
a, wrap = _makearray(a)
rcond = asarray(rcond)
if _isEmpty2d(a):
m, n = a.shape[-2:]
res = empty(a.shape[:-2] + (n, m), dtype=a.dtype)
return wrap(res)
a = a.conjugate()
u, s, vt = svd(a, full_matrices=False, hermitian=hermitian)
# discard small singular values
cutoff = rcond[..., newaxis] * amax(s, axis=-1, keepdims=True)
large = s > cutoff
s = divide(1, s, where=large, out=s)
s[~large] = 0
res = matmul(transpose(vt), multiply(s[..., newaxis], transpose(u)))
return wrap(res)
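# Illustrative sketch (not part of the NumPy API): the pseudo-inverse yields
# the minimum-norm least-squares solution, matching `lstsq` for a full-rank
# matrix. `_demo_pinv_lstsq` is a hypothetical helper for exposition only.
def _demo_pinv_lstsq():
    import numpy as np
    rng = np.random.RandomState(0)
    A = rng.randn(9, 6)   # almost surely full column rank
    b = rng.randn(9)
    x_pinv = np.dot(np.linalg.pinv(A), b)
    x_lstsq = np.linalg.lstsq(A, b, rcond=None)[0]
    # Both routes minimize ||A x - b||_2, so the solutions coincide.
    assert np.allclose(x_pinv, x_lstsq)
    return x_pinv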
# Determinant
@array_function_dispatch(_unary_dispatcher)
def slogdet(a):
"""
Compute the sign and (natural) logarithm of the determinant of an array.
If an array has a very small or very large determinant, then a call to
`det` may overflow or underflow. This routine is more robust against such
issues, because it computes the logarithm of the determinant rather than
the determinant itself.
Parameters
----------
a : (..., M, M) array_like
Input array, has to be a square 2-D array.
Returns
-------
sign : (...) array_like
A number representing the sign of the determinant. For a real matrix,
this is 1, 0, or -1. For a complex matrix, this is a complex number
with absolute value 1 (i.e., it is on the unit circle), or else 0.
logdet : (...) array_like
The natural log of the absolute value of the determinant.
If the determinant is zero, then `sign` will be 0 and `logdet` will be
-Inf. In all cases, the determinant is equal to ``sign * np.exp(logdet)``.
See Also
--------
det
Notes
-----
.. versionadded:: 1.8.0
Broadcasting rules apply, see the `numpy.linalg` documentation for
details.
.. versionadded:: 1.6.0
The determinant is computed via LU factorization using the LAPACK
routine ``z/dgetrf``.
Examples
--------
The determinant of a 2-D array ``[[a, b], [c, d]]`` is ``ad - bc``:
>>> a = np.array([[1, 2], [3, 4]])
>>> (sign, logdet) = np.linalg.slogdet(a)
>>> (sign, logdet)
(-1, 0.69314718055994529) # may vary
>>> sign * np.exp(logdet)
-2.0
Computing log-determinants for a stack of matrices:
>>> a = np.array([ [[1, 2], [3, 4]], [[1, 2], [2, 1]], [[1, 3], [3, 1]] ])
>>> a.shape
(3, 2, 2)
>>> sign, logdet = np.linalg.slogdet(a)
>>> (sign, logdet)
(array([-1., -1., -1.]), array([ 0.69314718, 1.09861229, 2.07944154]))
>>> sign * np.exp(logdet)
array([-2., -3., -8.])
This routine succeeds where ordinary `det` does not:
>>> np.linalg.det(np.eye(500) * 0.1)
0.0
>>> np.linalg.slogdet(np.eye(500) * 0.1)
(1, -1151.2925464970228)
"""
a = asarray(a)
_assertRankAtLeast2(a)
_assertNdSquareness(a)
t, result_t = _commonType(a)
real_t = _realType(result_t)
signature = 'D->Dd' if isComplexType(t) else 'd->dd'
sign, logdet = _umath_linalg.slogdet(a, signature=signature)
sign = sign.astype(result_t, copy=False)
logdet = logdet.astype(real_t, copy=False)
return sign, logdet
@array_function_dispatch(_unary_dispatcher)
def det(a):
"""
Compute the determinant of an array.
Parameters
----------
a : (..., M, M) array_like
Input array to compute determinants for.
Returns
-------
det : (...) array_like
Determinant of `a`.
See Also
--------
slogdet : Another way to represent the determinant, more suitable
for large matrices where underflow/overflow may occur.
Notes
-----
.. versionadded:: 1.8.0
Broadcasting rules apply, see the `numpy.linalg` documentation for
details.
The determinant is computed via LU factorization using the LAPACK
routine ``z/dgetrf``.
Examples
--------
The determinant of a 2-D array [[a, b], [c, d]] is ad - bc:
>>> a = np.array([[1, 2], [3, 4]])
>>> np.linalg.det(a)
-2.0 # may vary
Computing determinants for a stack of matrices:
>>> a = np.array([ [[1, 2], [3, 4]], [[1, 2], [2, 1]], [[1, 3], [3, 1]] ])
>>> a.shape
(3, 2, 2)
>>> np.linalg.det(a)
array([-2., -3., -8.])
"""
a = asarray(a)
_assertRankAtLeast2(a)
_assertNdSquareness(a)
t, result_t = _commonType(a)
signature = 'D->D' if isComplexType(t) else 'd->d'
r = _umath_linalg.det(a, signature=signature)
r = r.astype(result_t, copy=False)
return r
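# Illustrative sketch (not part of the NumPy API): why slogdet is preferred
# when det underflows. `_demo_det_vs_slogdet` is a hypothetical helper added
# here for exposition only.
def _demo_det_vs_slogdet():
    import numpy as np
    a = np.eye(500) * 0.1
    # 0.1**500 = 1e-500 is below the float64 range, so det underflows to 0.0,
    assert np.linalg.det(a) == 0.0
    # ... while slogdet recovers the determinant as sign * exp(logdet).
    sign, logdet = np.linalg.slogdet(a)
    assert sign == 1.0 and np.isclose(logdet, 500 * np.log(0.1))
    return sign, logdet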
# Linear Least Squares
def _lstsq_dispatcher(a, b, rcond=None):
return (a, b)
@array_function_dispatch(_lstsq_dispatcher)
def lstsq(a, b, rcond="warn"):
r"""
Return the least-squares solution to a linear matrix equation.
Solves the equation :math:`a x = b` by computing a vector `x` that
minimizes the squared Euclidean 2-norm :math:`\| b - a x \|^2_2`.
The equation may be under-, well-, or over-determined (i.e., the
number of linearly independent rows of `a` can be less than, equal
to, or greater than its number of linearly independent columns).
If `a` is square and of full rank, then `x` (but for round-off error)
is the "exact" solution of the equation.
Parameters
----------
a : (M, N) array_like
"Coefficient" matrix.
b : {(M,), (M, K)} array_like
Ordinate or "dependent variable" values. If `b` is two-dimensional,
the least-squares solution is calculated for each of the `K` columns
of `b`.
rcond : float, optional
Cut-off ratio for small singular values of `a`.
For the purposes of rank determination, singular values are treated
as zero if they are smaller than `rcond` times the largest singular
value of `a`.
.. versionchanged:: 1.14.0
If not set, a FutureWarning is given. The previous default
of ``-1`` will use the machine precision as `rcond` parameter,
the new default will use the machine precision times `max(M, N)`.
To silence the warning and use the new default, use ``rcond=None``,
to keep using the old behavior, use ``rcond=-1``.
Returns
-------
x : {(N,), (N, K)} ndarray
Least-squares solution. If `b` is two-dimensional,
the solutions are in the `K` columns of `x`.
residuals : {(1,), (K,), (0,)} ndarray
Sums of residuals; squared Euclidean 2-norm for each column in
``b - a*x``.
If the rank of `a` is < N or M <= N, this is an empty array.
If `b` is 1-dimensional, this is a (1,) shape array.
Otherwise the shape is (K,).
rank : int
Rank of matrix `a`.
s : (min(M, N),) ndarray
Singular values of `a`.
Raises
------
LinAlgError
If computation does not converge.
Notes
-----
If `b` is a matrix, then all array results are returned as matrices.
Examples
--------
Fit a line, ``y = mx + c``, through some noisy data-points:
>>> x = np.array([0, 1, 2, 3])
>>> y = np.array([-1, 0.2, 0.9, 2.1])
By examining the coefficients, we see that the line should have a
gradient of roughly 1 and cut the y-axis at, more or less, -1.
We can rewrite the line equation as ``y = Ap``, where ``A = [[x 1]]``
and ``p = [[m], [c]]``. Now use `lstsq` to solve for `p`:
>>> A = np.vstack([x, np.ones(len(x))]).T
>>> A
array([[ 0., 1.],
[ 1., 1.],
[ 2., 1.],
[ 3., 1.]])
>>> m, c = np.linalg.lstsq(A, y, rcond=None)[0]
>>> m, c
    (1.0, -0.95) # may vary
Plot the data along with the fitted line:
>>> import matplotlib.pyplot as plt
>>> _ = plt.plot(x, y, 'o', label='Original data', markersize=10)
>>> _ = plt.plot(x, m*x + c, 'r', label='Fitted line')
>>> _ = plt.legend()
>>> plt.show()
"""
a, _ = _makearray(a)
b, wrap = _makearray(b)
is_1d = b.ndim == 1
if is_1d:
b = b[:, newaxis]
_assertRank2(a, b)
m, n = a.shape[-2:]
m2, n_rhs = b.shape[-2:]
if m != m2:
raise LinAlgError('Incompatible dimensions')
t, result_t = _commonType(a, b)
# FIXME: real_t is unused
real_t = _linalgRealType(t)
result_real_t = _realType(result_t)
# Determine default rcond value
if rcond == "warn":
# 2017-08-19, 1.14.0
warnings.warn("`rcond` parameter will change to the default of "
"machine precision times ``max(M, N)`` where M and N "
"are the input matrix dimensions.\n"
"To use the future default and silence this warning "
"we advise to pass `rcond=None`, to keep using the old, "
"explicitly pass `rcond=-1`.",
FutureWarning, stacklevel=3)
rcond = -1
if rcond is None:
rcond = finfo(t).eps * max(n, m)
if m <= n:
gufunc = _umath_linalg.lstsq_m
else:
gufunc = _umath_linalg.lstsq_n
signature = 'DDd->Ddid' if isComplexType(t) else 'ddd->ddid'
extobj = get_linalg_error_extobj(_raise_linalgerror_lstsq)
if n_rhs == 0:
# lapack can't handle n_rhs = 0 - so allocate the array one larger in that axis
b = zeros(b.shape[:-2] + (m, n_rhs + 1), dtype=b.dtype)
x, resids, rank, s = gufunc(a, b, rcond, signature=signature, extobj=extobj)
if m == 0:
x[...] = 0
if n_rhs == 0:
# remove the item we added
x = x[..., :n_rhs]
resids = resids[..., :n_rhs]
# remove the axis we added
if is_1d:
x = x.squeeze(axis=-1)
# we probably should squeeze resids too, but we can't
# without breaking compatibility.
# as documented
if rank != n or m <= n:
resids = array([], result_real_t)
# coerce output arrays
s = s.astype(result_real_t, copy=False)
resids = resids.astype(result_real_t, copy=False)
x = x.astype(result_t, copy=True) # Copying lets the memory in r_parts be freed
return wrap(x), wrap(resids), rank, s
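# Illustrative sketch (not part of the NumPy API): the rcond handling above in
# practice -- passing rcond=None opts in to the new default and avoids the
# FutureWarning. `_demo_lstsq_rcond` is a hypothetical helper for exposition
# only.
def _demo_lstsq_rcond():
    import numpy as np
    A = np.vstack([np.arange(4.0), np.ones(4)]).T
    y = np.array([-1.0, 0.2, 0.9, 2.1])
    # rcond=None -> machine precision times max(M, N); no warning is emitted.
    (m, c), resids, rank, s = np.linalg.lstsq(A, y, rcond=None)
    assert rank == 2 and np.allclose([m, c], [1.0, -0.95])
    return m, c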
def _multi_svd_norm(x, row_axis, col_axis, op):
"""Compute a function of the singular values of the 2-D matrices in `x`.
This is a private utility function used by `numpy.linalg.norm()`.
Parameters
----------
x : ndarray
row_axis, col_axis : int
The axes of `x` that hold the 2-D matrices.
op : callable
        This should be either `numpy.amin`, `numpy.amax` or `numpy.sum`.
Returns
-------
result : float or ndarray
If `x` is 2-D, the return values is a float.
Otherwise, it is an array with ``x.ndim - 2`` dimensions.
The return values are either the minimum or maximum or sum of the
singular values of the matrices, depending on whether `op`
is `numpy.amin` or `numpy.amax` or `numpy.sum`.
"""
y = moveaxis(x, (row_axis, col_axis), (-2, -1))
    result = op(svd(y, compute_uv=False), axis=-1)
return result
def _norm_dispatcher(x, ord=None, axis=None, keepdims=None):
return (x,)
@array_function_dispatch(_norm_dispatcher)
def norm(x, ord=None, axis=None, keepdims=False):
"""
Matrix or vector norm.
This function is able to return one of eight different matrix norms,
or one of an infinite number of vector norms (described below), depending
on the value of the ``ord`` parameter.
Parameters
----------
x : array_like
Input array. If `axis` is None, `x` must be 1-D or 2-D.
ord : {non-zero int, inf, -inf, 'fro', 'nuc'}, optional
Order of the norm (see table under ``Notes``). inf means numpy's
`inf` object.
axis : {int, 2-tuple of ints, None}, optional
If `axis` is an integer, it specifies the axis of `x` along which to
compute the vector norms. If `axis` is a 2-tuple, it specifies the
axes that hold 2-D matrices, and the matrix norms of these matrices
are computed. If `axis` is None then either a vector norm (when `x`
is 1-D) or a matrix norm (when `x` is 2-D) is returned.
.. versionadded:: 1.8.0
keepdims : bool, optional
If this is set to True, the axes which are normed over are left in the
result as dimensions with size one. With this option the result will
broadcast correctly against the original `x`.
.. versionadded:: 1.10.0
Returns
-------
n : float or ndarray
Norm of the matrix or vector(s).
Notes
-----
For values of ``ord <= 0``, the result is, strictly speaking, not a
mathematical 'norm', but it may still be useful for various numerical
purposes.
The following norms can be calculated:
===== ============================ ==========================
ord norm for matrices norm for vectors
===== ============================ ==========================
None Frobenius norm 2-norm
'fro' Frobenius norm --
'nuc' nuclear norm --
inf max(sum(abs(x), axis=1)) max(abs(x))
-inf min(sum(abs(x), axis=1)) min(abs(x))
0 -- sum(x != 0)
1 max(sum(abs(x), axis=0)) as below
-1 min(sum(abs(x), axis=0)) as below
2 2-norm (largest sing. value) as below
-2 smallest singular value as below
other -- sum(abs(x)**ord)**(1./ord)
===== ============================ ==========================
The Frobenius norm is given by [1]_:
:math:`||A||_F = [\\sum_{i,j} abs(a_{i,j})^2]^{1/2}`
The nuclear norm is the sum of the singular values.
References
----------
.. [1] G. H. Golub and C. F. Van Loan, *Matrix Computations*,
Baltimore, MD, Johns Hopkins University Press, 1985, pg. 15
Examples
--------
>>> from numpy import linalg as LA
>>> a = np.arange(9) - 4
>>> a
array([-4, -3, -2, ..., 2, 3, 4])
>>> b = a.reshape((3, 3))
>>> b
array([[-4, -3, -2],
[-1, 0, 1],
[ 2, 3, 4]])
>>> LA.norm(a)
7.745966692414834
>>> LA.norm(b)
7.745966692414834
>>> LA.norm(b, 'fro')
7.745966692414834
>>> LA.norm(a, np.inf)
4.0
>>> LA.norm(b, np.inf)
9.0
>>> LA.norm(a, -np.inf)
0.0
>>> LA.norm(b, -np.inf)
2.0
>>> LA.norm(a, 1)
20.0
>>> LA.norm(b, 1)
7.0
>>> LA.norm(a, -1)
-4.6566128774142013e-010
>>> LA.norm(b, -1)
6.0
>>> LA.norm(a, 2)
7.745966692414834
>>> LA.norm(b, 2)
7.3484692283495345
>>> LA.norm(a, -2)
0.0
>>> LA.norm(b, -2)
1.8570331885190563e-016 # may vary
>>> LA.norm(a, 3)
5.8480354764257312 # may vary
>>> LA.norm(a, -3)
0.0
Using the `axis` argument to compute vector norms:
>>> c = np.array([[ 1, 2, 3],
... [-1, 1, 4]])
>>> LA.norm(c, axis=0)
array([ 1.41421356, 2.23606798, 5. ])
>>> LA.norm(c, axis=1)
array([ 3.74165739, 4.24264069])
>>> LA.norm(c, ord=1, axis=1)
array([ 6., 6.])
Using the `axis` argument to compute matrix norms:
>>> m = np.arange(8).reshape(2,2,2)
>>> LA.norm(m, axis=(1,2))
array([ 3.74165739, 11.22497216])
>>> LA.norm(m[0, :, :]), LA.norm(m[1, :, :])
(3.7416573867739413, 11.224972160321824)
"""
x = asarray(x)
if not issubclass(x.dtype.type, (inexact, object_)):
x = x.astype(float)
# Immediately handle some default, simple, fast, and common cases.
if axis is None:
ndim = x.ndim
if ((ord is None) or
(ord in ('f', 'fro') and ndim == 2) or
(ord == 2 and ndim == 1)):
x = x.ravel(order='K')
if isComplexType(x.dtype.type):
sqnorm = dot(x.real, x.real) + dot(x.imag, x.imag)
else:
sqnorm = dot(x, x)
ret = sqrt(sqnorm)
if keepdims:
ret = ret.reshape(ndim*[1])
return ret
# Normalize the `axis` argument to a tuple.
nd = x.ndim
if axis is None:
axis = tuple(range(nd))
elif not isinstance(axis, tuple):
try:
axis = int(axis)
except Exception:
raise TypeError("'axis' must be None, an integer or a tuple of integers")
axis = (axis,)
if len(axis) == 1:
if ord == Inf:
return abs(x).max(axis=axis, keepdims=keepdims)
elif ord == -Inf:
return abs(x).min(axis=axis, keepdims=keepdims)
elif ord == 0:
# Zero norm
return (x != 0).astype(x.real.dtype).sum(axis=axis, keepdims=keepdims)
elif ord == 1:
# special case for speedup
return add.reduce(abs(x), axis=axis, keepdims=keepdims)
elif ord is None or ord == 2:
# special case for speedup
s = (x.conj() * x).real
return sqrt(add.reduce(s, axis=axis, keepdims=keepdims))
else:
try:
ord + 1
except TypeError:
raise ValueError("Invalid norm order for vectors.")
absx = abs(x)
absx **= ord
ret = add.reduce(absx, axis=axis, keepdims=keepdims)
ret **= (1 / ord)
return ret
elif len(axis) == 2:
row_axis, col_axis = axis
row_axis = normalize_axis_index(row_axis, nd)
col_axis = normalize_axis_index(col_axis, nd)
if row_axis == col_axis:
raise ValueError('Duplicate axes given.')
if ord == 2:
ret = _multi_svd_norm(x, row_axis, col_axis, amax)
elif ord == -2:
ret = _multi_svd_norm(x, row_axis, col_axis, amin)
elif ord == 1:
if col_axis > row_axis:
col_axis -= 1
ret = add.reduce(abs(x), axis=row_axis).max(axis=col_axis)
elif ord == Inf:
if row_axis > col_axis:
row_axis -= 1
ret = add.reduce(abs(x), axis=col_axis).max(axis=row_axis)
elif ord == -1:
if col_axis > row_axis:
col_axis -= 1
ret = add.reduce(abs(x), axis=row_axis).min(axis=col_axis)
elif ord == -Inf:
if row_axis > col_axis:
row_axis -= 1
ret = add.reduce(abs(x), axis=col_axis).min(axis=row_axis)
elif ord in [None, 'fro', 'f']:
ret = sqrt(add.reduce((x.conj() * x).real, axis=axis))
elif ord == 'nuc':
ret = _multi_svd_norm(x, row_axis, col_axis, sum)
else:
raise ValueError("Invalid norm order for matrices.")
if keepdims:
ret_shape = list(x.shape)
ret_shape[axis[0]] = 1
ret_shape[axis[1]] = 1
ret = ret.reshape(ret_shape)
return ret
else:
raise ValueError("Improper number of dimensions to norm.")
# multi_dot
def _multidot_dispatcher(arrays):
return arrays
@array_function_dispatch(_multidot_dispatcher)
def multi_dot(arrays):
"""
Compute the dot product of two or more arrays in a single function call,
while automatically selecting the fastest evaluation order.
`multi_dot` chains `numpy.dot` and uses optimal parenthesization
of the matrices [1]_ [2]_. Depending on the shapes of the matrices,
this can speed up the multiplication a lot.
If the first argument is 1-D it is treated as a row vector.
If the last argument is 1-D it is treated as a column vector.
The other arguments must be 2-D.
Think of `multi_dot` as::
def multi_dot(arrays): return functools.reduce(np.dot, arrays)
Parameters
----------
arrays : sequence of array_like
If the first argument is 1-D it is treated as row vector.
If the last argument is 1-D it is treated as column vector.
The other arguments must be 2-D.
Returns
-------
output : ndarray
Returns the dot product of the supplied arrays.
See Also
--------
dot : dot multiplication with two arguments.
References
----------
.. [1] Cormen, "Introduction to Algorithms", Chapter 15.2, p. 370-378
.. [2] https://en.wikipedia.org/wiki/Matrix_chain_multiplication
Examples
--------
`multi_dot` allows you to write::
>>> from numpy.linalg import multi_dot
>>> # Prepare some data
>>> A = np.random.random((10000, 100))
>>> B = np.random.random((100, 1000))
>>> C = np.random.random((1000, 5))
>>> D = np.random.random((5, 333))
>>> # the actual dot multiplication
>>> _ = multi_dot([A, B, C, D])
instead of::
>>> _ = np.dot(np.dot(np.dot(A, B), C), D)
>>> # or
>>> _ = A.dot(B).dot(C).dot(D)
Notes
-----
The cost for a matrix multiplication can be calculated with the
following function::
def cost(A, B):
return A.shape[0] * A.shape[1] * B.shape[1]
Assume we have three matrices
:math:`A_{10x100}, B_{100x5}, C_{5x50}`.
The costs for the two different parenthesizations are as follows::
cost((AB)C) = 10*100*5 + 10*5*50 = 5000 + 2500 = 7500
cost(A(BC)) = 10*100*50 + 100*5*50 = 50000 + 25000 = 75000
"""
n = len(arrays)
# optimization only makes sense for len(arrays) > 2
if n < 2:
raise ValueError("Expecting at least two arrays.")
elif n == 2:
return dot(arrays[0], arrays[1])
arrays = [asanyarray(a) for a in arrays]
# save original ndim to reshape the result array into the proper form later
ndim_first, ndim_last = arrays[0].ndim, arrays[-1].ndim
# Explicitly convert vectors to 2D arrays to keep the logic of the internal
# _multi_dot_* functions as simple as possible.
if arrays[0].ndim == 1:
arrays[0] = atleast_2d(arrays[0])
if arrays[-1].ndim == 1:
arrays[-1] = atleast_2d(arrays[-1]).T
_assertRank2(*arrays)
# _multi_dot_three is much faster than _multi_dot_matrix_chain_order
if n == 3:
result = _multi_dot_three(arrays[0], arrays[1], arrays[2])
else:
order = _multi_dot_matrix_chain_order(arrays)
result = _multi_dot(arrays, order, 0, n - 1)
# return proper shape
if ndim_first == 1 and ndim_last == 1:
return result[0, 0] # scalar
elif ndim_first == 1 or ndim_last == 1:
return result.ravel() # 1-D
else:
return result
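# Illustrative sketch (not part of the NumPy API): recompute the
# parenthesization costs quoted in the multi_dot Notes above for
# A (10x100), B (100x5), C (5x50). `_demo_chain_cost` is a hypothetical
# helper added here for exposition only.
def _demo_chain_cost():
    def cost(p, q, r):
        # multiplying a (p, q) matrix by a (q, r) matrix costs p*q*r
        return p * q * r
    cost_AB_C = cost(10, 100, 5) + cost(10, 5, 50)    # 5000 + 2500
    cost_A_BC = cost(100, 5, 50) + cost(10, 100, 50)  # 25000 + 50000
    assert (cost_AB_C, cost_A_BC) == (7500, 75000)
    return cost_AB_C, cost_A_BC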
def _multi_dot_three(A, B, C):
"""
Find the best order for three arrays and do the multiplication.
For three arguments `_multi_dot_three` is approximately 15 times faster
than `_multi_dot_matrix_chain_order`
"""
a0, a1b0 = A.shape
b1c0, c1 = C.shape
# cost1 = cost((AB)C) = a0*a1b0*b1c0 + a0*b1c0*c1
cost1 = a0 * b1c0 * (a1b0 + c1)
# cost2 = cost(A(BC)) = a1b0*b1c0*c1 + a0*a1b0*c1
cost2 = a1b0 * c1 * (a0 + b1c0)
if cost1 < cost2:
return dot(dot(A, B), C)
else:
return dot(A, dot(B, C))
def _multi_dot_matrix_chain_order(arrays, return_costs=False):
"""
    Return a np.array that encodes the optimal order of multiplications.
The optimal order array is then used by `_multi_dot()` to do the
multiplication.
Also return the cost matrix if `return_costs` is `True`
The implementation CLOSELY follows Cormen, "Introduction to Algorithms",
Chapter 15.2, p. 370-378. Note that Cormen uses 1-based indices.
cost[i, j] = min([
cost[prefix] + cost[suffix] + cost_mult(prefix, suffix)
for k in range(i, j)])
"""
n = len(arrays)
# p stores the dimensions of the matrices
# Example for p: A_{10x100}, B_{100x5}, C_{5x50} --> p = [10, 100, 5, 50]
p = [a.shape[0] for a in arrays] + [arrays[-1].shape[1]]
# m is a matrix of costs of the subproblems
# m[i,j]: min number of scalar multiplications needed to compute A_{i..j}
m = zeros((n, n), dtype=double)
# s is the actual ordering
# s[i, j] is the value of k at which we split the product A_i..A_j
s = empty((n, n), dtype=intp)
for l in range(1, n):
for i in range(n - l):
j = i + l
m[i, j] = Inf
for k in range(i, j):
q = m[i, k] + m[k+1, j] + p[i]*p[k+1]*p[j+1]
if q < m[i, j]:
m[i, j] = q
s[i, j] = k # Note that Cormen uses 1-based index
return (s, m) if return_costs else s
def _multi_dot(arrays, order, i, j):
"""Actually do the multiplication with the given order."""
if i == j:
return arrays[i]
else:
return dot(_multi_dot(arrays, order, i, order[i, j]),
_multi_dot(arrays, order, order[i, j] + 1, j))
| apache-2.0 |
TiKeil/Master-thesis-LOD | python_files/generate_figures/7.2_Perturbations.py | 1 | 3613 | # This file is part of the master thesis "Variational crimes in the Localized orthogonal decomposition method":
# https://github.com/TiKeil/Masterthesis-LOD.git
# Copyright holder: Tim Keil
# License: BSD 2-Clause License (http://opensource.org/licenses/BSD-2-Clause)
import numpy as np
import buildcoef2d
import matplotlib.pyplot as plt
from visualize import drawCoefficient, ExtradrawCoefficient
bg = 0.05  # background
val = 1    # values
NWorldFine = np.array([42, 42])
CoefClass = buildcoef2d.Coefficient2d(NWorldFine,
bg = bg, # background
val = val, # values
length = 2, # length
thick = 2, # thickness
space = 2, # space between values
probfactor = 1, # probability of an value
right = 1, # shape 1
down = 0, # shape 2
diagr1 = 0, # shape 3
diagr2 = 0, # shape 4
diagl1 = 0, # shape 5
diagl2 = 0, # shape 6
LenSwitch = None, # various length
thickSwitch = None, # various thickness
ChannelHorizontal = None, # horizontal Channels
ChannelVertical = None, # vertical Channels
BoundarySpace = True # additional space on the boundary
)
A = CoefClass.BuildCoefficient() # coefficient in a numpy array
numbers = [13, 20, 27, 44, 73]  # entries that will be changed
# Change in Value
B = CoefClass.SpecificValueChange( Number = numbers,
ratio = -0.4,
randomvalue = None,
negative = None,
ShapeRestriction = True,
ShapeWave = None,
probfactor = 1,
Original = True)
# Disappearance
C = CoefClass.SpecificVanish( Number = numbers,
PartlyVanish = None,
probfactor = 1,
Original = True)
# Shift
D = CoefClass.SpecificMove( Number = numbers,
steps = 1,
randomstep = None,
randomDirection = None,
Right = 1,
BottomRight = 1,
Bottom = 1,
BottomLeft = 1,
Left = 1,
TopLeft = 1,
Top = 1,
TopRight = 1,
Original = True)
A = A.flatten()
B = B.flatten()
C = C.flatten()
D = D.flatten()
plt.figure("original")
drawCoefficient(NWorldFine, A)
plt.figure("1")
drawCoefficient(NWorldFine, B)
plt.figure("2")
drawCoefficient(NWorldFine, C)
plt.figure("3")
drawCoefficient(NWorldFine, D)
plt.figure('all')
ExtradrawCoefficient(NWorldFine, A, B, C, D)
plt.show() | apache-2.0 |
hobson/pug-invest | setup.py | 1 | 4803 | # setup.py for PUG (PDX Python User Group) package
# the parent name (perhaps a namespace package) you'd import
__namespace_package__ = 'pug'
# the subpackage that this installer is providing that you'd import like __import__(__namespace_package__ + '.' + '__subpackage__')
__subpackage__ = 'invest'
# the name as it will appear in the pypi cheeseshop repository, not the name you'd use to import it
project_name = '{}'.format(__namespace_package__) + ('-' + __subpackage__ if __subpackage__ else '')
package_name = '{}'.format(__namespace_package__) + ('.' + __subpackage__ if __subpackage__ else '')
from setuptools import find_packages, setup
import os
# # If you want tests to work with django settings.py you need django-setuptest
# from setuptest import test
# # If you want to use setuptest.test instead of the python test,
# # you need to say so in your setup(kwargs) below, like this:
# # setup(cmdclass={'test': test},...
print('Installing package named {} from the {} project, a sub-package/project of the namespace package {}. . .'.format(package_name, project_name, __namespace_package__))
global_env, env = {}, {}
package_info_path = os.path.join(__subpackage__, 'package_info.py')
if __namespace_package__:
package_info_path = os.path.join(__namespace_package__, package_info_path)
# FIXME: import this by path instead of executing it
with open(package_info_path) as f:
    exec(f.read(), global_env, env)
print('Found package info in {}: {}'.format(package_info_path, env))
version = env.get('__version__', '0.0.1')
package_docstring = env.get('__doc__', '`{}` python package'.format(project_name))
description = "Investment (time series) quantitative analysis utilities. A subpackage of the pug namespace package."
long_description = package_docstring
__url__ = env.get('__url__', 'http://github.com/hobson/')
__authors__ = env.get('__authors__', ('Hobson <hobson@totalgood.com>',))
try:
    long_description = open('README.rst', 'r').read()
except (IOError, OSError):
    print('WARNING: Unable to find or read README.rst.')
dependency_links = [] # ['http://github.com/hobson/pug-nlp/tarball/master#egg=pug-nlp-master']
EXCLUDE_FROM_PACKAGES = []
print('Installing package named {} from the {} project. . .'.format(package_name, project_name))
packages = list(set([package_name] + list(find_packages(exclude=EXCLUDE_FROM_PACKAGES))))
print('Packages being installed: {}'.format(packages))
# sudo yum install libjpeg-devel openjpeg-devel
install_requires = [
'wsgiref>=0.1.2',
'six>=1.9.0',
'progressbar2>=2.7.3',
'pyzmq>=14.5.0',
'Unidecode>=0.4.16',
'cffi>=0.9.2',
'chardet>=2.3.0',
'pyOpenSSL>=0.14',
'pytz>=2015.2',
'python-dateutil>=2.4.1',
'pandas>=0.15.2',
'xlrd>=0.9.3',
'Pillow>=2.7',
'fuzzywuzzy>=0.5.0',
'python-Levenshtein>=0.12.0',
'python-slugify>=0.1.0',
'seaborn>=0.5.1',
'matplotlib>=1.4.3',
'numpy>=1.9.2',
'scipy>=0.15.1',
'ConcurrentPandas>=0.1.0',
'pug-nlp>=0.0.19',
]
print('install_requires: {}'.format(install_requires))
setup(
name=project_name,
packages=packages,
namespace_packages=[__namespace_package__],
# install non-.py files listed in MANIFEST.in (.js, .html, .txt, .md, etc)
include_package_data=True,
install_requires=install_requires,
dependency_links=dependency_links,
# scripts=['pug/bin/install_requirements'],
# entry_points={'console_scripts': [
# 'test-ann = pug.tests:main',
# ]},
version=version,
description=description,
long_description=long_description,
author=', '.join(__authors__),
author_email=__authors__[0].split('<')[1].strip().strip('>'),
#tests_require=['django-setuptest', 'south'],
#test_suite='setuptest.setuptest.SetupTestSuite',
#cmdclass={'test': test},
# this would install the master branch from github
# url=__url__,
# Force setup.py to use the latest github master source files rather than the cheeseshop tarball:
download_url = "{}/tarball/master".format(__url__),
keywords = ["finance", "quant", "quantitative finance", "time series", "analysis", "data", "science", "data science", "math", "machine-learning", "statistics"],
classifiers = [
"Programming Language :: Python",
"Programming Language :: Python :: 2.7",
"Development Status :: 3 - Alpha",
"Environment :: Other Environment",
"Environment :: Console",
"Intended Audience :: Developers",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
"Topic :: Software Development :: Libraries :: Python Modules",
"Topic :: Scientific/Engineering :: Artificial Intelligence",
"Topic :: Scientific/Engineering :: Mathematics",
],
) | mit |
gandalfcode/gandalf | tests/dust_tests/plot_dustywave.py | 1 | 2178 | #==============================================================================
# plot_dustywave.py
# Plot the gas and dust velocity for the final snapshot in the DUSTWAVE
# problem.
#
# This file is part of GANDALF :
# Graphical Astrophysics code for N-body Dynamics And Lagrangian Fluids
# https://github.com/gandalfcode/gandalf
# Contact : gandalfcode@gmail.com
#
# Copyright (C) 2013 D. A. Hubber, G. Rosotti
#
# GANDALF is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# GANDALF is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License (http://www.gnu.org/licenses) for more details.
#==============================================================================
from gandalf.analysis.facade import *
from dustywave_sol import DustyWave
import numpy as np
import matplotlib.pyplot as plt
import sys
if __name__ == "__main__":
# Load the sim
if len(sys.argv) > 1:
sim = loadsim(sys.argv[1])
else:
sim = loadsim('DUSTYWAVE')
s = snap(-1)
# Set up the analytical solution
sol = DustyWave(sim, s.t)
# Load the data
x_gas = get_data('x', type='sph')
vx_gas = get_data('vx', type='sph')
x_dust = get_data('x', type='dust')
vx_dust = get_data('vx', type='dust')
Ng, Nd = len(x_gas), len(x_dust)
    errnorm_gas = np.linalg.norm(sol.v_gas(x_gas) - vx_gas, ord=1) / Ng
    errnorm_dust = np.linalg.norm(sol.v_dust(x_dust) - vx_dust, ord=1) / Nd
    print('Error norms (gas, dust): {} {}'.format(errnorm_gas, errnorm_dust))
# Plot the numerical and analytical solutions
plt.plot(x_gas, vx_gas, 'k.')
x_gas.sort()
plt.plot(x_gas, sol.v_gas(x_gas), 'k' )
plt.plot(x_dust, vx_dust, 'r.')
x_dust.sort()
plt.plot(x_dust, sol.v_dust(x_dust), 'r' )
plt.xlabel('x')
plt.ylabel('v_x')
plt.show()
| gpl-2.0 |
liangz0707/scikit-learn | sklearn/utils/metaestimators.py | 283 | 2353 | """Utilities for meta-estimators"""
# Author: Joel Nothman
# Andreas Mueller
# License: BSD
from operator import attrgetter
from functools import update_wrapper
__all__ = ['if_delegate_has_method']
class _IffHasAttrDescriptor(object):
"""Implements a conditional property using the descriptor protocol.
Using this class to create a decorator will raise an ``AttributeError``
if the ``attribute_name`` is not present on the base object.
This allows ducktyping of the decorated method based on ``attribute_name``.
See https://docs.python.org/3/howto/descriptor.html for an explanation of
descriptors.
"""
def __init__(self, fn, attribute_name):
self.fn = fn
self.get_attribute = attrgetter(attribute_name)
# update the docstring of the descriptor
update_wrapper(self, fn)
def __get__(self, obj, type=None):
# raise an AttributeError if the attribute is not present on the object
if obj is not None:
# delegate only on instances, not the classes.
# this is to allow access to the docstrings.
self.get_attribute(obj)
# lambda, but not partial, allows help() to work with update_wrapper
out = lambda *args, **kwargs: self.fn(obj, *args, **kwargs)
# update the docstring of the returned function
update_wrapper(out, self.fn)
return out
def if_delegate_has_method(delegate):
"""Create a decorator for methods that are delegated to a sub-estimator
This enables ducktyping by hasattr returning True according to the
sub-estimator.
>>> from sklearn.utils.metaestimators import if_delegate_has_method
>>>
>>>
>>> class MetaEst(object):
... def __init__(self, sub_est):
... self.sub_est = sub_est
...
... @if_delegate_has_method(delegate='sub_est')
... def predict(self, X):
... return self.sub_est.predict(X)
...
>>> class HasPredict(object):
... def predict(self, X):
... return X.sum(axis=1)
...
>>> class HasNoPredict(object):
... pass
...
>>> hasattr(MetaEst(HasPredict()), 'predict')
True
>>> hasattr(MetaEst(HasNoPredict()), 'predict')
False
"""
return lambda fn: _IffHasAttrDescriptor(fn, '%s.%s' % (delegate, fn.__name__))
| bsd-3-clause |
ahnqirage/spark | python/pyspark/serializers.py | 4 | 26207 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
PySpark supports custom serializers for transferring data; this can improve
performance.
By default, PySpark uses L{PickleSerializer} to serialize objects using Python's
C{cPickle} serializer, which can serialize nearly any Python object.
Other serializers, like L{MarshalSerializer}, support fewer datatypes but can be
faster.
The serializer is chosen when creating L{SparkContext}:
>>> from pyspark.context import SparkContext
>>> from pyspark.serializers import MarshalSerializer
>>> sc = SparkContext('local', 'test', serializer=MarshalSerializer())
>>> sc.parallelize(list(range(1000))).map(lambda x: 2 * x).take(10)
[0, 2, 4, 6, 8, 10, 12, 14, 16, 18]
>>> sc.stop()
PySpark serializes objects in batches; by default, the batch size is chosen based
on the size of objects and is also configurable by SparkContext's C{batchSize}
parameter:
>>> sc = SparkContext('local', 'test', batchSize=2)
>>> rdd = sc.parallelize(range(16), 4).map(lambda x: x)
Behind the scenes, this creates a JavaRDD with four partitions, each of
which contains two batches of two objects:
>>> rdd.glom().collect()
[[0, 1, 2, 3], [4, 5, 6, 7], [8, 9, 10, 11], [12, 13, 14, 15]]
>>> int(rdd._jrdd.count())
8
>>> sc.stop()
"""
import sys
from itertools import chain, product
import marshal
import struct
import types
import collections
import zlib
import itertools
if sys.version < '3':
import cPickle as pickle
protocol = 2
from itertools import izip as zip, imap as map
else:
import pickle
protocol = 3
xrange = range
from pyspark import cloudpickle
from pyspark.util import _exception_message
__all__ = ["PickleSerializer", "MarshalSerializer", "UTF8Deserializer"]
class SpecialLengths(object):
END_OF_DATA_SECTION = -1
PYTHON_EXCEPTION_THROWN = -2
TIMING_DATA = -3
END_OF_STREAM = -4
NULL = -5
START_ARROW_STREAM = -6
class Serializer(object):
def dump_stream(self, iterator, stream):
"""
Serialize an iterator of objects to the output stream.
"""
raise NotImplementedError
def load_stream(self, stream):
"""
Return an iterator of deserialized objects from the input stream.
"""
raise NotImplementedError
def _load_stream_without_unbatching(self, stream):
"""
Return an iterator of deserialized batches (iterable) of objects from the input stream.
If the serializer does not operate on batches the default implementation returns an
iterator of single element lists.
"""
return map(lambda x: [x], self.load_stream(stream))
# Note: our notion of "equality" is that output generated by
# equal serializers can be deserialized using the same serializer.
# This default implementation handles the simple cases;
# subclasses should override __eq__ as appropriate.
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not self.__eq__(other)
def __repr__(self):
return "%s()" % self.__class__.__name__
def __hash__(self):
return hash(str(self))
class FramedSerializer(Serializer):
"""
Serializer that writes objects as a stream of (length, data) pairs,
where C{length} is a 32-bit integer and data is C{length} bytes.
"""
def __init__(self):
# On Python 2.6, we can't write bytearrays to streams, so we need to convert them
# to strings first. Check if the version number is that old.
self._only_write_strings = sys.version_info[0:2] <= (2, 6)
def dump_stream(self, iterator, stream):
for obj in iterator:
self._write_with_length(obj, stream)
def load_stream(self, stream):
while True:
try:
yield self._read_with_length(stream)
except EOFError:
return
def _write_with_length(self, obj, stream):
serialized = self.dumps(obj)
if serialized is None:
raise ValueError("serialized value should not be None")
if len(serialized) > (1 << 31):
raise ValueError("can not serialize object larger than 2G")
write_int(len(serialized), stream)
if self._only_write_strings:
stream.write(str(serialized))
else:
stream.write(serialized)
def _read_with_length(self, stream):
length = read_int(stream)
if length == SpecialLengths.END_OF_DATA_SECTION:
raise EOFError
elif length == SpecialLengths.NULL:
return None
obj = stream.read(length)
if len(obj) < length:
raise EOFError
return self.loads(obj)
def dumps(self, obj):
"""
Serialize an object into a byte array.
When batching is used, this will be called with an array of objects.
"""
raise NotImplementedError
def loads(self, obj):
"""
Deserialize an object from a byte array.
"""
raise NotImplementedError
class ArrowStreamSerializer(Serializer):
"""
Serializes Arrow record batches as a stream.
"""
def dump_stream(self, iterator, stream):
import pyarrow as pa
writer = None
try:
for batch in iterator:
if writer is None:
writer = pa.RecordBatchStreamWriter(stream, batch.schema)
writer.write_batch(batch)
finally:
if writer is not None:
writer.close()
def load_stream(self, stream):
import pyarrow as pa
reader = pa.open_stream(stream)
for batch in reader:
yield batch
def __repr__(self):
return "ArrowStreamSerializer"
def _create_batch(series, timezone):
"""
Create an Arrow record batch from the given pandas.Series or list of Series, with optional type.
:param series: A single pandas.Series, list of Series, or list of (series, arrow_type)
:param timezone: A timezone to respect when handling timestamp values
:return: Arrow RecordBatch
"""
import decimal
from distutils.version import LooseVersion
import pyarrow as pa
from pyspark.sql.types import _check_series_convert_timestamps_internal
# Make input conform to [(series1, type1), (series2, type2), ...]
if not isinstance(series, (list, tuple)) or \
(len(series) == 2 and isinstance(series[1], pa.DataType)):
series = [series]
series = ((s, None) if not isinstance(s, (list, tuple)) else s for s in series)
def create_array(s, t):
mask = s.isnull()
# Ensure timestamp series are in expected form for Spark internal representation
# TODO: maybe don't need None check anymore as of Arrow 0.9.1
if t is not None and pa.types.is_timestamp(t):
s = _check_series_convert_timestamps_internal(s.fillna(0), timezone)
# TODO: need cast after Arrow conversion, ns values cause error with pandas 0.19.2
return pa.Array.from_pandas(s, mask=mask).cast(t, safe=False)
elif t is not None and pa.types.is_string(t) and sys.version < '3':
# TODO: need decode before converting to Arrow in Python 2
# TODO: don't need as of Arrow 0.9.1
return pa.Array.from_pandas(s.apply(
lambda v: v.decode("utf-8") if isinstance(v, str) else v), mask=mask, type=t)
elif t is not None and pa.types.is_decimal(t) and \
LooseVersion("0.9.0") <= LooseVersion(pa.__version__) < LooseVersion("0.10.0"):
# TODO: see ARROW-2432. Remove when the minimum PyArrow version becomes 0.10.0.
return pa.Array.from_pandas(s.apply(
lambda v: decimal.Decimal('NaN') if v is None else v), mask=mask, type=t)
return pa.Array.from_pandas(s, mask=mask, type=t)
arrs = [create_array(s, t) for s, t in series]
return pa.RecordBatch.from_arrays(arrs, ["_%d" % i for i in xrange(len(arrs))])
class ArrowStreamPandasSerializer(Serializer):
"""
Serializes Pandas.Series as Arrow data with Arrow streaming format.
"""
def __init__(self, timezone):
super(ArrowStreamPandasSerializer, self).__init__()
self._timezone = timezone
def arrow_to_pandas(self, arrow_column):
from pyspark.sql.types import from_arrow_type, \
_check_series_convert_date, _check_series_localize_timestamps
s = arrow_column.to_pandas()
s = _check_series_convert_date(s, from_arrow_type(arrow_column.type))
s = _check_series_localize_timestamps(s, self._timezone)
return s
def dump_stream(self, iterator, stream):
"""
Make ArrowRecordBatches from Pandas Series and serialize. Input is a single series or
a list of series accompanied by an optional pyarrow type to coerce the data to.
"""
import pyarrow as pa
writer = None
try:
for series in iterator:
batch = _create_batch(series, self._timezone)
if writer is None:
write_int(SpecialLengths.START_ARROW_STREAM, stream)
writer = pa.RecordBatchStreamWriter(stream, batch.schema)
writer.write_batch(batch)
finally:
if writer is not None:
writer.close()
def load_stream(self, stream):
"""
Deserialize ArrowRecordBatches to an Arrow table and return as a list of pandas.Series.
"""
import pyarrow as pa
reader = pa.open_stream(stream)
for batch in reader:
yield [self.arrow_to_pandas(c) for c in pa.Table.from_batches([batch]).itercolumns()]
def __repr__(self):
return "ArrowStreamPandasSerializer"
class BatchedSerializer(Serializer):
"""
Serializes a stream of objects in batches by calling its wrapped
Serializer with streams of objects.
"""
UNLIMITED_BATCH_SIZE = -1
UNKNOWN_BATCH_SIZE = 0
def __init__(self, serializer, batchSize=UNLIMITED_BATCH_SIZE):
self.serializer = serializer
self.batchSize = batchSize
def _batched(self, iterator):
if self.batchSize == self.UNLIMITED_BATCH_SIZE:
yield list(iterator)
elif hasattr(iterator, "__len__") and hasattr(iterator, "__getslice__"):
n = len(iterator)
for i in xrange(0, n, self.batchSize):
yield iterator[i: i + self.batchSize]
else:
items = []
count = 0
for item in iterator:
items.append(item)
count += 1
if count == self.batchSize:
yield items
items = []
count = 0
if items:
yield items
def dump_stream(self, iterator, stream):
self.serializer.dump_stream(self._batched(iterator), stream)
def load_stream(self, stream):
return chain.from_iterable(self._load_stream_without_unbatching(stream))
def _load_stream_without_unbatching(self, stream):
return self.serializer.load_stream(stream)
def __repr__(self):
return "BatchedSerializer(%s, %d)" % (str(self.serializer), self.batchSize)
class FlattenedValuesSerializer(BatchedSerializer):
"""
Serializes a stream of list of pairs, split the list of values
which contain more than a certain number of objects to make them
have similar sizes.
"""
def __init__(self, serializer, batchSize=10):
BatchedSerializer.__init__(self, serializer, batchSize)
def _batched(self, iterator):
n = self.batchSize
for key, values in iterator:
for i in range(0, len(values), n):
yield key, values[i:i + n]
def load_stream(self, stream):
return self.serializer.load_stream(stream)
def __repr__(self):
return "FlattenedValuesSerializer(%s, %d)" % (self.serializer, self.batchSize)
class AutoBatchedSerializer(BatchedSerializer):
"""
Choose the size of batch automatically based on the size of object
"""
def __init__(self, serializer, bestSize=1 << 16):
BatchedSerializer.__init__(self, serializer, self.UNKNOWN_BATCH_SIZE)
self.bestSize = bestSize
def dump_stream(self, iterator, stream):
batch, best = 1, self.bestSize
iterator = iter(iterator)
while True:
vs = list(itertools.islice(iterator, batch))
if not vs:
break
bytes = self.serializer.dumps(vs)
write_int(len(bytes), stream)
stream.write(bytes)
size = len(bytes)
if size < best:
batch *= 2
elif size > best * 10 and batch > 1:
batch //= 2
def __repr__(self):
return "AutoBatchedSerializer(%s)" % self.serializer
class CartesianDeserializer(Serializer):
"""
Deserializes the JavaRDD cartesian() of two PythonRDDs.
Due to pyspark batching we cannot simply use the result of the Java RDD cartesian,
we additionally need to do the cartesian within each pair of batches.
"""
def __init__(self, key_ser, val_ser):
self.key_ser = key_ser
self.val_ser = val_ser
def _load_stream_without_unbatching(self, stream):
key_batch_stream = self.key_ser._load_stream_without_unbatching(stream)
val_batch_stream = self.val_ser._load_stream_without_unbatching(stream)
for (key_batch, val_batch) in zip(key_batch_stream, val_batch_stream):
# for correctness with repeated cartesian/zip this must be returned as one batch
yield product(key_batch, val_batch)
def load_stream(self, stream):
return chain.from_iterable(self._load_stream_without_unbatching(stream))
def __repr__(self):
return "CartesianDeserializer(%s, %s)" % \
(str(self.key_ser), str(self.val_ser))
class PairDeserializer(Serializer):
"""
Deserializes the JavaRDD zip() of two PythonRDDs.
Due to pyspark batching we cannot simply use the result of the Java RDD zip,
we additionally need to do the zip within each pair of batches.
"""
def __init__(self, key_ser, val_ser):
self.key_ser = key_ser
self.val_ser = val_ser
def _load_stream_without_unbatching(self, stream):
key_batch_stream = self.key_ser._load_stream_without_unbatching(stream)
val_batch_stream = self.val_ser._load_stream_without_unbatching(stream)
for (key_batch, val_batch) in zip(key_batch_stream, val_batch_stream):
# For double-zipped RDDs, the batches can be iterators from other PairDeserializer,
# instead of lists. We need to convert them to lists if needed.
key_batch = key_batch if hasattr(key_batch, '__len__') else list(key_batch)
val_batch = val_batch if hasattr(val_batch, '__len__') else list(val_batch)
if len(key_batch) != len(val_batch):
raise ValueError("Can not deserialize PairRDD with different number of items"
" in batches: (%d, %d)" % (len(key_batch), len(val_batch)))
# for correctness with repeated cartesian/zip this must be returned as one batch
yield zip(key_batch, val_batch)
def load_stream(self, stream):
return chain.from_iterable(self._load_stream_without_unbatching(stream))
def __repr__(self):
return "PairDeserializer(%s, %s)" % (str(self.key_ser), str(self.val_ser))
class NoOpSerializer(FramedSerializer):
def loads(self, obj):
return obj
def dumps(self, obj):
return obj
# Hack namedtuple, make it picklable
__cls = {}
def _restore(name, fields, value):
""" Restore an object of namedtuple"""
k = (name, fields)
cls = __cls.get(k)
if cls is None:
cls = collections.namedtuple(name, fields)
__cls[k] = cls
return cls(*value)
def _hack_namedtuple(cls):
""" Make class generated by namedtuple picklable """
name = cls.__name__
fields = cls._fields
def __reduce__(self):
return (_restore, (name, fields, tuple(self)))
cls.__reduce__ = __reduce__
cls._is_namedtuple_ = True
return cls
def _hijack_namedtuple():
""" Hack namedtuple() to make it picklable """
# hijack only one time
if hasattr(collections.namedtuple, "__hijack"):
return
global _old_namedtuple # or it will put in closure
global _old_namedtuple_kwdefaults # or it will put in closure too
def _copy_func(f):
return types.FunctionType(f.__code__, f.__globals__, f.__name__,
f.__defaults__, f.__closure__)
def _kwdefaults(f):
# __kwdefaults__ contains the default values of keyword-only arguments which are
# introduced from Python 3. The possible cases for __kwdefaults__ in namedtuple
# are as below:
#
# - Does not exist in Python 2.
# - Returns None in <= Python 3.5.x.
# - Returns a dictionary containing the default values to the keys from Python 3.6.x
# (See https://bugs.python.org/issue25628).
kargs = getattr(f, "__kwdefaults__", None)
if kargs is None:
return {}
else:
return kargs
_old_namedtuple = _copy_func(collections.namedtuple)
_old_namedtuple_kwdefaults = _kwdefaults(collections.namedtuple)
def namedtuple(*args, **kwargs):
for k, v in _old_namedtuple_kwdefaults.items():
kwargs[k] = kwargs.get(k, v)
cls = _old_namedtuple(*args, **kwargs)
return _hack_namedtuple(cls)
# replace namedtuple with the new one
collections.namedtuple.__globals__["_old_namedtuple_kwdefaults"] = _old_namedtuple_kwdefaults
collections.namedtuple.__globals__["_old_namedtuple"] = _old_namedtuple
collections.namedtuple.__globals__["_hack_namedtuple"] = _hack_namedtuple
collections.namedtuple.__code__ = namedtuple.__code__
collections.namedtuple.__hijack = 1
# hack the cls already generated by namedtuple.
# Those created in other modules can be pickled as normal,
# so only hack those in __main__ module
for n, o in sys.modules["__main__"].__dict__.items():
if (type(o) is type and o.__base__ is tuple
and hasattr(o, "_fields")
and "__reduce__" not in o.__dict__):
_hack_namedtuple(o) # hack inplace
_hijack_namedtuple()
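# Illustrative sketch (not part of the PySpark API): after _hijack_namedtuple()
# runs, namedtuples pickle through _restore, so the generated class does not
# need to be importable on the receiving side. `_demo_namedtuple_pickle` is a
# hypothetical helper for exposition only.
def _demo_namedtuple_pickle():
    Point = collections.namedtuple("Point", "x y")
    p = Point(1, 2)
    # __reduce__ now returns (_restore, (name, fields, values))
    q = pickle.loads(pickle.dumps(p, protocol))
    assert q == p and q._is_namedtuple_
    return q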
class PickleSerializer(FramedSerializer):
"""
Serializes objects using Python's pickle serializer:
http://docs.python.org/2/library/pickle.html
This serializer supports nearly any Python object, but may
not be as fast as more specialized serializers.
"""
def dumps(self, obj):
return pickle.dumps(obj, protocol)
if sys.version >= '3':
def loads(self, obj, encoding="bytes"):
return pickle.loads(obj, encoding=encoding)
else:
def loads(self, obj, encoding=None):
return pickle.loads(obj)
class CloudPickleSerializer(PickleSerializer):
def dumps(self, obj):
try:
return cloudpickle.dumps(obj, 2)
except pickle.PickleError:
raise
except Exception as e:
emsg = _exception_message(e)
if "'i' format requires" in emsg:
msg = "Object too large to serialize: %s" % emsg
else:
msg = "Could not serialize object: %s: %s" % (e.__class__.__name__, emsg)
cloudpickle.print_exec(sys.stderr)
raise pickle.PicklingError(msg)
class MarshalSerializer(FramedSerializer):
"""
Serializes objects using Python's Marshal serializer:
http://docs.python.org/2/library/marshal.html
This serializer is faster than PickleSerializer but supports fewer datatypes.
"""
def dumps(self, obj):
return marshal.dumps(obj)
def loads(self, obj):
return marshal.loads(obj)
class AutoSerializer(FramedSerializer):
"""
Choose marshal or pickle as serialization protocol automatically
"""
def __init__(self):
FramedSerializer.__init__(self)
self._type = None
def dumps(self, obj):
if self._type is not None:
return b'P' + pickle.dumps(obj, -1)
try:
return b'M' + marshal.dumps(obj)
except Exception:
self._type = b'P'
return b'P' + pickle.dumps(obj, -1)
def loads(self, obj):
_type = obj[0]
if _type == b'M':
return marshal.loads(obj[1:])
elif _type == b'P':
return pickle.loads(obj[1:])
else:
raise ValueError("invalid serialization type: %s" % _type)
class CompressedSerializer(FramedSerializer):
"""
Compress the serialized data
"""
def __init__(self, serializer):
FramedSerializer.__init__(self)
assert isinstance(serializer, FramedSerializer), "serializer must be a FramedSerializer"
self.serializer = serializer
def dumps(self, obj):
return zlib.compress(self.serializer.dumps(obj), 1)
def loads(self, obj):
return self.serializer.loads(zlib.decompress(obj))
def __repr__(self):
return "CompressedSerializer(%s)" % self.serializer
class UTF8Deserializer(Serializer):
"""
Deserializes streams written by String.getBytes.
"""
def __init__(self, use_unicode=True):
self.use_unicode = use_unicode
def loads(self, stream):
length = read_int(stream)
if length == SpecialLengths.END_OF_DATA_SECTION:
raise EOFError
elif length == SpecialLengths.NULL:
return None
s = stream.read(length)
return s.decode("utf-8") if self.use_unicode else s
def load_stream(self, stream):
try:
while True:
yield self.loads(stream)
except struct.error:
return
except EOFError:
return
def __repr__(self):
return "UTF8Deserializer(%s)" % self.use_unicode
def read_long(stream):
length = stream.read(8)
if not length:
raise EOFError
return struct.unpack("!q", length)[0]
def write_long(value, stream):
stream.write(struct.pack("!q", value))
def pack_long(value):
return struct.pack("!q", value)
def read_int(stream):
length = stream.read(4)
if not length:
raise EOFError
return struct.unpack("!i", length)[0]
def write_int(value, stream):
stream.write(struct.pack("!i", value))
def read_bool(stream):
length = stream.read(1)
if not length:
raise EOFError
return struct.unpack("!?", length)[0]
def write_with_length(obj, stream):
write_int(len(obj), stream)
stream.write(obj)
class ChunkedStream(object):
"""
    This is a file-like object that takes a stream of data, of unknown length, and breaks it into fixed
length frames. The intended use case is serializing large data and sending it immediately over
a socket -- we do not want to buffer the entire data before sending it, but the receiving end
needs to know whether or not there is more data coming.
It works by buffering the incoming data in some fixed-size chunks. If the buffer is full, it
first sends the buffer size, then the data. This repeats as long as there is more data to send.
When this is closed, it sends the length of whatever data is in the buffer, then that data, and
finally a "length" of -1 to indicate the stream has completed.
"""
def __init__(self, wrapped, buffer_size):
self.buffer_size = buffer_size
self.buffer = bytearray(buffer_size)
self.current_pos = 0
self.wrapped = wrapped
def write(self, bytes):
byte_pos = 0
byte_remaining = len(bytes)
while byte_remaining > 0:
new_pos = byte_remaining + self.current_pos
if new_pos < self.buffer_size:
# just put it in our buffer
self.buffer[self.current_pos:new_pos] = bytes[byte_pos:]
self.current_pos = new_pos
byte_remaining = 0
else:
# fill the buffer, send the length then the contents, and start filling again
space_left = self.buffer_size - self.current_pos
new_byte_pos = byte_pos + space_left
self.buffer[self.current_pos:self.buffer_size] = bytes[byte_pos:new_byte_pos]
write_int(self.buffer_size, self.wrapped)
self.wrapped.write(self.buffer)
byte_remaining -= space_left
byte_pos = new_byte_pos
self.current_pos = 0
def close(self):
# if there is anything left in the buffer, write it out first
if self.current_pos > 0:
write_int(self.current_pos, self.wrapped)
self.wrapped.write(self.buffer[:self.current_pos])
# -1 length indicates to the receiving end that we're done.
write_int(-1, self.wrapped)
self.wrapped.close()
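# Illustrative sketch (not part of the PySpark API): frame a payload with
# ChunkedStream and re-assemble it by reading length-prefixed frames until the
# -1 terminator. `_demo_chunked_stream` is a hypothetical helper for
# exposition only; _KeepOpenBytesIO merely keeps the buffer readable after
# ChunkedStream.close() closes its wrapped stream.
def _demo_chunked_stream():
    import io

    class _KeepOpenBytesIO(io.BytesIO):
        def close(self):
            pass  # ignore close so the buffer can be read back afterwards

    sink = _KeepOpenBytesIO()
    out = ChunkedStream(sink, buffer_size=8)
    out.write(b"spark-serializers-demo")  # spans several 8-byte frames
    out.close()                           # flushes the tail, then writes -1
    sink.seek(0)
    payload = b""
    while True:
        length = read_int(sink)
        if length == -1:                  # end-of-stream marker
            break
        payload += sink.read(length)
    assert payload == b"spark-serializers-demo"
    return payload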
if __name__ == '__main__':
import doctest
(failure_count, test_count) = doctest.testmod()
if failure_count:
sys.exit(-1)
| apache-2.0 |
uglyboxer/linear_neuron | net-p3/lib/python3.5/site-packages/sklearn/cross_decomposition/cca_.py | 23 | 3087 | from .pls_ import _PLS
__all__ = ['CCA']
class CCA(_PLS):
"""CCA Canonical Correlation Analysis.
CCA inherits from PLS with mode="B" and deflation_mode="canonical".
Parameters
----------
n_components : int, (default 2).
number of components to keep.
scale : boolean, (default True)
        Whether to scale the data.
max_iter : an integer, (default 500)
the maximum number of iterations of the NIPALS inner loop
tol : non-negative real, default 1e-06.
the tolerance used in the iterative algorithm
copy : boolean
Whether the deflation should be done on a copy. Leave the default value
of True unless you don't care about side effects.
Attributes
----------
x_weights_ : array, [p, n_components]
X block weights vectors.
y_weights_ : array, [q, n_components]
Y block weights vectors.
x_loadings_ : array, [p, n_components]
X block loadings vectors.
y_loadings_ : array, [q, n_components]
Y block loadings vectors.
x_scores_ : array, [n_samples, n_components]
X scores.
y_scores_ : array, [n_samples, n_components]
Y scores.
x_rotations_ : array, [p, n_components]
X block to latents rotations.
y_rotations_ : array, [q, n_components]
Y block to latents rotations.
n_iter_ : array-like
Number of iterations of the NIPALS inner loop for each
component.
Notes
-----
For each component k, find the weights u, v that maximizes
max corr(Xk u, Yk v), such that ``|u| = |v| = 1``
Note that it maximizes only the correlations between the scores.
The residual matrix of X (Xk+1) block is obtained by the deflation on the
current X score: x_score.
The residual matrix of Y (Yk+1) block is obtained by deflation on the
current Y score.
Examples
--------
>>> from sklearn.cross_decomposition import CCA
>>> X = [[0., 0., 1.], [1.,0.,0.], [2.,2.,2.], [3.,5.,4.]]
>>> Y = [[0.1, -0.2], [0.9, 1.1], [6.2, 5.9], [11.9, 12.3]]
>>> cca = CCA(n_components=1)
>>> cca.fit(X, Y)
... # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
CCA(copy=True, max_iter=500, n_components=1, scale=True, tol=1e-06)
>>> X_c, Y_c = cca.transform(X, Y)
References
----------
Jacob A. Wegelin. A survey of Partial Least Squares (PLS) methods, with
emphasis on the two-block case. Technical Report 371, Department of
Statistics, University of Washington, Seattle, 2000.
In french but still a reference:
Tenenhaus, M. (1998). La regression PLS: theorie et pratique. Paris:
Editions Technic.
See also
--------
PLSCanonical
PLSSVD
"""
def __init__(self, n_components=2, scale=True,
max_iter=500, tol=1e-06, copy=True):
_PLS.__init__(self, n_components=n_components, scale=scale,
deflation_mode="canonical", mode="B",
norm_y_weights=True, algorithm="nipals",
max_iter=max_iter, tol=tol, copy=copy)
| mit |
f3r/scikit-learn | examples/linear_model/plot_ols.py | 220 | 1940 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=========================================================
Linear Regression Example
=========================================================
This example uses only the first feature of the `diabetes` dataset, in
order to illustrate a two-dimensional plot of this regression technique. The
straight line can be seen in the plot, showing how linear regression attempts
to draw a straight line that will best minimize the residual sum of squares
between the observed responses in the dataset, and the responses predicted by
the linear approximation.
The coefficients, the residual sum of squares and the variance score are also
calculated.
"""
print(__doc__)
# Code source: Jaques Grobler
# License: BSD 3 clause
import matplotlib.pyplot as plt
import numpy as np
from sklearn import datasets, linear_model
# Load the diabetes dataset
diabetes = datasets.load_diabetes()
# Use only one feature
diabetes_X = diabetes.data[:, np.newaxis, 2]
# Split the data into training/testing sets
diabetes_X_train = diabetes_X[:-20]
diabetes_X_test = diabetes_X[-20:]
# Split the targets into training/testing sets
diabetes_y_train = diabetes.target[:-20]
diabetes_y_test = diabetes.target[-20:]
# Create linear regression object
regr = linear_model.LinearRegression()
# Train the model using the training sets
regr.fit(diabetes_X_train, diabetes_y_train)
# The coefficients
print('Coefficients: \n', regr.coef_)
# The mean squared error (the quantity printed below is a mean, not a sum)
print("Mean squared error: %.2f"
% np.mean((regr.predict(diabetes_X_test) - diabetes_y_test) ** 2))
# Explained variance score: 1 is perfect prediction
print('Variance score: %.2f' % regr.score(diabetes_X_test, diabetes_y_test))
# Plot outputs
plt.scatter(diabetes_X_test, diabetes_y_test, color='black')
plt.plot(diabetes_X_test, regr.predict(diabetes_X_test), color='blue',
linewidth=3)
plt.xticks(())
plt.yticks(())
plt.show()
| bsd-3-clause |
diorcety/intellij-community | python/helpers/pydev/pydev_ipython/qt_for_kernel.py | 67 | 2337 | """ Import Qt in a manner suitable for an IPython kernel.
This is the import used for the `gui=qt` or `matplotlib=qt` initialization.
Import Priority:
if Qt4 has been imported anywhere else:
use that
if matplotlib has been imported and doesn't support v2 (<= 1.0.1):
use PyQt4 @v1
Next, ask ETS' QT_API env variable
if QT_API not set:
ask matplotlib via rcParams['backend.qt4']
if it said PyQt:
use PyQt4 @v1
elif it said PySide:
use PySide
else: (matplotlib said nothing)
# this is the default path - nobody told us anything
try:
PyQt @v1
except:
fallback on PySide
else:
use PyQt @v2 or PySide, depending on QT_API
because ETS doesn't work with PyQt @v1.
"""
import os
import sys
from pydev_ipython.version import check_version
from pydev_ipython.qt_loaders import (load_qt, QT_API_PYSIDE,
QT_API_PYQT, QT_API_PYQT_DEFAULT,
loaded_api)
#Constraints placed on an imported matplotlib
def matplotlib_options(mpl):
if mpl is None:
return
mpqt = mpl.rcParams.get('backend.qt4', None)
if mpqt is None:
return None
if mpqt.lower() == 'pyside':
return [QT_API_PYSIDE]
elif mpqt.lower() == 'pyqt4':
return [QT_API_PYQT_DEFAULT]
raise ImportError("unhandled value for backend.qt4 from matplotlib: %r" %
mpqt)
def get_options():
"""Return a list of acceptable QT APIs, in decreasing order of
preference
"""
#already imported Qt somewhere. Use that
loaded = loaded_api()
if loaded is not None:
return [loaded]
mpl = sys.modules.get('matplotlib', None)
if mpl is not None and not check_version(mpl.__version__, '1.0.2'):
#1.0.1 only supports PyQt4 v1
return [QT_API_PYQT_DEFAULT]
if os.environ.get('QT_API', None) is None:
#no ETS variable. Ask mpl, then use either
return matplotlib_options(mpl) or [QT_API_PYQT_DEFAULT, QT_API_PYSIDE]
#ETS variable present. Will fallback to external.qt
return None
api_opts = get_options()
if api_opts is not None:
QtCore, QtGui, QtSvg, QT_API = load_qt(api_opts)
else: # use ETS variable
from pydev_ipython.qt import QtCore, QtGui, QtSvg, QT_API
| apache-2.0 |
fivejjs/pgmult | experiments/ctm.py | 1 | 15557 | """
Correlated topic model (CTM) test
"""
from __future__ import division
from __future__ import print_function
import os, re, gzip, time, operator, inspect, hashlib, random
import cPickle as pickle
from collections import namedtuple
from functools import wraps
import scipy.sparse
from scipy.sparse import csr_matrix, csc_matrix, lil_matrix
from scipy.misc import logsumexp
import numpy as np
np.random.seed(11223344)
import matplotlib.pyplot as plt
import brewer2mpl
colors = brewer2mpl.get_map("Set1", "Qualitative", 9).mpl_colors
colors = colors[:3] + [colors[4]]
from pybasicbayes.util.text import progprint_xrange
from pgmult.lda import StandardLDA, StickbreakingCorrelatedLDA, LogisticNormalCorrelatedLDA
from pgmult.utils import mkdir, ln_psi_to_pi, pi_to_psi
import pgmult.internals.ctm_wrapper as ctm_wrapper
all_categories = [
'comp.graphics', 'comp.os.ms-windows.misc', 'comp.sys.ibm.pc.hardware',
'comp.sys.mac.hardware', 'comp.windows.x', 'rec.autos', 'rec.motorcycles',
'rec.sport.baseball', 'rec.sport.hockey', 'sci.crypt', 'sci.electronics',
'sci.med', 'sci.space', 'misc.forsale', 'talk.politics.misc',
'talk.politics.guns', 'talk.politics.mideast', 'talk.religion.misc',
'alt.atheism', 'soc.religion.christian',
]
cachedir = 'result_cache'
##########
# util #
##########
def cached(func):
mkdir(cachedir)
cachebase = os.path.join(cachedir, func.__module__ + func.__name__)
def replace_arrays(v):
if isinstance(v, np.ndarray):
return hashlib.sha1(v).hexdigest()
if isinstance(v, scipy.sparse.csr.csr_matrix):
out = hashlib.sha1(v.data)
out.update(v.indices)
out.update(v.indptr)
return out.hexdigest()
return v
@wraps(func)
def wrapped(*args, **kwargs):
argdict = \
{k:replace_arrays(v) for k,v in
inspect.getcallargs(func,*args,**kwargs).iteritems()}
closurevals = \
[replace_arrays(cell.cell_contents) for cell in func.__closure__ or []]
key = str(hash(frozenset(argdict.items() + closurevals)))
cachefile = cachebase + '.' + key
if os.path.isfile(cachefile):
with gzip.open(cachefile, 'r') as infile:
value = pickle.load(infile)
return value
else:
value = func(*args,**kwargs)
with gzip.open(cachefile, 'w') as outfile:
pickle.dump(value, outfile, protocol=-1)
return value
return wrapped
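# --- illustrative sketch (not part of the original experiment) ---
# Intended use of @cached: the first call computes the value and pickles it
# under result_cache/, keyed on the arguments (ndarray and csr_matrix
# arguments are hashed by content); identical later calls reload from disk.
#
# @cached
# def expensive_fit(X, n_topics):          # hypothetical slow computation
#     return some_long_running_model(X, n_topics)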
#############
# loading #
#############
def load_newsgroup_data(V, cats, sort_data=True):
from sklearn.datasets import fetch_20newsgroups
print("Downloading newsgroups data...")
print('cats = %s' % cats)
newsgroups = fetch_20newsgroups(
subset="train", categories=cats, remove=('headers', 'footers', 'quotes'))
return get_sparse_repr(newsgroups.data, V, sort_data)
def load_ap_data(V, sort_data=True):
def fetch_ap():
from cStringIO import StringIO
from urllib2 import urlopen
import tarfile
print("Downloading AP data...")
response = urlopen('http://www.cs.princeton.edu/~blei/lda-c/ap.tgz')
tar = tarfile.open(fileobj=StringIO(response.read()))
return tar.extractfile('ap/ap.txt').read()
docs = re.findall(r'(?<=<TEXT> ).*?(?= </TEXT>)', fetch_ap().translate(None,'\n'))
return get_sparse_repr(docs, V, sort_data)
def sample_documents(counts, words, num=10):
for row in random.sample(list(counts), num):
for col in row.nonzero()[1]:
print('{} '.format(words[col]), end=' ')
print('\n')
def split_test_train(data, train_frac, test_frac, exclude_words_not_in_training=True):
'''
Split the sparse matrix 'data' into two sparse matrices, 'train_data' and
'test_data', such that data = train_data + test_data.
Each row in data corresponds to a document and each column corresponds to a
word in the vocabulary, so that data is shape (num_docs, vocab_size) and
data[i,j] = count of word j in document i.
The test data held out from the training data is a subset of words in a
subset of documents. That is, the data matrix is first split into two types
of row: training rows and testing rows. Training rows are used in training
without any of their data held out. For testing rows, the aim is to train on
a subset of the words in the document and test how well the remaining words
can be predicted. Therefore this function performs two splits: first to
split into training rows and testing rows, and second to split the testing
rows into training counts and held-out counts.
The 'train_frac' and 'test_frac' inputs control what fraction of rows are
marked as training rows and what fraction of words within test rows are held
out, respectively.
'''
def split_rows(mat, p):
def sparse_to_lilrows(mat):
out = mat.tolil()
return zip(out.rows, out.data)
def lilrows_to_csr(lilrows):
out = lil_matrix(mat.shape, dtype=np.uint32)
out.rows, out.data = zip(*lilrows)
return out.tocsr()
def split(lst, p):
def _split(lst, inds):
return [x if ind else ([], []) for x, ind in zip(lst, inds)]
inds = np.random.rand(len(lst)) < p
return _split(lst, ~inds), _split(lst, inds)
return map(lilrows_to_csr, split(sparse_to_lilrows(mat), p))
def split_multinomial(mat, p):
data2 = np.random.binomial(mat.data, p)
data1 = mat.data - data2
m1 = csr_matrix((data1, mat.indices, mat.indptr), mat.shape, dtype=np.uint32)
m2 = csr_matrix((data2, mat.indices, mat.indptr), mat.shape, dtype=np.uint32)
return m1, m2
# split out training rows and testing rows
test_data, train_data = split_rows(data, train_frac)
# within testing rows, hold out some fraction of the words
test_train, test_data = split_multinomial(test_data, test_frac)
# for the counts within testing rows that aren't held out, add them back to
# the training data
train_data += test_train
assert train_data.sum() + test_data.sum() == data.sum()
if exclude_words_not_in_training:
unseen = np.asarray((test_data.sum(0) > 0) & (train_data.sum(0) == 0)).ravel()
print('{} test words were never seen in training data'.format(unseen.sum()))
test_data.data[np.in1d(test_data.indices, np.where(unseen)[0])] = 0
assert np.sum((test_data.sum(0) > 0) & (train_data.sum(0) == 0)) == 0
train_data.eliminate_zeros()
test_data.eliminate_zeros()
return train_data, test_data
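# --- illustrative sketch (not part of the original experiment) ---
# Minimal use of split_test_train on a tiny synthetic count matrix; the
# function itself asserts that the two pieces sum back to the input counts.
def _demo_split_test_train():
    rng = np.random.RandomState(0)
    toy = csr_matrix(rng.poisson(1.0, size=(20, 30)).astype(np.uint32))
    train, test = split_test_train(toy, train_frac=0.8, test_frac=0.5)
    assert (train + test).sum() == toy.sum()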
def get_sparse_repr(docs, V, sort_data):
from sklearn.feature_extraction.text import CountVectorizer
vectorizer = CountVectorizer(stop_words="english", max_features=V)
default_preproc = vectorizer.build_preprocessor()
def preproc(s):
return re.sub(r' \d+ ', 'anumber ', default_preproc(s))
vectorizer.preprocessor = preproc
counts = vectorizer.fit_transform(docs).astype(np.uint32)
words = vectorizer.get_feature_names()
if sort_data:
counts, words = sort_vocab(counts, words)
assert is_column_sorted(counts)
print('loaded {} documents with a size {} vocabulary'.format(*counts.shape))
print('with {} words per document on average'.format(np.mean(counts.sum(1))))
print()
return counts, words
def sort_vocab(counts, words):
tots = counts.T.dot(np.ones(counts.shape[0]))
words = [words[idx] for idx in np.argsort(-tots)]
counts = sort_columns_by_counts(counts)
return counts, words
def sparse_from_blocks(blocks):
blocklen = lambda data_indices: data_indices[0].shape[0]
data, indices = map(np.concatenate, zip(*blocks))
indptr = np.concatenate(((0,), np.cumsum(map(blocklen, blocks))))
return data, indices, indptr
def sparse_to_blocks(mat):
data, indices, indptr = mat.data, mat.indices, mat.indptr
slices = map(slice, indptr[:-1], indptr[1:])
return [(data[sl], indices[sl]) for sl in slices]
def sort_columns_by_counts(mat):
count = lambda data_indices1: data_indices1[0].sum()
sorted_cols = sorted(sparse_to_blocks(mat.tocsc()), key=count, reverse=True)
return csc_matrix(sparse_from_blocks(sorted_cols), mat.shape).tocsr()
def is_column_sorted(mat):
a = np.asarray(mat.sum(0)).ravel()
return np.all(a == a[np.argsort(-a)])
#############
# fitting #
#############
Results = namedtuple(
"Results",
["loglikes", "predictive_lls", "perplexities", "samples", "timestamps"])
def fit_lnctm_em(train_data, test_data, T):
if ctm_wrapper.has_ctm_c:
print('Running CTM EM...')
return Results(*ctm_wrapper.fit_ctm_em(train_data, test_data, T))
def sampler_fitter(name, cls, method, initializer):
def fit(train_data, test_data, T, Niter, init_at_em, *args):
resample = operator.methodcaller(method)
def evaluate(model):
ll, pll, perp = \
model.log_likelihood(), model.log_likelihood(test_data), \
model.perplexity(test_data)
return ll, pll, perp
def sample(model):
tic = time.time()
resample(model)
timestep = time.time() - tic
return evaluate(model), timestep
print('Running %s...' % name)
model = cls(train_data, T, *args)
model = initializer(model) if init_at_em and initializer else model
init_val = evaluate(model)
vals, timesteps = zip(*[sample(model) for _ in progprint_xrange(Niter)])
lls, plls, perps = zip(*((init_val,) + vals))
timestamps = np.cumsum((0.,) + timesteps)
return Results(lls, plls, perps, model.copy_sample(), timestamps)
fit.__name__ = name
return fit
def make_ctm_initializer(get_psi):
def initializer(model):
T, V = model.T, model.V
model.beta = np.exp(np.loadtxt('ctm-out/000-log-beta.dat')
.reshape((-1,V))).T
lmbda = np.loadtxt('ctm-out/000-lambda.dat').reshape((-1,T))
nonempty_docs = np.asarray(model.data.sum(1) > 0).ravel()
model.psi[nonempty_docs] = get_psi(lmbda)
model.resample_z()
model.resample_theta_prior()
return model
return initializer
def lda_initializer(model):
T, V = model.T, model.V
model.beta = np.exp(np.loadtxt('ctm-out/000-log-beta.dat')
.reshape((-1,V))).T
lmbda = np.loadtxt('ctm-out/000-lambda.dat').reshape((-1,T))
nonempty_docs = np.asarray(model.data.sum(1) > 0).ravel()
model.theta[nonempty_docs] = ln_psi_to_pi(lmbda)
model.resample_z()
return model
fit_lda_gibbs = sampler_fitter(
'fit_lda_gibbs', StandardLDA, 'resample', lda_initializer)
fit_lda_collapsed = sampler_fitter(
'fit_lda_collapsed', StandardLDA, 'resample_collapsed', lda_initializer)
fit_lnctm_gibbs = sampler_fitter(
'fit_lnctm_gibbs', LogisticNormalCorrelatedLDA, 'resample',
make_ctm_initializer(lambda lmbda: lmbda))
fit_sbctm_gibbs = sampler_fitter(
'fit_sbctm_gibbs', StickbreakingCorrelatedLDA, 'resample',
make_ctm_initializer(lambda lmbda: pi_to_psi(ln_psi_to_pi(lmbda))))
########################
# inspecting results #
########################
def plot_sb_interpretable_results(sb_results, words):
nwords = 5
Sigma = sb_results[-1][-1]
T = Sigma.shape[0]
def get_topwords(topic):
return words[np.argsort(sb_results[-1][0][:,topic])[-nwords:]]
lim = np.abs(Sigma).max()
plt.imshow(np.kron(Sigma,np.ones((50,50))), extent=(0,T,T,0), vmin=-lim, vmax=lim,
cmap='RdBu')  # note: 'RdBl' is not a valid matplotlib colormap; RdBu is the red-blue diverging map
plt.colorbar()
for t in range(T):
print('Topic %d:' % t)
print(get_topwords(t))
print()
def print_topics(std_results, std_collapsed_results, sb_results, ln_results, words, T):
words = np.array(words)
nwords = 5
def get_topwords(result, topic):
return words[np.argsort(result.samples[-1][0][:,topic])[-nwords:]]
names = ['Standard LDA', 'Collapsed LDA', 'LN Correlated LDA', 'SB Correlated LDA']
results = [std_results, std_collapsed_results, sb_results, ln_results]
for name, result in zip(names, results):
print('Top words for %s' % name)
for t in xrange(T):
print('Topic {}: {}'.format(t, ' '.join(get_topwords(result, t))))
print()
def plot_figure_legend():
"""
Make a standalone legend
:return:
"""
from hips.plotting.layout import create_legend_figure
labels = ["SB-CTM", "LN-CTM (Gibbs)", "LN-CTM (VB)", "LDA (Gibbs)"]
fig = create_legend_figure(labels, colors[:4], size=(5.25,0.5),
lineargs={"lw": 2},
legendargs={"columnspacing": 0.75,
"handletextpad": 0.125})
fig.savefig(os.path.join("results", "newsgroups", "legend.pdf"))
def logma(v):
def logavg(v):
return logsumexp(v) - np.log(len(v))
return np.array([logavg(v[n//2:n]) for n in xrange(2,len(v))])
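# illustrative note (not part of the original script): logma is a log-domain
# moving average -- entry n averages exp(v[n//2:n]) and takes the log, i.e.
# it smooths the predictive log likelihoods over the latter half of the chain.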
def plot_predictive_lls(result, logaddexp, **kwargs):
plot_kwargs = dict(lw=2, marker='o', markersize=2, markeredgecolor='none')
plot_kwargs.update(kwargs)
if 'color' in kwargs:
plot_kwargs['markerfacecolor'] = kwargs['color']
plt.plot(result.timestamps, result.predictive_lls, **plot_kwargs)
if logaddexp:
plt.plot(result.timestamps[2:], logma(result.predictive_lls), **plot_kwargs)
print(result.predictive_lls[-10:])
plt.legend(loc='lower right')
plt.xlabel('Time (sec)')
plt.ylabel('Held-out predictive log likelihoods')
plt.tight_layout()
#########################
# running experiments #
#########################
if __name__ == '__main__':
## newsgroups
# cats = None # all categories
# T, V = 50, 1000
# alpha_beta, alpha_theta = 0.1, 1.
# train_frac, test_frac = 0.9, 0.5
# data, words = load_newsgroup_data(V, cats)
## AP
T, V = 100, 1000
alpha_beta, alpha_theta = 0.05, 1.
train_frac, test_frac = 0.95, 0.5
data, words = load_ap_data(V)
## print setup
print('T=%d, V=%d' % (T, V))
print('alpha_beta = %0.3f, alpha_theta = %0.3f' % (alpha_beta, alpha_theta))
print('train_frac = %0.3f, test_frac = %0.3f' % (train_frac, test_frac))
print()
## split train test
train_data, test_data = split_test_train(data, train_frac=train_frac, test_frac=test_frac)
## fit and plot
em_results = fit_lnctm_em(train_data, test_data, T)
if em_results is not None:
plot_predictive_lls(em_results, False, color=colors[0], label='LN CTM EM')
lda_results = fit_lda_collapsed(train_data, test_data, T, 1000, True, alpha_beta, alpha_theta)
plot_predictive_lls(lda_results, True, color=colors[1], label='LDA Gibbs')
ln_results = fit_lnctm_gibbs(train_data, test_data, T, 200, True, alpha_beta)
plot_predictive_lls(ln_results, True, color=colors[2], label='LN CTM Gibbs')
sb_results = fit_sbctm_gibbs(train_data, test_data, T, 1500, True, alpha_beta)
plot_predictive_lls(sb_results, True, color=colors[3], label='SB CTM Gibbs')
all_results = {
'em': em_results,
'lda': lda_results,
'ln': ln_results,
'sb': sb_results,
}
with open('ctm_results.pkl','w') as outfile:
pickle.dump(all_results, outfile, protocol=-1)
plt.show()
| mit |
Chuban/moose | test/tests/time_integrators/scalar/run_stiff.py | 8 | 5672 | #!/usr/bin/env python
import subprocess
import sys
import csv
import matplotlib.pyplot as plt
import numpy as np
# Use fonts that match LaTeX
from matplotlib import rcParams
rcParams['font.family'] = 'serif'
rcParams['font.size'] = 17
rcParams['font.serif'] = ['Computer Modern Roman']
rcParams['text.usetex'] = True
# Small font size for the legend
from matplotlib.font_manager import FontProperties
fontP = FontProperties()
fontP.set_size('x-small')
def get_last_row(csv_filename):
'''
Function which returns just the last row of a CSV file. We have to
read every line of the file, there was no stackoverflow example of
reading just the last line.
http://stackoverflow.com/questions/20296955/reading-last-row-from-csv-file-python-error
'''
with open(csv_filename, 'r') as f:
lastrow = None
for row in csv.reader(f):
if (row != []): # skip blank lines at end of file.
lastrow = row
return lastrow
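# --- illustrative alternative (not used by the driver below) ---
# A deque with maxlen=1 yields the last non-blank row without keeping the
# whole file in memory; shown only as a sketch of the same idea.
def get_last_row_deque(csv_filename):
    from collections import deque
    with open(csv_filename, 'r') as f:
        rows = deque((row for row in csv.reader(f) if row), maxlen=1)
    return rows[0] if rows else None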
def run_moose(y2_exponent, dt, time_integrator, lam):
'''
Function which actually runs MOOSE.
'''
command_line_args = ['../../../moose_test-opt', '-i', 'stiff.i',
'Executioner/dt={}'.format(dt),
'Executioner/dtmin={}'.format(dt),
'Executioner/TimeIntegrator/type={}'.format(time_integrator),
'LAMBDA={}'.format(lam),
'Y2_EXPONENT={}'.format(y2_exponent)]
try:
child = subprocess.Popen(command_line_args, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
# communicate() waits for the process to terminate, so there's no
# need to wait() for it. It also sets the returncode attribute on
# child.
(stdoutdata, stderrdata) = child.communicate()
if (child.returncode != 0):
print('Running MOOSE failed: program output is below:')
print(stdoutdata)
raise
except:
print('Error executing moose_test')
sys.exit(1)
# Parse the last line of the output file to get the error at the final time.
#
# The columns are in alphabetical order, we want to look at
# "max_error_y1", which is the "stiff" component of the system of
# ODEs.
# The columns are currently given by:
# time,error_y1,error_y2,max_error_y1,max_error_y2,value_y1,value_y1_abs_max,value_y2,value_y2_abs_max,y1,y2
output_filename = 'stiff_out.csv'
last_row = get_last_row(output_filename)
max_error_y1 = float(last_row[3])
value_y1_abs_max = float(last_row[6])
normalized_error = max_error_y1 / value_y1_abs_max
return normalized_error
#
# Main program
#
# implicit methods only - for the large values of lambda considered
# here, explicit methods would require very small timesteps, and so
# they are not considered here.
time_integrators = ['ImplicitEuler', 'CrankNicolson', 'BDF2', 'ImplicitMidpoint', 'LStableDirk2', 'LStableDirk3', 'LStableDirk4', 'AStableDirk4']
# The sequence of timesteps to try
dts = [.5, .25, .125, .0625, .03125, .015625, .0078125]
# The values of lambda to try
# .) lam=-2 is not allowed for p=2 case.
# .) lam=-1 is not allowed for p=1 case.
lams = [-.1, -10, -100, -1000, -10000]
# The values of the y2 exponent to try. 1=linear, 2=nonlinear.
y2_exponents = [1, 2]
# Plot colors
colors = ['maroon', 'blue', 'green', 'black', 'burlywood', 'olivedrab', 'midnightblue',
'tomato', 'darkmagenta', 'chocolate', 'lightslategray', 'skyblue']
# Plot line markers
markers = ['v', 'o', 'x', '^', 'H', 'h', '+', 'D', '*', '4', 'd', '8']
# Plot line styles
linestyles = [':', '-', '-', '--', ':', '-', '--', ':', '--', '-', '-', '-']
# Loop over:
# lambdas
# y2 exponents
# time_integrators
# dts
for lam in lams:
for y2_exponent in y2_exponents:
fig = plt.figure()
ax1 = fig.add_subplot(111)
for i in xrange(len(time_integrators)):
time_integrator = time_integrators[i]
# Place to store the results for this TimeIntegrator
results = []
# Call MOOSE to compute the results
for dt in dts:
results.append(run_moose(y2_exponent, dt, time_integrator, lam))
# Make plot
xdata = np.log10(np.reciprocal(dts))
ydata = np.log10(results)
# Compute linear fit of last three points.
start_fit = len(xdata) - 3
end_fit = len(xdata)
fit = np.polyfit(xdata[start_fit:end_fit], ydata[start_fit:end_fit], 1)
# Print results for tabulation etc.
print('{} (Slope={})'.format(time_integrator, fit[0]))
print('dt, max_error_y1')
for j in xrange(len(dts)):
print('{}, {}'.format(dts[j], results[j]))
print('') # blank line
ax1.plot(xdata, ydata, label=time_integrator + ", $" + "{:.2f}".format(fit[0]) + "$",
color=colors[i], marker=markers[i], linestyle=linestyles[i])
# Set up the axis labels.
ax1.set_xlabel('$\log (\Delta t^{-1})$')
ax1.set_ylabel('$\log (\|e\|_{L^{\infty}} / \|y_1\|_{L^{\infty}})$')
# The input file name up to the file extension
filebase = 'linear'
if (y2_exponent != 1):
filebase = 'nonlinear'
# Add a title
ax1.set_title('{}, $\\lambda = {}$'.format(filebase.title(), lam))
# Add a legend
plt.legend(loc='lower left', prop=fontP)
# Save a PDF
plt.savefig(filebase + '_lambda_{}.pdf'.format(lam), format='pdf')
# Local Variables:
# python-indent: 2
# End:
| lgpl-2.1 |
glouppe/scikit-learn | sklearn/decomposition/truncated_svd.py | 30 | 7896 | """Truncated SVD for sparse matrices, aka latent semantic analysis (LSA).
"""
# Author: Lars Buitinck <L.J.Buitinck@uva.nl>
# Olivier Grisel <olivier.grisel@ensta.org>
# Michael Becker <mike@beckerfuffle.com>
# License: 3-clause BSD.
import numpy as np
import scipy.sparse as sp
try:
from scipy.sparse.linalg import svds
except ImportError:
from ..utils.arpack import svds
from ..base import BaseEstimator, TransformerMixin
from ..utils import check_array, as_float_array, check_random_state
from ..utils.extmath import randomized_svd, safe_sparse_dot, svd_flip
from ..utils.sparsefuncs import mean_variance_axis
__all__ = ["TruncatedSVD"]
class TruncatedSVD(BaseEstimator, TransformerMixin):
"""Dimensionality reduction using truncated SVD (aka LSA).
This transformer performs linear dimensionality reduction by means of
truncated singular value decomposition (SVD). It is very similar to PCA,
but operates on sample vectors directly, instead of on a covariance matrix.
This means it can work with scipy.sparse matrices efficiently.
In particular, truncated SVD works on term count/tf-idf matrices as
returned by the vectorizers in sklearn.feature_extraction.text. In that
context, it is known as latent semantic analysis (LSA).
This estimator supports two algorithms: a fast randomized SVD solver, and
a "naive" algorithm that uses ARPACK as an eigensolver on (X * X.T) or
(X.T * X), whichever is more efficient.
Read more in the :ref:`User Guide <LSA>`.
Parameters
----------
n_components : int, default = 2
Desired dimensionality of output data.
Must be strictly less than the number of features.
The default value is useful for visualisation. For LSA, a value of
100 is recommended.
algorithm : string, default = "randomized"
SVD solver to use. Either "arpack" for the ARPACK wrapper in SciPy
(scipy.sparse.linalg.svds), or "randomized" for the randomized
algorithm due to Halko (2009).
n_iter : int, optional (default 5)
Number of iterations for randomized SVD solver. Not used by ARPACK.
The default is larger than the default in `randomized_svd` to handle
sparse matrices that may have large slowly decaying spectrum.
random_state : int or RandomState, optional
(Seed for) pseudo-random number generator. If not given, the
numpy.random singleton is used.
tol : float, optional
Tolerance for ARPACK. 0 means machine precision. Ignored by randomized
SVD solver.
Attributes
----------
components_ : array, shape (n_components, n_features)
explained_variance_ratio_ : array, [n_components]
Percentage of variance explained by each of the selected components.
explained_variance_ : array, [n_components]
The variance of the training samples transformed by a projection to
each component.
Examples
--------
>>> from sklearn.decomposition import TruncatedSVD
>>> from sklearn.random_projection import sparse_random_matrix
>>> X = sparse_random_matrix(100, 100, density=0.01, random_state=42)
>>> svd = TruncatedSVD(n_components=5, random_state=42)
>>> svd.fit(X) # doctest: +NORMALIZE_WHITESPACE
TruncatedSVD(algorithm='randomized', n_components=5, n_iter=5,
random_state=42, tol=0.0)
>>> print(svd.explained_variance_ratio_) # doctest: +ELLIPSIS
[ 0.0782... 0.0552... 0.0544... 0.0499... 0.0413...]
>>> print(svd.explained_variance_ratio_.sum()) # doctest: +ELLIPSIS
0.279...
See also
--------
PCA
RandomizedPCA
References
----------
Finding structure with randomness: Stochastic algorithms for constructing
approximate matrix decompositions
Halko, et al., 2009 (arXiv:909) http://arxiv.org/pdf/0909.4061
Notes
-----
SVD suffers from a problem called "sign indeterminacy", which means the
sign of the ``components_`` and the output from transform depend on the
algorithm and random state. To work around this, fit instances of this
class to data once, then keep the instance around to do transformations.
"""
def __init__(self, n_components=2, algorithm="randomized", n_iter=5,
random_state=None, tol=0.):
self.algorithm = algorithm
self.n_components = n_components
self.n_iter = n_iter
self.random_state = random_state
self.tol = tol
def fit(self, X, y=None):
"""Fit LSI model on training data X.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Training data.
Returns
-------
self : object
Returns the transformer object.
"""
self.fit_transform(X)
return self
def fit_transform(self, X, y=None):
"""Fit LSI model to X and perform dimensionality reduction on X.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Training data.
Returns
-------
X_new : array, shape (n_samples, n_components)
Reduced version of X. This will always be a dense array.
"""
X = as_float_array(X, copy=False)
random_state = check_random_state(self.random_state)
# If sparse and not csr or csc, convert to csr
if sp.issparse(X) and X.getformat() not in ["csr", "csc"]:
X = X.tocsr()
if self.algorithm == "arpack":
U, Sigma, VT = svds(X, k=self.n_components, tol=self.tol)
# svds doesn't abide by scipy.linalg.svd/randomized_svd
# conventions, so reverse its outputs.
Sigma = Sigma[::-1]
U, VT = svd_flip(U[:, ::-1], VT[::-1])
elif self.algorithm == "randomized":
k = self.n_components
n_features = X.shape[1]
if k >= n_features:
raise ValueError("n_components must be < n_features;"
" got %d >= %d" % (k, n_features))
U, Sigma, VT = randomized_svd(X, self.n_components,
n_iter=self.n_iter,
random_state=random_state)
else:
raise ValueError("unknown algorithm %r" % self.algorithm)
self.components_ = VT
# Calculate explained variance & explained variance ratio
X_transformed = np.dot(U, np.diag(Sigma))
self.explained_variance_ = exp_var = np.var(X_transformed, axis=0)
if sp.issparse(X):
_, full_var = mean_variance_axis(X, axis=0)
full_var = full_var.sum()
else:
full_var = np.var(X, axis=0).sum()
self.explained_variance_ratio_ = exp_var / full_var
return X_transformed
def transform(self, X):
"""Perform dimensionality reduction on X.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
New data.
Returns
-------
X_new : array, shape (n_samples, n_components)
Reduced version of X. This will always be a dense array.
"""
X = check_array(X, accept_sparse='csr')
return safe_sparse_dot(X, self.components_.T)
def inverse_transform(self, X):
"""Transform X back to its original space.
Returns an array X_original whose transform would be X.
Parameters
----------
X : array-like, shape (n_samples, n_components)
New data.
Returns
-------
X_original : array, shape (n_samples, n_features)
Note that this is always a dense array.
"""
X = check_array(X)
return np.dot(X, self.components_)
| bsd-3-clause |
bashkirtsevich/autocode | text_preprocessing/tools/w2v_trainer.py | 1 | 1651 | class my_sentences(object):
def __init__(self, data_path, separator, fields):
import pandas as pd
self._df = pd.read_csv(data_path, sep=separator, engine="python", names=fields)
def __iter__(self):
for i in range(self._df.shape[0]):
yield " ".join([self._df["f1"].loc[i], self._df["f2"].loc[i]]).lower().split()
def train_model(data_path, output_path,
separator=";", fields=("f1", "f2"), min_count=10, size=200):
import gensim, logging
logging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s', level=logging.INFO)
sentences = MySentences(data_path, separator, fields)
model = gensim.models.Word2Vec(
sentences,
min_count=min_count,
size=size)
model.save(output_path)
if __name__ == '__main__':
from optparse import OptionParser
opt_parser = OptionParser(
usage="Usage: %prog -d \"path/to/data/csv\" -s \";\" -o \"path/to/store/trained.nn_model\"")
opt_parser.add_option(
"-d", "--data",
dest="data", metavar="FILE",
help="path to csv file with data for train")
opt_parser.add_option(
"-s", "--separator",
dest="separator", default=";",
help="CSV file separator")
opt_parser.add_option(
"-o", "--output",
dest="output", metavar="FILE",
help="path to file where w2v nn_model will be saved")
(options, args) = opt_parser.parse_args()
if options.data and options.output:
train_model(options.data, options.output, options.separator)
else:
opt_parser.print_usage()
opt_parser.print_help()
| gpl-3.0 |
AnasGhrab/scikit-learn | benchmarks/bench_plot_nmf.py | 206 | 5890 | """
Benchmarks of Non-Negative Matrix Factorization
"""
from __future__ import print_function
from collections import defaultdict
import gc
from time import time
import numpy as np
from scipy.linalg import norm
from sklearn.decomposition.nmf import NMF, _initialize_nmf
from sklearn.datasets.samples_generator import make_low_rank_matrix
from sklearn.externals.six.moves import xrange
def alt_nnmf(V, r, max_iter=1000, tol=1e-3, R=None):
'''
W, H = alt_nnmf(V, r, max_iter=1000, tol=1e-3, R=None)
Implement Lee & Seung's algorithm
Parameters
----------
V : 2-ndarray, [n_samples, n_features]
input matrix
r : integer
number of latent features
max_iter : integer, optional
maximum number of iterations (default: 1000)
tol : double
tolerance threshold for early exit (when the update factor is within
tol of 1., the function exits)
R : integer, optional
random seed
Returns
-------
W : 2-ndarray, [n_samples, r]
Component part of the factorization
H : 2-ndarray, [r, n_features]
Data part of the factorization
Reference
---------
"Algorithms for Non-negative Matrix Factorization"
by Daniel D Lee, Sebastian H Seung
(available at http://citeseer.ist.psu.edu/lee01algorithms.html)
'''
# Nomenclature in the function follows Lee & Seung
eps = 1e-5
n, m = V.shape
if R == "svd":
W, H = _initialize_nmf(V, r)
elif R is None:
R = np.random.mtrand._rand
W = np.abs(R.standard_normal((n, r)))
H = np.abs(R.standard_normal((r, m)))
for i in xrange(max_iter):
updateH = np.dot(W.T, V) / (np.dot(np.dot(W.T, W), H) + eps)
H *= updateH
updateW = np.dot(V, H.T) / (np.dot(W, np.dot(H, H.T)) + eps)
W *= updateW
if i % 10 == 0:
max_update = max(updateW.max(), updateH.max())
if abs(1. - max_update) < tol:
break
return W, H
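# --- illustrative sketch (not part of the benchmark) ---
# Quick sanity check of alt_nnmf on a small random non-negative matrix;
# R=None draws a random initialization, as in the benchmark loop below.
def _demo_alt_nnmf():
    V = np.abs(np.random.standard_normal((30, 20)))
    W, H = alt_nnmf(V, r=5, R=None)
    print('reconstruction error: %.3f' % norm(V - np.dot(W, H)))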
def report(error, time):
print("Frobenius loss: %.5f" % error)
print("Took: %.2fs" % time)
print()
def benchmark(samples_range, features_range, rank=50, tolerance=1e-5):
it = 0
timeset = defaultdict(lambda: [])
err = defaultdict(lambda: [])
max_it = len(samples_range) * len(features_range)
for n_samples in samples_range:
for n_features in features_range:
print("%2d samples, %2d features" % (n_samples, n_features))
print('=======================')
X = np.abs(make_low_rank_matrix(n_samples, n_features,
effective_rank=rank, tail_strength=0.2))
gc.collect()
print("benchmarking nndsvd-nmf: ")
tstart = time()
m = NMF(n_components=30, tol=tolerance, init='nndsvd').fit(X)
tend = time() - tstart
timeset['nndsvd-nmf'].append(tend)
err['nndsvd-nmf'].append(m.reconstruction_err_)
report(m.reconstruction_err_, tend)
gc.collect()
print("benchmarking nndsvda-nmf: ")
tstart = time()
m = NMF(n_components=30, init='nndsvda',
tol=tolerance).fit(X)
tend = time() - tstart
timeset['nndsvda-nmf'].append(tend)
err['nndsvda-nmf'].append(m.reconstruction_err_)
report(m.reconstruction_err_, tend)
gc.collect()
print("benchmarking nndsvdar-nmf: ")
tstart = time()
m = NMF(n_components=30, init='nndsvdar',
tol=tolerance).fit(X)
tend = time() - tstart
timeset['nndsvdar-nmf'].append(tend)
err['nndsvdar-nmf'].append(m.reconstruction_err_)
report(m.reconstruction_err_, tend)
gc.collect()
print("benchmarking random-nmf")
tstart = time()
m = NMF(n_components=30, init=None, max_iter=1000,
tol=tolerance).fit(X)
tend = time() - tstart
timeset['random-nmf'].append(tend)
err['random-nmf'].append(m.reconstruction_err_)
report(m.reconstruction_err_, tend)
gc.collect()
print("benchmarking alt-random-nmf")
tstart = time()
W, H = alt_nnmf(X, r=30, R=None, tol=tolerance)
tend = time() - tstart
timeset['alt-random-nmf'].append(tend)
err['alt-random-nmf'].append(np.linalg.norm(X - np.dot(W, H)))
report(norm(X - np.dot(W, H)), tend)
return timeset, err
if __name__ == '__main__':
from mpl_toolkits.mplot3d import axes3d # register the 3d projection
axes3d
import matplotlib.pyplot as plt
samples_range = np.linspace(50, 500, 3).astype(np.int)
features_range = np.linspace(50, 500, 3).astype(np.int)
timeset, err = benchmark(samples_range, features_range)
for i, results in enumerate((timeset, err)):
fig = plt.figure('scikit-learn Non-Negative Matrix Factorization benchmark results')
ax = fig.gca(projection='3d')
for c, (label, timings) in zip('rbgcm', sorted(results.iteritems())):
X, Y = np.meshgrid(samples_range, features_range)
Z = np.asarray(timings).reshape(samples_range.shape[0],
features_range.shape[0])
# plot the actual surface
ax.plot_surface(X, Y, Z, rstride=8, cstride=8, alpha=0.3,
color=c)
# dummy point plot to stick the legend to since surface plot do not
# support legends (yet?)
ax.plot([1], [1], [1], color=c, label=label)
ax.set_xlabel('n_samples')
ax.set_ylabel('n_features')
zlabel = 'Time (s)' if i == 0 else 'reconstruction error'
ax.set_zlabel(zlabel)
ax.legend()
plt.show()
| bsd-3-clause |
kambysese/mne-python | mne/parallel.py | 14 | 6545 | """Parallel util function."""
# Author: Alexandre Gramfort <alexandre.gramfort@inria.fr>
#
# License: Simplified BSD
import logging
import os
from . import get_config
from .utils import logger, verbose, warn, ProgressBar
from .utils.check import int_like
from .fixes import _get_args
if 'MNE_FORCE_SERIAL' in os.environ:
_force_serial = True
else:
_force_serial = None
@verbose
def parallel_func(func, n_jobs, max_nbytes='auto', pre_dispatch='n_jobs',
total=None, prefer=None, verbose=None):
"""Return parallel instance with delayed function.
Util function to use joblib only if available
Parameters
----------
func : callable
A function.
n_jobs : int
Number of jobs to run in parallel.
max_nbytes : int, str, or None
Threshold on the minimum size of arrays passed to the workers that
triggers automated memory mapping. Can be an int in Bytes,
or a human-readable string, e.g., '1M' for 1 megabyte.
Use None to disable memmapping of large arrays. Use 'auto' to
use the value set using mne.set_memmap_min_size.
pre_dispatch : int, or str, optional
See :class:`joblib.Parallel`.
total : int | None
If int, use a progress bar to display the progress of dispatched
jobs. This should only be used when directly iterating, not when
using ``split_list`` or :func:`np.array_split`.
If None (default), do not add a progress bar.
prefer : str | None
If str, can be "processes" or "threads". See :class:`joblib.Parallel`.
Ignored if the joblib version is too old to support this.
.. versionadded:: 0.18
%(verbose)s INFO or DEBUG
will print parallel status, others will not.
Returns
-------
parallel: instance of joblib.Parallel or list
The parallel object.
my_func: callable
``func`` if not parallel or delayed(func).
n_jobs: int
Number of jobs >= 0.
"""
should_print = (logger.level <= logging.INFO)
# for a single job, we don't need joblib
if n_jobs != 1:
try:
from joblib import Parallel, delayed
except ImportError:
try:
from sklearn.externals.joblib import Parallel, delayed
except ImportError:
warn('joblib not installed. Cannot run in parallel.')
n_jobs = 1
if n_jobs == 1:
n_jobs = 1
my_func = func
parallel = list
else:
# check if joblib is recent enough to support memmapping
p_args = _get_args(Parallel.__init__)
joblib_mmap = ('temp_folder' in p_args and 'max_nbytes' in p_args)
cache_dir = get_config('MNE_CACHE_DIR', None)
if isinstance(max_nbytes, str) and max_nbytes == 'auto':
max_nbytes = get_config('MNE_MEMMAP_MIN_SIZE', None)
if max_nbytes is not None:
if not joblib_mmap and cache_dir is not None:
warn('"MNE_CACHE_DIR" is set but a newer version of joblib is '
'needed to use the memmapping pool.')
if joblib_mmap and cache_dir is None:
logger.info(
'joblib supports memmapping pool but "MNE_CACHE_DIR" '
'is not set in MNE-Python config. To enable it, use, '
'e.g., mne.set_cache_dir(\'/tmp/shm\'). This will '
'store temporary files under /dev/shm and can result '
'in large memory savings.')
# create keyword arguments for Parallel
kwargs = {'verbose': 5 if should_print and total is None else 0}
kwargs['pre_dispatch'] = pre_dispatch
if 'prefer' in p_args:
kwargs['prefer'] = prefer
if joblib_mmap:
if cache_dir is None:
max_nbytes = None  # disable memmapping
kwargs['temp_folder'] = cache_dir
kwargs['max_nbytes'] = max_nbytes
n_jobs = check_n_jobs(n_jobs)
parallel = _check_wrapper(Parallel(n_jobs, **kwargs))
my_func = delayed(func)
if total is not None:
def parallel_progress(op_iter):
return parallel(ProgressBar(iterable=op_iter, max_value=total))
parallel_out = parallel_progress
else:
parallel_out = parallel
return parallel_out, my_func, n_jobs
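# --- illustrative sketch (not part of the original module) ---
# Typical use of parallel_func; with n_jobs=1 it degrades to a plain list
# over the generator, so the demo needs neither joblib nor a picklable
# top-level function.
def _demo_parallel_func():
    def _square(x):
        return x * x
    parallel, run, n_jobs = parallel_func(_square, n_jobs=1)
    assert parallel(run(x) for x in range(4)) == [0, 1, 4, 9]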
def _check_wrapper(fun):
def run(*args, **kwargs):
try:
return fun(*args, **kwargs)
except RuntimeError as err:
msg = str(err.args[0]) if err.args else ''
if msg.startswith('The task could not be sent to the workers'):
raise RuntimeError(
msg + ' Consider using joblib memmap caching to get '
'around this problem. See mne.set_memmap_min_size, '
'mne.set_cache_dir, and buffer_size parallel function '
'arguments (if applicable).')
raise
return run
def check_n_jobs(n_jobs, allow_cuda=False):
"""Check n_jobs in particular for negative values.
Parameters
----------
n_jobs : int
The number of jobs.
allow_cuda : bool
Allow n_jobs to be 'cuda'. Default: False.
Returns
-------
n_jobs : int
The checked number of jobs. Always positive (or 'cuda' if
applicable).
"""
if not isinstance(n_jobs, int_like):
if not allow_cuda:
raise ValueError('n_jobs must be an integer')
elif not isinstance(n_jobs, str) or n_jobs != 'cuda':
raise ValueError('n_jobs must be an integer, or "cuda"')
# else, we have n_jobs='cuda' and this is okay, so do nothing
elif _force_serial:
n_jobs = 1
logger.info('... MNE_FORCE_SERIAL set. Processing in forced '
'serial mode.')
elif n_jobs <= 0:
try:
import multiprocessing
n_cores = multiprocessing.cpu_count()
n_jobs = min(n_cores + n_jobs + 1, n_cores)
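# (so n_jobs=-1 means all cores, n_jobs=-2 all but one, and so on)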
if n_jobs <= 0:
raise ValueError('If n_jobs has a negative value it must not '
'be less than the number of CPUs present. '
'You\'ve got %s CPUs' % n_cores)
except ImportError:
# only warn if they tried to use something other than 1 job
if n_jobs != 1:
warn('multiprocessing not installed. Cannot run in parallel.')
n_jobs = 1
return n_jobs
| bsd-3-clause |
roxyboy/scikit-learn | sklearn/semi_supervised/tests/test_label_propagation.py | 307 | 1974 | """ test the label propagation module """
import nose
import numpy as np
from sklearn.semi_supervised import label_propagation
from numpy.testing import assert_array_almost_equal
from numpy.testing import assert_array_equal
ESTIMATORS = [
(label_propagation.LabelPropagation, {'kernel': 'rbf'}),
(label_propagation.LabelPropagation, {'kernel': 'knn', 'n_neighbors': 2}),
(label_propagation.LabelSpreading, {'kernel': 'rbf'}),
(label_propagation.LabelSpreading, {'kernel': 'knn', 'n_neighbors': 2})
]
def test_fit_transduction():
samples = [[1., 0.], [0., 2.], [1., 3.]]
labels = [0, 1, -1]
for estimator, parameters in ESTIMATORS:
clf = estimator(**parameters).fit(samples, labels)
nose.tools.assert_equal(clf.transduction_[2], 1)
def test_distribution():
samples = [[1., 0.], [0., 1.], [1., 1.]]
labels = [0, 1, -1]
for estimator, parameters in ESTIMATORS:
clf = estimator(**parameters).fit(samples, labels)
if parameters['kernel'] == 'knn':
continue # unstable test; changes in k-NN ordering break it
assert_array_almost_equal(clf.predict_proba([[1., 0.0]]),
np.array([[1., 0.]]), 2)
else:
assert_array_almost_equal(np.asarray(clf.label_distributions_[2]),
np.array([.5, .5]), 2)
def test_predict():
samples = [[1., 0.], [0., 2.], [1., 3.]]
labels = [0, 1, -1]
for estimator, parameters in ESTIMATORS:
clf = estimator(**parameters).fit(samples, labels)
assert_array_equal(clf.predict([[0.5, 2.5]]), np.array([1]))
def test_predict_proba():
samples = [[1., 0.], [0., 1.], [1., 2.5]]
labels = [0, 1, -1]
for estimator, parameters in ESTIMATORS:
clf = estimator(**parameters).fit(samples, labels)
assert_array_almost_equal(clf.predict_proba([[1., 1.]]),
np.array([[0.5, 0.5]]))
| bsd-3-clause |
bigdataelephants/scikit-learn | examples/bicluster/plot_spectral_coclustering.py | 276 | 1736 | """
==============================================
A demo of the Spectral Co-Clustering algorithm
==============================================
This example demonstrates how to generate a dataset and bicluster it
using the Spectral Co-Clustering algorithm.
The dataset is generated using the ``make_biclusters`` function, which
creates a matrix of small values and implants biclusters with large
values. The rows and columns are then shuffled and passed to the
Spectral Co-Clustering algorithm. Rearranging the shuffled matrix to
make biclusters contiguous shows how accurately the algorithm found
the biclusters.
"""
print(__doc__)
# Author: Kemal Eren <kemal@kemaleren.com>
# License: BSD 3 clause
import numpy as np
from matplotlib import pyplot as plt
from sklearn.datasets import make_biclusters
from sklearn.datasets import samples_generator as sg
from sklearn.cluster.bicluster import SpectralCoclustering
from sklearn.metrics import consensus_score
data, rows, columns = make_biclusters(
shape=(300, 300), n_clusters=5, noise=5,
shuffle=False, random_state=0)
plt.matshow(data, cmap=plt.cm.Blues)
plt.title("Original dataset")
data, row_idx, col_idx = sg._shuffle(data, random_state=0)
plt.matshow(data, cmap=plt.cm.Blues)
plt.title("Shuffled dataset")
model = SpectralCoclustering(n_clusters=5, random_state=0)
model.fit(data)
score = consensus_score(model.biclusters_,
(rows[:, row_idx], columns[:, col_idx]))
print("consensus score: {:.3f}".format(score))
fit_data = data[np.argsort(model.row_labels_)]
fit_data = fit_data[:, np.argsort(model.column_labels_)]
plt.matshow(fit_data, cmap=plt.cm.Blues)
plt.title("After biclustering; rearranged to show biclusters")
plt.show()
| bsd-3-clause |
dustalov/watlink | eval/pathwise.py | 1 | 5020 | #!/usr/bin/env python3
import argparse
import csv
import itertools
import warnings
from collections import defaultdict
from concurrent.futures import ProcessPoolExecutor
import networkx as nx
from scipy.stats import wilcoxon
from sklearn.exceptions import UndefinedMetricWarning
from sklearn.metrics import confusion_matrix, precision_recall_fscore_support
# The metrics can be ill-defined, but this is OK.
warnings.simplefilter('ignore', category=UndefinedMetricWarning)
parser = argparse.ArgumentParser()
parser.add_argument('--gold', required=True)
parser.add_argument('--significance', action='store_true')
parser.add_argument('--alpha', nargs='?', type=float, default=0.01)
parser.add_argument('path', nargs='+')
args = parser.parse_args()
def sanitize(s):
    return s.lower().replace(' ', '_')
def isas(path):
G = nx.DiGraph()
with open(path, newline='') as f:
reader = csv.reader(f, delimiter='\t')
for row in reader:
if len(row) > 1 and row[0] and row[1]:
G.add_edge(sanitize(row[0]), sanitize(row[1]))
# Note that we store the sense inventory as an attribute of G.
# TODO: nx.DiGraph subclass?
G.senses = defaultdict(list)
for node in G.nodes():
G.senses[node.rsplit('#', 1)[0]].append(node)
return G
with ProcessPoolExecutor() as executor:
paths = args.path + [args.gold]
resources = {path: G for path, G in zip(paths, executor.map(isas, paths))}
gold = resources.pop(args.gold)
def has_sense_path(G, source, target):
if source not in G.senses or target not in G.senses:
return False
for source_sense, target_sense in itertools.product(G.senses[source], G.senses[target]):
if nx.has_path(G, source_sense, target_sense):
return True
return False
lexicon = gold.senses.keys() & set.union(*(set(G.senses.keys()) for G in resources.values()))
union = [pair for pair in
{(word1.rsplit('#', 1)[0], word2.rsplit('#', 1)[0]) for word1, word2 in gold.edges()} |
set.union(*(set(G.edges()) for G in resources.values()))
if pair[0] in lexicon and pair[1] in lexicon]
true = [int(has_sense_path(gold, *pair)) for pair in union]
index = defaultdict(list)
for pair in union:
index[pair[0]].append(pair)
hyponyms = sorted(index)
def wordwise(G, pairs):
word_true = [int(has_sense_path(gold, *pair)) for pair in pairs]
word_pred = [int(has_sense_path(G, *pair)) for pair in pairs]
return (word_true, word_pred)
def scores(G):
if not args.significance:
return
labels = [wordwise(G, index[word]) for word in hyponyms]
scores = {score: [None] * len(labels) for score in ('precision', 'recall', 'f1')}
for i, (true, pred) in enumerate(labels):
precision, recall, f1, _ = precision_recall_fscore_support(true, pred, average='binary')
# Cast float64 and float just to float.
scores['precision'][i] = float(precision)
scores['recall'][i] = float(recall)
scores['f1'][i] = float(f1)
return scores
def evaluate(path):
G = resources[path]
pred = [int(has_sense_path(G, *pair)) for pair in union]
tn, fp, fn, tp = confusion_matrix(true, pred).ravel()
precision, recall, f1, _ = precision_recall_fscore_support(true, pred, average='binary')
return {
'tn': tn.item(),
'fp': fp.item(),
'fn': fn.item(),
'tp': tp.item(),
'precision': precision.item(),
'recall': recall.item(),
'f1': f1.item(),
'scores': scores(G)
}
with ProcessPoolExecutor() as executor:
results = {path: result for path, result in zip(resources, executor.map(evaluate, resources))}
def pairwise(iterable):
a, b = itertools.tee(iterable)
next(b, None)
return zip(a, b)
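# illustrative note (not part of the original script):
# pairwise('ABCD') yields ('A','B'), ('B','C'), ('C','D'); significance()
# below walks the systems ranked by score in adjacent pairs and increases
# the rank only when the Wilcoxon test rejects equality at the given alpha.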
def significance(metric):
if not args.significance:
return {}
desc, rank = sorted(results.items(), key=lambda item: item[1][metric], reverse=True), 1
ranks = {}
for (path1, results1), (path2, results2) in pairwise(desc):
x, y = list(results1['scores'][metric]), list(results2['scores'][metric])
ranks[path1] = rank
rank += int(wilcoxon(x, y).pvalue < args.alpha)
ranks[path2] = rank
return metric, ranks
with ProcessPoolExecutor() as executor:
ranks = {metric: result for metric, result in executor.map(significance, ('precision', 'recall', 'f1'))}
print('\t'.join(
('path', 'pairs', 'tn', 'fp', 'fn', 'tp', 'precision', 'recall', 'f1', 'precision_rank', 'recall_rank', 'f1_rank')))
for path, values in results.items():
print('\t'.join((
path,
str(resources[path].size()),
str(values['tn']),
str(values['fp']),
str(values['fn']),
str(values['tp']),
str(values['precision']),
str(values['recall']),
str(values['f1']),
str(ranks['precision'].get(path, 0)),
str(ranks['recall'].get(path, 0)),
str(ranks['f1'].get(path, 0))
)))
| mit |
revoer/keystone-8.0.0 | swift/common/middleware/xprofile.py | 8 | 9623 | # Copyright (c) 2010-2012 OpenStack, LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Profiling middleware for Swift Servers.
The current implementation is based on eventlet aware profiler.(For the
future, more profilers could be added in to collect more data for analysis.)
Profiling all incoming requests and accumulating cpu timing statistics
information for performance tuning and optimization. An mini web UI is also
provided for profiling data analysis. It can be accessed from the URL as
below.
Index page for browse profile data::
http://SERVER_IP:PORT/__profile__
List all profiles to return profile ids in json format::
http://SERVER_IP:PORT/__profile__/
http://SERVER_IP:PORT/__profile__/all
Retrieve specific profile data in different formats::
http://SERVER_IP:PORT/__profile__/PROFILE_ID?format=[default|json|csv|ods]
http://SERVER_IP:PORT/__profile__/current?format=[default|json|csv|ods]
http://SERVER_IP:PORT/__profile__/all?format=[default|json|csv|ods]
Retrieve metrics from specific function in json format::
http://SERVER_IP:PORT/__profile__/PROFILE_ID/NFL?format=json
http://SERVER_IP:PORT/__profile__/current/NFL?format=json
http://SERVER_IP:PORT/__profile__/all/NFL?format=json
NFL is defined by concatenation of file name, function name and the first
line number.
e.g.::
account.py:50(GETorHEAD)
or with full path:
opt/stack/swift/swift/proxy/controllers/account.py:50(GETorHEAD)
A list of URL examples:
http://localhost:8080/__profile__ (proxy server)
http://localhost:6200/__profile__/all (object server)
http://localhost:6201/__profile__/current (container server)
http://localhost:6202/__profile__/12345?format=json (account server)
The profiling middleware can be configured in paste file for WSGI servers such
as proxy, account, container and object servers. Please refer to the sample
configuration files in etc directory.
The profiling data is provided with four formats such as binary(by default),
json, csv and odf spreadsheet which requires installing odfpy library.
sudo pip install odfpy
There's also a simple visualization capability which is enabled by using
matplotlib toolkit. it is also required to be installed if you want to use
it to visualize statistic data.
sudo apt-get install python-matplotlib
"""
import os
import sys
import time
from eventlet import greenthread, GreenPool, patcher
import eventlet.green.profile as eprofile
import six
from six.moves import urllib
from swift import gettext_ as _
from swift.common.utils import get_logger, config_true_value
from swift.common.swob import Request
from x_profile.exceptions import NotFoundException, MethodNotAllowed,\
ProfileException
from x_profile.html_viewer import HTMLViewer
from x_profile.profile_model import ProfileLog
DEFAULT_PROFILE_PREFIX = '/tmp/log/swift/profile/default.profile'
# unwind the iterator; it may call start_response, do lots of work, etc
PROFILE_EXEC_EAGER = """
app_iter = self.app(environ, start_response)
app_iter_ = list(app_iter)
if hasattr(app_iter, 'close'):
app_iter.close()
"""
# don't unwind the iterator (don't consume resources)
PROFILE_EXEC_LAZY = """
app_iter_ = self.app(environ, start_response)
"""
thread = patcher.original('thread') # non-monkeypatched module needed
# This monkey patch fixes the problem that the eventlet profile tool
# cannot accumulate profiling results across multiple calls
# of runcall and runctx.
def new_setup(self):
self._has_setup = True
self.cur = None
self.timings = {}
self.current_tasklet = greenthread.getcurrent()
self.thread_id = thread.get_ident()
self.simulate_call("profiler")
def new_runctx(self, cmd, globals, locals):
if not getattr(self, '_has_setup', False):
self._setup()
try:
return self.base.runctx(self, cmd, globals, locals)
finally:
self.TallyTimings()
def new_runcall(self, func, *args, **kw):
if not getattr(self, '_has_setup', False):
self._setup()
try:
return self.base.runcall(self, func, *args, **kw)
finally:
self.TallyTimings()
class ProfileMiddleware(object):
def __init__(self, app, conf):
self.app = app
self.logger = get_logger(conf, log_route='profile')
self.log_filename_prefix = conf.get('log_filename_prefix',
DEFAULT_PROFILE_PREFIX)
dirname = os.path.dirname(self.log_filename_prefix)
# Note: this attempt may fail with a permission error;
# it is better to create the directory and grant the
# current user access to it in advance.
if not os.path.exists(dirname):
os.makedirs(dirname)
self.dump_interval = float(conf.get('dump_interval', 5.0))
self.dump_timestamp = config_true_value(conf.get(
'dump_timestamp', 'no'))
self.flush_at_shutdown = config_true_value(conf.get(
'flush_at_shutdown', 'no'))
self.path = conf.get('path', '__profile__').replace('/', '')
self.unwind = config_true_value(conf.get('unwind', 'no'))
self.profile_module = conf.get('profile_module',
'eventlet.green.profile')
self.profiler = get_profiler(self.profile_module)
self.profile_log = ProfileLog(self.log_filename_prefix,
self.dump_timestamp)
self.viewer = HTMLViewer(self.path, self.profile_module,
self.profile_log)
self.dump_pool = GreenPool(1000)
self.last_dump_at = None
def __del__(self):
if self.flush_at_shutdown:
self.profile_log.clear(str(os.getpid()))
def _combine_body_qs(self, request):
wsgi_input = request.environ['wsgi.input']
query_dict = request.params
qs_in_body = wsgi_input.read()
query_dict.update(urllib.parse.parse_qs(qs_in_body,
keep_blank_values=True,
strict_parsing=False))
return query_dict
def dump_checkpoint(self):
current_time = time.time()
if self.last_dump_at is None or self.last_dump_at +\
self.dump_interval < current_time:
self.dump_pool.spawn_n(self.profile_log.dump_profile,
self.profiler, os.getpid())
self.last_dump_at = current_time
def __call__(self, environ, start_response):
request = Request(environ)
path_entry = request.path_info.split('/')
# hijack favicon requests sent by the browser so that they don't
# invoke the profiling hook and contaminate the data.
if path_entry[1] == 'favicon.ico':
start_response('200 OK', [])
return ''
elif path_entry[1] == self.path:
try:
self.dump_checkpoint()
query_dict = self._combine_body_qs(request)
content, headers = self.viewer.render(request.url,
request.method,
path_entry,
query_dict,
self.renew_profile)
start_response('200 OK', headers)
if isinstance(content, six.text_type):
content = content.encode('utf-8')
return [content]
except MethodNotAllowed as mx:
start_response('405 Method Not Allowed', [])
return '%s' % mx
except NotFoundException as nx:
start_response('404 Not Found', [])
return '%s' % nx
except ProfileException as pf:
start_response('500 Internal Server Error', [])
return '%s' % pf
except Exception as ex:
start_response('500 Internal Server Error', [])
return _('Error on render profiling results: %s') % ex
else:
_locals = locals()
code = self.unwind and PROFILE_EXEC_EAGER or\
PROFILE_EXEC_LAZY
self.profiler.runctx(code, globals(), _locals)
app_iter = _locals['app_iter_']
self.dump_checkpoint()
return app_iter
def renew_profile(self):
self.profiler = get_profiler(self.profile_module)
def get_profiler(profile_module):
if profile_module == 'eventlet.green.profile':
eprofile.Profile._setup = new_setup
eprofile.Profile.runctx = new_runctx
eprofile.Profile.runcall = new_runcall
# hack to import the profile module in a way supported by Python 2.6
__import__(profile_module)
return sys.modules[profile_module].Profile()
def filter_factory(global_conf, **local_conf):
conf = global_conf.copy()
conf.update(local_conf)
def profile_filter(app):
return ProfileMiddleware(app, conf)
return profile_filter
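# A minimal sketch of using the factory directly from Python; the app
# object and option values are illustrative assumptions:
#
#   make_filter = filter_factory({}, dump_interval='10', path='__profile__')
#   profiled_app = make_filter(my_wsgi_app)  # my_wsgi_app: any WSGI callable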
| apache-2.0 |
djpine/pyman | Book/chap8/Supporting Materials/FitOscDecay.py | 3 | 2239 | import numpy as np
import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec # for unequal plot boxes
import scipy.optimize
# define function to calculate reduced chi-squared
def RedChiSqr(func, x, y, dy, params):
resids = y - func(x, *params)
chisq = ((resids/dy)**2).sum()
return chisq/float(x.size-params.size)
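# A brief note (not in the original script): the reduced chi-squared is
# chisq_red = sum(((y - f(x))/dy)**2) / (N - p), with N data points and
# p fitted parameters; values near 1 indicate a fit consistent with the
# stated uncertainties dy.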
# define fitting function
def SineGaussDecay(t, A, B, C, tau, omega):
y = A * (1.0 + B*np.cos(omega*t)) * np.exp(-0.5*t*t/(tau*tau)) + C
return y
# read in spectrum from data file
t, decay, unc = np.loadtxt("OscData.txt", skiprows=4, unpack=True)
# initial values for fitting parameters (guesses)
A0 = 15.0
B0 = 0.6
C0 = 1.2*A0
tau0 = 16.0
omega0 = 2.0 * (2.0*np.pi/tau0)
#omega0 = 2.34
# fit data using SciPy's Levenberg-Marquardt method
nlfit, nlpcov = scipy.optimize.curve_fit(SineGaussDecay,
t, decay, p0=[A0, B0, C0, tau0, omega0], sigma=unc)
# calculate reduced chi-squared
rchi = RedChiSqr(SineGaussDecay, t, decay, unc, nlfit)
# create fitting function from fitted parameters
A, B, C, tau, omega = nlfit
t_fit = np.linspace(0.0, 1.02*t[-1], 512)
d_fit = SineGaussDecay(t_fit, A, B, C, tau, omega)
# Create figure window to plot data
fig = plt.figure(1, figsize=(8,8)) # extra length for residuals
gs = gridspec.GridSpec(2, 1, height_ratios=[6, 2])
# Top plot: data and fit
ax1 = fig.add_subplot(gs[0])
ax1.plot(t_fit, d_fit)
ax1.errorbar(t, decay, yerr=unc, fmt='or', ecolor='black', ms=4)
ax1.set_xlabel('time (ms)')
ax1.set_ylabel('decay (arb units)')
ax1.text(0.55, 0.8, 'A = {0:0.1f}\nB = {1:0.3f}\nC = {2:0.1f}'.format(A, B, C),
transform = ax1.transAxes)
ax1.text(0.75, 0.8, '$\\tau$ = {0:0.1f}\n$\omega$ = {1:0.3f}\n$\chi^2$ = {2:0.3f}'.format(tau, omega, rchi),
transform = ax1.transAxes)
ax1.set_title('$d(t) = A (1+B\,\cos\,\omega t) e^{-t^2/2\\tau^2} + C$')
# Bottom plot: residuals
resids = decay - SineGaussDecay(t, A, B, C, tau, omega)
ax2 = fig.add_subplot(gs[1])
ax2.axhline(color="gray")
ax2.errorbar(t, resids, yerr = unc, ecolor="black", fmt="ro", ms=4)
ax2.set_xlabel('time (ms)')
ax2.set_ylabel('residuals')
ax2.set_ylim(-5, 5)
yticks = (-5, 0, 5)
ax2.set_yticks(yticks)
plt.savefig('FitOscDecay.pdf')
plt.show()
| cc0-1.0 |
snfactory/extract-star | scripts/pySNIFS_special.py | 1 | 10568 | from matplotlib.mlab import linspace, dist
from matplotlib.patches import Circle, Rectangle
from matplotlib.lines import Line2D
from matplotlib.numerix import array
from matplotlib.transforms import blend_xy_sep_transform
from scipy.special import sqrt
import thread
import pylab
class Cursor:
"""
A horizontal and a vertical line that span the axes and move with
the pointer. You can turn off the horizontal or vertical line
respectively with the attributes
horizOn =True|False: controls visibility of the horizontal line
vertOn =True|False: controls visibility of the vertical line
And the visibility of the cursor itself with the visible attribute
"""
def __init__(self, ax, useblit=False, **lineprops):
"""
Add a cursor to ax. If useblit=True, use the backend
dependent blitting features for faster updates (GTKAgg only
now). lineprops is a dictionary of line properties. See
examples/widgets/cursor.py.
"""
self.ax = ax
self.canvas = ax.figure.canvas
self.canvas.mpl_connect('motion_notify_event', self.onmove)
self.canvas.mpl_connect('draw_event', self.clear)
self.visible = True
self.horizOn = True
self.vertOn = True
self.useblit = useblit
self.lineh = ax.axhline(0, visible=False, **lineprops)
self.linev = ax.axvline(0, visible=False, **lineprops)
self.background = None
self.needclear = False
def clear(self, event):
'clear the cursor'
if self.useblit:
self.background = self.canvas.copy_from_bbox(self.ax.bbox)
self.linev.set_visible(False)
self.lineh.set_visible(False)
def onmove(self, event):
'on mouse motion draw the cursor if visible'
if event.inaxes != self.ax:
self.linev.set_visible(False)
self.lineh.set_visible(False)
if self.needclear:
self.canvas.draw()
self.needclear = False
return
self.needclear = True
if not self.visible: return
self.linev.set_xdata((event.xdata, event.xdata))
self.lineh.set_ydata((event.ydata, event.ydata))
self.linev.set_visible(self.visible and self.vertOn)
self.lineh.set_visible(self.visible and self.horizOn)
self._update()
def _update(self):
if self.useblit:
if self.background is not None:
self.canvas.restore_region(self.background)
self.ax.draw_artist(self.linev)
self.ax.draw_artist(self.lineh)
self.canvas.blit(self.ax.bbox)
else:
self.canvas.draw_idle()
return False
class HorizontalSpanSelector:
"""
Select a min/max range of the x axes for a matplotlib Axes
Example usage:
ax = subplot(111)
ax.plot(x,y)
def onselect(xmin, xmax):
print xmin, xmax
span = HorizontalSpanSelector(ax, onselect)
"""
def __init__(self, ax, onselect, minspan=None, useblit=False, rectprops=None):
"""
Create a span selector in ax. When a selection is made, call
onselect with
onselect(xmin, xmax)
and clear the span.
If minspan is not None, ignore events smaller than minspan
The span rect is drawn with rectprops; default
rectprops = dict(facecolor='red', alpha=0.5)
set the visible attribute to False if you want to turn off
the functionality of the span selector
"""
if rectprops is None:
rectprops = dict(facecolor='red', alpha=0.5)
self.ax = ax
self.visible = True
self.canvas = ax.figure.canvas
self.canvas.mpl_connect('motion_notify_event', self.onmove)
self.canvas.mpl_connect('button_press_event', self.press)
self.canvas.mpl_connect('button_release_event', self.release)
self.canvas.mpl_connect('draw_event', self.update_background)
self.rect = None
self.background = None
self.rectprops = rectprops
self.onselect = onselect
self.useblit = useblit
self.minspan = minspan
trans = blend_xy_sep_transform(self.ax.transData, self.ax.transAxes)
self.rect = Rectangle( (0,0), 0, 1,
transform=trans,
visible=False,
**self.rectprops
)
if not self.useblit: self.ax.add_patch(self.rect)
self.pressx = None
def update_background(self, event):
'force an update of the background'
if self.useblit:
self.background = self.canvas.copy_from_bbox(self.ax.bbox)
def ignore(self, event):
'return True if event should be ignored'
return event.inaxes!=self.ax or not self.visible or event.button !=1
def press(self, event):
'on button press event'
if self.ignore(event): return
self.rect.set_visible(self.visible)
self.pressx = event.xdata
return False
def release(self, event):
'on button release event'
if self.pressx is None or self.ignore(event): return
self.rect.set_visible(False)
self.canvas.draw()
xmin = self.pressx
xmax = event.xdata
y = event.ydata
if xmin>xmax: xmin, xmax = xmax, xmin
span = xmax - xmin
if self.minspan is not None and span<self.minspan: return
self.onselect(xmin, xmax, y)
self.pressx = None
return False
def update(self):
'draw using newfangled blit or oldfangled draw depending on useblit'
if self.useblit:
if self.background is not None:
self.canvas.restore_region(self.background)
self.ax.draw_artist(self.rect)
self.canvas.blit(self.ax.bbox)
else:
self.canvas.draw_idle()
return False
def onmove(self, event):
'on motion notify event'
if self.pressx is None or self.ignore(event): return
x,y = event.xdata, event.ydata
minx, maxx = x, self.pressx
if minx>maxx: minx, maxx = maxx, minx
self.rect.xy[0] = minx
self.rect.set_width(maxx-minx)
self.update()
return False
class CircleSpanSelector:
"""
Select a center/radius range of the circle for a matplotlib Axes
Example usage:
ax = subplot(111)
ax.plot(x,y)
def onselect(center, radius, x, y):
print center, radius
span = CircleSpanSelector(ax, onselect)
"""
def __init__(self, ax, onselect, minspan=None, useblit=False, circprops=None):
"""
Create a span selector in ax. When a selection is made, call
onselect with
onselect(center, radius, x, y)
where x and y are the coordinates used to calculate the radius,
and clear the span.
If minspan is not None, ignore events smaller than minspan
The span circle is drawn with circprops; default
circprops = dict(fc='w', alpha=0.5)
set the visible attribute to False if you want to turn off
the functionality of the span selector
"""
if circprops is None:
circprops = dict(fc='w', alpha=0.5)
self.ax = ax
self.visible = True
self.canvas = ax.figure.canvas
self.canvas.mpl_connect('motion_notify_event', self.onmove)
self.canvas.mpl_connect('button_press_event', self.press)
self.canvas.mpl_connect('button_release_event', self.release)
self.canvas.mpl_connect('draw_event', self.update_background)
self.circ = None
self.background = None
self.circprops = circprops
self.onselect = onselect
self.useblit = useblit
self.minspan = minspan
self.circ = Circle( (0,0), 1, **self.circprops)
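# cache the unit-circle vertices so onmove can rescale and translate
# them cheaply instead of rebuilding the Circle patch on every event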
self.unit_verts = [v for v in self.circ.verts]
self.circ.set_visible(False)
if not self.useblit: self.ax.add_patch(self.circ)
self.pressx = None
def update_background(self, event):
'force an update of the background'
if self.useblit:
self.background = self.canvas.copy_from_bbox(self.ax.bbox)
def ignore(self, event):
'return True if event should be ignored'
return event.inaxes!=self.ax or not self.visible or event.button !=1
def press(self, event):
'on button press event'
if self.ignore(event): return
print "press"
self.circ.set_visible(self.visible)
self.pressx = event.xdata
self.pressy = event.ydata
self.circ.set_visible(False)
return False
def release(self, event):
'on button release event'
if self.pressx is None or self.ignore(event): return
if self.pressy is None or self.ignore(event): return
self.canvas.draw()
self.center = [self.pressx, self.pressy]
self.radius = sqrt((event.xdata-self.center[0])**2 + (event.ydata-self.center[1])**2)
y = event.ydata
x = event.xdata
if self.minspan is not None and self.radius < self.minspan: return
self.onselect(self.center, self.radius, x, y)
self.pressx = None
self.pressy = None
return False
def update(self):
'draw using newfangled blit or oldfangled draw depending on useblit'
if self.useblit:
if self.background is not None:
self.canvas.restore_region(self.background)
self.ax.draw_artist(self.circ)
self.canvas.blit(self.ax.bbox)
else:
self.canvas.draw_idle()
return False
def onmove(self, event):
'on motion notify event'
if self.pressx is None or self.ignore(event): return
if self.pressy is None or self.ignore(event): return
self.center = [self.pressx,self.pressy]
self.radius = sqrt((event.xdata-self.center[0])**2 + (event.ydata-self.center[1])**2)
if self.radius > 0.5:
self.circ.set_visible(True)
else:
self.circ.set_visible(False)
self.circ.verts = [(v[0]*self.radius+self.center[0],v[1]*self.radius+self.center[1]) for v in self.unit_verts]
pylab.draw()
self.update()
return False
| mit |
shenzebang/scikit-learn | sklearn/decomposition/tests/test_pca.py | 199 | 10949 | import numpy as np
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_raises
from sklearn import datasets
from sklearn.decomposition import PCA
from sklearn.decomposition import RandomizedPCA
from sklearn.decomposition.pca import _assess_dimension_
from sklearn.decomposition.pca import _infer_dimension_
iris = datasets.load_iris()
def test_pca():
# PCA on dense arrays
pca = PCA(n_components=2)
X = iris.data
X_r = pca.fit(X).transform(X)
np.testing.assert_equal(X_r.shape[1], 2)
X_r2 = pca.fit_transform(X)
assert_array_almost_equal(X_r, X_r2)
pca = PCA()
pca.fit(X)
assert_almost_equal(pca.explained_variance_ratio_.sum(), 1.0, 3)
X_r = pca.transform(X)
X_r2 = pca.fit_transform(X)
assert_array_almost_equal(X_r, X_r2)
# Test get_covariance and get_precision with n_components == n_features
# with n_components < n_features and with n_components == 0
for n_components in [0, 2, X.shape[1]]:
pca.n_components = n_components
pca.fit(X)
cov = pca.get_covariance()
precision = pca.get_precision()
assert_array_almost_equal(np.dot(cov, precision),
np.eye(X.shape[1]), 12)
def test_whitening():
# Check that PCA output has unit-variance
rng = np.random.RandomState(0)
n_samples = 100
n_features = 80
n_components = 30
rank = 50
# some low rank data with correlated features
X = np.dot(rng.randn(n_samples, rank),
np.dot(np.diag(np.linspace(10.0, 1.0, rank)),
rng.randn(rank, n_features)))
# the component-wise standard deviation of the first 50 features is 3
# times the mean standard deviation of the remaining 30 features
X[:, :50] *= 3
assert_equal(X.shape, (n_samples, n_features))
# the component-wise variance is thus highly varying:
assert_almost_equal(X.std(axis=0).std(), 43.9, 1)
for this_PCA, copy in [(x, y) for x in (PCA, RandomizedPCA)
for y in (True, False)]:
# whiten the data while projecting to the lower dim subspace
X_ = X.copy() # make sure we keep an original across iterations.
pca = this_PCA(n_components=n_components, whiten=True, copy=copy)
# test fit_transform
X_whitened = pca.fit_transform(X_.copy())
assert_equal(X_whitened.shape, (n_samples, n_components))
X_whitened2 = pca.transform(X_)
assert_array_almost_equal(X_whitened, X_whitened2)
assert_almost_equal(X_whitened.std(axis=0), np.ones(n_components))
assert_almost_equal(X_whitened.mean(axis=0), np.zeros(n_components))
X_ = X.copy()
pca = this_PCA(n_components=n_components, whiten=False,
copy=copy).fit(X_)
X_unwhitened = pca.transform(X_)
assert_equal(X_unwhitened.shape, (n_samples, n_components))
# in that case the output components still have varying variances
assert_almost_equal(X_unwhitened.std(axis=0).std(), 74.1, 1)
# we always center, so no test for non-centering.
def test_explained_variance():
# Check that the explained variance computed by PCA and RandomizedPCA
# agrees and matches the empirical variance of the projected data
rng = np.random.RandomState(0)
n_samples = 100
n_features = 80
X = rng.randn(n_samples, n_features)
pca = PCA(n_components=2).fit(X)
rpca = RandomizedPCA(n_components=2, random_state=42).fit(X)
assert_array_almost_equal(pca.explained_variance_,
rpca.explained_variance_, 1)
assert_array_almost_equal(pca.explained_variance_ratio_,
rpca.explained_variance_ratio_, 3)
# compare to empirical variances
X_pca = pca.transform(X)
assert_array_almost_equal(pca.explained_variance_,
np.var(X_pca, axis=0))
X_rpca = rpca.transform(X)
assert_array_almost_equal(rpca.explained_variance_,
np.var(X_rpca, axis=0))
def test_pca_check_projection():
# Test that the projection of data is correct
rng = np.random.RandomState(0)
n, p = 100, 3
X = rng.randn(n, p) * .1
X[:10] += np.array([3, 4, 5])
Xt = 0.1 * rng.randn(1, p) + np.array([3, 4, 5])
Yt = PCA(n_components=2).fit(X).transform(Xt)
Yt /= np.sqrt((Yt ** 2).sum())
assert_almost_equal(np.abs(Yt[0][0]), 1., 1)
def test_pca_inverse():
# Test that the projection of data can be inverted
rng = np.random.RandomState(0)
n, p = 50, 3
X = rng.randn(n, p) # spherical data
X[:, 1] *= .00001 # make middle component relatively small
X += [5, 4, 3] # make a large mean
# same check that we can find the original data from the transformed
# signal (since the data is almost of rank n_components)
pca = PCA(n_components=2).fit(X)
Y = pca.transform(X)
Y_inverse = pca.inverse_transform(Y)
assert_almost_equal(X, Y_inverse, decimal=3)
# same as above with whitening (approximate reconstruction)
pca = PCA(n_components=2, whiten=True)
pca.fit(X)
Y = pca.transform(X)
Y_inverse = pca.inverse_transform(Y)
assert_almost_equal(X, Y_inverse, decimal=3)
def test_pca_validation():
X = [[0, 1], [1, 0]]
for n_components in [-1, 3]:
assert_raises(ValueError, PCA(n_components).fit, X)
def test_randomized_pca_check_projection():
# Test that the projection by RandomizedPCA on dense data is correct
rng = np.random.RandomState(0)
n, p = 100, 3
X = rng.randn(n, p) * .1
X[:10] += np.array([3, 4, 5])
Xt = 0.1 * rng.randn(1, p) + np.array([3, 4, 5])
Yt = RandomizedPCA(n_components=2, random_state=0).fit(X).transform(Xt)
Yt /= np.sqrt((Yt ** 2).sum())
assert_almost_equal(np.abs(Yt[0][0]), 1., 1)
def test_randomized_pca_check_list():
# Test that the projection by RandomizedPCA on list data is correct
X = [[1.0, 0.0], [0.0, 1.0]]
X_transformed = RandomizedPCA(n_components=1,
random_state=0).fit(X).transform(X)
assert_equal(X_transformed.shape, (2, 1))
assert_almost_equal(X_transformed.mean(), 0.00, 2)
assert_almost_equal(X_transformed.std(), 0.71, 2)
def test_randomized_pca_inverse():
# Test that RandomizedPCA is inversible on dense data
rng = np.random.RandomState(0)
n, p = 50, 3
X = rng.randn(n, p) # spherical data
X[:, 1] *= .00001 # make middle component relatively small
X += [5, 4, 3] # make a large mean
# same check that we can find the original data from the transformed signal
# (since the data is almost of rank n_components)
pca = RandomizedPCA(n_components=2, random_state=0).fit(X)
Y = pca.transform(X)
Y_inverse = pca.inverse_transform(Y)
assert_almost_equal(X, Y_inverse, decimal=2)
# same as above with whitening (approximate reconstruction)
pca = RandomizedPCA(n_components=2, whiten=True,
random_state=0).fit(X)
Y = pca.transform(X)
Y_inverse = pca.inverse_transform(Y)
relative_max_delta = (np.abs(X - Y_inverse) / np.abs(X).mean()).max()
assert_almost_equal(relative_max_delta, 0.11, decimal=2)
def test_pca_dim():
# Check automated dimensionality setting
rng = np.random.RandomState(0)
n, p = 100, 5
X = rng.randn(n, p) * .1
X[:10] += np.array([3, 4, 5, 1, 2])
pca = PCA(n_components='mle').fit(X)
assert_equal(pca.n_components, 'mle')
assert_equal(pca.n_components_, 1)
def test_infer_dim_1():
# TODO: explain what this is testing
# Or at least use explicit variable names...
n, p = 1000, 5
rng = np.random.RandomState(0)
X = (rng.randn(n, p) * .1 + rng.randn(n, 1) * np.array([3, 4, 5, 1, 2])
+ np.array([1, 0, 7, 4, 6]))
pca = PCA(n_components=p)
pca.fit(X)
spect = pca.explained_variance_
ll = []
for k in range(p):
ll.append(_assess_dimension_(spect, k, n, p))
ll = np.array(ll)
assert_greater(ll[1], ll.max() - .01 * n)
def test_infer_dim_2():
# TODO: explain what this is testing
# Or at least use explicit variable names...
n, p = 1000, 5
rng = np.random.RandomState(0)
X = rng.randn(n, p) * .1
X[:10] += np.array([3, 4, 5, 1, 2])
X[10:20] += np.array([6, 0, 7, 2, -1])
pca = PCA(n_components=p)
pca.fit(X)
spect = pca.explained_variance_
assert_greater(_infer_dimension_(spect, n, p), 1)
def test_infer_dim_3():
n, p = 100, 5
rng = np.random.RandomState(0)
X = rng.randn(n, p) * .1
X[:10] += np.array([3, 4, 5, 1, 2])
X[10:20] += np.array([6, 0, 7, 2, -1])
X[30:40] += 2 * np.array([-1, 1, -1, 1, -1])
pca = PCA(n_components=p)
pca.fit(X)
spect = pca.explained_variance_
assert_greater(_infer_dimension_(spect, n, p), 2)
def test_infer_dim_by_explained_variance():
X = iris.data
pca = PCA(n_components=0.95)
pca.fit(X)
assert_equal(pca.n_components, 0.95)
assert_equal(pca.n_components_, 2)
pca = PCA(n_components=0.01)
pca.fit(X)
assert_equal(pca.n_components, 0.01)
assert_equal(pca.n_components_, 1)
rng = np.random.RandomState(0)
# more features than samples
X = rng.rand(5, 20)
pca = PCA(n_components=.5).fit(X)
assert_equal(pca.n_components, 0.5)
assert_equal(pca.n_components_, 2)
def test_pca_score():
# Test that probabilistic PCA scoring yields a reasonable score
n, p = 1000, 3
rng = np.random.RandomState(0)
X = rng.randn(n, p) * .1 + np.array([3, 4, 5])
pca = PCA(n_components=2)
pca.fit(X)
ll1 = pca.score(X)
h = -0.5 * np.log(2 * np.pi * np.exp(1) * 0.1 ** 2) * p
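# note (added for clarity): h is the expected per-sample log-likelihood of
# an isotropic p-dimensional Gaussian with sigma = 0.1, i.e.
# -(p/2)*log(2*pi*e*sigma**2), so ll1 / h should be close to 1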
np.testing.assert_almost_equal(ll1 / h, 1, 0)
def test_pca_score2():
# Test that probabilistic PCA correctly separates different datasets
n, p = 100, 3
rng = np.random.RandomState(0)
X = rng.randn(n, p) * .1 + np.array([3, 4, 5])
pca = PCA(n_components=2)
pca.fit(X)
ll1 = pca.score(X)
ll2 = pca.score(rng.randn(n, p) * .2 + np.array([3, 4, 5]))
assert_greater(ll1, ll2)
# Test that it gives the same scores if whiten=True
pca = PCA(n_components=2, whiten=True)
pca.fit(X)
ll2 = pca.score(X)
assert_almost_equal(ll1, ll2)
def test_pca_score3():
# Check that probabilistic PCA selects the right model
n, p = 200, 3
rng = np.random.RandomState(0)
Xl = (rng.randn(n, p) + rng.randn(n, 1) * np.array([3, 4, 5])
+ np.array([1, 0, 7]))
Xt = (rng.randn(n, p) + rng.randn(n, 1) * np.array([3, 4, 5])
+ np.array([1, 0, 7]))
ll = np.zeros(p)
for k in range(p):
pca = PCA(n_components=k)
pca.fit(Xl)
ll[k] = pca.score(Xt)
assert_true(ll.argmax() == 1)
| bsd-3-clause |
neuroidss/nupic | external/linux32/lib/python2.6/site-packages/matplotlib/pylab.py | 70 | 10245 | """
This is a procedural interface to the matplotlib object-oriented
plotting library.
The following plotting commands are provided; the majority have
Matlab(TM) analogs and similar arguments.
_Plotting commands
acorr - plot the autocorrelation function
annotate - annotate something in the figure
arrow - add an arrow to the axes
axes - Create a new axes
axhline - draw a horizontal line across axes
axvline - draw a vertical line across axes
axhspan - draw a horizontal bar across axes
axvspan - draw a vertical bar across axes
axis - Set or return the current axis limits
bar - make a bar chart
barh - a horizontal bar chart
broken_barh - a set of horizontal bars with gaps
box - set the axes frame on/off state
boxplot - make a box and whisker plot
cla - clear current axes
clabel - label a contour plot
clf - clear a figure window
clim - adjust the color limits of the current image
close - close a figure window
colorbar - add a colorbar to the current figure
cohere - make a plot of coherence
contour - make a contour plot
contourf - make a filled contour plot
csd - make a plot of cross spectral density
delaxes - delete an axes from the current figure
draw - Force a redraw of the current figure
errorbar - make an errorbar graph
figlegend - make legend on the figure rather than the axes
figimage - make a figure image
figtext - add text in figure coords
figure - create or change active figure
fill - make filled polygons
findobj - recursively find all objects matching some criteria
gca - return the current axes
gcf - return the current figure
gci - get the current image, or None
getp - get a handle graphics property
grid - set whether gridding is on
hist - make a histogram
hold - set the axes hold state
ioff - turn interaction mode off
ion - turn interaction mode on
isinteractive - return True if interaction mode is on
imread - load image file into array
imshow - plot image data
ishold - return the hold state of the current axes
legend - make an axes legend
loglog - a log log plot
matshow - display a matrix in a new figure preserving aspect
pcolor - make a pseudocolor plot
pcolormesh - make a pseudocolor plot using a quadrilateral mesh
pie - make a pie chart
plot - make a line plot
plot_date - plot dates
plotfile - plot column data from an ASCII tab/space/comma delimited file
pie - pie charts
polar - make a polar plot on a PolarAxes
psd - make a plot of power spectral density
quiver - make a direction field (arrows) plot
rc - control the default params
rgrids - customize the radial grids and labels for polar
savefig - save the current figure
scatter - make a scatter plot
setp - set a handle graphics property
semilogx - log x axis
semilogy - log y axis
show - show the figures
specgram - a spectrogram plot
spy - plot sparsity pattern using markers or image
stem - make a stem plot
subplot - make a subplot (numrows, numcols, axesnum)
subplots_adjust - change the params controlling the subplot positions of current figure
subplot_tool - launch the subplot configuration tool
suptitle - add a figure title
table - add a table to the plot
text - add some text at location x,y to the current axes
thetagrids - customize the radial theta grids and labels for polar
title - add a title to the current axes
xcorr - plot the cross correlation of x and y
xlim - set/get the xlimits
ylim - set/get the ylimits
xticks - set/get the xticks
yticks - set/get the yticks
xlabel - add an xlabel to the current axes
ylabel - add a ylabel to the current axes
autumn - set the default colormap to autumn
bone - set the default colormap to bone
cool - set the default colormap to cool
copper - set the default colormap to copper
flag - set the default colormap to flag
gray - set the default colormap to gray
hot - set the default colormap to hot
hsv - set the default colormap to hsv
jet - set the default colormap to jet
pink - set the default colormap to pink
prism - set the default colormap to prism
spring - set the default colormap to spring
summer - set the default colormap to summer
winter - set the default colormap to winter
spectral - set the default colormap to spectral
_Event handling
connect - register an event handler
disconnect - remove a connected event handler
_Matrix commands
cumprod - the cumulative product along a dimension
cumsum - the cumulative sum along a dimension
detrend - remove the mean or best fit line from an array
diag - the k-th diagonal of matrix
diff - the n-th difference of an array
eig - the eigenvalues and eigen vectors of v
eye - a matrix where the k-th diagonal is ones, else zero
find - return the indices where a condition is nonzero
fliplr - flip the rows of a matrix up/down
flipud - flip the columns of a matrix left/right
linspace - a linear spaced vector of N values from min to max inclusive
logspace - a log spaced vector of N values from min to max inclusive
meshgrid - repeat x and y to make regular matrices
ones - an array of ones
rand - an array from the uniform distribution [0,1]
randn - an array from the normal distribution
rot90 - rotate matrix k*90 degrees counterclockwise
squeeze - squeeze an array removing any dimensions of length 1
tri - a triangular matrix
tril - a lower triangular matrix
triu - an upper triangular matrix
vander - the Vandermonde matrix of vector x
svd - singular value decomposition
zeros - a matrix of zeros
_Probability
levypdf - The levy probability density function from the char. func.
normpdf - The Gaussian probability density function
rand - random numbers from the uniform distribution
randn - random numbers from the normal distribution
_Statistics
corrcoef - correlation coefficient
cov - covariance matrix
amax - the maximum along dimension m
mean - the mean along dimension m
median - the median along dimension m
amin - the minimum along dimension m
norm - the norm of vector x
prod - the product along dimension m
ptp - the max-min along dimension m
std - the standard deviation along dimension m
asum - the sum along dimension m
_Time series analysis
bartlett - M-point Bartlett window
blackman - M-point Blackman window
cohere - the coherence using average periodogram
csd - the cross spectral density using average periodogram
fft - the fast Fourier transform of vector x
hamming - M-point Hamming window
hanning - M-point Hanning window
hist - compute the histogram of x
kaiser - M length Kaiser window
psd - the power spectral density using average periodogram
sinc - the sinc function of array x
_Dates
date2num - convert python datetimes to numeric representation
drange - create an array of numbers for date plots
num2date - convert numeric type (float days since 0001) to datetime
_Other
angle - the angle of a complex array
griddata - interpolate irregularly distributed data to a regular grid
load - load ASCII data into array
polyfit - fit x, y to an n-th order polynomial
polyval - evaluate an n-th order polynomial
roots - the roots of the polynomial coefficients in p
save - save an array to an ASCII file
trapz - trapezoidal integration
__end
"""
import sys, warnings
from cbook import flatten, is_string_like, exception_to_str, popd, \
silent_list, iterable, dedent
import numpy as np
from numpy import ma
from matplotlib import mpl # pulls in most modules
from matplotlib.dates import date2num, num2date,\
datestr2num, strpdate2num, drange,\
epoch2num, num2epoch, mx2num,\
DateFormatter, IndexDateFormatter, DateLocator,\
RRuleLocator, YearLocator, MonthLocator, WeekdayLocator,\
DayLocator, HourLocator, MinuteLocator, SecondLocator,\
rrule, MO, TU, WE, TH, FR, SA, SU, YEARLY, MONTHLY,\
WEEKLY, DAILY, HOURLY, MINUTELY, SECONDLY, relativedelta
import matplotlib.dates
# bring all the symbols in so folks can import them from
# pylab in one fell swoop
from matplotlib.mlab import window_hanning, window_none,\
conv, detrend, detrend_mean, detrend_none, detrend_linear,\
polyfit, polyval, entropy, normpdf, griddata,\
levypdf, find, trapz, prepca, rem, norm, orth, rank,\
sqrtm, prctile, center_matrix, rk4, exp_safe, amap,\
sum_flat, mean_flat, rms_flat, l1norm, l2norm, norm, frange,\
diagonal_matrix, base_repr, binary_repr, log2, ispower2,\
bivariate_normal, load, save
from matplotlib.mlab import stineman_interp, slopes, \
stineman_interp, inside_poly, poly_below, poly_between, \
is_closed_polygon, path_length, distances_along_curve, vector_lengths
from numpy import *
from numpy.fft import *
from numpy.random import *
from numpy.linalg import *
from matplotlib.mlab import window_hanning, window_none, conv, detrend, demean, \
detrend_mean, detrend_none, detrend_linear, entropy, normpdf, levypdf, \
find, longest_contiguous_ones, longest_ones, prepca, prctile, prctile_rank, \
center_matrix, rk4, bivariate_normal, get_xyz_where, get_sparse_matrix, dist, \
dist_point_to_segment, segments_intersect, fftsurr, liaupunov, movavg, \
save, load, exp_safe, \
amap, rms_flat, l1norm, l2norm, norm_flat, frange, diagonal_matrix, identity, \
base_repr, binary_repr, log2, ispower2, fromfunction_kw, rem, norm, orth, rank, sqrtm,\
mfuncC, approx_real, rec_append_field, rec_drop_fields, rec_join, csv2rec, rec2csv, isvector
from matplotlib.pyplot import *
# provide the recommended module abbrevs in the pylab namespace
import matplotlib.pyplot as plt
import numpy as np
| agpl-3.0 |
hsiaoyi0504/scikit-learn | sklearn/ensemble/tests/test_base.py | 284 | 1328 | """
Testing for the base module (sklearn.ensemble.base).
"""
# Authors: Gilles Louppe
# License: BSD 3 clause
from numpy.testing import assert_equal
from nose.tools import assert_true
from sklearn.utils.testing import assert_raise_message
from sklearn.datasets import load_iris
from sklearn.ensemble import BaggingClassifier
from sklearn.linear_model import Perceptron
def test_base():
# Check BaseEnsemble methods.
ensemble = BaggingClassifier(base_estimator=Perceptron(), n_estimators=3)
iris = load_iris()
ensemble.fit(iris.data, iris.target)
ensemble.estimators_ = [] # empty the list and create estimators manually
ensemble._make_estimator()
ensemble._make_estimator()
ensemble._make_estimator()
ensemble._make_estimator(append=False)
assert_equal(3, len(ensemble))
assert_equal(3, len(ensemble.estimators_))
assert_true(isinstance(ensemble[0], Perceptron))
def test_base_zero_n_estimators():
# Check that instantiating a BaseEnsemble with n_estimators<=0 raises
# a ValueError.
ensemble = BaggingClassifier(base_estimator=Perceptron(), n_estimators=0)
iris = load_iris()
assert_raise_message(ValueError,
"n_estimators must be greater than zero, got 0.",
ensemble.fit, iris.data, iris.target)
| bsd-3-clause |
abhishekgahlot/scikit-learn | sklearn/decomposition/base.py | 12 | 5524 | """Principal Component Analysis Base Classes"""
# Author: Alexandre Gramfort <alexandre.gramfort@inria.fr>
# Olivier Grisel <olivier.grisel@ensta.org>
# Mathieu Blondel <mathieu@mblondel.org>
# Denis A. Engemann <d.engemann@fz-juelich.de>
# Kyle Kastner <kastnerkyle@gmail.com>
#
# License: BSD 3 clause
import numpy as np
from scipy import linalg
from ..base import BaseEstimator, TransformerMixin
from ..utils import check_array
from ..utils.extmath import fast_dot
from ..externals import six
from abc import ABCMeta, abstractmethod
class _BasePCA(six.with_metaclass(ABCMeta, BaseEstimator, TransformerMixin)):
"""Base class for PCA methods.
Warning: This class should not be used directly.
Use derived classes instead.
"""
def get_covariance(self):
"""Compute data covariance with the generative model.
``cov = components_.T * S**2 * components_ + sigma2 * eye(n_features)``
where S**2 contains the explained variances, and sigma2 contains the
noise variances.
Returns
-------
cov : array, shape=(n_features, n_features)
Estimated covariance of data.
"""
components_ = self.components_
exp_var = self.explained_variance_
if self.whiten:
components_ = components_ * np.sqrt(exp_var[:, np.newaxis])
exp_var_diff = np.maximum(exp_var - self.noise_variance_, 0.)
cov = np.dot(components_.T * exp_var_diff, components_)
cov.flat[::len(cov) + 1] += self.noise_variance_ # modify diag inplace
return cov
def get_precision(self):
"""Compute data precision matrix with the generative model.
Equals the inverse of the covariance but computed with
the matrix inversion lemma for efficiency.
Returns
-------
precision : array, shape=(n_features, n_features)
Estimated precision of data.
"""
n_features = self.components_.shape[1]
# handle corner cases first
if self.n_components_ == 0:
return np.eye(n_features) / self.noise_variance_
if self.n_components_ == n_features:
return linalg.inv(self.get_covariance())
# Get precision using matrix inversion lemma
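# (added note) the matrix inversion lemma / Woodbury identity:
#   (A + U C V)^-1 = A^-1 - A^-1 U (C^-1 + V A^-1 U)^-1 V A^-1
# with A = noise_variance * I here, so inverting the n_features-sized
# covariance only requires inverting a small matrix of size n_components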
components_ = self.components_
exp_var = self.explained_variance_
if self.whiten:
components_ = components_ * np.sqrt(exp_var[:, np.newaxis])
exp_var_diff = np.maximum(exp_var - self.noise_variance_, 0.)
precision = np.dot(components_, components_.T) / self.noise_variance_
precision.flat[::len(precision) + 1] += 1. / exp_var_diff
precision = np.dot(components_.T,
np.dot(linalg.inv(precision), components_))
precision /= -(self.noise_variance_ ** 2)
precision.flat[::len(precision) + 1] += 1. / self.noise_variance_
return precision
@abstractmethod
def fit(self, X, y=None):
"""Placeholder for fit. Subclasses should implement this method!
Fit the model with X.
Parameters
----------
X: array-like, shape (n_samples, n_features)
Training data, where n_samples is the number of samples and
n_features is the number of features.
Returns
-------
self: object
Returns the instance itself.
"""
def transform(self, X, y=None):
"""Apply dimensionality reduction to X.
X is projected on the first principal components previously extracted
from a training set.
Parameters
----------
X : array-like, shape (n_samples, n_features)
New data, where n_samples is the number of samples
and n_features is the number of features.
Returns
-------
X_new : array-like, shape (n_samples, n_components)
Examples
--------
>>> import numpy as np
>>> from sklearn.decomposition import IncrementalPCA
>>> X = np.array([[-1, -1], [-2, -1], [-3, -2], [1, 1], [2, 1], [3, 2]])
>>> ipca = IncrementalPCA(n_components=2, batch_size=3)
>>> ipca.fit(X)
IncrementalPCA(batch_size=3, copy=True, n_components=2, whiten=False)
>>> ipca.transform(X) # doctest: +SKIP
"""
X = check_array(X)
if self.mean_ is not None:
X = X - self.mean_
X_transformed = fast_dot(X, self.components_.T)
if self.whiten:
X_transformed /= np.sqrt(self.explained_variance_)
return X_transformed
def inverse_transform(self, X, y=None):
"""Transform data back to its original space.
In other words, return an input X_original whose transform would be X.
Parameters
----------
X : array-like, shape (n_samples, n_components)
New data, where n_samples is the number of samples
and n_components is the number of components.
Returns
-------
X_original : array-like, shape (n_samples, n_features)
Notes
-----
If whitening is enabled, inverse_transform will compute the
exact inverse operation, which includes reversing whitening.
"""
if self.whiten:
return fast_dot(X, np.sqrt(self.explained_variance_[:, np.newaxis]) *
self.components_) + self.mean_
else:
return fast_dot(X, self.components_) + self.mean_
| bsd-3-clause |
wegamekinglc/Finance-Python | PyFin/tests/Analysis/testCrossSectionValueHolders.py | 2 | 22232 | # -*- coding: utf-8 -*-
u"""
Created on 2017-1-6
@author: cheng.li
"""
import unittest
import numpy as np
import pandas as pd
from PyFin.Enums import Factors
from PyFin.Analysis.SecurityValueHolders import SecurityLatestValueHolder
from PyFin.Analysis.CrossSectionValueHolders import CSRankedSecurityValueHolder
from PyFin.Analysis.CrossSectionValueHolders import CSTopNSecurityValueHolder
from PyFin.Analysis.CrossSectionValueHolders import CSBottomNSecurityValueHolder
from PyFin.Analysis.CrossSectionValueHolders import CSTopNPercentileSecurityValueHolder
from PyFin.Analysis.CrossSectionValueHolders import CSBottomNPercentileSecurityValueHolder
from PyFin.Analysis.CrossSectionValueHolders import CSAverageSecurityValueHolder
from PyFin.Analysis.CrossSectionValueHolders import CSAverageAdjustedSecurityValueHolder
from PyFin.Analysis.CrossSectionValueHolders import CSZScoreSecurityValueHolder
from PyFin.Analysis.CrossSectionValueHolders import CSFillNASecurityValueHolder
from PyFin.Analysis.CrossSectionValueHolders import CSPercentileSecurityValueHolder
from PyFin.Analysis.CrossSectionValueHolders import CSResidueSecurityValueHolder
class TestCrossSectionValueHolder(unittest.TestCase):
def setUp(self):
np.random.seed(0)
sample1 = np.random.randn(1000, 6)
sample2 = np.random.randn(1000, 6)
self.datas = {'aapl': {'close': sample1[:, 0], 'open': sample1[:, 1]},
'ibm': {'close': sample2[:, 0], 'open': sample2[:, 1]},
'goog': {'close': sample1[:, 2], 'open': sample1[:, 3]},
'baba': {'close': sample2[:, 2], 'open': sample2[:, 3]},
'tela': {'close': sample1[:, 4], 'open': sample1[:, 5]},
'nflx': {'close': sample2[:, 4], 'open': sample2[:, 5]}
}
def testCSRankedSecurityValueHolderWithSymbolName(self):
benchmark = SecurityLatestValueHolder(x='close')
rankHolder = CSRankedSecurityValueHolder('close')
for i in range(len(self.datas['aapl']['close'])):
data = {'aapl': {Factors.CLOSE: self.datas['aapl'][Factors.CLOSE][i],
Factors.OPEN: self.datas['aapl'][Factors.OPEN][i]},
'ibm': {Factors.CLOSE: self.datas['ibm'][Factors.CLOSE][i],
Factors.OPEN: self.datas['ibm'][Factors.OPEN][i]}}
benchmark.push(data)
rankHolder.push(data)
benchmarkValues = benchmark.value
np.testing.assert_array_almost_equal(benchmarkValues.rank().values, rankHolder.value.values)
def testCSRankedSecurityValueHolder(self):
benchmark = SecurityLatestValueHolder(x='close')
rankHolder = CSRankedSecurityValueHolder(benchmark)
for i in range(len(self.datas['aapl']['close'])):
data = {'aapl': {Factors.CLOSE: self.datas['aapl'][Factors.CLOSE][i],
Factors.OPEN: self.datas['aapl'][Factors.OPEN][i]},
'ibm': {Factors.CLOSE: self.datas['ibm'][Factors.CLOSE][i],
Factors.OPEN: self.datas['ibm'][Factors.OPEN][i]}}
benchmark.push(data)
rankHolder.push(data)
benchmarkValues = benchmark.value
np.testing.assert_array_almost_equal(benchmarkValues.rank().values, rankHolder.value.values)
def testCSTopNSecurityValueHolder(self):
benchmark = SecurityLatestValueHolder(x='close')
n = 2
topnHolder = CSTopNSecurityValueHolder(benchmark, n)
for i in range(len(self.datas['aapl']['close'])):
data = {'aapl': {Factors.CLOSE: self.datas['aapl'][Factors.CLOSE][i],
Factors.OPEN: self.datas['aapl'][Factors.OPEN][i]},
'ibm': {Factors.CLOSE: self.datas['ibm'][Factors.CLOSE][i],
Factors.OPEN: self.datas['ibm'][Factors.OPEN][i]},
'goog': {Factors.CLOSE: self.datas['goog'][Factors.CLOSE][i],
Factors.OPEN: self.datas['goog'][Factors.OPEN][i]},
'baba': {Factors.CLOSE: self.datas['baba'][Factors.CLOSE][i],
Factors.OPEN: self.datas['baba'][Factors.OPEN][i]}}
benchmark.push(data)
topnHolder.push(data)
benchmarkValues = benchmark.value
np.testing.assert_array_almost_equal((-benchmarkValues).rank().values <= n, topnHolder.value.values)
def testCSBottomNSecurityValueHolder(self):
benchmark = SecurityLatestValueHolder(x='close')
n = 2
topnHolder = CSBottomNSecurityValueHolder(benchmark, n)
for i in range(len(self.datas['aapl']['close'])):
data = {'aapl': {Factors.CLOSE: self.datas['aapl'][Factors.CLOSE][i],
Factors.OPEN: self.datas['aapl'][Factors.OPEN][i]},
'ibm': {Factors.CLOSE: self.datas['ibm'][Factors.CLOSE][i],
Factors.OPEN: self.datas['ibm'][Factors.OPEN][i]},
'goog': {Factors.CLOSE: self.datas['goog'][Factors.CLOSE][i],
Factors.OPEN: self.datas['goog'][Factors.OPEN][i]},
'baba': {Factors.CLOSE: self.datas['baba'][Factors.CLOSE][i],
Factors.OPEN: self.datas['baba'][Factors.OPEN][i]}}
benchmark.push(data)
topnHolder.push(data)
benchmarkValues = benchmark.value
np.testing.assert_array_almost_equal(benchmarkValues.rank().values <= n, topnHolder.value.values)
def testCSRankedSecurityValueHolderWithGroups(self):
benchmark = SecurityLatestValueHolder(x='close')
groups = SecurityLatestValueHolder(x='ind')
rankHolder = CSRankedSecurityValueHolder(benchmark, groups)
for i in range(len(self.datas['aapl']['close'])):
data = {'aapl': {Factors.CLOSE: self.datas['aapl'][Factors.CLOSE][i],
Factors.OPEN: self.datas['aapl'][Factors.OPEN][i],
'ind': 1.},
'ibm': {Factors.CLOSE: self.datas['ibm'][Factors.CLOSE][i],
Factors.OPEN: self.datas['ibm'][Factors.OPEN][i],
'ind': 1.},
'goog': {Factors.CLOSE: self.datas['goog'][Factors.CLOSE][i],
Factors.OPEN: self.datas['goog'][Factors.OPEN][i],
'ind': 2.},
'baba': {Factors.CLOSE: self.datas['baba'][Factors.CLOSE][i],
Factors.OPEN: self.datas['baba'][Factors.OPEN][i],
'ind': 2.}}
benchmark.push(data)
rankHolder.push(data)
benchmarkValues = benchmark.value
groups = {'aapl': 1., 'ibm': 1., 'goog': 2., 'baba': 2.}
expected_rank = pd.Series(benchmarkValues.to_dict()).groupby(groups).rank().values
np.testing.assert_array_almost_equal(expected_rank, rankHolder.value.values)
def testCSAverageSecurityValueHolder(self):
benchmark = SecurityLatestValueHolder(x='close')
meanHolder = CSAverageSecurityValueHolder(benchmark)
for i in range(len(self.datas['aapl']['close'])):
data = {'aapl': {Factors.CLOSE: self.datas['aapl'][Factors.CLOSE][i],
Factors.OPEN: self.datas['aapl'][Factors.OPEN][i]},
'ibm': {Factors.CLOSE: self.datas['ibm'][Factors.CLOSE][i],
Factors.OPEN: self.datas['ibm'][Factors.OPEN][i]}}
benchmark.push(data)
meanHolder.push(data)
benchmarkValues = benchmark.value
np.testing.assert_array_almost_equal(benchmarkValues.values.mean(), meanHolder.value.values)
def testCSAverageSecurityValueHolderWithGroup(self):
benchmark = SecurityLatestValueHolder(x='close')
groups = SecurityLatestValueHolder(x='ind')
meanHolder = CSAverageSecurityValueHolder(benchmark, groups)
for i in range(len(self.datas['aapl']['close'])):
data = {'aapl': {Factors.CLOSE: self.datas['aapl'][Factors.CLOSE][i],
Factors.OPEN: self.datas['aapl'][Factors.OPEN][i],
'ind': 1.},
'ibm': {Factors.CLOSE: self.datas['ibm'][Factors.CLOSE][i],
Factors.OPEN: self.datas['ibm'][Factors.OPEN][i],
'ind': 1.},
'goog': {Factors.CLOSE: self.datas['goog'][Factors.CLOSE][i],
Factors.OPEN: self.datas['goog'][Factors.OPEN][i],
'ind': 2.},
'baba': {Factors.CLOSE: self.datas['baba'][Factors.CLOSE][i],
Factors.OPEN: self.datas['baba'][Factors.OPEN][i],
'ind': 2.}}
benchmark.push(data)
meanHolder.push(data)
benchmarkValues = benchmark.value
groups = {'aapl': 1., 'ibm': 1., 'goog': 2., 'baba': 2.}
expected_mean = pd.Series(benchmarkValues.to_dict()).groupby(groups).mean()
calculated_mean = meanHolder.value
for name in calculated_mean.index():
if name in ['aapl', 'ibm']:
self.assertAlmostEqual(calculated_mean[name], expected_mean[1])
else:
self.assertAlmostEqual(calculated_mean[name], expected_mean[2])
def testCSPercentileSecurityValueHolder(self):
benchmark = SecurityLatestValueHolder(x='close')
perHolder = CSPercentileSecurityValueHolder(benchmark)
for i in range(len(self.datas['aapl']['close'])):
data = {'aapl': {Factors.CLOSE: self.datas['aapl'][Factors.CLOSE][i],
Factors.OPEN: self.datas['aapl'][Factors.OPEN][i]},
'ibm': {Factors.CLOSE: self.datas['ibm'][Factors.CLOSE][i],
Factors.OPEN: self.datas['ibm'][Factors.OPEN][i]},
'goog': {Factors.CLOSE: self.datas['goog'][Factors.CLOSE][i],
Factors.OPEN: self.datas['goog'][Factors.OPEN][i]},
'baba': {Factors.CLOSE: self.datas['baba'][Factors.CLOSE][i],
Factors.OPEN: self.datas['baba'][Factors.OPEN][i]}
}
benchmark.push(data)
perHolder.push(data)
benchmarkValues = benchmark.value
np.testing.assert_array_almost_equal(benchmarkValues.rank().values / len(data), perHolder.value.values)
def testCSTopNPercentileSecurityValueHolder(self):
benchmark = SecurityLatestValueHolder(x='close')
n = 0.3
perHolder = CSTopNPercentileSecurityValueHolder(benchmark, n)
for i in range(len(self.datas['aapl']['close'])):
data = {'aapl': {Factors.CLOSE: self.datas['aapl'][Factors.CLOSE][i],
Factors.OPEN: self.datas['aapl'][Factors.OPEN][i]},
'ibm': {Factors.CLOSE: self.datas['ibm'][Factors.CLOSE][i],
Factors.OPEN: self.datas['ibm'][Factors.OPEN][i]},
'goog': {Factors.CLOSE: self.datas['goog'][Factors.CLOSE][i],
Factors.OPEN: self.datas['goog'][Factors.OPEN][i]},
'baba': {Factors.CLOSE: self.datas['baba'][Factors.CLOSE][i],
Factors.OPEN: self.datas['baba'][Factors.OPEN][i]}
}
benchmark.push(data)
perHolder.push(data)
benchmarkValues = benchmark.value
np.testing.assert_array_almost_equal(((-benchmarkValues).rank().values / len(data)) <= n,
perHolder.value.values)
def testCSBottomNPercentileSecurityValueHolder(self):
benchmark = SecurityLatestValueHolder(x='close')
n = 0.3
perHolder = CSBottomNPercentileSecurityValueHolder(benchmark, n)
for i in range(len(self.datas['aapl']['close'])):
data = {'aapl': {Factors.CLOSE: self.datas['aapl'][Factors.CLOSE][i],
Factors.OPEN: self.datas['aapl'][Factors.OPEN][i]},
'ibm': {Factors.CLOSE: self.datas['ibm'][Factors.CLOSE][i],
Factors.OPEN: self.datas['ibm'][Factors.OPEN][i]},
'goog': {Factors.CLOSE: self.datas['goog'][Factors.CLOSE][i],
Factors.OPEN: self.datas['goog'][Factors.OPEN][i]},
'baba': {Factors.CLOSE: self.datas['baba'][Factors.CLOSE][i],
Factors.OPEN: self.datas['baba'][Factors.OPEN][i]}
}
benchmark.push(data)
perHolder.push(data)
benchmarkValues = benchmark.value
np.testing.assert_array_almost_equal((benchmarkValues.rank().values / len(data)) <= n,
perHolder.value.values)
def testCSPercentileSecurityValueHolderWithGroups(self):
benchmark = SecurityLatestValueHolder(x='close')
groups = SecurityLatestValueHolder(x='ind')
perHolder = CSPercentileSecurityValueHolder(benchmark, groups)
for i in range(len(self.datas['aapl']['close'])):
data = {'aapl': {Factors.CLOSE: self.datas['aapl'][Factors.CLOSE][i],
Factors.OPEN: self.datas['aapl'][Factors.OPEN][i],
'ind': 1.},
'ibm': {Factors.CLOSE: self.datas['ibm'][Factors.CLOSE][i],
Factors.OPEN: self.datas['ibm'][Factors.OPEN][i],
'ind': 1.},
'goog': {Factors.CLOSE: self.datas['goog'][Factors.CLOSE][i],
Factors.OPEN: self.datas['goog'][Factors.OPEN][i],
'ind': 2.},
'baba': {Factors.CLOSE: self.datas['baba'][Factors.CLOSE][i],
Factors.OPEN: self.datas['baba'][Factors.OPEN][i],
'ind': 2.}}
benchmark.push(data)
perHolder.push(data)
benchmarkValues = benchmark.value
groups = {'aapl': 1., 'ibm': 1., 'goog': 2., 'baba': 2.}
expected_rank = pd.Series(benchmarkValues.to_dict()).groupby(groups) \
.transform(lambda x: x.rank().values / len(x))
np.testing.assert_array_almost_equal(expected_rank, perHolder.value.values)
def testCSAverageAdjustedSecurityValueHolder(self):
benchmark = SecurityLatestValueHolder(x='close')
meanAdjustedHolder = CSAverageAdjustedSecurityValueHolder(benchmark)
for i in range(len(self.datas['aapl']['close'])):
data = {'aapl': {Factors.CLOSE: self.datas['aapl'][Factors.CLOSE][i],
Factors.OPEN: self.datas['aapl'][Factors.OPEN][i]},
'ibm': {Factors.CLOSE: self.datas['ibm'][Factors.CLOSE][i],
Factors.OPEN: self.datas['ibm'][Factors.OPEN][i]},
}
benchmark.push(data)
meanAdjustedHolder.push(data)
benchmarkValues = benchmark.value
np.testing.assert_array_almost_equal((benchmarkValues - benchmarkValues.mean()).values, meanAdjustedHolder.value.values)
def testCSAverageAdjustedSecurityValueHolderWithGroups(self):
benchmark = SecurityLatestValueHolder(x='close')
groups = SecurityLatestValueHolder(x='ind')
meanAdjustedHolder = CSAverageAdjustedSecurityValueHolder(benchmark, groups)
for i in range(len(self.datas['aapl']['close'])):
data = {'aapl': {Factors.CLOSE: self.datas['aapl'][Factors.CLOSE][i],
Factors.OPEN: self.datas['aapl'][Factors.OPEN][i],
'ind': 1.},
'ibm': {Factors.CLOSE: self.datas['ibm'][Factors.CLOSE][i],
Factors.OPEN: self.datas['ibm'][Factors.OPEN][i],
'ind': 1.},
'goog': {Factors.CLOSE: self.datas['goog'][Factors.CLOSE][i],
Factors.OPEN: self.datas['goog'][Factors.OPEN][i],
'ind': 2.},
'baba': {Factors.CLOSE: self.datas['baba'][Factors.CLOSE][i],
Factors.OPEN: self.datas['baba'][Factors.OPEN][i],
'ind': 2.}}
benchmark.push(data)
meanAdjustedHolder.push(data)
benchmarkValues = benchmark.value
groups = {'aapl': 1., 'ibm': 1., 'goog': 2., 'baba': 2.}
expected_rank = pd.Series(benchmarkValues.to_dict()).groupby(groups) \
.transform(lambda x: x - x.mean())
np.testing.assert_array_almost_equal(expected_rank, meanAdjustedHolder.value.values)
def testCSZscoreSecurityValueHolder(self):
keys = list(range(1, 11))
values = list(range(10, 0, -1))
data = {}
for i, k in enumerate(keys):
data[k] = {}
data[k]['close'] = values[i]
quantile_value = CSZScoreSecurityValueHolder('close')
quantile_value.push(data)
calculated = quantile_value.value
data = np.linspace(10., 1., 10)
expected = (data - data.mean()) / data.std()
np.testing.assert_array_almost_equal(expected, calculated.values)
def testCSFillNASecurityValueHolder(self):
benchmark = SecurityLatestValueHolder(x='close')
groups = SecurityLatestValueHolder(x='ind')
meanAdjustedHolder = CSFillNASecurityValueHolder(benchmark, groups)
def cal_func(x):
x[np.isnan(x)] = np.nanmean(x)
return x
for i in range(len(self.datas['aapl']['close'])):
data = {'aapl': {Factors.CLOSE: self.datas['aapl'][Factors.CLOSE][i],
Factors.OPEN: self.datas['aapl'][Factors.OPEN][i],
'ind': 1.},
'ibm': {Factors.CLOSE: self.datas['ibm'][Factors.CLOSE][i],
Factors.OPEN: self.datas['ibm'][Factors.OPEN][i],
'ind': 1.},
'tela': {Factors.CLOSE: np.nan,
Factors.OPEN: self.datas['tela'][Factors.OPEN][i],
'ind': 1.},
'goog': {Factors.CLOSE: self.datas['goog'][Factors.CLOSE][i],
Factors.OPEN: self.datas['goog'][Factors.OPEN][i],
'ind': 2.},
'baba': {Factors.CLOSE: np.nan,
Factors.OPEN: self.datas['baba'][Factors.OPEN][i],
'ind': 2.},
'nflx': {Factors.CLOSE: self.datas['nflx'][Factors.CLOSE][i],
Factors.OPEN: self.datas['nflx'][Factors.OPEN][i],
'ind': 2.}
}
benchmark.push(data)
meanAdjustedHolder.push(data)
benchmarkValues = benchmark.value
groups = {'aapl': 1., 'ibm': 1., 'tela': 1., 'goog': 2., 'baba': 2., 'nflx': 2.}
expected_rank = pd.Series(benchmarkValues.to_dict()).groupby(groups) \
.transform(cal_func)
np.testing.assert_array_almost_equal(expected_rank, meanAdjustedHolder.value.values)
def testCSZscoreSecurityValueHolderWithGroups(self):
benchmark = SecurityLatestValueHolder(x='close')
groups = SecurityLatestValueHolder(x='ind')
meanAdjustedHolder = CSZScoreSecurityValueHolder(benchmark, groups)
for i in range(len(self.datas['aapl']['close'])):
data = {'aapl': {Factors.CLOSE: self.datas['aapl'][Factors.CLOSE][i],
Factors.OPEN: self.datas['aapl'][Factors.OPEN][i],
'ind': 1.},
'ibm': {Factors.CLOSE: self.datas['ibm'][Factors.CLOSE][i],
Factors.OPEN: self.datas['ibm'][Factors.OPEN][i],
'ind': 1.},
'goog': {Factors.CLOSE: self.datas['goog'][Factors.CLOSE][i],
Factors.OPEN: self.datas['goog'][Factors.OPEN][i],
'ind': 2.},
'baba': {Factors.CLOSE: self.datas['baba'][Factors.CLOSE][i],
Factors.OPEN: self.datas['baba'][Factors.OPEN][i],
'ind': 2.}}
benchmark.push(data)
meanAdjustedHolder.push(data)
benchmarkValues = benchmark.value
groups = {'aapl': 1., 'ibm': 1., 'goog': 2., 'baba': 2.}
expected_rank = pd.Series(benchmarkValues.to_dict()).groupby(groups) \
.transform(lambda x: (x - x.mean()) / x.std(ddof=0))
np.testing.assert_array_almost_equal(expected_rank, meanAdjustedHolder.value.values)
def testCSZResidueSecurityValueHolder(self):
y = SecurityLatestValueHolder(x='close')
x = SecurityLatestValueHolder(x='open')
res = CSResidueSecurityValueHolder(y, x)
for i in range(len(self.datas['aapl']['close'])):
data = {'aapl': {Factors.CLOSE: self.datas['aapl'][Factors.CLOSE][i],
Factors.OPEN: self.datas['aapl'][Factors.OPEN][i]},
'ibm': {Factors.CLOSE: self.datas['ibm'][Factors.CLOSE][i],
Factors.OPEN: self.datas['ibm'][Factors.OPEN][i]}}
y.push(data)
x.push(data)
res.push(data)
calculated = res.value.values
y_values = y.value.values
x_values = x.value.values
x_values = np.concatenate([np.ones(shape=(len(x_values), 1)), x_values.reshape(-1, 1)], axis=1)
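# ordinary least squares via the normal equations:
# beta = (X^T X)^{-1} X^T y, so the residue below is y - X beta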
beta = np.dot(np.linalg.inv(np.dot(x_values.T, x_values)), np.dot(x_values.T, y_values.reshape(-1, 1)))
expected = y_values - np.dot(x_values, beta).flatten()
np.testing.assert_array_almost_equal(calculated, expected)
| mit |
yunfeilu/scikit-learn | examples/svm/plot_svm_regression.py | 249 | 1451 | """
===================================================================
Support Vector Regression (SVR) using linear and non-linear kernels
===================================================================
Toy example of 1D regression using linear, polynomial and RBF kernels.
"""
print(__doc__)
import numpy as np
from sklearn.svm import SVR
import matplotlib.pyplot as plt
###############################################################################
# Generate sample data
X = np.sort(5 * np.random.rand(40, 1), axis=0)
y = np.sin(X).ravel()
###############################################################################
# Add noise to targets
y[::5] += 3 * (0.5 - np.random.rand(8))
###############################################################################
# Fit regression model
svr_rbf = SVR(kernel='rbf', C=1e3, gamma=0.1)
svr_lin = SVR(kernel='linear', C=1e3)
svr_poly = SVR(kernel='poly', C=1e3, degree=2)
y_rbf = svr_rbf.fit(X, y).predict(X)
y_lin = svr_lin.fit(X, y).predict(X)
y_poly = svr_poly.fit(X, y).predict(X)
###############################################################################
# look at the results
plt.scatter(X, y, c='k', label='data')
plt.hold('on')
plt.plot(X, y_rbf, c='g', label='RBF model')
plt.plot(X, y_lin, c='r', label='Linear model')
plt.plot(X, y_poly, c='b', label='Polynomial model')
plt.xlabel('data')
plt.ylabel('target')
plt.title('Support Vector Regression')
plt.legend()
plt.show()
| bsd-3-clause |
AIML/scikit-learn | sklearn/utils/validation.py | 67 | 24013 | """Utilities for input validation"""
# Authors: Olivier Grisel
# Gael Varoquaux
# Andreas Mueller
# Lars Buitinck
# Alexandre Gramfort
# Nicolas Tresegnie
# License: BSD 3 clause
import warnings
import numbers
import numpy as np
import scipy.sparse as sp
from ..externals import six
from inspect import getargspec
FLOAT_DTYPES = (np.float64, np.float32, np.float16)
class DataConversionWarning(UserWarning):
"""A warning on implicit data conversions happening in the code"""
pass
warnings.simplefilter("always", DataConversionWarning)
class NonBLASDotWarning(UserWarning):
"""A warning on implicit dispatch to numpy.dot"""
class NotFittedError(ValueError, AttributeError):
"""Exception class to raise if estimator is used before fitting
This class inherits from both ValueError and AttributeError to help with
exception handling and backward compatibility.
"""
# Silenced by default to reduce verbosity. Turn on at runtime for
# performance profiling.
warnings.simplefilter('ignore', NonBLASDotWarning)
def _assert_all_finite(X):
"""Like assert_all_finite, but only for ndarray."""
X = np.asanyarray(X)
# First try an O(n) time, O(1) space solution for the common case that
# everything is finite; fall back to O(n) space np.isfinite to prevent
# false positives from overflow in sum method.
if (X.dtype.char in np.typecodes['AllFloat'] and not np.isfinite(X.sum())
and not np.isfinite(X).all()):
raise ValueError("Input contains NaN, infinity"
" or a value too large for %r." % X.dtype)
def assert_all_finite(X):
"""Throw a ValueError if X contains NaN or infinity.
Input MUST be an np.ndarray instance or a scipy.sparse matrix."""
_assert_all_finite(X.data if sp.issparse(X) else X)
def as_float_array(X, copy=True, force_all_finite=True):
"""Converts an array-like to an array of floats
The new dtype will be np.float32 or np.float64, depending on the original
type. The function can create a copy or modify the argument depending
on the argument copy.
Parameters
----------
X : {array-like, sparse matrix}
copy : bool, optional
If True, a copy of X will be created. If False, a copy may still be
returned if X's dtype is not a floating point type.
force_all_finite : boolean (default=True)
Whether to raise an error on np.inf and np.nan in X.
Returns
-------
XT : {array, sparse matrix}
An array of type np.float
"""
if isinstance(X, np.matrix) or (not isinstance(X, np.ndarray)
and not sp.issparse(X)):
return check_array(X, ['csr', 'csc', 'coo'], dtype=np.float64,
copy=copy, force_all_finite=force_all_finite,
ensure_2d=False)
elif sp.issparse(X) and X.dtype in [np.float32, np.float64]:
return X.copy() if copy else X
elif X.dtype in [np.float32, np.float64]: # is numpy array
return X.copy('F' if X.flags['F_CONTIGUOUS'] else 'C') if copy else X
else:
return X.astype(np.float32 if X.dtype == np.int32 else np.float64)
def _is_arraylike(x):
"""Returns whether the input is array-like"""
return (hasattr(x, '__len__') or
hasattr(x, 'shape') or
hasattr(x, '__array__'))
def _num_samples(x):
"""Return number of samples in array-like x."""
if hasattr(x, 'fit'):
# Don't get num_samples from an ensembles length!
raise TypeError('Expected sequence or array-like, got '
'estimator %s' % x)
if not hasattr(x, '__len__') and not hasattr(x, 'shape'):
if hasattr(x, '__array__'):
x = np.asarray(x)
else:
raise TypeError("Expected sequence or array-like, got %s" %
type(x))
if hasattr(x, 'shape'):
if len(x.shape) == 0:
raise TypeError("Singleton array %r cannot be considered"
" a valid collection." % x)
return x.shape[0]
else:
return len(x)
def _shape_repr(shape):
"""Return a platform independent reprensentation of an array shape
Under Python 2, the `long` type introduces an 'L' suffix when using the
default %r format for tuples of integers (typically used to store the shape
of an array).
Under Windows 64 bit (and Python 2), the `long` type is used by default
in numpy shapes even when the integer dimensions are well below 32 bit.
The platform specific type causes string messages or doctests to change
from one platform to another which is not desirable.
Under Python 3, there is no more `long` type so the `L` suffix is never
introduced in string representation.
>>> _shape_repr((1, 2))
'(1, 2)'
>>> one = 2 ** 64 / 2 ** 64 # force an upcast to `long` under Python 2
>>> _shape_repr((one, 2 * one))
'(1, 2)'
>>> _shape_repr((1,))
'(1,)'
>>> _shape_repr(())
'()'
"""
if len(shape) == 0:
return "()"
joined = ", ".join("%d" % e for e in shape)
if len(shape) == 1:
# special notation for singleton tuples
joined += ','
return "(%s)" % joined
def check_consistent_length(*arrays):
"""Check that all arrays have consistent first dimensions.
Checks whether all objects in arrays have the same shape or length.
Parameters
----------
*arrays : list or tuple of input objects.
Objects that will be checked for consistent length.
"""
uniques = np.unique([_num_samples(X) for X in arrays if X is not None])
if len(uniques) > 1:
raise ValueError("Found arrays with inconsistent numbers of samples: "
"%s" % str(uniques))
def indexable(*iterables):
"""Make arrays indexable for cross-validation.
Checks consistent length, passes through None, and ensures that everything
can be indexed by converting sparse matrices to csr and converting
    non-iterable objects to arrays.
Parameters
----------
*iterables : lists, dataframes, arrays, sparse matrices
List of objects to ensure sliceability.
"""
result = []
for X in iterables:
if sp.issparse(X):
result.append(X.tocsr())
elif hasattr(X, "__getitem__") or hasattr(X, "iloc"):
result.append(X)
elif X is None:
result.append(X)
else:
result.append(np.array(X))
check_consistent_length(*result)
return result
def _ensure_sparse_format(spmatrix, accept_sparse, dtype, copy,
force_all_finite):
"""Convert a sparse matrix to a given format.
Checks the sparse format of spmatrix and converts if necessary.
Parameters
----------
spmatrix : scipy sparse matrix
Input to validate and convert.
accept_sparse : string, list of string or None (default=None)
String[s] representing allowed sparse matrix formats ('csc',
'csr', 'coo', 'dok', 'bsr', 'lil', 'dia'). None means that sparse
matrix input will raise an error. If the input is sparse but not in
the allowed format, it will be converted to the first listed format.
    dtype : string, type or None (default=None)
Data type of result. If None, the dtype of the input is preserved.
copy : boolean (default=False)
Whether a forced copy will be triggered. If copy=False, a copy might
be triggered by a conversion.
force_all_finite : boolean (default=True)
Whether to raise an error on np.inf and np.nan in X.
Returns
-------
spmatrix_converted : scipy sparse matrix.
Matrix that is ensured to have an allowed type.
"""
if accept_sparse in [None, False]:
raise TypeError('A sparse matrix was passed, but dense '
'data is required. Use X.toarray() to '
'convert to a dense numpy array.')
if dtype is None:
dtype = spmatrix.dtype
changed_format = False
if (isinstance(accept_sparse, (list, tuple))
and spmatrix.format not in accept_sparse):
# create new with correct sparse
spmatrix = spmatrix.asformat(accept_sparse[0])
changed_format = True
if dtype != spmatrix.dtype:
# convert dtype
spmatrix = spmatrix.astype(dtype)
elif copy and not changed_format:
# force copy
spmatrix = spmatrix.copy()
if force_all_finite:
if not hasattr(spmatrix, "data"):
warnings.warn("Can't check %s sparse matrix for nan or inf."
% spmatrix.format)
else:
_assert_all_finite(spmatrix.data)
return spmatrix
def check_array(array, accept_sparse=None, dtype="numeric", order=None,
copy=False, force_all_finite=True, ensure_2d=True,
allow_nd=False, ensure_min_samples=1, ensure_min_features=1,
warn_on_dtype=False, estimator=None):
"""Input validation on an array, list, sparse matrix or similar.
    By default, the input is converted to an at least 2d numpy array.
If the dtype of the array is object, attempt converting to float,
raising on failure.
Parameters
----------
array : object
Input object to check / convert.
accept_sparse : string, list of string or None (default=None)
String[s] representing allowed sparse matrix formats, such as 'csc',
'csr', etc. None means that sparse matrix input will raise an error.
If the input is sparse but not in the allowed format, it will be
converted to the first listed format.
dtype : string, type, list of types or None (default="numeric")
Data type of result. If None, the dtype of the input is preserved.
If "numeric", dtype is preserved unless array.dtype is object.
If dtype is a list of types, conversion on the first type is only
performed if the dtype of the input is not in the list.
order : 'F', 'C' or None (default=None)
Whether an array will be forced to be fortran or c-style.
copy : boolean (default=False)
Whether a forced copy will be triggered. If copy=False, a copy might
be triggered by a conversion.
force_all_finite : boolean (default=True)
Whether to raise an error on np.inf and np.nan in X.
ensure_2d : boolean (default=True)
Whether to make X at least 2d.
allow_nd : boolean (default=False)
Whether to allow X.ndim > 2.
ensure_min_samples : int (default=1)
Make sure that the array has a minimum number of samples in its first
axis (rows for a 2D array). Setting to 0 disables this check.
ensure_min_features : int (default=1)
Make sure that the 2D array has some minimum number of features
(columns). The default value of 1 rejects empty datasets.
This check is only enforced when the input data has effectively 2
dimensions or is originally 1D and ``ensure_2d`` is True. Setting to 0
disables this check.
warn_on_dtype : boolean (default=False)
Raise DataConversionWarning if the dtype of the input data structure
does not match the requested dtype, causing a memory copy.
estimator : str or estimator instance (default=None)
If passed, include the name of the estimator in warning messages.
Returns
-------
X_converted : object
The converted and validated X.
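    Examples
    --------
    >>> # illustrative usage (added example): a nested list is promoted to a
    >>> # 2d float array
    >>> check_array([[1., 2.], [3., 4.]]).shape
    (2, 2)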
"""
if isinstance(accept_sparse, str):
accept_sparse = [accept_sparse]
# store whether originally we wanted numeric dtype
dtype_numeric = dtype == "numeric"
dtype_orig = getattr(array, "dtype", None)
if not hasattr(dtype_orig, 'kind'):
# not a data type (e.g. a column named dtype in a pandas DataFrame)
dtype_orig = None
if dtype_numeric:
if dtype_orig is not None and dtype_orig.kind == "O":
# if input is object, convert to float.
dtype = np.float64
else:
dtype = None
if isinstance(dtype, (list, tuple)):
if dtype_orig is not None and dtype_orig in dtype:
# no dtype conversion required
dtype = None
else:
# dtype conversion required. Let's select the first element of the
# list of accepted types.
dtype = dtype[0]
if sp.issparse(array):
array = _ensure_sparse_format(array, accept_sparse, dtype, copy,
force_all_finite)
else:
if ensure_2d:
array = np.atleast_2d(array)
array = np.array(array, dtype=dtype, order=order, copy=copy)
# make sure we actually converted to numeric:
if dtype_numeric and array.dtype.kind == "O":
array = array.astype(np.float64)
if not allow_nd and array.ndim >= 3:
raise ValueError("Found array with dim %d. Expected <= 2" %
array.ndim)
if force_all_finite:
_assert_all_finite(array)
shape_repr = _shape_repr(array.shape)
if ensure_min_samples > 0:
n_samples = _num_samples(array)
if n_samples < ensure_min_samples:
raise ValueError("Found array with %d sample(s) (shape=%s) while a"
" minimum of %d is required."
% (n_samples, shape_repr, ensure_min_samples))
if ensure_min_features > 0 and array.ndim == 2:
n_features = array.shape[1]
if n_features < ensure_min_features:
raise ValueError("Found array with %d feature(s) (shape=%s) while"
" a minimum of %d is required."
% (n_features, shape_repr, ensure_min_features))
if warn_on_dtype and dtype_orig is not None and array.dtype != dtype_orig:
msg = ("Data with input dtype %s was converted to %s"
% (dtype_orig, array.dtype))
if estimator is not None:
if not isinstance(estimator, six.string_types):
estimator = estimator.__class__.__name__
msg += " by %s" % estimator
warnings.warn(msg, DataConversionWarning)
return array
def check_X_y(X, y, accept_sparse=None, dtype="numeric", order=None, copy=False,
force_all_finite=True, ensure_2d=True, allow_nd=False,
multi_output=False, ensure_min_samples=1,
ensure_min_features=1, y_numeric=False,
warn_on_dtype=False, estimator=None):
"""Input validation for standard estimators.
Checks X and y for consistent length, enforces X 2d and y 1d.
Standard input checks are only applied to y. For multi-label y,
set multi_output=True to allow 2d and sparse y.
If the dtype of X is object, attempt converting to float,
raising on failure.
Parameters
----------
X : nd-array, list or sparse matrix
Input data.
y : nd-array, list or sparse matrix
Labels.
accept_sparse : string, list of string or None (default=None)
String[s] representing allowed sparse matrix formats, such as 'csc',
'csr', etc. None means that sparse matrix input will raise an error.
If the input is sparse but not in the allowed format, it will be
converted to the first listed format.
dtype : string, type, list of types or None (default="numeric")
Data type of result. If None, the dtype of the input is preserved.
If "numeric", dtype is preserved unless array.dtype is object.
If dtype is a list of types, conversion on the first type is only
performed if the dtype of the input is not in the list.
order : 'F', 'C' or None (default=None)
Whether an array will be forced to be fortran or c-style.
copy : boolean (default=False)
Whether a forced copy will be triggered. If copy=False, a copy might
be triggered by a conversion.
force_all_finite : boolean (default=True)
Whether to raise an error on np.inf and np.nan in X.
ensure_2d : boolean (default=True)
Whether to make X at least 2d.
allow_nd : boolean (default=False)
Whether to allow X.ndim > 2.
multi_output : boolean (default=False)
Whether to allow 2-d y (array or sparse matrix). If false, y will be
validated as a vector.
ensure_min_samples : int (default=1)
Make sure that X has a minimum number of samples in its first
axis (rows for a 2D array).
ensure_min_features : int (default=1)
Make sure that the 2D array has some minimum number of features
(columns). The default value of 1 rejects empty datasets.
This check is only enforced when X has effectively 2 dimensions or
is originally 1D and ``ensure_2d`` is True. Setting to 0 disables
this check.
y_numeric : boolean (default=False)
Whether to ensure that y has a numeric type. If dtype of y is object,
it is converted to float64. Should only be used for regression
algorithms.
warn_on_dtype : boolean (default=False)
Raise DataConversionWarning if the dtype of the input data structure
does not match the requested dtype, causing a memory copy.
estimator : str or estimator instance (default=None)
If passed, include the name of the estimator in warning messages.
Returns
-------
X_converted : object
The converted and validated X.
"""
X = check_array(X, accept_sparse, dtype, order, copy, force_all_finite,
ensure_2d, allow_nd, ensure_min_samples,
ensure_min_features, warn_on_dtype, estimator)
if multi_output:
y = check_array(y, 'csr', force_all_finite=True, ensure_2d=False,
dtype=None)
else:
y = column_or_1d(y, warn=True)
_assert_all_finite(y)
if y_numeric and y.dtype.kind == 'O':
y = y.astype(np.float64)
check_consistent_length(X, y)
return X, y
def column_or_1d(y, warn=False):
""" Ravel column or 1d numpy array, else raises an error
Parameters
----------
y : array-like
warn : boolean, default False
To control display of warnings.
Returns
-------
y : array
"""
shape = np.shape(y)
if len(shape) == 1:
return np.ravel(y)
if len(shape) == 2 and shape[1] == 1:
if warn:
warnings.warn("A column-vector y was passed when a 1d array was"
" expected. Please change the shape of y to "
"(n_samples, ), for example using ravel().",
DataConversionWarning, stacklevel=2)
return np.ravel(y)
raise ValueError("bad input shape {0}".format(shape))
def check_random_state(seed):
"""Turn seed into a np.random.RandomState instance
If seed is None, return the RandomState singleton used by np.random.
If seed is an int, return a new RandomState instance seeded with seed.
If seed is already a RandomState instance, return it.
Otherwise raise ValueError.
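    Examples
    --------
    >>> # illustrative usage (added example)
    >>> check_random_state(None) is np.random.mtrand._rand
    True
    >>> isinstance(check_random_state(42), np.random.RandomState)
    True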
"""
if seed is None or seed is np.random:
return np.random.mtrand._rand
if isinstance(seed, (numbers.Integral, np.integer)):
return np.random.RandomState(seed)
if isinstance(seed, np.random.RandomState):
return seed
raise ValueError('%r cannot be used to seed a numpy.random.RandomState'
' instance' % seed)
def has_fit_parameter(estimator, parameter):
"""Checks whether the estimator's fit method supports the given parameter.
Examples
--------
>>> from sklearn.svm import SVC
>>> has_fit_parameter(SVC(), "sample_weight")
True
"""
return parameter in getargspec(estimator.fit)[0]
def check_symmetric(array, tol=1E-10, raise_warning=True,
raise_exception=False):
"""Make sure that array is 2D, square and symmetric.
If the array is not symmetric, then a symmetrized version is returned.
Optionally, a warning or exception is raised if the matrix is not
symmetric.
Parameters
----------
array : nd-array or sparse matrix
Input object to check / convert. Must be two-dimensional and square,
otherwise a ValueError will be raised.
tol : float
Absolute tolerance for equivalence of arrays. Default = 1E-10.
raise_warning : boolean (default=True)
If True then raise a warning if conversion is required.
raise_exception : boolean (default=False)
If True then raise an exception if array is not symmetric.
Returns
-------
array_sym : ndarray or sparse matrix
Symmetrized version of the input array, i.e. the average of array
and array.transpose(). If sparse, then duplicate entries are first
summed and zeros are eliminated.
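    Examples
    --------
    >>> # illustrative usage (added example): the asymmetric part is averaged away
    >>> check_symmetric(np.array([[0., 2.], [0., 0.]]), raise_warning=False)
    array([[ 0.,  1.],
           [ 1.,  0.]])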
"""
if (array.ndim != 2) or (array.shape[0] != array.shape[1]):
raise ValueError("array must be 2-dimensional and square. "
"shape = {0}".format(array.shape))
if sp.issparse(array):
diff = array - array.T
# only csr, csc, and coo have `data` attribute
if diff.format not in ['csr', 'csc', 'coo']:
diff = diff.tocsr()
symmetric = np.all(abs(diff.data) < tol)
else:
symmetric = np.allclose(array, array.T, atol=tol)
if not symmetric:
if raise_exception:
raise ValueError("Array must be symmetric")
if raise_warning:
warnings.warn("Array is not symmetric, and will be converted "
"to symmetric by average with its transpose.")
if sp.issparse(array):
conversion = 'to' + array.format
array = getattr(0.5 * (array + array.T), conversion)()
else:
array = 0.5 * (array + array.T)
return array
def check_is_fitted(estimator, attributes, msg=None, all_or_any=all):
"""Perform is_fitted validation for estimator.
Checks if the estimator is fitted by verifying the presence of
"all_or_any" of the passed attributes and raises a NotFittedError with the
given message.
Parameters
----------
estimator : estimator instance.
estimator instance for which the check is performed.
attributes : attribute name(s) given as string or a list/tuple of strings
Eg. : ["coef_", "estimator_", ...], "coef_"
msg : string
The default error message is, "This %(name)s instance is not fitted
yet. Call 'fit' with appropriate arguments before using this method."
For custom messages if "%(name)s" is present in the message string,
it is substituted for the estimator name.
Eg. : "Estimator, %(name)s, must be fitted before sparsifying".
all_or_any : callable, {all, any}, default all
Specify whether all or any of the given attributes must exist.
"""
if msg is None:
msg = ("This %(name)s instance is not fitted yet. Call 'fit' with "
"appropriate arguments before using this method.")
if not hasattr(estimator, 'fit'):
raise TypeError("%s is not an estimator instance." % (estimator))
if not isinstance(attributes, (list, tuple)):
attributes = [attributes]
if not all_or_any([hasattr(estimator, attr) for attr in attributes]):
raise NotFittedError(msg % {'name': type(estimator).__name__})
def check_non_negative(X, whom):
"""
Check if there is any negative value in an array.
Parameters
----------
X : array-like or sparse matrix
Input data.
whom : string
Who passed X to this function.
"""
X = X.data if sp.issparse(X) else X
if (X < 0).any():
raise ValueError("Negative values in data passed to %s" % whom)
| bsd-3-clause |
Mushirahmed/gnuradio | gr-utils/src/python/plot_data.py | 17 | 5768 | #
# Copyright 2007,2008,2011 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# GNU Radio is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# GNU Radio is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GNU Radio; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#
try:
import scipy
except ImportError:
print "Please install SciPy to run this script (http://www.scipy.org/)"
raise SystemExit, 1
try:
from pylab import *
except ImportError:
print "Please install Matplotlib to run this script (http://matplotlib.sourceforge.net/)"
raise SystemExit, 1
from optparse import OptionParser
class plot_data:
def __init__(self, datatype, filenames, options):
self.hfile = list()
self.legend_text = list()
for f in filenames:
self.hfile.append(open(f, "r"))
self.legend_text.append(f)
self.block_length = options.block
self.start = options.start
self.sample_rate = options.sample_rate
self.datatype = datatype
self.sizeof_data = datatype().nbytes # number of bytes per sample in file
self.axis_font_size = 16
self.label_font_size = 18
self.title_font_size = 20
self.text_size = 22
# Setup PLOT
self.fig = figure(1, figsize=(16, 9), facecolor='w')
rcParams['xtick.labelsize'] = self.axis_font_size
rcParams['ytick.labelsize'] = self.axis_font_size
self.text_file_pos = figtext(0.10, 0.88, "File Position: ", weight="heavy", size=self.text_size)
self.text_block = figtext(0.40, 0.88, ("Block Size: %d" % self.block_length),
weight="heavy", size=self.text_size)
self.text_sr = figtext(0.60, 0.88, ("Sample Rate: %.2f" % self.sample_rate),
weight="heavy", size=self.text_size)
self.make_plots()
self.button_left_axes = self.fig.add_axes([0.45, 0.01, 0.05, 0.05], frameon=True)
self.button_left = Button(self.button_left_axes, "<")
self.button_left_callback = self.button_left.on_clicked(self.button_left_click)
self.button_right_axes = self.fig.add_axes([0.50, 0.01, 0.05, 0.05], frameon=True)
self.button_right = Button(self.button_right_axes, ">")
self.button_right_callback = self.button_right.on_clicked(self.button_right_click)
self.xlim = self.sp_f.get_xlim()
self.manager = get_current_fig_manager()
connect('key_press_event', self.click)
show()
def get_data(self, hfile):
self.text_file_pos.set_text("File Position: %d" % (hfile.tell()//self.sizeof_data))
try:
f = scipy.fromfile(hfile, dtype=self.datatype, count=self.block_length)
except MemoryError:
print "End of File"
else:
self.f = scipy.array(f)
self.time = scipy.array([i*(1/self.sample_rate) for i in range(len(self.f))])
def make_plots(self):
self.sp_f = self.fig.add_subplot(2,1,1, position=[0.075, 0.2, 0.875, 0.6])
self.sp_f.set_title(("Amplitude"), fontsize=self.title_font_size, fontweight="bold")
self.sp_f.set_xlabel("Time (s)", fontsize=self.label_font_size, fontweight="bold")
self.sp_f.set_ylabel("Amplitude (V)", fontsize=self.label_font_size, fontweight="bold")
self.plot_f = list()
maxval = -1e12
minval = 1e12
for hf in self.hfile:
# if specified on the command-line, set file pointer
hf.seek(self.sizeof_data*self.start, 1)
self.get_data(hf)
# Subplot for real and imaginary parts of signal
self.plot_f += plot(self.time, self.f, 'o-')
maxval = max(maxval, self.f.max())
minval = min(minval, self.f.min())
self.sp_f.set_ylim([1.5*minval, 1.5*maxval])
self.leg = self.sp_f.legend(self.plot_f, self.legend_text)
draw()
def update_plots(self):
maxval = -1e12
minval = 1e12
for hf,p in zip(self.hfile,self.plot_f):
self.get_data(hf)
p.set_data([self.time, self.f])
maxval = max(maxval, self.f.max())
minval = min(minval, self.f.min())
self.sp_f.set_ylim([1.5*minval, 1.5*maxval])
draw()
def click(self, event):
forward_valid_keys = [" ", "down", "right"]
backward_valid_keys = ["up", "left"]
if(find(event.key, forward_valid_keys)):
self.step_forward()
elif(find(event.key, backward_valid_keys)):
self.step_backward()
def button_left_click(self, event):
self.step_backward()
def button_right_click(self, event):
self.step_forward()
def step_forward(self):
self.update_plots()
def step_backward(self):
for hf in self.hfile:
# Step back in file position
if(hf.tell() >= 2*self.sizeof_data*self.block_length ):
hf.seek(-2*self.sizeof_data*self.block_length, 1)
else:
hf.seek(-hf.tell(),1)
self.update_plots()
def find(item_in, list_search):
try:
return list_search.index(item_in) != None
except ValueError:
return False
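# Illustrative usage sketch (hypothetical; the real gr_plot_* wrapper scripts
# build the options object with OptionParser):
#
#   class Options(object):
#       block = 1000
#       start = 0
#       sample_rate = 1.0
#
#   dt = plot_data(scipy.float32, ["capture.dat"], Options())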
| gpl-3.0 |
rstoneback/pysat | pysat/instruments/templates/netcdf_pandas.py | 2 | 7632 | # -*- coding: utf-8 -*-
"""
Generic module for loading netCDF4 files into the pandas format within pysat.
This file may be used as a template for adding pysat support for a new dataset
based upon netCDF4 files, or other file types (with modification).
This routine may also be used to add quick local support for a netCDF4 based
dataset without having to define an instrument module for pysat. Relevant
parameters may be specified when instantiating this Instrument object to
support the relevant file location and naming schemes. This presumes the pysat
developed utils.load_netcdf4 routine is able to load the file. See the load
routine docstring in this module for more.
The routines defined within may also be used when adding a new instrument
to pysat by importing this module and using the functools.partial methods
to attach these functions to the new instrument model. See
pysat/instruments/cnofs_ivm.py for more.
NASA CDAWeb datasets, such as C/NOFS IVM, use the methods within
pysat/instruments/methods/nasa_cdaweb.py to make adding new CDAWeb instruments
easy.
"""
import pandas as pds
import numpy as np
import pysat
# pysat required parameters
platform = 'netcdf'
name = 'pandas'
# dictionary of data 'tags' and corresponding description
tags = {'': 'netCDF4'}
# dictionary of satellite IDs, list of corresponding tags
sat_ids = {'': ['']}
_test_dates = {'': {'': pysat.datetime(2009, 1, 1)}}
def init(self):
"""Initializes the Instrument object with instrument specific values.
Runs once upon instantiation. This routine provides a convenient
location to print Acknowledgements or restrictions from the mission.
"""
pass
def load(fnames, tag=None, sat_id=None, **kwargs):
"""Loads data using pysat.utils.load_netcdf4 .
This routine is called as needed by pysat. It is not intended
for direct user interaction.
Parameters
----------
fnames : array-like
iterable of filename strings, full path, to data files to be loaded.
This input is nominally provided by pysat itself.
tag : string
tag name used to identify particular data set to be loaded.
This input is nominally provided by pysat itself.
sat_id : string
Satellite ID used to identify particular data set to be loaded.
This input is nominally provided by pysat itself.
**kwargs : extra keywords
Passthrough for additional keyword arguments specified when
instantiating an Instrument object. These additional keywords
are passed through to this routine by pysat.
Returns
-------
data, metadata
Data and Metadata are formatted for pysat. Data is a pandas
DataFrame while metadata is a pysat.Meta instance.
Note
----
Any additional keyword arguments passed to pysat.Instrument
upon instantiation are passed along to this routine and through
to the load_netcdf4 call.
Examples
--------
::
inst = pysat.Instrument('sport', 'ivm')
inst.load(2019,1)
# create quick Instrument object for a new, random netCDF4 file
# define filename template string to identify files
# this is normally done by instrument code, but in this case
# there is no built in pysat instrument support
# presumes files are named default_2019-01-01.NC
format_str = 'default_{year:04d}-{month:02d}-{day:02d}.NC'
inst = pysat.Instrument('netcdf', 'pandas',
custom_kwarg='test'
data_path='./',
format_str=format_str)
inst.load(2019,1)
"""
return pysat.utils.load_netcdf4(fnames, **kwargs)
def list_files(tag=None, sat_id=None, data_path=None, format_str=None):
"""Produce a list of files corresponding to format_str located at
data_path.
This routine is invoked by pysat and is not intended for direct use by
the end user.
Multiple data levels may be supported via the 'tag' and 'sat_id' input
strings.
Parameters
----------
tag : string ('')
tag name used to identify particular data set to be loaded.
This input is nominally provided by pysat itself.
sat_id : string ('')
Satellite ID used to identify particular data set to be loaded.
This input is nominally provided by pysat itself.
data_path : string
Full path to directory containing files to be loaded. This
is provided by pysat. The user may specify their own data path
at Instrument instantiation and it will appear here.
format_str : string (None)
String template used to parse the datasets filenames. If a user
supplies a template string at Instrument instantiation
then it will appear here, otherwise defaults to None.
Returns
-------
pandas.Series
Series of filename strings, including the path, indexed by datetime.
Examples
--------
::
If a filename is SPORT_L2_IVM_2019-01-01_v01r0000.NC then the template
is 'SPORT_L2_IVM_{year:04d}-{month:02d}-{day:02d}_' +
'v{version:02d}r{revision:04d}.NC'
Note
----
The returned Series should not have any duplicate datetimes. If there are
multiple versions of a file the most recent version should be kept and the
rest discarded. This routine uses the pysat.Files.from_os constructor, thus
the returned files are up to pysat specifications.
Normally the format_str for each supported tag and sat_id is defined within
this routine. However, as this is a generic routine, those definitions
can't be made here. This method could be used in an instrument specific
module where the list_files routine in the new package defines the
format_str based upon inputs, then calls this routine passing both
data_path and format_str.
Alternately, the list_files routine in methods.nasa_cdaweb may also be
    used and has more built-in functionality. Supported tags and format
    strings may be defined within the new instrument module and passed as
    arguments to methods.nasa_cdaweb.list_files. For an example of using
this routine, see pysat/instrument/cnofs_ivm.py or cnofs_vefi, cnofs_plp,
omni_hro, timed_see, etc.
"""
return pysat.Files.from_os(data_path=data_path, format_str=format_str)
def download(date_array, tag, sat_id, data_path=None, user=None,
password=None):
"""Downloads data for supported instruments, however this is a template
call.
This routine is invoked by pysat and is not intended for direct use by
the end user.
Parameters
----------
date_array : array-like
list of datetimes to download data for. The sequence of dates need not
be contiguous.
tag : string ('')
Tag identifier used for particular dataset. This input is provided by
pysat.
sat_id : string ('')
Satellite ID string identifier used for particular dataset. This input
is provided by pysat.
data_path : string (None)
Path to directory to download data to.
user : string (None)
User string input used for download. Provided by user and passed via
        pysat. If an account is required for downloads, this routine must
error if user not supplied.
password : string (None)
Password for data download.
Returns
--------
Void : (NoneType)
Downloads data to disk.
"""
print('This is a generic Instrument routine and does not support ' +
'downloading data.')
pass
| bsd-3-clause |
pythonvietnam/scikit-learn | examples/covariance/plot_mahalanobis_distances.py | 348 | 6232 | r"""
================================================================
Robust covariance estimation and Mahalanobis distances relevance
================================================================
An example to show covariance estimation with the Mahalanobis
distances on Gaussian distributed data.
For Gaussian distributed data, the distance of an observation
:math:`x_i` to the mode of the distribution can be computed using its
Mahalanobis distance: :math:`d_{(\mu,\Sigma)}(x_i)^2 = (x_i -
\mu)'\Sigma^{-1}(x_i - \mu)` where :math:`\mu` and :math:`\Sigma` are
the location and the covariance of the underlying Gaussian
distribution.
In practice, :math:`\mu` and :math:`\Sigma` are replaced by some
estimates. The usual maximum likelihood estimate of the covariance is very
sensitive to the presence of outliers in the data set, and therefore so are
the corresponding Mahalanobis distances. It is better to use a robust
estimator of covariance to guarantee that the estimation is
resistant to "erroneous" observations in the data set and that the
associated Mahalanobis distances accurately reflect the true
organisation of the observations.
The Minimum Covariance Determinant estimator is a robust,
high-breakdown point (i.e. it can be used to estimate the covariance
matrix of highly contaminated datasets, up to
:math:`\frac{n_\text{samples}-n_\text{features}-1}{2}` outliers)
estimator of covariance. The idea is to find
:math:`\frac{n_\text{samples}+n_\text{features}+1}{2}`
observations whose empirical covariance has the smallest determinant,
yielding a "pure" subset of observations from which to compute
standards estimates of location and covariance.
The Minimum Covariance Determinant estimator (MCD) was introduced
by P. J. Rousseeuw in [1].
This example illustrates how the Mahalanobis distances are affected by
outlying data: observations drawn from a contaminating distribution
are not distinguishable from the observations coming from the real,
Gaussian distribution that one may want to work with. Using MCD-based
Mahalanobis distances, the two populations become
distinguishable. Associated applications are outliers detection,
observations ranking, clustering, ...
For visualization purposes, the cube roots of the Mahalanobis distances
are represented in the boxplot, as Wilson and Hilferty suggest [2]
[1] P. J. Rousseeuw. Least median of squares regression. J. Am
Stat Ass, 79:871, 1984.
[2] Wilson, E. B., & Hilferty, M. M. (1931). The distribution of chi-square.
Proceedings of the National Academy of Sciences of the United States
of America, 17, 684-688.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn.covariance import EmpiricalCovariance, MinCovDet
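# Illustrative check (added; not part of the original example): the squared
# Mahalanobis distance defined above, d^2 = (x - mu)' Sigma^{-1} (x - mu),
# computed by hand for one toy point with assumed estimates mu_hat, sigma_hat
x_i = np.array([1.0, 2.0])
mu_hat = np.zeros(2)
sigma_hat = np.eye(2)
diff_i = x_i - mu_hat
d2 = diff_i.dot(np.linalg.inv(sigma_hat)).dot(diff_i)  # 1**2 + 2**2 = 5.0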
n_samples = 125
n_outliers = 25
n_features = 2
# generate data
gen_cov = np.eye(n_features)
gen_cov[0, 0] = 2.
X = np.dot(np.random.randn(n_samples, n_features), gen_cov)
# add some outliers
outliers_cov = np.eye(n_features)
outliers_cov[np.arange(1, n_features), np.arange(1, n_features)] = 7.
X[-n_outliers:] = np.dot(np.random.randn(n_outliers, n_features), outliers_cov)
# fit a Minimum Covariance Determinant (MCD) robust estimator to data
robust_cov = MinCovDet().fit(X)
# compare estimators learnt from the full data set with true parameters
emp_cov = EmpiricalCovariance().fit(X)
###############################################################################
# Display results
fig = plt.figure()
plt.subplots_adjust(hspace=-.1, wspace=.4, top=.95, bottom=.05)
# Show data set
subfig1 = plt.subplot(3, 1, 1)
inlier_plot = subfig1.scatter(X[:, 0], X[:, 1],
color='black', label='inliers')
outlier_plot = subfig1.scatter(X[:, 0][-n_outliers:], X[:, 1][-n_outliers:],
color='red', label='outliers')
subfig1.set_xlim(subfig1.get_xlim()[0], 11.)
subfig1.set_title("Mahalanobis distances of a contaminated data set:")
# Show contours of the distance functions
xx, yy = np.meshgrid(np.linspace(plt.xlim()[0], plt.xlim()[1], 100),
np.linspace(plt.ylim()[0], plt.ylim()[1], 100))
zz = np.c_[xx.ravel(), yy.ravel()]
mahal_emp_cov = emp_cov.mahalanobis(zz)
mahal_emp_cov = mahal_emp_cov.reshape(xx.shape)
emp_cov_contour = subfig1.contour(xx, yy, np.sqrt(mahal_emp_cov),
cmap=plt.cm.PuBu_r,
linestyles='dashed')
mahal_robust_cov = robust_cov.mahalanobis(zz)
mahal_robust_cov = mahal_robust_cov.reshape(xx.shape)
robust_contour = subfig1.contour(xx, yy, np.sqrt(mahal_robust_cov),
cmap=plt.cm.YlOrBr_r, linestyles='dotted')
subfig1.legend([emp_cov_contour.collections[1], robust_contour.collections[1],
inlier_plot, outlier_plot],
['MLE dist', 'robust dist', 'inliers', 'outliers'],
loc="upper right", borderaxespad=0)
plt.xticks(())
plt.yticks(())
# Plot the scores for each point
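# the 0.33 exponent approximates the cube-root transform of Wilson & Hilferty [2],
# which makes the chi-squared distances approximately Gaussian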
emp_mahal = emp_cov.mahalanobis(X - np.mean(X, 0)) ** (0.33)
subfig2 = plt.subplot(2, 2, 3)
subfig2.boxplot([emp_mahal[:-n_outliers], emp_mahal[-n_outliers:]], widths=.25)
subfig2.plot(1.26 * np.ones(n_samples - n_outliers),
emp_mahal[:-n_outliers], '+k', markeredgewidth=1)
subfig2.plot(2.26 * np.ones(n_outliers),
emp_mahal[-n_outliers:], '+k', markeredgewidth=1)
subfig2.axes.set_xticklabels(('inliers', 'outliers'), size=15)
subfig2.set_ylabel(r"$\sqrt[3]{\rm{(Mahal. dist.)}}$", size=16)
subfig2.set_title("1. from non-robust estimates\n(Maximum Likelihood)")
plt.yticks(())
robust_mahal = robust_cov.mahalanobis(X - robust_cov.location_) ** (0.33)
subfig3 = plt.subplot(2, 2, 4)
subfig3.boxplot([robust_mahal[:-n_outliers], robust_mahal[-n_outliers:]],
widths=.25)
subfig3.plot(1.26 * np.ones(n_samples - n_outliers),
robust_mahal[:-n_outliers], '+k', markeredgewidth=1)
subfig3.plot(2.26 * np.ones(n_outliers),
robust_mahal[-n_outliers:], '+k', markeredgewidth=1)
subfig3.axes.set_xticklabels(('inliers', 'outliers'), size=15)
subfig3.set_ylabel(r"$\sqrt[3]{\rm{(Mahal. dist.)}}$", size=16)
subfig3.set_title("2. from robust estimates\n(Minimum Covariance Determinant)")
plt.yticks(())
plt.show()
| bsd-3-clause |
sinhrks/seaborn | examples/many_facets.py | 26 | 1062 | """
Plotting on a large number of facets
====================================
"""
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
sns.set(style="ticks")
# Create a dataset with many short random walks
rs = np.random.RandomState(4)
pos = rs.randint(-1, 2, (20, 5)).cumsum(axis=1)
pos -= pos[:, 0, np.newaxis]
step = np.tile(range(5), 20)
walk = np.repeat(range(20), 5)
df = pd.DataFrame(np.c_[pos.flat, step, walk],
columns=["position", "step", "walk"])
# Initialize a grid of plots with an Axes for each walk
grid = sns.FacetGrid(df, col="walk", hue="walk", col_wrap=5, size=1.5)
# Draw a horizontal line to show the starting point
grid.map(plt.axhline, y=0, ls=":", c=".5")
# Draw a line plot to show the trajectory of each random walk
grid.map(plt.plot, "step", "position", marker="o", ms=4)
# Adjust the tick positions and labels
grid.set(xticks=np.arange(5), yticks=[-3, 3],
xlim=(-.5, 4.5), ylim=(-3.5, 3.5))
# Adjust the arrangement of the plots
grid.fig.tight_layout(w_pad=1)
| bsd-3-clause |
courtarro/gnuradio | gr-fec/python/fec/polar/decoder.py | 24 | 10396 | #!/usr/bin/env python
#
# Copyright 2015 Free Software Foundation, Inc.
#
# GNU Radio is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# GNU Radio is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GNU Radio; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#
import numpy as np
from common import PolarCommon
# for dev
from encoder import PolarEncoder
from matplotlib import pyplot as plt
class PolarDecoder(PolarCommon):
def __init__(self, n, k, frozen_bit_position, frozenbits=None):
PolarCommon.__init__(self, n, k, frozen_bit_position, frozenbits)
self.error_probability = 0.1 # this is kind of a dummy value. usually chosen individually.
self.lrs = ((1 - self.error_probability) / self.error_probability, self.error_probability / (1 - self.error_probability))
self.llrs = np.log(self.lrs)
def _llr_bit(self, bit):
return self.llrs[bit]
def _llr_odd(self, la, lb):
        # this function uses the min-sum approximation
# exact formula: np.log((np.exp(la + lb) + 1) / (np.exp(la) + np.exp(lb)))
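        # worked example: la = +2.0, lb = -0.5  ->  (+1) * (-1) * min(2.0, 0.5) = -0.5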
return np.sign(la) * np.sign(lb) * np.minimum(np.abs(la), np.abs(lb))
_f_vals = np.array((1.0, -1.0), dtype=float)
def _llr_even(self, la, lb, f):
return (la * self._f_vals[f]) + lb
def _llr_bit_decision(self, llr):
if llr < 0.0:
ui = int(1)
else:
ui = int(0)
return ui
def _retrieve_bit_from_llr(self, lr, pos):
f_index = np.where(self.frozen_bit_position == pos)[0]
if not f_index.size == 0:
ui = self.frozenbits[f_index][0]
else:
ui = self._llr_bit_decision(lr)
return ui
def _lr_bit(self, bit):
return self.lrs[bit]
def _lr_odd(self, la, lb):
# la is upper branch and lb is lower branch
return (la * lb + 1) / (la + lb)
def _lr_even(self, la, lb, f):
# la is upper branch and lb is lower branch, f is last decoded bit.
return (la ** (1 - (2 * f))) * lb
def _lr_bit_decision(self, lr):
if lr < 1:
return int(1)
return int(0)
def _get_even_indices_values(self, u_hat):
        # looks like overkill for some indexing, but mixing zero- and one-based indexing gives you headaches.
return u_hat[1::2]
def _get_odd_indices_values(self, u_hat):
return u_hat[0::2]
def _calculate_lrs(self, y, u):
ue = self._get_even_indices_values(u)
uo = self._get_odd_indices_values(u)
ya = y[0:y.size//2]
yb = y[(y.size//2):]
la = self._lr_decision_element(ya, (ue + uo) % 2)
lb = self._lr_decision_element(yb, ue)
return la, lb
def _lr_decision_element(self, y, u):
if y.size == 1:
return self._llr_bit(y[0])
if u.size % 2 == 0: # use odd branch formula
la, lb = self._calculate_lrs(y, u)
return self._llr_odd(la, lb)
else:
ui = u[-1]
la, lb = self._calculate_lrs(y, u[0:-1])
return self._llr_even(la, lb, ui)
def _retrieve_bit_from_lr(self, lr, pos):
f_index = np.where(self.frozen_bit_position == pos)[0]
if not f_index.size == 0:
ui = self.frozenbits[f_index][0]
else:
ui = self._lr_bit_decision(lr)
return ui
def _lr_sc_decoder(self, y):
# this is the standard SC decoder as derived from the formulas. It sticks to natural bit order.
u = np.array([], dtype=int)
for i in range(y.size):
lr = self._lr_decision_element(y, u)
ui = self._retrieve_bit_from_llr(lr, i)
u = np.append(u, ui)
return u
def _llr_retrieve_bit(self, llr, pos):
f_index = np.where(self.frozen_bit_position == pos)[0]
if not f_index.size == 0:
ui = self.frozenbits[f_index][0]
else:
ui = self._llr_bit_decision(llr)
return ui
def _butterfly_decode_bits(self, pos, graph, u):
bit_num = u.size
llr = graph[pos][0]
ui = self._llr_retrieve_bit(llr, bit_num)
# ui = self._llr_bit_decision(llr)
u = np.append(u, ui)
lower_right = pos + (self.N // 2)
la = graph[pos][1]
lb = graph[lower_right][1]
graph[lower_right][0] = self._llr_even(la, lb, ui)
llr = graph[lower_right][0]
# ui = self._llr_bit_decision(llr)
ui = self._llr_retrieve_bit(llr, u.size)
u = np.append(u, ui)
return graph, u
def _lr_sc_decoder_efficient(self, y):
graph = np.full((self.N, self.power + 1), np.NaN, dtype=float)
for i in range(self.N):
graph[i][self.power] = self._llr_bit(y[i])
decode_order = self._vector_bit_reversed(np.arange(self.N), self.power)
decode_order = np.delete(decode_order, np.where(decode_order >= self.N // 2))
u = np.array([], dtype=int)
for pos in decode_order:
graph = self._butterfly(pos, 0, graph, u)
graph, u = self._butterfly_decode_bits(pos, graph, u)
return u
def _stop_propagation(self, bf_entry_row, stage):
# calculate break condition
modulus = 2 ** (self.power - stage)
# stage_size = self.N // (2 ** stage)
# half_stage_size = stage_size // 2
half_stage_size = self.N // (2 ** (stage + 1))
stage_pos = bf_entry_row % modulus
return stage_pos >= half_stage_size
def _butterfly(self, bf_entry_row, stage, graph, u):
if not self.power > stage:
return graph
if self._stop_propagation(bf_entry_row, stage):
upper_right = bf_entry_row - self.N // (2 ** (stage + 1))
la = graph[upper_right][stage + 1]
lb = graph[bf_entry_row][stage + 1]
ui = u[-1]
graph[bf_entry_row][stage] = self._llr_even(la, lb, ui)
return graph
# activate right side butterflies
u_even = self._get_even_indices_values(u)
u_odd = self._get_odd_indices_values(u)
graph = self._butterfly(bf_entry_row, stage + 1, graph, (u_even + u_odd) % 2)
lower_right = bf_entry_row + self.N // (2 ** (stage + 1))
graph = self._butterfly(lower_right, stage + 1, graph, u_even)
la = graph[bf_entry_row][stage + 1]
lb = graph[lower_right][stage + 1]
graph[bf_entry_row][stage] = self._llr_odd(la, lb)
return graph
def decode(self, data, is_packed=False):
if not len(data) == self.N:
raise ValueError("len(data)={0} is not equal to n={1}!".format(len(data), self.N))
if is_packed:
data = np.unpackbits(data)
data = self._lr_sc_decoder_efficient(data)
data = self._extract_info_bits(data)
if is_packed:
data = np.packbits(data)
return data
def _extract_info_bits_reversed(self, y):
info_bit_positions_reversed = self._vector_bit_reversed(self.info_bit_position, self.power)
return y[info_bit_positions_reversed]
def decode_systematic(self, data):
if not len(data) == self.N:
raise ValueError("len(data)={0} is not equal to n={1}!".format(len(data), self.N))
# data = self._reverse_bits(data)
data = self._lr_sc_decoder_efficient(data)
data = self._encode_natural_order(data)
data = self._extract_info_bits_reversed(data)
return data
def test_systematic_decoder():
ntests = 1000
n = 16
k = 8
frozenbitposition = np.array((0, 1, 2, 3, 4, 5, 8, 9), dtype=int)
encoder = PolarEncoder(n, k, frozenbitposition)
decoder = PolarDecoder(n, k, frozenbitposition)
for i in range(ntests):
bits = np.random.randint(2, size=k)
y = encoder.encode_systematic(bits)
u_hat = decoder.decode_systematic(y)
assert (bits == u_hat).all()
def test_reverse_enc_dec():
n = 16
k = 8
frozenbits = np.zeros(n - k)
frozenbitposition = np.array((0, 1, 2, 3, 4, 5, 8, 9), dtype=int)
bits = np.random.randint(2, size=k)
encoder = PolarEncoder(n, k, frozenbitposition, frozenbits)
decoder = PolarDecoder(n, k, frozenbitposition, frozenbits)
encoded = encoder.encode(bits)
print 'encoded:', encoded
rx = decoder.decode(encoded)
print 'bits:', bits
print 'rx :', rx
print (bits == rx).all()
def compare_decoder_impls():
print '\nthis is decoder test'
n = 8
k = 4
frozenbits = np.zeros(n - k)
# frozenbitposition16 = np.array((0, 1, 2, 3, 4, 5, 8, 9), dtype=int)
frozenbitposition = np.array((0, 1, 2, 4), dtype=int)
bits = np.random.randint(2, size=k)
print 'bits:', bits
encoder = PolarEncoder(n, k, frozenbitposition, frozenbits)
decoder = PolarDecoder(n, k, frozenbitposition, frozenbits)
encoded = encoder.encode(bits)
print 'encoded:', encoded
rx_st = decoder._lr_sc_decoder(encoded)
rx_eff = decoder._lr_sc_decoder_efficient(encoded)
print 'standard :', rx_st
print 'efficient:', rx_eff
print (rx_st == rx_eff).all()
def main():
# power = 3
# n = 2 ** power
# k = 4
# frozenbits = np.zeros(n - k, dtype=int)
# frozenbitposition = np.array((0, 1, 2, 4), dtype=int)
# frozenbitposition4 = np.array((0, 1), dtype=int)
#
#
# encoder = PolarEncoder(n, k, frozenbitposition, frozenbits)
# decoder = PolarDecoder(n, k, frozenbitposition, frozenbits)
#
# bits = np.ones(k, dtype=int)
# print "bits: ", bits
# evec = encoder.encode(bits)
# print "froz: ", encoder._insert_frozen_bits(bits)
# print "evec: ", evec
#
# evec[1] = 0
# deced = decoder._lr_sc_decoder(evec)
# print 'SC decoded:', deced
#
# test_reverse_enc_dec()
# compare_decoder_impls()
test_systematic_decoder()
if __name__ == '__main__':
main()
| gpl-3.0 |
snowdj/research_public | drafts/kelly/kelly_kalman_pairs.py | 3 | 11530 | """
Pairs Trading with Kalman Filters
Author: David Edwards
This algorithm extends the Kalman Filtering pairs trading algorithm from a
previous lecture to support multiple pairs. In order to extend the idea,
the previous algorithm was factored into a class so several instances can be
created with different assets.
We chose to apply the Kelly criterion to this algorithm because we felt it would
be a good fit after looking at the beta and max drawdown values. One challenge
with applying Kelly in this case was the initial burn-in period over the course
of collecting returns data.
In this example we update leverage based on the Kelly score computed from
historical returns. If the Kelly score is less than 1, a leverage of 1 is
used. Since the Kelly score can be negative, we use max(1.0, kelly) to set
the portfolio's leverage.
"""
import numpy as np
import pandas as pd
from pykalman import KalmanFilter
import statsmodels.api as sm
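# A note on the sizing rule used below (descriptive comment, not a change in
# logic): KellyLeverage.kelly_score approximates the continuous Kelly fraction
# f* = mean(r) / var(r) from the portfolio's own log returns, and
# update_leverage then allocates max(1.0, f*) of equity evenly across pairs.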
def initialize(context):
# Quantopian backtester specific variables
set_slippage(slippage.FixedSlippage(spread=0))
set_commission(commission.PerShare(cost=0.01, min_trade_cost=1.0))
set_symbol_lookup_date('2014-01-01')
context.pairs = [
KalmanPairTrade(symbol('STX'), symbol('WDC'),
initial_bars=300, freq='1m', delta=1e-3, maxlen=300),
KalmanPairTrade(symbol('CBI'), symbol('JEC'),
initial_bars=300, freq='1m', delta=1e-3, maxlen=300),
KalmanPairTrade(symbol('MAS'), symbol('VMC'),
initial_bars=300, freq='1m', delta=1e-3, maxlen=300),
KalmanPairTrade(symbol('JPM'), symbol('C'),
initial_bars=300, freq='1m', delta=1e-3, maxlen=300),
KalmanPairTrade(symbol('AON'), symbol('MMC'),
initial_bars=300, freq='1m', delta=1e-3, maxlen=300),
KalmanPairTrade(symbol('COP'), symbol('CVX'),
initial_bars=300, freq='1m', delta=1e-3, maxlen=300),
]
weight = 1.8 / len(context.pairs)
for pair in context.pairs:
pair.leverage = weight
context.kelly = KellyLeverage()
schedule_function(context.kelly.update,
time_rule=time_rules.market_close())
schedule_function(update_leverage,
time_rule=time_rules.market_open())
for minute in range(10, 390, 120):
for pair in context.pairs:
schedule_function(pair.trading_logic,
time_rule=time_rules.market_open(minutes=minute))
class KellyLeverage(object):
def __init__(self, minlen=30, maxlen=500):
self.equity = []
self.minlen = minlen
self.maxlen = maxlen
def update(self, context, data):
self.equity.append(context.portfolio.portfolio_value)
if len(self.equity) > self.maxlen:
self.equity = self.equity[-self.maxlen::]
def kelly_score(self):
if len(self.equity) < self.minlen:
return np.nan
returns = np.diff(np.log(self.equity))[1:]
return np.mean(returns) / np.var(returns)
def handle_data(context, data):
record(market_exposure=context.account.net_leverage,
leverage=context.account.leverage,
kelly=context.kelly.kelly_score())
def update_leverage(context, data):
kelly = context.kelly.kelly_score()
if np.isnan(kelly):
return
weight = max(1.0, kelly) / len(context.pairs)
for pair in context.pairs:
pair.leverage = weight
class KalmanPairTrade(object):
def __init__(self, y, x, leverage=1.0, initial_bars=10,
freq='1d', delta=1e-3, maxlen=3000):
self._y = y
self._x = x
self.maxlen = maxlen
self.initial_bars = initial_bars
self.freq = freq
self.delta = delta
self.leverage = leverage
self.Y = KalmanMovingAverage(self._y, maxlen=self.maxlen)
self.X = KalmanMovingAverage(self._x, maxlen=self.maxlen)
self.kf = None
self.entry_dt = pd.Timestamp('1900-01-01', tz='utc')
@property
def name(self):
return "{}~{}".format(self._y.symbol, self._x.symbol)
def trading_logic(self, context, data):
try:
if self.kf is None:
self.initialize_filters(context, data)
return
self.update()
if get_open_orders(sid=self._x) or get_open_orders(sid=self._y):
return
spreads = self.mean_spread()
zscore = (spreads[-1] - spreads.mean()) / spreads.std()
reference_pos = context.portfolio.positions[self._y].amount
now = get_datetime()
if reference_pos:
if (now - self.entry_dt).days > 20:
order_target(self._y, 0.0)
order_target(self._x, 0.0)
return
# Do a PNL check to make sure a reversion at least covered trading costs
# I do this because parameter drift often causes trades to be exited
# before the original spread has become profitable.
pnl = self.get_pnl(context, data)
if zscore > -0.0 and reference_pos > 0 and pnl > 0:
order_target(self._y, 0.0)
order_target(self._x, 0.0)
elif zscore < 0.0 and reference_pos < 0 and pnl > 0:
order_target(self._y, 0.0)
order_target(self._x, 0.0)
else:
if zscore > 1.5:
order_target_percent(self._y, -self.leverage / 2.)
order_target_percent(self._x, self.leverage / 2.)
self.entry_dt = now
if zscore < -1.5:
order_target_percent(self._y, self.leverage / 2.)
order_target_percent(self._x, -self.leverage / 2.)
self.entry_dt = now
except Exception as e:
log.debug("[{}] {}".format(self.name, str(e)))
def update(self):
prices = np.log(history(bar_count=1, frequency='1m', field='price'))
self.X.update(prices)
self.Y.update(prices)
self.kf.update(self.means_frame().iloc[-1])
def mean_spread(self):
means = self.means_frame()
beta, alpha = self.kf.state_mean
return means[self._y] - (beta * means[self._x] + alpha)
def means_frame(self):
mu_Y = self.Y.state_means
mu_X = self.X.state_means
return pd.DataFrame([mu_Y, mu_X]).T
def initialize_filters(self, context, data):
prices = np.log(history(self.initial_bars, self.freq, 'price'))
self.X.update(prices)
self.Y.update(prices)
# Drops the initial 0 mean value from the kalman filter
self.X.state_means = self.X.state_means.iloc[-self.initial_bars:]
self.Y.state_means = self.Y.state_means.iloc[-self.initial_bars:]
self.kf = KalmanRegression(self.Y.state_means, self.X.state_means,
delta=self.delta, maxlen=self.maxlen)
def get_pnl(self, context, data):
x = self._x
y = self._y
prices = history(1, '1d', 'price').iloc[-1]
positions = context.portfolio.positions
dx = prices[x] - positions[x].cost_basis
dy = prices[y] - positions[y].cost_basis
return (positions[x].amount * dx +
positions[y].amount * dy)
class KalmanMovingAverage(object):
"""
Estimates the moving average of a price process
via Kalman Filtering.
See http://pykalman.github.io/ for docs on the
filtering process.
"""
def __init__(self, asset, observation_covariance=1.0, initial_value=0,
initial_state_covariance=1.0, transition_covariance=0.05,
initial_window=20, maxlen=3000, freq='1d'):
self.asset = asset
self.freq = freq
self.initial_window = initial_window
self.maxlen = maxlen
self.kf = KalmanFilter(transition_matrices=[1],
observation_matrices=[1],
initial_state_mean=initial_value,
initial_state_covariance=initial_state_covariance,
observation_covariance=observation_covariance,
transition_covariance=transition_covariance)
self.state_means = pd.Series([self.kf.initial_state_mean], name=self.asset)
self.state_vars = pd.Series([self.kf.initial_state_covariance], name=self.asset)
def update(self, observations):
for dt, observation in observations[self.asset].iterkv():
self._update(dt, observation)
def _update(self, dt, observation):
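        # one Kalman step: fold the newest observation into the running state mean/covariance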
mu, cov = self.kf.filter_update(self.state_means.iloc[-1],
self.state_vars.iloc[-1],
observation)
self.state_means[dt] = mu.flatten()[0]
self.state_vars[dt] = cov.flatten()[0]
if self.state_means.shape[0] > self.maxlen:
self.state_means = self.state_means.iloc[-self.maxlen:]
if self.state_vars.shape[0] > self.maxlen:
self.state_vars = self.state_vars.iloc[-self.maxlen:]
class KalmanRegression(object):
"""
Uses a Kalman Filter to estimate regression parameters
in an online fashion.
Estimated model: y ~ beta * x + alpha
"""
def __init__(self, initial_y, initial_x, delta=1e-5, maxlen=3000):
self._x = initial_x.name
self._y = initial_y.name
self.maxlen = maxlen
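        # the (beta, alpha) state follows a random walk; trans_cov scales with delta,
        # so a larger delta lets the hedge ratio drift faster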
trans_cov = delta / (1 - delta) * np.eye(2)
obs_mat = np.expand_dims(
np.vstack([[initial_x], [np.ones(initial_x.shape[0])]]).T, axis=1)
self.kf = KalmanFilter(n_dim_obs=1, n_dim_state=2,
initial_state_mean=np.zeros(2),
initial_state_covariance=np.ones((2, 2)),
transition_matrices=np.eye(2),
observation_matrices=obs_mat,
observation_covariance=1.0,
transition_covariance=trans_cov)
state_means, state_covs = self.kf.filter(initial_y.values)
self.means = pd.DataFrame(state_means,
index=initial_y.index,
columns=['beta', 'alpha'])
self.state_cov = state_covs[-1]
def update(self, observations):
x = observations[self._x]
y = observations[self._y]
mu, self.state_cov = self.kf.filter_update(
self.state_mean, self.state_cov, y,
observation_matrix=np.array([[x, 1.0]]))
mu = pd.Series(mu, index=['beta', 'alpha'],
name=observations.name)
self.means = self.means.append(mu)
if self.means.shape[0] > self.maxlen:
self.means = self.means.iloc[-self.maxlen:]
def get_spread(self, observations):
x = observations[self._x]
y = observations[self._y]
return y - (self.means.beta[-1] * x + self.means.alpha[-1])
@property
def state_mean(self):
return self.means.iloc[-1]
| apache-2.0 |
JoostVisser/ml-assignment2 | mglearn/plot_knn_regression.py | 1 | 1210 | import numpy as np
import matplotlib.pyplot as plt
from sklearn.neighbors import KNeighborsRegressor
from sklearn.metrics import euclidean_distances
from mglearn.datasets import make_wave
def plot_knn_regression(n_neighbors=1):
X, y = make_wave(n_samples=40)
X_test = np.array([[-1.5], [0.9], [1.5]])
dist = euclidean_distances(X, X_test)
closest = np.argsort(dist, axis=0)
plt.figure(figsize=(10, 6))
reg = KNeighborsRegressor(n_neighbors=n_neighbors).fit(X, y)
y_pred = reg.predict(X_test)
for x, y_, neighbors in zip(X_test, y_pred, closest.T):
for neighbor in neighbors[:n_neighbors]:
plt.arrow(x[0], y_, X[neighbor, 0] - x[0], y[neighbor] - y_,
head_width=0, fc='k', ec='k')
train, = plt.plot(X, y, 'o')
test, = plt.plot(X_test, -3 * np.ones(len(X_test)), '*', c='g', markersize=20)
pred, = plt.plot(X_test, y_pred, '*', c='b', markersize=20)
plt.vlines(X_test, -3.1, 3.1, linestyle="--")
plt.legend([train, test, pred],
["training data/target", "test data", "test prediction"], ncol=3, loc=(.1, 1.025))
plt.ylim(-3.1, 3.1)
plt.xlabel("Feature")
plt.ylabel("Target")
| mit |
klingebj/regreg | doc/source/sphinxext/docscrape_sphinx.py | 2 | 7817 | # emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
import re, inspect, textwrap, pydoc
import sphinx
from docscrape import NumpyDocString, FunctionDoc, ClassDoc
class SphinxDocString(NumpyDocString):
def __init__(self, docstring, config={}):
self.use_plots = config.get('use_plots', False)
NumpyDocString.__init__(self, docstring, config=config)
# string conversion routines
def _str_header(self, name, symbol='`'):
return ['.. rubric:: ' + name, '']
def _str_field_list(self, name):
return [':' + name + ':']
def _str_indent(self, doc, indent=4):
out = []
for line in doc:
out += [' '*indent + line]
return out
def _str_signature(self):
return ['']
def _str_summary(self):
return self['Summary'] + ['']
def _str_extended_summary(self):
return self['Extended Summary'] + ['']
def _str_param_list(self, name):
out = []
if self[name]:
out += self._str_field_list(name)
out += ['']
for param,param_type,desc in self[name]:
out += self._str_indent(['**%s** : %s' % (param.strip(),
param_type)])
out += ['']
out += self._str_indent(desc,8)
out += ['']
return out
@property
def _obj(self):
if hasattr(self, '_cls'):
return self._cls
elif hasattr(self, '_f'):
return self._f
return None
def _str_member_list(self, name):
"""
Generate a member listing, autosummary:: table where possible,
and a table where not.
"""
out = []
if self[name]:
out += ['.. rubric:: %s' % name, '']
prefix = getattr(self, '_name', '')
if prefix:
prefix = '~%s.' % prefix
autosum = []
others = []
for param, param_type, desc in self[name]:
param = param.strip()
if not self._obj or hasattr(self._obj, param):
autosum += [" %s%s" % (prefix, param)]
else:
others.append((param, param_type, desc))
if autosum:
out += ['.. autosummary::', ' :toctree:', '']
out += autosum
if others:
maxlen_0 = max([len(x[0]) for x in others])
maxlen_1 = max([len(x[1]) for x in others])
hdr = "="*maxlen_0 + " " + "="*maxlen_1 + " " + "="*10
fmt = '%%%ds %%%ds ' % (maxlen_0, maxlen_1)
n_indent = maxlen_0 + maxlen_1 + 4
out += [hdr]
for param, param_type, desc in others:
out += [fmt % (param.strip(), param_type)]
out += self._str_indent(desc, n_indent)
out += [hdr]
out += ['']
return out
def _str_section(self, name):
out = []
if self[name]:
out += self._str_header(name)
out += ['']
content = textwrap.dedent("\n".join(self[name])).split("\n")
out += content
out += ['']
return out
def _str_see_also(self, func_role):
out = []
if self['See Also']:
see_also = super(SphinxDocString, self)._str_see_also(func_role)
out = ['.. seealso::', '']
out += self._str_indent(see_also[2:])
return out
def _str_warnings(self):
out = []
if self['Warnings']:
out = ['.. warning::', '']
out += self._str_indent(self['Warnings'])
return out
def _str_index(self):
idx = self['index']
out = []
if len(idx) == 0:
return out
out += ['.. index:: %s' % idx.get('default','')]
for section, references in idx.iteritems():
if section == 'default':
continue
elif section == 'refguide':
out += [' single: %s' % (', '.join(references))]
else:
out += [' %s: %s' % (section, ','.join(references))]
return out
def _str_references(self):
out = []
if self['References']:
out += self._str_header('References')
if isinstance(self['References'], str):
self['References'] = [self['References']]
out.extend(self['References'])
out += ['']
# Latex collects all references to a separate bibliography,
# so we need to insert links to it
if sphinx.__version__ >= "0.6":
out += ['.. only:: latex','']
else:
out += ['.. latexonly::','']
items = []
for line in self['References']:
m = re.match(r'.. \[([a-z0-9._-]+)\]', line, re.I)
if m:
items.append(m.group(1))
out += [' ' + ", ".join(["[%s]_" % item for item in items]), '']
return out
def _str_examples(self):
examples_str = "\n".join(self['Examples'])
if (self.use_plots and 'import matplotlib' in examples_str
and 'plot::' not in examples_str):
out = []
out += self._str_header('Examples')
out += ['.. plot::', '']
out += self._str_indent(self['Examples'])
out += ['']
return out
else:
return self._str_section('Examples')
def __str__(self, indent=0, func_role="obj"):
out = []
out += self._str_signature()
out += self._str_index() + ['']
out += self._str_summary()
out += self._str_extended_summary()
for param_list in ('Parameters', 'Returns', 'Raises'):
out += self._str_param_list(param_list)
out += self._str_warnings()
out += self._str_see_also(func_role)
out += self._str_section('Notes')
out += self._str_references()
out += self._str_examples()
for param_list in ('Attributes', 'Methods'):
out += self._str_member_list(param_list)
out = self._str_indent(out,indent)
return '\n'.join(out)
class SphinxFunctionDoc(SphinxDocString, FunctionDoc):
def __init__(self, obj, doc=None, config={}):
self.use_plots = config.get('use_plots', False)
FunctionDoc.__init__(self, obj, doc=doc, config=config)
class SphinxClassDoc(SphinxDocString, ClassDoc):
def __init__(self, obj, doc=None, func_doc=None, config={}):
self.use_plots = config.get('use_plots', False)
ClassDoc.__init__(self, obj, doc=doc, func_doc=None, config=config)
class SphinxObjDoc(SphinxDocString):
def __init__(self, obj, doc=None, config={}):
self._f = obj
SphinxDocString.__init__(self, doc, config=config)
def get_doc_object(obj, what=None, doc=None, config={}):
if what is None:
if inspect.isclass(obj):
what = 'class'
elif inspect.ismodule(obj):
what = 'module'
elif callable(obj):
what = 'function'
else:
what = 'object'
if what == 'class':
return SphinxClassDoc(obj, func_doc=SphinxFunctionDoc, doc=doc,
config=config)
elif what in ('function', 'method'):
return SphinxFunctionDoc(obj, doc=doc, config=config)
else:
if doc is None:
doc = pydoc.getdoc(obj)
return SphinxObjDoc(obj, doc, config=config)
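# Illustrative usage sketch: render the docstring of an arbitrary target.
# np.mean is just an example here; any object with a numpydoc-formatted
# docstring should work the same way.
if __name__ == '__main__':
    import numpy as np
    print(get_doc_object(np.mean))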
| bsd-3-clause |
Eric89GXL/scikit-learn | sklearn/feature_selection/variance_threshold.py | 7 | 2438 | # Author: Lars Buitinck <L.J.Buitinck@uva.nl>
# License: 3-clause BSD
import numpy as np
from ..base import BaseEstimator
from .base import SelectorMixin
from ..utils import atleast2d_or_csr
from ..utils.sparsefuncs import csr_mean_variance_axis0
class VarianceThreshold(BaseEstimator, SelectorMixin):
"""Feature selector that removes all low-variance features.
This feature selection algorithm looks only at the features (X), not the
desired outputs (y), and can thus be used for unsupervised learning.
Parameters
----------
threshold : float, optional
Features with a training-set variance lower than this threshold will
be removed. The default is to keep all features with non-zero variance,
i.e. remove the features that have the same value in all samples.
Attributes
----------
`variances_` : array, shape (n_features,)
Variances of individual features.
Examples
--------
The following dataset has integer features, two of which are the same
in every sample. These are removed with the default setting for threshold::
>>> X = [[0, 2, 0, 3], [0, 1, 4, 3], [0, 1, 1, 3]]
>>> selector = VarianceThreshold()
>>> selector.fit_transform(X)
array([[2, 0],
[1, 4],
[1, 1]])
"""
def __init__(self, threshold=0.):
self.threshold = threshold
def fit(self, X, y=None):
"""Learn empirical variances from X.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Sample vectors from which to compute variances.
y : any
Ignored. This parameter exists only for compatibility with
sklearn.pipeline.Pipeline.
Returns
-------
self
"""
X = atleast2d_or_csr(X, dtype=np.float64)
if hasattr(X, "toarray"): # sparse matrix
_, self.variances_ = csr_mean_variance_axis0(X)
else:
self.variances_ = np.var(X, axis=0)
if np.all(self.variances_ <= self.threshold):
msg = "No feature in X meets the variance threshold {0:.5f}"
if X.shape[0] == 1:
msg += " (X contains only one sample)"
raise ValueError(msg.format(self.threshold))
return self
def _get_support_mask(self):
return self.variances_ > self.threshold
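# Illustrative sketch: for Bernoulli features Var[X] = p(1 - p), so a
# threshold of .8 * (1 - .8) drops features that are constant in more than
# 80% of the samples (this mirrors the scikit-learn documentation example).
if __name__ == '__main__':
    X = [[0, 0, 1], [0, 1, 0], [1, 0, 0], [0, 1, 1], [0, 1, 0], [0, 1, 1]]
    selector = VarianceThreshold(threshold=(.8 * (1 - .8)))
    print(selector.fit_transform(X))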
| bsd-3-clause |
schets/scikit-learn | examples/feature_stacker.py | 246 | 1906 | """
=================================================
Concatenating multiple feature extraction methods
=================================================
In many real-world examples, there are many ways to extract features from a
dataset. Often it is beneficial to combine several methods to obtain good
performance. This example shows how to use ``FeatureUnion`` to combine
features obtained by PCA and univariate selection.
Combining features using this transformer has the benefit that it allows
cross validation and grid searches over the whole process.
The combination used in this example is not particularly helpful on this
dataset and is only used to illustrate the usage of FeatureUnion.
"""
# Author: Andreas Mueller <amueller@ais.uni-bonn.de>
#
# License: BSD 3 clause
from sklearn.pipeline import Pipeline, FeatureUnion
from sklearn.grid_search import GridSearchCV
from sklearn.svm import SVC
from sklearn.datasets import load_iris
from sklearn.decomposition import PCA
from sklearn.feature_selection import SelectKBest
iris = load_iris()
X, y = iris.data, iris.target
# This dataset is way too high-dimensional. Better do PCA:
pca = PCA(n_components=2)
# Maybe some original features were good, too?
selection = SelectKBest(k=1)
# Build estimator from PCA and Univariate selection:
combined_features = FeatureUnion([("pca", pca), ("univ_select", selection)])
# Use combined features to transform dataset:
X_features = combined_features.fit(X, y).transform(X)
svm = SVC(kernel="linear")
# Do grid search over k, n_components and C:
pipeline = Pipeline([("features", combined_features), ("svm", svm)])
param_grid = dict(features__pca__n_components=[1, 2, 3],
features__univ_select__k=[1, 2],
svm__C=[0.1, 1, 10])
grid_search = GridSearchCV(pipeline, param_grid=param_grid, verbose=10)
grid_search.fit(X, y)
print(grid_search.best_estimator_)
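# Follow-up sketch: the best cross-validated score is also available on the
# fitted search object (standard GridSearchCV attribute):
print(grid_search.best_score_)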
| bsd-3-clause |
tcm129/trading-with-python | spreadApp/makeDist.py | 77 | 1720 | from distutils.core import setup
import py2exe
manifest_template = '''
<?xml version="1.0" encoding="UTF-8" standalone="yes"?>
<assembly xmlns="urn:schemas-microsoft-com:asm.v1" manifestVersion="1.0">
<assemblyIdentity
version="5.0.0.0"
processorArchitecture="x86"
name="%(prog)s"
type="win32"
/>
<description>%(prog)s Program</description>
<dependency>
<dependentAssembly>
<assemblyIdentity
type="win32"
name="Microsoft.Windows.Common-Controls"
version="6.0.0.0"
processorArchitecture="X86"
publicKeyToken="6595b64144ccf1df"
language="*"
/>
</dependentAssembly>
</dependency>
</assembly>
'''
RT_MANIFEST = 24
import matplotlib
opts = {
'py2exe': {
"compressed": 1,
"bundle_files" : 3,
"includes" : ["sip",
"matplotlib.backends",
"matplotlib.backends.backend_qt4agg",
"pylab", "numpy",
"matplotlib.backends.backend_tkagg"],
'excludes': ['_gtkagg', '_tkagg', '_agg2',
'_cairo', '_cocoaagg',
'_fltkagg', '_gtk', '_gtkcairo', ],
'dll_excludes': ['libgdk-win32-2.0-0.dll',
'libgobject-2.0-0.dll']
}
}
setup(name="triton",
version = "0.1",
scripts=["spreadScanner.pyw"],
windows=[{"script": "spreadScanner.pyw"}],
options=opts,
data_files=matplotlib.get_py2exe_datafiles(),
other_resources = [(RT_MANIFEST, 1, manifest_template % dict(prog="spreadDetective"))],
zipfile = None)
| bsd-3-clause |
gladk/trunk | doc/sphinx/tutorial/06-periodic-triaxial-test.py | 4 | 3492 | # encoding: utf-8
# periodic triaxial test simulation
#
# The initial packing is either
#
# 1. random cloud with uniform distribution, or
# 2. cloud with specified granulometry (radii and percentages), or
# 3. cloud of clumps, i.e. rigid aggregates of several particles
#
# The triaxial consists of 2 stages:
#
# 1. isotropic compaction, until sigmaIso is reached in all directions;
# this stage is ended by calling compactionFinished()
# 2. constant-strain deformation along the z-axis, while maintaining
# constant stress (sigmaIso) laterally; this stage is ended by calling
# triaxFinished()
#
# Controlling of strain and stresses is performed via PeriTriaxController,
# of which parameters determine type of control and also stability
# condition (maxUnbalanced) so that the packing is considered stabilized
# and the stage is done.
#
sigmaIso=-1e5
#import matplotlib
#matplotlib.use('Agg')
# generate loose packing
from yade import pack, qt, plot
O.periodic=True
sp=pack.SpherePack()
if 0:
## uniform distribution
sp.makeCloud((0,0,0),(2,2,2),rMean=.1,rRelFuzz=.3,periodic=True)
elif 0:
## per-fraction distribution
## passing: cumulative percentage
sp.particleSD2(radii=[.09,.1,.2],passing=[40,80,100],periodic=True,numSph=1000)
else:
## create packing from clumps
# configuration of one clump
c1=pack.SpherePack([((0,0,0),.03333),((.03,0,0),.017),((0,.03,0),.017)])
# make cloud using the configuration c1 (there could be c2, c3, ...; selection between them would be random)
sp.makeClumpCloud((0,0,0),(2,2,2),[c1],periodic=True,num=500)
# setup periodic boundary, insert the packing
sp.toSimulation()
O.engines=[
ForceResetter(),
InsertionSortCollider([Bo1_Sphere_Aabb()]),
InteractionLoop(
[Ig2_Sphere_Sphere_ScGeom()],
[Ip2_FrictMat_FrictMat_FrictPhys()],
[Law2_ScGeom_FrictPhys_CundallStrack()]
),
PeriTriaxController(label='triax',
# specify target values and whether they are strains or stresses
goal=(sigmaIso,sigmaIso,sigmaIso),stressMask=7,
# type of servo-control
dynCell=True,maxStrainRate=(10,10,10),
# wait until the unbalanced force goes below this value
maxUnbalanced=.1,relStressTol=1e-3,
# call this function when goal is reached and the packing is stable
doneHook='compactionFinished()'
),
NewtonIntegrator(damping=.2),
PyRunner(command='addPlotData()',iterPeriod=100),
]
O.dt=.5*utils.PWaveTimeStep()
def addPlotData():
plot.addData(unbalanced=utils.unbalancedForce(),i=O.iter,
sxx=triax.stress[0],syy=triax.stress[1],szz=triax.stress[2],
exx=triax.strain[0],eyy=triax.strain[1],ezz=triax.strain[2],
# save all available energy data
Etot=O.energy.total(),**O.energy
)
# enable energy tracking in the code
O.trackEnergy=True
# define what to plot
plot.plots={'i':('unbalanced',),'i ':('sxx','syy','szz'),' i':('exx','eyy','ezz'),
# energy plot
' i ':(O.energy.keys,None,'Etot'),
}
# show the plot
plot.plot()
def compactionFinished():
# set the current cell configuration to be the reference one
O.cell.trsf=Matrix3.Identity
# change control type: keep constant confinement in x,y, 20% compression in z
triax.goal=(sigmaIso,sigmaIso,-.3)
triax.stressMask=3
# allow faster deformation along x,y to better maintain stresses
triax.maxStrainRate=(1.,1.,.1)
# next time, call triaxFinished instead of compactionFinished
triax.doneHook='triaxFinished()'
# do not wait for stabilization before calling triaxFinished
triax.maxUnbalanced=10
def triaxFinished():
print 'Finished'
O.pause()
| gpl-2.0 |
ManuSchmi88/landlab | drivers/runfile_combined_soil.py | 1 | 20706 | """Landlab Driver for running Landscape Evolution Experiments with
- Soil weathering
- Soil diffusion
- Detachment-limited river erosion
- tectonic uplift
- vegetation modulation of erosion effects
Created by: Manuel Schmid, University of Tuebingen, 07.04.2017
"""
## Import necessary Python and Landlab Modules
import numpy as np
from landlab import RasterModelGrid
from landlab import CLOSED_BOUNDARY, FIXED_VALUE_BOUNDARY
from landlab.components import FlowRouter
from landlab.components import DepthDependentDiffuser
from landlab.components import ExponentialWeatherer
from landlab.components import StreamPowerEroder
from landlab.components import LinearDiffuser
from landlab.components import FastscapeEroder
from landlab.components import DepressionFinderAndRouter
from landlab.components import drainage_density
from landlab import imshow_grid
from landlab.io.netcdf import write_netcdf
from landlab.io.netcdf import read_netcdf
from matplotlib import pyplot as plt
from matplotlib import rcParams
rcParams.update({'figure.autolayout': True})
rcParams['agg.path.chunksize'] = 200000000
import time
#---------------------------Parameter Definitions------------------------------#
##Model Grid:##
ncols = 101
nrows = 101
dx = 100
##Model runtime:##
#Defines total model runtime
totalT = int(15.e6)
#Defines spinup-time before transient
ssT = int(15.e6)
#vegi step-function timestep
sfT = int(7.e6)
#vegi step-function amplitude
sfA = -0.3
#Defines time-step
dt = int(1000)
#Number of total-timestep (nt) and spin-up timesteps (ssnt)
nt = int(totalT / dt)
ssnt = int(ssT / dt)
ssntSF = int(sfT / dt)
#time-vector (total and transient), used for plotting later
timeVec = np.arange(0, totalT, dt)
transTimeVec = np.arange(0, (totalT - ssT), dt)
##Uplift:##
#upliftRate
upliftRate = 2e-4 #m/yr
uplift_per_step = upliftRate * dt
##Fluvial Erosion:
##Fluvial erosion is implemented as a stream-power law
##E = K * A^m * S^n
##For comparison, see the usual stream-power references (Tucker, Whipple, etc.)
#ksp = 0.4
ksp = 2e-7
msp = 0.5
nsp = 1
thresholdSP = 2.e-4
critArea = 1e6 #Minimum area for channel formation
##Hillslope Erosion:
##hillslope erosion is implemented as linear diffusion which depends on soil depth;
##for comparison see Johnstone and Hilley 2014.
##Linear Diffusivity is modified after Istanbulluoglu 2015, and represented as a
##numpy array. You need to define the "Base-diffusivity":
linDiffBase = 6e-2 #m^2/yr
alphaDiff = 0.2 #After Istanbulluoglu
soilTransportDepth = 1 #m
initialSoilDepth = 0 #m
#topographic_elevation - soil_depth
initialBedrockElevation = 0 #m
##Bedrock Weathering:
#weathering of bedrock is implemented as an exponential weathering function after
#Ahnert 1976
maxSoilProductionRate = 0.00015 #m/yr
soilProductionDepth = 0.2 #m
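#(i.e. soil production rate = maxSoilProductionRate * exp(-soil depth / soilProductionDepth))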
##Vegetation Implementation
##This is stuff that is needed to use the vegetation-modulation after
##Istanbulluoglu 2015
aqDens = 1000. #Density of water
grav = 9.81 #Gravitation Acceleration
nSoil = 0.01 #Mannings Number for bare soil [-]
nVRef = 0.6 #Mannings Number for Vegetation
vRef = 1. #Reference Vegetation for Maximum Mannings Roughness
w = 1. #Scaling Factor for vegetation influence
##Output production
##This Driver is set up to produce NetCDFs, .pngs, etc.
##For more detail look in the Plotting section of the main loop
##and comment out anything that you don't want produced
#Interval in which output is produced
outInt = 10000 #yrs
#Number of total produced outputs
no = totalT / outInt
#number of zeros for file_naming. Don't meddle with this.
zp = len(str(int(no)))
print("finished with parameter-initiation")
print("---------------------")
#---------------------------------Grid Setup-----------------------------------#
#This initiates a Modelgrid with dimensions nrows x ncols and spatial scaling of dx
mg = RasterModelGrid((nrows,ncols), dx)
#only uncomment this if there is a pre-existing topography you want to load.
#right now this only works if the topo was saved in numpys .npy format.
try:
topoSeed = np.load('topoSeed.npy')
print('loaded topoSeed.npy')
except:
print('There is no file containing an initial topography')
#Initate all the fields that are needed for calculations
mg.add_zeros('node','topographic__elevation')
mg.add_zeros('node','bedrock__elevation')
mg.at_node['bedrock__elevation'][:] = initialBedrockElevation
#checks if a standard topo is used; if not, creates its own
if 'topoSeed' in locals():
mg.at_node['topographic__elevation'] += topoSeed
#mg.at_node['bedrock__elevation'] += topoSeed
print('Using pre-existing topography from file topoSeed.npy')
else:
mg.at_node['topographic__elevation'] += np.random.rand(mg.at_node.size)/10000
#mg.at_node['bedrock__elevation'] += np.random.rand(mg.at_node.size)/10000
print('No pre-existing topography. Creating own random noise topo.')
mg.add_zeros('node','soil__depth')
mg.at_node['soil__depth'][:] = initialSoilDepth
mg.add_zeros('node','vegetation__density')
#Create boundary conditions of the model grid (either closed or fixed-head)
for edge in (mg.nodes_at_left_edge,mg.nodes_at_right_edge, mg.nodes_at_top_edge):
mg.status_at_node[edge] = CLOSED_BOUNDARY
for edge in (mg.nodes_at_bottom_edge):
mg.status_at_node[edge] = FIXED_VALUE_BOUNDARY
print("finished with setup of modelgrid")
print("---------------------")
##---------------------------------Vegi implementation--------------------------#
##Set up a timeseries for vegetation-densities
##This basically assumes that for a spin-up time (ssT) we have constant vegetation
##cover (vp) and after that the vegetation cover changes as a sine function
vp = .8
vegiTimeseries = np.zeros(int(totalT / dt)) + vp
#This part sets the modification of the vegi-distribution. comment/uncomment
#for usage
#this modifies the vegiTimeseries array with a sinusoidal curve
#vegiTimeseries[ssnt:] = (np.sin(0.000003 * transTimeVec) + 1) / 10 + (vp + 0.1)
#this incorporates a vegi step-function at timestep sfT with amplitude sfA
vegiTimeseries[ssntSF:] = vp - sfA
mg.at_node['vegetation__density'][:] = vp
#This maps the vegetation density on the nodes to the links between the nodes
vegiLinks = mg.map_mean_of_link_nodes_to_link('vegetation__density')
##These are the necessary calculations for implementing the vegetation__density
##in the fluvial routines
nSoil_to_15 = np.power(nSoil, 1.5)
Ford = aqDens * grav * nSoil_to_15
n_v_frac = nSoil + (nVRef * ((mg.at_node['vegetation__density'] / vRef)**w)) #self.vd = VARIABLE!
#n_v_frac_to_w = np.power(n_v_frac, w)
#Prefect = np.power(n_v_frac_to_w, 0.9)
Prefect = np.power(n_v_frac, 0.9)
Kv = ksp * Ford/Prefect
##These are the calcultions to calculate the linear diffusivity based on vegis
linDiff = mg.zeros('node', dtype = float)
linDiff = linDiffBase * np.exp(-alphaDiff * vegiLinks)
print("finished setting up the vegetation fields and Kdiff and Kriv")
print("---------------------")
##---------------------------------Array initialization---------------------#
##This initializes all the arrays that are used to store data during the runtime
##of the model. this is mostly for plotting purposed and to create the .txt
##outputs. This potentially takes up a lot of space, so check if needed.
dhdtA = [] #Vector containing dhdt values for each node per timestep
meandhdt = [] #contains mean elevation change per timestep
mean_E = [] #contains the mean "erosion" rate from the mass balance
mean_hill_E = [] #contains mean hillslope erosion rate
mean_riv_E = [] #contains mean river erosion rate
mean_dd = [] #contains mean drainage density
mean_K_riv = [] #contains mean K-value for spl
mean_K_diff = [] #contains mean K-value for ld
mean_slope = [] #mean slope within model-area
max_slope = [] #maximum slope within model area
min_slope = [] #minimum slope within model area
mean_elev = [] #mean elevation within model area
max_elev = [] #maximum elevation within model area
min_elev = [] #minimum elevation within model area
vegi_P_mean = [] #debug record of the vegetation forcing
mean_SD = [] #mean soil depth
##---------------------------------Component initialization---------------------#
#sp = StreamPowerEroder(mg,
# K_sp = Kv,
# m_sp = msp,
# n_sp = nsp,
# threshold_sp=thresholdSP)
fc = FastscapeEroder(mg,
K_sp = Kv,
m_sp = msp,
n_sp = nsp,
threshold_sp = 0,
rainfall_intensity = 1)
fr = FlowRouter(mg)
lm = DepressionFinderAndRouter(mg)
expw = ExponentialWeatherer(mg,
max_soil_production_rate = maxSoilProductionRate,
soil_production_decay_depth = soilProductionDepth)
dld = DepthDependentDiffuser(mg,
linear_diffusivity = linDiff,
soil_transport_decay_depth = soilTransportDepth)
ld = LinearDiffuser(mg, linear_diffusivity = linDiff)
print("finished with the initialization of the erosion components")
print("---------------------")
##---------------------------------Main Loop------------------------------------#
t0 = time.time()
elapsed_time = 0
print("starting with main loop.")
print("---------------------")
#Create incremental counter for controlling progress of mainloop
counter = 0
#Create Limits for DHDT plot. Move this somewhere else later..
DHDTLowLim = upliftRate - (upliftRate * 1)
DHDTHighLim = upliftRate + (upliftRate * 1)
while elapsed_time < totalT:
#create copy of "old" topography
z0 = mg.at_node['topographic__elevation'].copy()
#Call the erosion routines.
#expw.run_one_step(dt=dt)
#dld.run_one_step(dt=dt)
ld.run_one_step(dt=dt)
fr.run_one_step()
lm.map_depressions()
floodedNodes = np.where(lm.flood_status==3)[0]
fc.run_one_step(dt=dt, flooded_nodes = floodedNodes)
mg.at_node['topographic__elevation'][mg.core_nodes] += uplift_per_step #add uplift
mg.at_node['bedrock__elevation'][mg.core_nodes] += uplift_per_step #add uplift
#look for nodes where river incises below current soil thickness
bad_nodes = mg.at_node['topographic__elevation'] < mg.at_node['bedrock__elevation']
#redefine bedrock to current channel elevation
mg.at_node['bedrock__elevation'][bad_nodes] = mg.at_node['topographic__elevation'][bad_nodes]
#calculate drainage_density
channel_mask = mg.at_node['drainage_area'] > critArea
dd = drainage_density.DrainageDensity(mg, channel__mask = channel_mask)
mean_dd.append(dd.calc_drainage_density())
#Calculate dhdt and E
dh = (mg.at_node['topographic__elevation'] - z0)
dhdt = dh/dt
erosionMatrix = upliftRate - dhdt
mean_E.append(np.mean(erosionMatrix))
#Calculate river erosion rate, based on critical area threshold
dh_riv = mg.at_node['topographic__elevation'][np.where(mg.at_node['drainage_area'] > critArea)]\
- z0[np.where(mg.at_node['drainage_area'] > critArea)]
dhdt_riv = dh_riv/dt
mean_riv_E.append(np.mean(upliftRate - dhdt_riv))
#Calculate hillslope erosion rate
dh_hill = mg.at_node['topographic__elevation'][np.where(mg.at_node['drainage_area'] <= critArea)]\
- z0[np.where(mg.at_node['drainage_area'] <= critArea)]
dhdt_hill = dh_hill/dt
mean_hill_E.append(np.mean(upliftRate - dhdt_hill))
#update vegetation__density
mg.at_node['vegetation__density'][:] = vegiTimeseries[int(elapsed_time/dt)-1]
vegiLinks = mg.map_mean_of_link_nodes_to_link('vegetation__density')
#update linDiff
linDiff = linDiffBase*np.exp(-alphaDiff * vegiLinks)
#reinitialize diffuser
# dld = DepthDependentDiffuser(mg,
# linear_diffusivity = linDiff,
# soil_transport_decay_depth = soilTransportDepth)
#
ld = LinearDiffuser(mg, linear_diffusivity = linDiff)
#update K_sp
n_v_frac = nSoil + (nVRef * (mg.at_node['vegetation__density'] / vRef)) #self.vd = VARIABLE!
n_v_frac_to_w = np.power(n_v_frac, w)
Prefect = np.power(n_v_frac_to_w, 0.9)
Kv = ksp * Ford/Prefect
fc = FastscapeEroder(mg,
K_sp = Kv,
m_sp = msp,
n_sp = nsp,
threshold_sp = thresholdSP,
rainfall_intensity = 1)
#Calculate and save mean K-values
#save mean_K_diff and mean_K_riv
mean_K_riv.append(np.mean(Kv))
mean_K_diff.append(np.mean(linDiff))
#Calculate and save mean, max, min slopes
mean_slope.append(np.mean(mg.at_node['topographic__steepest_slope'][mg.core_nodes]))
max_slope.append(np.max(mg.at_node['topographic__steepest_slope'][mg.core_nodes]))
min_slope.append(np.min(mg.at_node['topographic__steepest_slope'][mg.core_nodes]))
#calculate and save mean, max, min elevation
mean_elev.append(np.mean(mg.at_node['topographic__elevation'][mg.core_nodes]))
max_elev.append(np.max(mg.at_node['topographic__elevation'][mg.core_nodes]))
min_elev.append(np.min(mg.at_node['topographic__elevation'][mg.core_nodes]))
#SoilDepth
mean_SD.append(np.mean(mg.at_node['soil__depth'][mg.core_nodes]))
counter += 1
#print(counter)
#Run the output loop every outInt-times
if elapsed_time % outInt == 0:
print('Elapsed Time:' , elapsed_time,', writing output!')
##Create DEM
plt.figure()
imshow_grid(mg,'topographic__elevation',grid_units=['m','m'],var_name = 'Elevation',cmap='terrain')
plt.savefig('./DEM/DEM_'+str(int(elapsed_time/outInt)).zfill(zp)+'.png')
plt.close()
##Create Flow Accumulation Map
plt.figure()
imshow_grid(mg,fr.drainage_area,grid_units=['m','m'],var_name =
'Drainage Area',cmap='bone')
plt.savefig('./ACC/ACC_'+str(int(elapsed_time/outInt)).zfill(zp)+'.png')
plt.close()
##Create Slope - Area Map
plt.figure()
plt.loglog(mg.at_node['drainage_area'][np.where(mg.at_node['drainage_area'] > 0)],
mg.at_node['topographic__steepest_slope'][np.where(mg.at_node['drainage_area'] > 0)],
marker='.',linestyle='None')
plt.xlabel('Area')
plt.ylabel('Slope')
plt.savefig('./SA/SA_'+str(int(elapsed_time/outInt)).zfill(zp)+'.png')
plt.close()
##Create NetCDF Output
#write_netcdf('./NC/output{}'.format(elapsed_time)+'__'+str(int(elapsed_time/outInt)).zfill(zp)+'.nc',
# mg,format='NETCDF4')
##Create erosion_diffmaps
plt.figure()
imshow_grid(mg,erosionMatrix,grid_units=['m','m'],var_name='Erosion m/yr',cmap='jet',limits=[DHDTLowLim,DHDTHighLim])
plt.savefig('./DHDT/eMap_'+str(int(elapsed_time/outInt)).zfill(zp)+'.png')
plt.close()
#plt.figure()
#imshow_grid(mg,'soil__depth',grid_units=['m','m'],var_name=
# 'Elevation',cmap='terrain')
#plt.savefig('./SoilDepth/SD_'+str(int(elapsed_time/outInt)).zfill(zp)+'.png')
#plt.close()
elapsed_time += dt #update elapsed time
tE = time.time()
print()
print('End of main loop. It took {:.1f}s to get here.'.format(tE-t0))
##---------------------------------Plotting-------------------------------------#
## OUTPUT OF EROSION RATES AND DIFFMAPS (BETA! NEEDS TO GO INTO SEPERATE CLASS
## TO KEEP RUNFILE NEAT AND SLEEK
#E-t:
#fig, ax1 = plt.subplots(figsize = [11,7])
#ax2 = ax1.twinx()
#ax1.plot(timeVec, mean_hill_E, 'k', alpha = 0.6, linewidth = 2.5)
#ax1.plot(timeVec, mean_riv_E, 'k--', alpha = 0.6, linewidth = 2.5)
##ax1.set_ylim([upliftRate*0.9,upliftRate*1.1])
#ax1.plot(timeVec, mean_E, 'r', linewidth = 4.7)
#ax2.plot(timeVec,100*vegiTimeseries,'g', linewidth = 4)
##ax2.set_ylim([0,100])
#ax1.set_xlabel('years', fontsize = 22)
#ax1.set_ylabel('Erosion rate', color='k', fontsize = 22)
#ax2.set_ylabel('Vegetation cover [%]', color='k', fontsize = 22)
#ax1.legend(['Hillslope Erosion','Fluvial Erosion', 'Total Erosion'], loc = 3, fontsize = 18)
#ax2.legend(['Vegetation Cover'], loc = 4, fontsize = 18)
#plt.savefig('./VegiEros_dualy.png',dpi = 720)
#plt.close()
#Plot Vegi_erosion_rate
fig, axarr = plt.subplots(6, sharex = True, figsize = [11,14])
axarr[0].plot(timeVec, vegiTimeseries,'g', linewidth = 2.5)
axarr[0].set_title('Mean Surface Vegetation', fontsize = 12)
axarr[0].set_ylabel('Vegetation cover')
axarr[1].plot(timeVec, mean_elev, 'k', linewidth = 2.5)
axarr[1].plot(timeVec, max_elev, 'k--', linewidth = 2, alpha = 0.5)
axarr[1].plot(timeVec, min_elev, 'k--', linewidth = 2, alpha = 0.5)
axarr[1].set_title('Mean Elevation', fontsize = 12)
axarr[1].set_ylabel('Mean Elevation [m]')
#axarr[1].set_ylim([0,80])
axarr[2].plot(timeVec, np.degrees(np.arctan(mean_slope)), 'r', linewidth = 2.5)
axarr[2].plot(timeVec, np.degrees(np.arctan(max_slope)), 'r--', linewidth = 2.0, alpha = 0.5)
axarr[2].plot(timeVec, np.degrees(np.arctan(min_slope)), 'r--', linewidth = 2.0, alpha = 0.5)
#axarr[2].set_ylim([0,10])
axarr[2].set_title('Mean Slope', fontsize = 12)
axarr[2].set_ylabel('Mean Slope [deg]')
axarr[3].plot(timeVec,mean_dd, 'b', linewidth = 2.5)
axarr[3].set_title('Mean Drainage Density')
axarr[3].set_ylabel('Drainage Density')
axarr[4].plot(timeVec, mean_hill_E, 'g--', linewidth = 2.0, alpha = 0.5)
axarr[4].plot(timeVec, mean_riv_E, 'b--', linewidth = 2.0, alpha = 0.5)
axarr[4].plot(timeVec, mean_E, 'r--', linewidth = 2.2, alpha = 0.8)
axarr[4].legend(['Hillsl.', 'Rivers','Mean'])
axarr[4].set_title("Erosion rates")
axarr[4].set_ylabel('Erosion rate [m/yr]')
axarr[5].plot(timeVec, mean_SD, 'k', linewidth = 2.5)
axarr[5].set_title("Soil Depth")
axarr[5].set_ylabel("Soil Depth [m]")
axarr[5].set_xlabel("Model Years", fontsize = 12)
plt.savefig('./Multiplot_absolut.png',dpi = 720)
plt.close()
#Multiplot with normalized differentations
#vegi_timeseries_diff = np.diff(vegiTimeseries)/(np.max(np.diff(vegiTimeseries)))
#mean_elev_diff = np.diff(mean_elev)/(np.max(np.abs((np.diff(mean_elev)))))
#mean_slope_diff = np.diff(mean_slope)/(np.max(np.abs(np.diff(mean_slope))))
#mean_hill_E_diff = np.diff(mean_hill_E)/(np.max(np.abs(np.diff(mean_hill_E))))
#mean_riv_E_diff = np.diff(mean_riv_E)/(np.max(np.abs(np.diff(mean_riv_E))))
#mean_E_diff = np.diff(mean_E)/np.max(np.abs(np.diff(mean_E)))
#mean_dd_diff = np.diff(mean_dd)/np.max(np.abs(np.diff(mean_dd)))
#timeVec_diff = np.delete(timeVec, -1)
#fig, axarr = plt.subplots(5, sharex = True, figsize = [11,14])
#axarr[0].plot(timeVec_diff, vegi_timeseries_diff,'g', linewidth = 2.5)
#axarr[0].plot(timeVec,vegiTimeseries,'g--',alpha=0.5)
#axarr[0].set_title('Change In Mean Surface Vegetation', fontsize = 12)
#axarr[0].set_ylabel('Vegetation cover change')
#axarr[1].plot(timeVec_diff, mean_elev_diff, 'k', linewidth = 2.5)
#axarr[1].set_title('Change In Mean Elevation', fontsize = 12)
#axarr[1].set_ylabel('dh/dt')
#axarr[2].plot(timeVec_diff, mean_slope_diff, 'r', linewidth = 2.5)
#axarr[2].set_title('Change In Mean Slope', fontsize = 12)
#axarr[2].set_ylabel('dS/dt')
#axarr[3].plot(timeVec_diff,mean_dd_diff, 'b', linewidth = 2.5)
#axarr[3].set_title('Change In Mean Drainage Density')
#axarr[3].set_ylabel('d(dd)/dt')
##axarr[4].plot(timeVec_diff, mean_hill_E_diff, 'g--', linewidth = 2.0, alpha = 0.5)
##axarr[4].plot(timeVec_diff, mean_riv_E_diff, 'b--', linewidth = 2.0, alpha = 0.5)
#axarr[4].plot(timeVec_diff, mean_E_diff, 'r--', linewidth = 2.2, alpha = 0.8)
##axarr[4].legend(['Hillsl.', 'Rivers','Mean'])
#axarr[4].set_title("Change In Erosion Rates")
#axarr[4].set_ylabel('dE/dt')
#axarr[4].set_xlabel('Model Years', fontsize = 12)
#plt.savefig('./Multiplot_diff.png',dpi = 720)
#plt.close()
#Save the most useful output arrays as CSV file for later plotting
np.savetxt('./CSVOutput/MeanSlope.csv', mean_slope)
np.savetxt('./CSVOutput/MaxSlope.csv', max_slope)
np.savetxt('./CSVOutput/MeanElev.csv', mean_elev)
np.savetxt('./CSVOutput/MaxElev.csv', max_elev)
np.savetxt('./CSVOutput/MeanRiverErosion.csv', mean_riv_E)
np.savetxt('./CSVOutput/MeanHillslopeErosion.csv', mean_hill_E)
np.savetxt('./CSVOutput/MeanErosion.csv', mean_E)
np.savetxt('./CSVOutput/VegetationDensity.csv', vegiTimeseries)
np.savetxt('./CSVOutput/Timeseries.csv', timeVec)
np.savetxt('./CSVOutput/Vegi_bugfix.csv', vegi_P_mean)
np.savetxt('./CSVOutput/MeanSoilthick.csv', mean_SD)
#debug plot of the vegetation forcing record
plt.plot(vegi_P_mean)
plt.savefig('./vegi_P_bugfix.png', dpi = 720)
plt.close()
print("FINALLY! TADA! IT IS DONE! LOOK AT ALL THE OUTPUT I MADE!!!!")
| mit |
sourabhdattawad/BuildingMachineLearningSystemsWithPython | ch07/boston1.py | 22 | 1147 | # This code is supporting material for the book
# Building Machine Learning Systems with Python
# by Willi Richert and Luis Pedro Coelho
# published by PACKT Publishing
#
# It is made available under the MIT License
# This script shows an example of simple (ordinary) linear regression
# The first edition of the book used NumPy functions only for this operation. See
# the file boston1numpy.py for that version.
import numpy as np
from sklearn.datasets import load_boston
from sklearn.linear_model import LinearRegression
from matplotlib import pyplot as plt
boston = load_boston()
x = boston.data
y = boston.target
# Fitting a model is trivial: call the ``fit`` method in LinearRegression:
lr = LinearRegression()
lr.fit(x, y)
# The instance member `residues_` contains the sum of the squared residues
rmse = np.sqrt(lr.residues_/len(x))
print('RMSE: {}'.format(rmse))
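# `residues_` exists only in older scikit-learn releases; an equivalent,
# version-independent way to get the same quantity:
rmse_check = np.sqrt(np.mean((lr.predict(x) - y) ** 2))
print('RMSE (recomputed): {}'.format(rmse_check))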
fig, ax = plt.subplots()
# Plot a diagonal (for reference):
ax.plot([0, 50], [0, 50], '-', color=(.9,.3,.3), lw=4)
# Plot the prediction versus real:
ax.scatter(lr.predict(x), boston.target)
ax.set_xlabel('predicted')
ax.set_ylabel('real')
fig.savefig('Figure_07_08.png')
| mit |
kushalbhola/MyStuff | Practice/PythonApplication/env/Lib/site-packages/pandas/tests/frame/test_explode.py | 2 | 3212 | import numpy as np
import pytest
import pandas as pd
from pandas.util import testing as tm
def test_error():
df = pd.DataFrame(
{"A": pd.Series([[0, 1, 2], np.nan, [], (3, 4)], index=list("abcd")), "B": 1}
)
with pytest.raises(ValueError):
df.explode(list("AA"))
df.columns = list("AA")
with pytest.raises(ValueError):
df.explode("A")
def test_basic():
df = pd.DataFrame(
{"A": pd.Series([[0, 1, 2], np.nan, [], (3, 4)], index=list("abcd")), "B": 1}
)
result = df.explode("A")
expected = pd.DataFrame(
{
"A": pd.Series(
[0, 1, 2, np.nan, np.nan, 3, 4], index=list("aaabcdd"), dtype=object
),
"B": 1,
}
)
tm.assert_frame_equal(result, expected)
def test_multi_index_rows():
df = pd.DataFrame(
{"A": np.array([[0, 1, 2], np.nan, [], (3, 4)], dtype=object), "B": 1},
index=pd.MultiIndex.from_tuples([("a", 1), ("a", 2), ("b", 1), ("b", 2)]),
)
result = df.explode("A")
expected = pd.DataFrame(
{
"A": pd.Series(
[0, 1, 2, np.nan, np.nan, 3, 4],
index=pd.MultiIndex.from_tuples(
[
("a", 1),
("a", 1),
("a", 1),
("a", 2),
("b", 1),
("b", 2),
("b", 2),
]
),
dtype=object,
),
"B": 1,
}
)
tm.assert_frame_equal(result, expected)
def test_multi_index_columns():
df = pd.DataFrame(
{("A", 1): np.array([[0, 1, 2], np.nan, [], (3, 4)], dtype=object), ("A", 2): 1}
)
result = df.explode(("A", 1))
expected = pd.DataFrame(
{
("A", 1): pd.Series(
[0, 1, 2, np.nan, np.nan, 3, 4],
index=pd.Index([0, 0, 0, 1, 2, 3, 3]),
dtype=object,
),
("A", 2): 1,
}
)
tm.assert_frame_equal(result, expected)
def test_usecase():
# explode a single column
# gh-10511
df = pd.DataFrame(
[[11, range(5), 10], [22, range(3), 20]], columns=list("ABC")
).set_index("C")
result = df.explode("B")
expected = pd.DataFrame(
{
"A": [11, 11, 11, 11, 11, 22, 22, 22],
"B": np.array([0, 1, 2, 3, 4, 0, 1, 2], dtype=object),
"C": [10, 10, 10, 10, 10, 20, 20, 20],
},
columns=list("ABC"),
).set_index("C")
tm.assert_frame_equal(result, expected)
# gh-8517
df = pd.DataFrame(
[["2014-01-01", "Alice", "A B"], ["2014-01-02", "Bob", "C D"]],
columns=["dt", "name", "text"],
)
result = df.assign(text=df.text.str.split(" ")).explode("text")
expected = pd.DataFrame(
[
["2014-01-01", "Alice", "A"],
["2014-01-01", "Alice", "B"],
["2014-01-02", "Bob", "C"],
["2014-01-02", "Bob", "D"],
],
columns=["dt", "name", "text"],
index=[0, 0, 1, 1],
)
tm.assert_frame_equal(result, expected)
| apache-2.0 |
OshynSong/scikit-learn | sklearn/manifold/tests/test_isomap.py | 226 | 3941 | from itertools import product
import numpy as np
from numpy.testing import assert_almost_equal, assert_array_almost_equal
from sklearn import datasets
from sklearn import manifold
from sklearn import neighbors
from sklearn import pipeline
from sklearn import preprocessing
from sklearn.utils.testing import assert_less
eigen_solvers = ['auto', 'dense', 'arpack']
path_methods = ['auto', 'FW', 'D']
def test_isomap_simple_grid():
# Isomap should preserve distances when all neighbors are used
N_per_side = 5
Npts = N_per_side ** 2
n_neighbors = Npts - 1
# grid of equidistant points in 2D, n_components = n_dim
X = np.array(list(product(range(N_per_side), repeat=2)))
# distances from each point to all others
G = neighbors.kneighbors_graph(X, n_neighbors,
mode='distance').toarray()
for eigen_solver in eigen_solvers:
for path_method in path_methods:
clf = manifold.Isomap(n_neighbors=n_neighbors, n_components=2,
eigen_solver=eigen_solver,
path_method=path_method)
clf.fit(X)
G_iso = neighbors.kneighbors_graph(clf.embedding_,
n_neighbors,
mode='distance').toarray()
assert_array_almost_equal(G, G_iso)
def test_isomap_reconstruction_error():
# Same setup as in test_isomap_simple_grid, with an added dimension
N_per_side = 5
Npts = N_per_side ** 2
n_neighbors = Npts - 1
# grid of equidistant points in 2D, n_components = n_dim
X = np.array(list(product(range(N_per_side), repeat=2)))
# add noise in a third dimension
rng = np.random.RandomState(0)
noise = 0.1 * rng.randn(Npts, 1)
X = np.concatenate((X, noise), 1)
# compute input kernel
G = neighbors.kneighbors_graph(X, n_neighbors,
mode='distance').toarray()
centerer = preprocessing.KernelCenterer()
K = centerer.fit_transform(-0.5 * G ** 2)
for eigen_solver in eigen_solvers:
for path_method in path_methods:
clf = manifold.Isomap(n_neighbors=n_neighbors, n_components=2,
eigen_solver=eigen_solver,
path_method=path_method)
clf.fit(X)
# compute output kernel
G_iso = neighbors.kneighbors_graph(clf.embedding_,
n_neighbors,
mode='distance').toarray()
K_iso = centerer.fit_transform(-0.5 * G_iso ** 2)
# make sure error agrees
reconstruction_error = np.linalg.norm(K - K_iso) / Npts
assert_almost_equal(reconstruction_error,
clf.reconstruction_error())
def test_transform():
n_samples = 200
n_components = 10
noise_scale = 0.01
# Create S-curve dataset
X, y = datasets.samples_generator.make_s_curve(n_samples, random_state=0)
# Compute isomap embedding
iso = manifold.Isomap(n_components, 2)
X_iso = iso.fit_transform(X)
# Re-embed a noisy version of the points
rng = np.random.RandomState(0)
noise = noise_scale * rng.randn(*X.shape)
X_iso2 = iso.transform(X + noise)
# Make sure the rms error on re-embedding is comparable to noise_scale
assert_less(np.sqrt(np.mean((X_iso - X_iso2) ** 2)), 2 * noise_scale)
def test_pipeline():
# check that Isomap works fine as a transformer in a Pipeline
# only checks that no error is raised.
# TODO check that it actually does something useful
X, y = datasets.make_blobs(random_state=0)
clf = pipeline.Pipeline(
[('isomap', manifold.Isomap()),
('clf', neighbors.KNeighborsClassifier())])
clf.fit(X, y)
assert_less(.9, clf.score(X, y))
| bsd-3-clause |
xavierwu/scikit-learn | examples/gaussian_process/plot_gp_probabilistic_classification_after_regression.py | 252 | 3490 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
==============================================================================
Gaussian Processes classification example: exploiting the probabilistic output
==============================================================================
A two-dimensional regression exercise with a post-processing allowing for
probabilistic classification thanks to the Gaussian property of the prediction.
The figure illustrates the probability that the prediction is negative with
respect to the remaining uncertainty in the prediction. The red and blue lines
corresponds to the 95% confidence interval on the prediction of the zero level
set.
"""
print(__doc__)
# Author: Vincent Dubourg <vincent.dubourg@gmail.com>
# Licence: BSD 3 clause
import numpy as np
from scipy import stats
from sklearn.gaussian_process import GaussianProcess
from matplotlib import pyplot as pl
from matplotlib import cm
# Standard normal distribution functions
phi = stats.distributions.norm().pdf
PHI = stats.distributions.norm().cdf
PHIinv = stats.distributions.norm().ppf
# A few constants
lim = 8
def g(x):
"""The function to predict (classification will then consist in predicting
whether g(x) <= 0 or not)"""
return 5. - x[:, 1] - .5 * x[:, 0] ** 2.
# Design of experiments
X = np.array([[-4.61611719, -6.00099547],
[4.10469096, 5.32782448],
[0.00000000, -0.50000000],
[-6.17289014, -4.6984743],
[1.3109306, -6.93271427],
[-5.03823144, 3.10584743],
[-2.87600388, 6.74310541],
[5.21301203, 4.26386883]])
# Observations
y = g(X)
# Instantiate and fit Gaussian Process Model
gp = GaussianProcess(theta0=5e-1)
# Don't perform MLE or you'll get a perfect prediction for this simple example!
gp.fit(X, y)
# Evaluate real function, the prediction and its MSE on a grid
res = 50
x1, x2 = np.meshgrid(np.linspace(- lim, lim, res),
np.linspace(- lim, lim, res))
xx = np.vstack([x1.reshape(x1.size), x2.reshape(x2.size)]).T
y_true = g(xx)
y_pred, MSE = gp.predict(xx, eval_MSE=True)
sigma = np.sqrt(MSE)
y_true = y_true.reshape((res, res))
y_pred = y_pred.reshape((res, res))
sigma = sigma.reshape((res, res))
k = PHIinv(.975)
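# k is the two-sided 95% quantile of the standard normal (about 1.96)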
# Plot the probabilistic classification iso-values using the Gaussian property
# of the prediction
fig = pl.figure(1)
ax = fig.add_subplot(111)
ax.axes.set_aspect('equal')
pl.xticks([])
pl.yticks([])
ax.set_xticklabels([])
ax.set_yticklabels([])
pl.xlabel('$x_1$')
pl.ylabel('$x_2$')
cax = pl.imshow(np.flipud(PHI(- y_pred / sigma)), cmap=cm.gray_r, alpha=0.8,
extent=(- lim, lim, - lim, lim))
norm = pl.matplotlib.colors.Normalize(vmin=0., vmax=0.9)
cb = pl.colorbar(cax, ticks=[0., 0.2, 0.4, 0.6, 0.8, 1.], norm=norm)
cb.set_label('${\\rm \mathbb{P}}\left[\widehat{G}(\mathbf{x}) \leq 0\\right]$')
pl.plot(X[y <= 0, 0], X[y <= 0, 1], 'r.', markersize=12)
pl.plot(X[y > 0, 0], X[y > 0, 1], 'b.', markersize=12)
cs = pl.contour(x1, x2, y_true, [0.], colors='k', linestyles='dashdot')
cs = pl.contour(x1, x2, PHI(- y_pred / sigma), [0.025], colors='b',
linestyles='solid')
pl.clabel(cs, fontsize=11)
cs = pl.contour(x1, x2, PHI(- y_pred / sigma), [0.5], colors='k',
linestyles='dashed')
pl.clabel(cs, fontsize=11)
cs = pl.contour(x1, x2, PHI(- y_pred / sigma), [0.975], colors='r',
linestyles='solid')
pl.clabel(cs, fontsize=11)
pl.show()
| bsd-3-clause |
loli/semisupervisedforests | examples/plot_digits_pipe.py | 250 | 1809 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=========================================================
Pipelining: chaining a PCA and a logistic regression
=========================================================
The PCA does an unsupervised dimensionality reduction, while the logistic
regression does the prediction.
We use a GridSearchCV to set the dimensionality of the PCA
"""
print(__doc__)
# Code source: Gaël Varoquaux
# Modified for documentation by Jaques Grobler
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn import linear_model, decomposition, datasets
from sklearn.pipeline import Pipeline
from sklearn.grid_search import GridSearchCV
logistic = linear_model.LogisticRegression()
pca = decomposition.PCA()
pipe = Pipeline(steps=[('pca', pca), ('logistic', logistic)])
digits = datasets.load_digits()
X_digits = digits.data
y_digits = digits.target
###############################################################################
# Plot the PCA spectrum
pca.fit(X_digits)
plt.figure(1, figsize=(4, 3))
plt.clf()
plt.axes([.2, .2, .7, .7])
plt.plot(pca.explained_variance_, linewidth=2)
plt.axis('tight')
plt.xlabel('n_components')
plt.ylabel('explained_variance_')
###############################################################################
# Prediction
n_components = [20, 40, 64]
Cs = np.logspace(-4, 4, 3)
#Parameters of pipelines can be set using ‘__’ separated parameter names:
estimator = GridSearchCV(pipe,
dict(pca__n_components=n_components,
logistic__C=Cs))
estimator.fit(X_digits, y_digits)
plt.axvline(estimator.best_estimator_.named_steps['pca'].n_components,
linestyle=':', label='n_components chosen')
plt.legend(prop=dict(size=12))
plt.show()
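# Follow-up sketch: the winning parameter combination can be read back from
# the fitted search object (standard GridSearchCV attribute):
print(estimator.best_params_)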
| bsd-3-clause |