repo_name stringlengths 6 67 | path stringlengths 5 185 | copies stringlengths 1 3 | size stringlengths 4 6 | content stringlengths 1.02k 962k | license stringclasses 15 values |
|---|---|---|---|---|---|
deepinsight/Deformable-ConvNets | deeplab/runs_CAIScene/infer3.py | 1 | 9886 | import os
os.environ['MXNET_CUDNN_AUTOTUNE_DEFAULT'] = '0'
import sys
import argparse
import numpy as np
import cv2
import math
import datetime
import random
import json
import pandas as pd
#import multiprocessing
from Queue import Queue
from threading import Thread
import mxnet as mx
import mxnet.ndarray as nd
from easydict import EasyDict as edict
# Command-line configuration.  --model is a '|'-separated list of
# "checkpoint_prefix,epoch,crop_size" specs; the softmax outputs of all
# listed models are summed into one score matrix.
parser = argparse.ArgumentParser(description="",
                                 formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('--lst', type=str, default='val.lst',
                    help='')
parser.add_argument('--val-root-path', type=str, default='/data1/deepinsight/aichallenger/scene/ai_challenger_scene_validation_20170908/scene_validation_images_20170908')
parser.add_argument('--test-root-path', type=str, default='/data1/deepinsight/aichallenger/scene/ai_challenger_scene_test_a_20170922/scene_test_a_images_20170922')
parser.add_argument('--gpu', type=int, default=0,
                    help='')
parser.add_argument('--gpus', type=str, default='0,1,2',
                    help='')
parser.add_argument('--num-classes', type=int, default=80,
                    help='')
parser.add_argument('--batch-size', type=int, default=120,
                    help='')
# mode 0 = evaluate against labels in --lst; mode > 0 = write a test-set
# submission (json + hdf) into --output-dir.
parser.add_argument('--mode', type=int, default=0,
                    help='')
# "--size crop,resize": crop size, shorter-side resize target.
parser.add_argument('--size', type=str, default='448,504')
#parser.add_argument('--size', type=str, default='224,256')
# Negative step => abs(step) random-crop passes over the data; 0 => a
# single deterministic center-crop pass.
parser.add_argument('--step', type=int, default=-32,
                    help='if negative, use random crops')
#parser.add_argument('--model', type=str, default='./model/ft448deformsqex0.0001_9682,3|./model/sft320deformsqex_9692,1')
#parser.add_argument('--model', type=str, default='./model/sft320deformsqex_9692,1')
#parser.add_argument('--model', type=str, default='./model/ft224deformsqex0003_9587,20')
#parser.add_argument('--model', type=str, default='./model/a1,8,14')
#parser.add_argument('--model', type=str, default='./model/a1_2,2,6')
#parser.add_argument('--model', type=str, default='./model/a1_6,1')
#parser.add_argument('--model', type=str, default='./model/a1_6,1|./model/a1_4,3|./model/a1_5,6|./model/a1_7,2')
#parser.add_argument('--model', type=str, default='./model/a1_6,6|./model/a1_4,6|./model/a1_5,6|./model/a1_7,6')
#parser.add_argument('--model', type=str, default='./model/sft448from32097nude00003_9740,11,448')
#parser.add_argument('--model', type=str, default='./model/ft224nude0003_97,50,224|./model/sft448from32097nude00003_9740,11,448|./model/sft320nude00003_97,19,320')
#parser.add_argument('--model', type=str, default='./model/ft224nude0003_97,50,224|./model/sft448from32097nude00003_9740,11,448')
parser.add_argument('--model', type=str, default='ft224nude0003_9685,27,224|sft448from32097nude00003_9740,11,448') # #ft224nude0003_97,50,224|
#parser.add_argument('--model', type=str, default='sft320nude00003_9736,5,320|sft640from448974nude00002_973,12,640')
parser.add_argument('--output-dir', type=str, default='',
                    help='')
args = parser.parse_args()
def prt(msg):
    """Print ``msg`` with a wall-clock timestamp prefix and flush stdout,
    so progress messages appear promptly even when output is piped."""
    now = datetime.datetime.now()
    print("%s] %s" % (now.strftime("%Y-%m-%d %H:%M:%S"), msg))
    sys.stdout.flush()
def ch_dev(arg_params, aux_params, ctx):
    """Copy every arg/aux parameter NDArray onto device ``ctx``.

    Returns a pair of new dicts (args, auxs); the inputs are not mutated.
    """
    moved_args = {key: val.as_in_context(ctx) for key, val in arg_params.items()}
    moved_auxs = {key: val.as_in_context(ctx) for key, val in aux_params.items()}
    return moved_args, moved_auxs
def image_preprocess(img_full_path):
    """Load one image file and return a randomly resized, cropped and
    possibly flipped float32 array in CHW layout, side = crop size
    (first field of --size).

    NOTE(review): this helper is not called by the main loop below
    (``image_preprocess2`` is used instead); kept for reference.
    """
    _size = args.size.split(",")
    img_sz = int(_size[1])   # shorter-side resize target
    crop_sz = int(_size[0])  # final square crop size
    #print(img_full_path)
    img = cv2.cvtColor(cv2.imread(img_full_path), cv2.COLOR_BGR2RGB)
    img = np.float32(img)
    ori_shape = img.shape
    assert img.shape[2]==3
    rows, cols = img.shape[:2]
    # Choose a random resize target in [img_sz, min(short_side, 2*crop_sz)).
    _high = min(rows, cols)
    _high = min(_high, crop_sz*2)
    _high = max(_high, img_sz)
    _img_sz = img_sz
    if _high>img_sz:
        _img_sz = np.random.randint(low=img_sz, high=_high)
    # Resize so the shorter side equals _img_sz, preserving aspect ratio.
    # (Python-2 integer division is relied on here for pixel sizes.)
    if cols < rows:
        resize_width = _img_sz
        resize_height = resize_width * rows / cols;
    else:
        resize_height = _img_sz
        resize_width = resize_height * cols / rows;
    img = cv2.resize(img, (resize_width, resize_height), interpolation=cv2.INTER_CUBIC)
    #print(_high,ori_shape,img.shape)
    h, w, _ = img.shape
    #x0 = int((w - crop_sz) / 2)
    #y0 = int((h - crop_sz) / 2)
    # Random crop biased toward the center: the crop origin is kept within
    # the middle half of the available slack on each axis.
    x0_side = int((w-crop_sz)/4)
    y0_side = int((h-crop_sz)/4)
    x0_max = w-crop_sz
    y0_max = h-crop_sz
    x0 = np.random.randint(low=x0_side, high=x0_max-x0_side)
    y0 = np.random.randint(low=y0_side, high=y0_max-y0_side)
    img = img[y0:y0+crop_sz, x0:x0+crop_sz, :]
    #lr flip
    # 50% chance of a left-right flip, applied per channel.
    if random.randint(0,1)==1:
        for j in xrange(3):
            img[:,:,j] = np.fliplr(img[:,:,j])
    img = np.swapaxes(img, 0, 2)
    img = np.swapaxes(img, 1, 2) # change to CHW
    return img
def read_image(path):
    """Decode an image file and return it as a float32 RGB (HWC) array."""
    bgr = cv2.imread(path)
    rgb = cv2.cvtColor(bgr, cv2.COLOR_BGR2RGB)
    return np.float32(rgb)
def image_preprocess2(img, crop_sz):
    """Augment an already-decoded RGB image: resize the shorter side to
    crop_sz + delta, crop to ``crop_sz`` x ``crop_sz``, optionally flip,
    and return a CHW numpy array.

    With --step == 0 the transform is a deterministic center crop
    (single-pass mode); otherwise crops are randomized per pass.
    """
    nd_img = nd.array(img)
    img_sz = crop_sz+random.randint(8,32)
    if args.step==0:
        img_sz = crop_sz+32
    if img_sz>0:
        nd_img = mx.image.resize_short(nd_img, img_sz)
    #nd_img = mx.image.random_size_crop(nd_img, (crop_sz, crop_sz), 0.08, (3.0/4, 4.0/3))[0]
    if args.step==0:
        nd_img = mx.image.center_crop(nd_img, (crop_sz, crop_sz))[0]
    else:
        # Random crop to an intermediate size, then center-crop to target.
        nd_img = mx.image.random_crop(nd_img, (int((img_sz+crop_sz)/2),int((img_sz+crop_sz)/2)) )[0]
        nd_img = mx.image.center_crop(nd_img, (crop_sz, crop_sz))[0]
        # NOTE(review): source indentation was lost; the flip is assumed to
        # belong to the random branch so the step==0 path stays
        # deterministic -- confirm against upstream.
        if random.random()<0.5:
            nd_img = nd.flip(nd_img, axis=1)
    img = nd_img.asnumpy()
    img = np.swapaxes(img, 0, 2)
    img = np.swapaxes(img, 1, 2) # change to CHW
    #print(img.shape)
    return img
def val(X, imgs):
    """Report top-1 / top-3 accuracy of accumulated scores against labels.

    X: (num_images, num_classes) score matrix.
    imgs: sequence of (index, gt_label, path) tuples aligned with X's rows.

    Prints both accuracies (top3 first, matching the original output) and
    additionally returns (top1_acc, top3_acc) so callers and tests can use
    the values; existing call sites that ignore the return are unaffected.
    Uses range() instead of py2-only xrange() -- identical behavior.
    """
    top1 = 0
    top3 = 0
    for ii in range(X.shape[0]):
        score = X[ii]
        gt_label = imgs[ii][1]
        # Class indices sorted by descending score.
        sort_index = np.argsort(score)[::-1]
        for k in range(3):
            if sort_index[k] == gt_label:
                top3 += 1
                if k == 0:
                    top1 += 1
    top1_acc = float(top1) / X.shape[0]
    top3_acc = float(top3) / X.shape[0]
    print('top3', top3_acc)
    print('top1', top1_acc)
    return (top1_acc, top3_acc)
# Pick the image root: mode > 0 reads the test set, otherwise validation.
if args.mode>0:
    args.root_path = args.test_root_path
else:
    args.root_path = args.val_root_path
args.crop_size = int(args.size.split(',')[0])
args.resize = int(args.size.split(',')[1])
#ctxs = [mx.gpu(int(i)) for i in args.gpus.split(',')]
# Load every checkpoint listed in --model ("prefix,epoch,crop_size" specs
# joined by '|'); all run on the single GPU given by --gpu.
nets = []
gpuid = args.gpu
ctx = mx.gpu(gpuid)
for model_str in args.model.split('|'):
    vec = model_str.split(',')
    assert len(vec)>1
    prefix = vec[0]
    epoch = int(vec[1])
    crop_sz = int(vec[2])
    print('loading',prefix, epoch)
    net = edict()
    net.crop_sz = crop_sz
    net.ctx = ctx
    net.sym, net.arg_params, net.aux_params = mx.model.load_checkpoint(prefix, epoch)
    net.arg_params, net.aux_params = ch_dev(net.arg_params, net.aux_params, net.ctx)
    nets.append(net)
    #gpuid+=1
# Read the image list: each tab-separated line is "index\tlabel\trelpath";
# imgs holds (sequential_index, int_label, absolute_path).
imgs = []
i = 0
for line in open(args.lst, 'r'):
    vec = line.strip().split("\t")
    imgs.append( (i, int(vec[1]), os.path.join(args.root_path, vec[2])) )
    i+=1
#models = []
#for net in nets:
#  model = mx.mod.Module(
#      context       = ctxs,
#      symbol        = net.sym,
#  )
#  hw = int(args.size.split(',')[0])
#  model.bind(data_shapes=[('data', (args.batch_size, 3, hw, hw))], label_shapes=[('softmax_label',(args.batch_size,))], for_training=False, grad_req="null")
#  model.set_params(net.arg_params, net.aux_params)
#  models.append(model)
# Accumulated per-class scores per image, summed over models and crop passes.
X = np.zeros( (len(imgs), args.num_classes) , dtype=np.float32 )
num_batches = int( math.ceil(len(imgs) / args.batch_size) )
print("num_batches %d" % num_batches)
# Negative --step => that many random-crop passes; 0 => one center-crop pass.
nrof_loops = args.step*-1
if args.step==0:
    nrof_loops = 1
for loop in xrange(nrof_loops):
    print('start loop', loop)
    batch_head = 0
    batch_num = 0
    while batch_head<len(imgs):
        prt("processing batch %d" % batch_num)
        current_batch_sz = min(args.batch_size, len(imgs)-batch_head)
        #print batch_head
        # Decode this batch's images once; each net re-crops them below.
        ids = []
        datas = []
        for index in range(batch_head, batch_head+current_batch_sz):
            img_path = imgs[index][2]
            data = read_image(img_path)
            datas.append(data)
            ids.append(imgs[index][0])
        #for model in models:
        for net in nets:
            input_blob = np.zeros((current_batch_sz,3,net.crop_sz,net.crop_sz))
            for idx in xrange(len(datas)):
                data = datas[idx]
                img = image_preprocess2(data, net.crop_sz)
                input_blob[idx,:,:,:] = img
            # Feed the batch by writing directly into arg_params, then bind
            # a fresh executor and run a forward-only pass.
            net.arg_params["data"] = mx.nd.array(input_blob, net.ctx)
            net.arg_params["softmax_label"] = mx.nd.empty((current_batch_sz,), net.ctx)
            exe = net.sym.bind(net.ctx, net.arg_params ,args_grad=None, grad_req="null", aux_states=net.aux_params)
            exe.forward(is_train=False)
            net_out = exe.outputs[0].asnumpy()
            #_data = mx.nd.array(input_blob)
            #_label = nd.ones( (current_batch_sz,) )
            #db = mx.io.DataBatch(data=(_data,), label=(_label,))
            #model.forward(db, is_train=False)
            #net_out = model.get_outputs()[0].asnumpy()
            #print(net_out.shape)
            # Accumulate softmax scores into the global matrix by image id.
            for bz in xrange(current_batch_sz):
                probs = net_out[bz,:]
                score = np.squeeze(probs)
                #print(score.shape)
                #print(score)
                im_id = ids[bz]
                X[im_id,:] += score
        batch_head += current_batch_sz
        batch_num += 1
    # NOTE(review): indentation reconstructed -- this per-pass evaluation is
    # assumed to sit inside the crop loop; in test mode it compares against
    # whatever label column the lst file carries.
    val(X, imgs)
top1 = 0
top5 = 0
if args.mode==0:
    # Validation: final accuracy report over all accumulated passes.
    val(X, imgs)
else:
    # Test mode: write submission json (top-3 label ids per image) and the
    # loop-averaged score matrix as HDF.
    if not os.path.exists(args.output_dir):
        os.makedirs(args.output_dir)
    with open(os.path.join(args.output_dir,'result.json'), 'w') as opfile:
        json_data = []
        for ii in range(X.shape[0]):
            score = X[ii]
            #print("%d sum %f" % (ii, _sum))
            sort_index = np.argsort(score)[::-1]
            top_k = list(sort_index[0:3])
            _data = {'image_id' : imgs[ii][2].split('/')[-1], 'label_id': top_k}
            json_data.append(_data)
        opfile.write(json.dumps(json_data))
    out_filename = os.path.join(args.output_dir, 'result.hdf')
    print(out_filename)
    if os.path.exists(out_filename):
        print("exists, delete first..")
        os.remove(out_filename)
    # Average over passes so each row is a mean softmax score vector.
    _X = X/float(nrof_loops)
    print("_X row sum %f" % np.sum(_X[0]))
    df = pd.DataFrame(_X)
    df.to_hdf(out_filename, "result")
| apache-2.0 |
wanggang3333/scikit-learn | examples/decomposition/plot_pca_3d.py | 354 | 2432 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=========================================================
Principal components analysis (PCA)
=========================================================
These figures aid in illustrating how a point cloud
can be very flat in one direction--which is where PCA
comes in to choose a direction that is not flat.
"""
print(__doc__)
# Authors: Gael Varoquaux
# Jaques Grobler
# Kevin Hughes
# License: BSD 3 clause
from sklearn.decomposition import PCA
from mpl_toolkits.mplot3d import Axes3D
import numpy as np
import matplotlib.pyplot as plt
from scipy import stats
###############################################################################
# Create the data
e = np.exp(1)
np.random.seed(4)


def pdf(x):
    """Mixture density used to weight the point cloud: the average of two
    zero-mean Gaussians with scales 0.25/e and 4/e."""
    narrow = stats.norm(scale=0.25 / e).pdf(x)
    wide = stats.norm(scale=4 / e).pdf(x)
    return 0.5 * (narrow + wide)
# Sample a 3-D point cloud that is thin along z, plus a per-point density
# (product of the mixture pdf along each axis) used for coloring.
y = np.random.normal(scale=0.5, size=(30000))
x = np.random.normal(scale=0.5, size=(30000))
z = np.random.normal(scale=0.1, size=len(x))

density = pdf(x) * pdf(y)
pdf_z = pdf(5 * z)
density *= pdf_z

# Correlated coordinates: the cloud is flat (z small) but tilted in a, b.
a = x + y
b = 2 * y
c = a - b + z

# Normalize a and b to comparable variance.
norm = np.sqrt(a.var() + b.var())
a /= norm
b /= norm
###############################################################################
# Plot the figures
def plot_figs(fig_num, elev, azim):
    """Render the point cloud and its principal-component plane in 3-D,
    viewed from the given elevation/azimuth camera angles."""
    fig = plt.figure(fig_num, figsize=(4, 3))
    plt.clf()
    ax = Axes3D(fig, rect=[0, 0, .95, 1], elev=elev, azim=azim)

    # Subsample every 10th point to keep the scatter light.
    ax.scatter(a[::10], b[::10], c[::10], c=density[::10], marker='+', alpha=.4)
    Y = np.c_[a, b, c]

    # Using SciPy's SVD, this would be:
    # _, pca_score, V = scipy.linalg.svd(Y, full_matrices=False)
    pca = PCA(n_components=3)
    pca.fit(Y)
    pca_score = pca.explained_variance_ratio_
    V = pca.components_

    # NOTE(review): the first assignment is immediately overwritten below;
    # only the fixed-length (3x) axes are actually plotted.
    x_pca_axis, y_pca_axis, z_pca_axis = V.T * pca_score / pca_score.min()

    x_pca_axis, y_pca_axis, z_pca_axis = 3 * V.T
    # Build a 2x2 quad spanning the first two principal axes.
    x_pca_plane = np.r_[x_pca_axis[:2], - x_pca_axis[1::-1]]
    y_pca_plane = np.r_[y_pca_axis[:2], - y_pca_axis[1::-1]]
    z_pca_plane = np.r_[z_pca_axis[:2], - z_pca_axis[1::-1]]
    x_pca_plane.shape = (2, 2)
    y_pca_plane.shape = (2, 2)
    z_pca_plane.shape = (2, 2)
    ax.plot_surface(x_pca_plane, y_pca_plane, z_pca_plane)
    ax.w_xaxis.set_ticklabels([])
    ax.w_yaxis.set_ticklabels([])
    ax.w_zaxis.set_ticklabels([])
# Two views of the same cloud: from below (plane seen edge-on, looks flat)
# and from above (cloud looks spread out).
elev = -40
azim = -80
plot_figs(1, elev, azim)

elev = 30
azim = 20
plot_figs(2, elev, azim)

plt.show()
| bsd-3-clause |
hainm/scikit-learn | sklearn/metrics/tests/test_score_objects.py | 138 | 14048 | import pickle
import numpy as np
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_raises_regexp
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import ignore_warnings
from sklearn.utils.testing import assert_not_equal
from sklearn.base import BaseEstimator
from sklearn.metrics import (f1_score, r2_score, roc_auc_score, fbeta_score,
log_loss, precision_score, recall_score)
from sklearn.metrics.cluster import adjusted_rand_score
from sklearn.metrics.scorer import (check_scoring, _PredictScorer,
_passthrough_scorer)
from sklearn.metrics import make_scorer, get_scorer, SCORERS
from sklearn.svm import LinearSVC
from sklearn.pipeline import make_pipeline
from sklearn.cluster import KMeans
from sklearn.dummy import DummyRegressor
from sklearn.linear_model import Ridge, LogisticRegression
from sklearn.tree import DecisionTreeClassifier, DecisionTreeRegressor
from sklearn.datasets import make_blobs
from sklearn.datasets import make_classification
from sklearn.datasets import make_multilabel_classification
from sklearn.datasets import load_diabetes
from sklearn.cross_validation import train_test_split, cross_val_score
from sklearn.grid_search import GridSearchCV
from sklearn.multiclass import OneVsRestClassifier
# Scorer names grouped by the kind of estimator/target each requires;
# test_scorer_sample_weight uses these to pick a fitted estimator per scorer.
REGRESSION_SCORERS = ['r2', 'mean_absolute_error', 'mean_squared_error',
                      'median_absolute_error']

CLF_SCORERS = ['accuracy', 'f1', 'f1_weighted', 'f1_macro', 'f1_micro',
               'roc_auc', 'average_precision', 'precision',
               'precision_weighted', 'precision_macro', 'precision_micro',
               'recall', 'recall_weighted', 'recall_macro', 'recall_micro',
               'log_loss',
               'adjusted_rand_score'  # not really, but works
               ]

MULTILABEL_ONLY_SCORERS = ['precision_samples', 'recall_samples', 'f1_samples']
class EstimatorWithoutFit(object):
    """Dummy estimator to test check_scoring"""
    # Deliberately has no fit(): check_scoring must reject it with TypeError.
    pass
class EstimatorWithFit(BaseEstimator):
    """Dummy estimator to test check_scoring"""
    # fit() only; no score/predict, so a scoring string must be supplied.
    def fit(self, X, y):
        return self
class EstimatorWithFitAndScore(object):
    """Minimal estimator exposing both ``fit`` and ``score``; used to
    exercise the pass-through scoring path of check_scoring."""

    def fit(self, X, y):
        # Fitting is a no-op; return self to mimic the sklearn API.
        return self

    def score(self, X, y):
        # Constant score so scorer assertions can be exact.
        return 1.0
class EstimatorWithFitAndPredict(object):
    """Memorizing estimator: ``fit`` records y and ``predict`` replays it,
    regardless of the X passed in."""

    def fit(self, X, y):
        self.y = y  # remembered verbatim for predict()
        return self

    def predict(self, X):
        return self.y
class DummyScorer(object):
    """Scorer stand-in: callable that ignores its inputs and returns 1."""

    def __call__(self, est, X, y):
        return 1
def test_check_scoring():
    # Test all branches of check_scoring

    # No fit() at all -> TypeError with an explanatory message.
    estimator = EstimatorWithoutFit()
    pattern = (r"estimator should a be an estimator implementing 'fit' method,"
               r" .* was passed")
    assert_raises_regexp(TypeError, pattern, check_scoring, estimator)

    # fit + score, no scoring given -> the estimator's own score() is used.
    estimator = EstimatorWithFitAndScore()
    estimator.fit([[1]], [1])
    scorer = check_scoring(estimator)
    assert_true(scorer is _passthrough_scorer)
    assert_almost_equal(scorer(estimator, [[1]], [1]), 1.0)

    # fit + predict but no score: must name a scoring; default raises.
    estimator = EstimatorWithFitAndPredict()
    estimator.fit([[1]], [1])
    pattern = (r"If no scoring is specified, the estimator passed should have"
               r" a 'score' method\. The estimator .* does not\.")
    assert_raises_regexp(TypeError, pattern, check_scoring, estimator)
    scorer = check_scoring(estimator, "accuracy")
    assert_almost_equal(scorer(estimator, [[1]], [1]), 1.0)

    # Named scoring on a fit-only estimator -> a _PredictScorer instance.
    estimator = EstimatorWithFit()
    scorer = check_scoring(estimator, "accuracy")
    assert_true(isinstance(scorer, _PredictScorer))

    # allow_none lets check_scoring return None instead of raising.
    estimator = EstimatorWithFit()
    scorer = check_scoring(estimator, allow_none=True)
    assert_true(scorer is None)
def test_check_scoring_gridsearchcv():
    # test that check_scoring works on GridSearchCV and pipeline.
    # slightly redundant non-regression test.
    grid = GridSearchCV(LinearSVC(), param_grid={'C': [.1, 1]})
    scorer = check_scoring(grid, "f1")
    assert_true(isinstance(scorer, _PredictScorer))

    pipe = make_pipeline(LinearSVC())
    scorer = check_scoring(pipe, "f1")
    assert_true(isinstance(scorer, _PredictScorer))

    # check that cross_val_score definitely calls the scorer
    # and doesn't make any assumptions about the estimator apart from having a
    # fit.
    scores = cross_val_score(EstimatorWithFit(), [[1], [2], [3]], [1, 0, 1],
                             scoring=DummyScorer())
    assert_array_equal(scores, 1)
def test_make_scorer():
    # Sanity check on the make_scorer factory function.
    f = lambda *args: 0
    # needs_threshold and needs_proba are mutually exclusive options.
    assert_raises(ValueError, make_scorer, f, needs_threshold=True,
                  needs_proba=True)
def test_classification_scores():
    # Test classification scorers.
    X, y = make_blobs(random_state=0, centers=2)
    X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
    clf = LinearSVC(random_state=0)
    clf.fit(X_train, y_train)

    # Each named scorer must agree with calling the metric directly with
    # the matching `average` argument.
    for prefix, metric in [('f1', f1_score), ('precision', precision_score),
                           ('recall', recall_score)]:
        score1 = get_scorer('%s_weighted' % prefix)(clf, X_test, y_test)
        score2 = metric(y_test, clf.predict(X_test), pos_label=None,
                        average='weighted')
        assert_almost_equal(score1, score2)

        score1 = get_scorer('%s_macro' % prefix)(clf, X_test, y_test)
        score2 = metric(y_test, clf.predict(X_test), pos_label=None,
                        average='macro')
        assert_almost_equal(score1, score2)

        score1 = get_scorer('%s_micro' % prefix)(clf, X_test, y_test)
        score2 = metric(y_test, clf.predict(X_test), pos_label=None,
                        average='micro')
        assert_almost_equal(score1, score2)

        # Bare name defaults to binary scoring with pos_label=1.
        score1 = get_scorer('%s' % prefix)(clf, X_test, y_test)
        score2 = metric(y_test, clf.predict(X_test), pos_label=1)
        assert_almost_equal(score1, score2)

    # test fbeta score that takes an argument
    scorer = make_scorer(fbeta_score, beta=2)
    score1 = scorer(clf, X_test, y_test)
    score2 = fbeta_score(y_test, clf.predict(X_test), beta=2)
    assert_almost_equal(score1, score2)

    # test that custom scorer can be pickled
    unpickled_scorer = pickle.loads(pickle.dumps(scorer))
    score3 = unpickled_scorer(clf, X_test, y_test)
    assert_almost_equal(score1, score3)

    # smoke test the repr:
    repr(fbeta_score)
def test_regression_scorers():
    # Test regression scorers.
    diabetes = load_diabetes()
    X, y = diabetes.data, diabetes.target
    X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
    clf = Ridge()
    clf.fit(X_train, y_train)
    # The named 'r2' scorer must match r2_score called directly.
    score1 = get_scorer('r2')(clf, X_test, y_test)
    score2 = r2_score(y_test, clf.predict(X_test))
    assert_almost_equal(score1, score2)
def test_thresholded_scorers():
    # Test scorers that take thresholds.
    X, y = make_blobs(random_state=0, centers=2)
    X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
    clf = LogisticRegression(random_state=0)
    clf.fit(X_train, y_train)
    # roc_auc should agree whether computed from decision_function or
    # from the positive-class predict_proba column.
    score1 = get_scorer('roc_auc')(clf, X_test, y_test)
    score2 = roc_auc_score(y_test, clf.decision_function(X_test))
    score3 = roc_auc_score(y_test, clf.predict_proba(X_test)[:, 1])
    assert_almost_equal(score1, score2)
    assert_almost_equal(score1, score3)

    # log_loss scorer is negated (greater is better convention).
    logscore = get_scorer('log_loss')(clf, X_test, y_test)
    logloss = log_loss(y_test, clf.predict_proba(X_test))
    assert_almost_equal(-logscore, logloss)

    # same for an estimator without decision_function
    clf = DecisionTreeClassifier()
    clf.fit(X_train, y_train)
    score1 = get_scorer('roc_auc')(clf, X_test, y_test)
    score2 = roc_auc_score(y_test, clf.predict_proba(X_test)[:, 1])
    assert_almost_equal(score1, score2)

    # test with a regressor (no decision_function)
    reg = DecisionTreeRegressor()
    reg.fit(X_train, y_train)
    score1 = get_scorer('roc_auc')(reg, X_test, y_test)
    score2 = roc_auc_score(y_test, reg.predict(X_test))
    assert_almost_equal(score1, score2)

    # Test that an exception is raised on more than two classes
    X, y = make_blobs(random_state=0, centers=3)
    X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
    clf.fit(X_train, y_train)
    assert_raises(ValueError, get_scorer('roc_auc'), clf, X_test, y_test)
def test_thresholded_scorers_multilabel_indicator_data():
    # Test that the scorer work with multilabel-indicator format
    # for multilabel and multi-output multi-class classifier.
    X, y = make_multilabel_classification(allow_unlabeled=False,
                                          random_state=0)
    X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)

    # Multi-output multi-class predict_proba
    clf = DecisionTreeClassifier()
    clf.fit(X_train, y_train)
    y_proba = clf.predict_proba(X_test)
    score1 = get_scorer('roc_auc')(clf, X_test, y_test)
    # Pass np.vstack an explicit list: stacking a bare generator is
    # deprecated since NumPy 1.16 and an error in NumPy >= 1.24.
    score2 = roc_auc_score(y_test, np.vstack([p[:, -1] for p in y_proba]).T)
    assert_almost_equal(score1, score2)

    # Multi-output multi-class decision_function
    # TODO Is there any yet?
    clf = DecisionTreeClassifier()
    clf.fit(X_train, y_train)
    clf._predict_proba = clf.predict_proba
    clf.predict_proba = None
    clf.decision_function = lambda X: [p[:, 1] for p in clf._predict_proba(X)]

    y_proba = clf.decision_function(X_test)
    score1 = get_scorer('roc_auc')(clf, X_test, y_test)
    # y_proba is already a list of per-output arrays; stack it directly.
    score2 = roc_auc_score(y_test, np.vstack(list(y_proba)).T)
    assert_almost_equal(score1, score2)

    # Multilabel predict_proba
    clf = OneVsRestClassifier(DecisionTreeClassifier())
    clf.fit(X_train, y_train)
    score1 = get_scorer('roc_auc')(clf, X_test, y_test)
    score2 = roc_auc_score(y_test, clf.predict_proba(X_test))
    assert_almost_equal(score1, score2)

    # Multilabel decision function
    clf = OneVsRestClassifier(LinearSVC(random_state=0))
    clf.fit(X_train, y_train)
    score1 = get_scorer('roc_auc')(clf, X_test, y_test)
    score2 = roc_auc_score(y_test, clf.decision_function(X_test))
    assert_almost_equal(score1, score2)
def test_unsupervised_scorers():
    # Test clustering scorers against gold standard labeling.
    # We don't have any real unsupervised Scorers yet.
    X, y = make_blobs(random_state=0, centers=2)
    X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
    km = KMeans(n_clusters=3)
    km.fit(X_train)
    # The named scorer must match adjusted_rand_score on km's predictions.
    score1 = get_scorer('adjusted_rand_score')(km, X_test, y_test)
    score2 = adjusted_rand_score(y_test, km.predict(X_test))
    assert_almost_equal(score1, score2)
@ignore_warnings
def test_raises_on_score_list():
    # Test that when a list of scores is returned, we raise proper errors.
    # (average=None makes f1_score return one value per class.)
    X, y = make_blobs(random_state=0)
    f1_scorer_no_average = make_scorer(f1_score, average=None)
    clf = DecisionTreeClassifier()
    assert_raises(ValueError, cross_val_score, clf, X, y,
                  scoring=f1_scorer_no_average)
    grid_search = GridSearchCV(clf, scoring=f1_scorer_no_average,
                               param_grid={'max_depth': [1, 2]})
    assert_raises(ValueError, grid_search.fit, X, y)
@ignore_warnings
def test_scorer_sample_weight():
    # Test that scorers support sample_weight or raise sensible errors

    # Unlike the metrics invariance test, in the scorer case it's harder
    # to ensure that, on the classifier output, weighted and unweighted
    # scores really should be unequal.
    X, y = make_classification(random_state=0)
    _, y_ml = make_multilabel_classification(n_samples=X.shape[0],
                                             random_state=0)
    split = train_test_split(X, y, y_ml, random_state=0)
    X_train, X_test, y_train, y_test, y_ml_train, y_ml_test = split

    # Zero the first ten weights: "weighted" should equal "drop those rows".
    sample_weight = np.ones_like(y_test)
    sample_weight[:10] = 0

    # get sensible estimators for each metric
    sensible_regr = DummyRegressor(strategy='median')
    sensible_regr.fit(X_train, y_train)
    sensible_clf = DecisionTreeClassifier(random_state=0)
    sensible_clf.fit(X_train, y_train)
    sensible_ml_clf = DecisionTreeClassifier(random_state=0)
    sensible_ml_clf.fit(X_train, y_ml_train)
    # Map every registered scorer name to an estimator it can score.
    estimator = dict([(name, sensible_regr)
                      for name in REGRESSION_SCORERS] +
                     [(name, sensible_clf)
                      for name in CLF_SCORERS] +
                     [(name, sensible_ml_clf)
                      for name in MULTILABEL_ONLY_SCORERS])

    for name, scorer in SCORERS.items():
        if name in MULTILABEL_ONLY_SCORERS:
            target = y_ml_test
        else:
            target = y_test
        try:
            weighted = scorer(estimator[name], X_test, target,
                              sample_weight=sample_weight)
            ignored = scorer(estimator[name], X_test[10:], target[10:])
            unweighted = scorer(estimator[name], X_test, target)
            assert_not_equal(weighted, unweighted,
                             msg="scorer {0} behaves identically when "
                             "called with sample weights: {1} vs "
                             "{2}".format(name, weighted, unweighted))
            assert_almost_equal(weighted, ignored,
                                err_msg="scorer {0} behaves differently when "
                                "ignoring samples and setting sample_weight to"
                                " 0: {1} vs {2}".format(name, weighted,
                                                        ignored))
        except TypeError as e:
            # Scorers without sample_weight support must say so clearly.
            assert_true("sample_weight" in str(e),
                        "scorer {0} raises unhelpful exception when called "
                        "with sample weights: {1}".format(name, str(e)))
| bsd-3-clause |
conversationai/conversationai-moderator-reddit | perspective_reddit_bot/compute_bot_metrics_test.py | 1 | 2559 | # Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""compute_bot_metrics tests"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import unittest
import numpy as np
import pandas as pd
import compute_bot_metrics
class ComputeBotMetricsTest(unittest.TestCase):
    """Unit tests for compute_bot_metrics: modaction-frame cleaning,
    per-rule precision/recall, and the precision-recall table."""

    def test_process_modactions_frame(self):
        raw_df = pd.DataFrame({
            'removed': [True, False, np.NaN],
            'rule:hitox': ['report', 'noop', 'report'],
            'rule:medtox': ['noop', 'noop', 'rule-not-triggered'],
        })
        cleaned_df = compute_bot_metrics.process_modactions_frame(raw_df)
        # Should drop the last row due to nan in removed.
        self.assertEqual(2, len(cleaned_df))
        # Should convert rule columns to booleans.
        self.assertEqual([True, False], list(cleaned_df['rule:hitox']))
        self.assertEqual([False, False], list(cleaned_df['rule:medtox']))

    def test_compute_rule_metrics(self):
        df = pd.DataFrame({
            'removed': [True, True, False, False],
            'rule:hitox': [True, False, False, False],
            'rule:medtox': [True, True, True, True],
        })
        metrics = compute_bot_metrics.compute_rule_metrics(df)
        # One row per rule plus a combined '~overall~' row.
        expected_df = pd.DataFrame({
            'rule': ['hitox', 'medtox', '~overall~'],
            'precision': [1.0, 0.5, 0.5],
            'recall': [0.5, 1.0, 1.0],
            'flags': [1, 4, 4],
        }, columns=['rule', 'precision', 'recall', 'flags'])
        pd.testing.assert_frame_equal(expected_df, metrics)

    def test_compute_pr_table(self):
        df = pd.DataFrame({
            'removed': [True, True, False, False],
            'score:tox': [1.0, 0.5, 0.6, 0.0],
        })
        pr_table = compute_bot_metrics.compute_pr_table(df, 'score:tox', 10)
        expected_df = pd.DataFrame({
            'precision': [0.6666666666666666, 0.5, 1.0],
            'recall': [1.0, 0.5, 0.5],
            'threshold': [0.5, 0.6, 1.0],
        }, columns=['precision', 'recall', 'threshold'])
        pd.testing.assert_frame_equal(expected_df, pr_table)


if __name__ == '__main__':
    unittest.main()
| apache-2.0 |
xiaoxiamii/scikit-learn | sklearn/decomposition/tests/test_factor_analysis.py | 222 | 3055 | # Author: Christian Osendorfer <osendorf@gmail.com>
# Alexandre Gramfort <alexandre.gramfort@inria.fr>
# Licence: BSD3
import numpy as np
from sklearn.utils.testing import assert_warns
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils import ConvergenceWarning
from sklearn.decomposition import FactorAnalysis
def test_factor_analysis():
    # Test FactorAnalysis ability to recover the data covariance structure
    rng = np.random.RandomState(0)
    n_samples, n_features, n_components = 20, 5, 3

    # Some random settings for the generative model
    W = rng.randn(n_components, n_features)
    # latent variable of dim 3, 20 of it
    h = rng.randn(n_samples, n_components)
    # using gamma to model different noise variance
    # per component
    noise = rng.gamma(1, size=n_features) * rng.randn(n_samples, n_features)

    # generate observations
    # wlog, mean is 0
    X = np.dot(h, W) + noise

    # Invalid svd_method must be rejected both at construction and at fit.
    assert_raises(ValueError, FactorAnalysis, svd_method='foo')
    fa_fail = FactorAnalysis()
    fa_fail.svd_method = 'foo'
    assert_raises(ValueError, fa_fail.fit, X)

    fas = []
    for method in ['randomized', 'lapack']:
        fa = FactorAnalysis(n_components=n_components, svd_method=method)
        fa.fit(X)
        fas.append(fa)

        X_t = fa.transform(X)
        assert_equal(X_t.shape, (n_samples, n_components))

        assert_almost_equal(fa.loglike_[-1], fa.score_samples(X).sum())
        assert_almost_equal(fa.score_samples(X).mean(), fa.score(X))

        # Log-likelihood must increase monotonically during EM.
        diff = np.all(np.diff(fa.loglike_))
        assert_greater(diff, 0., 'Log likelihood dif not increase')

        # Sample Covariance
        scov = np.cov(X, rowvar=0., bias=1.)

        # Model Covariance
        mcov = fa.get_covariance()
        diff = np.sum(np.abs(scov - mcov)) / W.size
        assert_less(diff, 0.1, "Mean absolute difference is %f" % diff)

    # Wrong-width noise_variance_init must be rejected on narrower data.
    fa = FactorAnalysis(n_components=n_components,
                        noise_variance_init=np.ones(n_features))
    assert_raises(ValueError, fa.fit, X[:, :2])

    f = lambda x, y: np.abs(getattr(x, y))  # sign will not be equal
    fa1, fa2 = fas
    # Both SVD methods should agree up to sign on the fitted attributes.
    for attr in ['loglike_', 'components_', 'noise_variance_']:
        assert_almost_equal(f(fa1, attr), f(fa2, attr))

    fa1.max_iter = 1
    fa1.verbose = True
    assert_warns(ConvergenceWarning, fa1.fit, X)

    # Test get_covariance and get_precision with n_components == n_features
    # with n_components < n_features and with n_components == 0
    for n_components in [0, 2, X.shape[1]]:
        fa.n_components = n_components
        fa.fit(X)
        cov = fa.get_covariance()
        precision = fa.get_precision()
        assert_array_almost_equal(np.dot(cov, precision),
                                  np.eye(X.shape[1]), 12)
| bsd-3-clause |
kdebrab/pandas | pandas/tests/series/test_datetime_values.py | 3 | 19069 | # coding=utf-8
# pylint: disable-msg=E1101,W0612
import locale
import calendar
import pytest
from datetime import datetime, date
import numpy as np
import pandas as pd
from pandas.core.dtypes.common import is_integer_dtype, is_list_like
from pandas import (Index, Series, DataFrame, bdate_range,
date_range, period_range, timedelta_range,
PeriodIndex, DatetimeIndex, TimedeltaIndex)
import pandas.core.common as com
from pandas.util.testing import assert_series_equal
import pandas.util.testing as tm
from .common import TestData
class TestSeriesDatetimeValues(TestData):
    def test_dt_namespace_accessor(self):
        """Smoke-test the .dt accessor across datetime-, timedelta- and
        period-backed Series: every datetimelike property/method must be
        reachable, agree with the equivalent Index operation, and the
        accessor must be read-only.
        """
        # GH 7207, 11128
        # test .dt namespace accessor

        # Allowed properties/methods are taken from the Index classes so the
        # test automatically tracks additions to the datetimelike API.
        ok_for_period = PeriodIndex._datetimelike_ops
        ok_for_period_methods = ['strftime', 'to_timestamp', 'asfreq']
        ok_for_dt = DatetimeIndex._datetimelike_ops
        ok_for_dt_methods = ['to_period', 'to_pydatetime', 'tz_localize',
                             'tz_convert', 'normalize', 'strftime', 'round',
                             'floor', 'ceil', 'day_name', 'month_name']
        ok_for_td = TimedeltaIndex._datetimelike_ops
        ok_for_td_methods = ['components', 'to_pytimedelta', 'total_seconds',
                             'round', 'floor', 'ceil']

        def get_expected(s, name):
            # NOTE(review): reads the loop variable `prop` from the enclosing
            # scope rather than the `name` parameter — callers always pass
            # `prop`, so behavior matches, but confirm before refactoring.
            result = getattr(Index(s._values), prop)
            if isinstance(result, np.ndarray):
                if is_integer_dtype(result):
                    result = result.astype('int64')
            elif not is_list_like(result):
                return result
            return Series(result, index=s.index, name=s.name)

        def compare(s, name):
            # Compare Series.dt.<prop> against the same property on the Index.
            a = getattr(s.dt, prop)
            b = get_expected(s, prop)
            if not (is_list_like(a) and is_list_like(b)):
                assert a == b
            else:
                tm.assert_series_equal(a, b)

        # datetimeindex
        cases = [Series(date_range('20130101', periods=5), name='xxx'),
                 Series(date_range('20130101', periods=5, freq='s'),
                        name='xxx'),
                 Series(date_range('20130101 00:00:00', periods=5, freq='ms'),
                        name='xxx')]
        for s in cases:
            for prop in ok_for_dt:
                # we test freq below
                if prop != 'freq':
                    compare(s, prop)

            for prop in ok_for_dt_methods:
                getattr(s.dt, prop)

            result = s.dt.to_pydatetime()
            assert isinstance(result, np.ndarray)
            assert result.dtype == object

            result = s.dt.tz_localize('US/Eastern')
            exp_values = DatetimeIndex(s.values).tz_localize('US/Eastern')
            expected = Series(exp_values, index=s.index, name='xxx')
            tm.assert_series_equal(result, expected)

            tz_result = result.dt.tz
            assert str(tz_result) == 'US/Eastern'
            freq_result = s.dt.freq
            assert freq_result == DatetimeIndex(s.values, freq='infer').freq

            # let's localize, then convert
            result = s.dt.tz_localize('UTC').dt.tz_convert('US/Eastern')
            exp_values = (DatetimeIndex(s.values).tz_localize('UTC')
                          .tz_convert('US/Eastern'))
            expected = Series(exp_values, index=s.index, name='xxx')
            tm.assert_series_equal(result, expected)

        # round
        s = Series(pd.to_datetime(['2012-01-01 13:00:00',
                                   '2012-01-01 12:01:00',
                                   '2012-01-01 08:00:00']), name='xxx')
        result = s.dt.round('D')
        expected = Series(pd.to_datetime(['2012-01-02', '2012-01-02',
                                          '2012-01-01']), name='xxx')
        tm.assert_series_equal(result, expected)

        # round with tz
        result = (s.dt.tz_localize('UTC')
                  .dt.tz_convert('US/Eastern')
                  .dt.round('D'))
        exp_values = pd.to_datetime(['2012-01-01', '2012-01-01',
                                     '2012-01-01']).tz_localize('US/Eastern')
        expected = Series(exp_values, name='xxx')
        tm.assert_series_equal(result, expected)

        # floor
        s = Series(pd.to_datetime(['2012-01-01 13:00:00',
                                   '2012-01-01 12:01:00',
                                   '2012-01-01 08:00:00']), name='xxx')
        result = s.dt.floor('D')
        expected = Series(pd.to_datetime(['2012-01-01', '2012-01-01',
                                          '2012-01-01']), name='xxx')
        tm.assert_series_equal(result, expected)

        # ceil
        s = Series(pd.to_datetime(['2012-01-01 13:00:00',
                                   '2012-01-01 12:01:00',
                                   '2012-01-01 08:00:00']), name='xxx')
        result = s.dt.ceil('D')
        expected = Series(pd.to_datetime(['2012-01-02', '2012-01-02',
                                          '2012-01-02']), name='xxx')
        tm.assert_series_equal(result, expected)

        # datetimeindex with tz
        s = Series(date_range('20130101', periods=5, tz='US/Eastern'),
                   name='xxx')
        for prop in ok_for_dt:
            # we test freq below
            if prop != 'freq':
                compare(s, prop)

        for prop in ok_for_dt_methods:
            getattr(s.dt, prop)

        result = s.dt.to_pydatetime()
        assert isinstance(result, np.ndarray)
        assert result.dtype == object

        result = s.dt.tz_convert('CET')
        expected = Series(s._values.tz_convert('CET'),
                          index=s.index, name='xxx')
        tm.assert_series_equal(result, expected)

        tz_result = result.dt.tz
        assert str(tz_result) == 'CET'
        freq_result = s.dt.freq
        assert freq_result == DatetimeIndex(s.values, freq='infer').freq

        # timedelta index
        cases = [Series(timedelta_range('1 day', periods=5),
                        index=list('abcde'), name='xxx'),
                 Series(timedelta_range('1 day 01:23:45', periods=5,
                                        freq='s'), name='xxx'),
                 Series(timedelta_range('2 days 01:23:45.012345', periods=5,
                                        freq='ms'), name='xxx')]
        for s in cases:
            for prop in ok_for_td:
                # we test freq below
                if prop != 'freq':
                    compare(s, prop)

            for prop in ok_for_td_methods:
                getattr(s.dt, prop)

            result = s.dt.components
            assert isinstance(result, DataFrame)
            tm.assert_index_equal(result.index, s.index)

            result = s.dt.to_pytimedelta()
            assert isinstance(result, np.ndarray)
            assert result.dtype == object

            result = s.dt.total_seconds()
            assert isinstance(result, pd.Series)
            assert result.dtype == 'float64'

            freq_result = s.dt.freq
            assert freq_result == TimedeltaIndex(s.values, freq='infer').freq

        # both
        index = date_range('20130101', periods=3, freq='D')
        s = Series(date_range('20140204', periods=3, freq='s'),
                   index=index, name='xxx')
        exp = Series(np.array([2014, 2014, 2014], dtype='int64'),
                     index=index, name='xxx')
        tm.assert_series_equal(s.dt.year, exp)

        exp = Series(np.array([2, 2, 2], dtype='int64'),
                     index=index, name='xxx')
        tm.assert_series_equal(s.dt.month, exp)

        exp = Series(np.array([0, 1, 2], dtype='int64'),
                     index=index, name='xxx')
        tm.assert_series_equal(s.dt.second, exp)

        exp = pd.Series([s[0]] * 3, index=index, name='xxx')
        tm.assert_series_equal(s.dt.normalize(), exp)

        # periodindex
        cases = [Series(period_range('20130101', periods=5, freq='D'),
                        name='xxx')]
        for s in cases:
            for prop in ok_for_period:
                # we test freq below
                if prop != 'freq':
                    compare(s, prop)

            for prop in ok_for_period_methods:
                getattr(s.dt, prop)

            freq_result = s.dt.freq
            assert freq_result == PeriodIndex(s.values).freq

        # test limited display api
        def get_dir(s):
            # Public names exposed by the .dt namespace, sorted and de-duped.
            results = [r for r in s.dt.__dir__() if not r.startswith('_')]
            return list(sorted(set(results)))

        s = Series(date_range('20130101', periods=5, freq='D'), name='xxx')
        results = get_dir(s)
        tm.assert_almost_equal(
            results, list(sorted(set(ok_for_dt + ok_for_dt_methods))))

        s = Series(period_range('20130101', periods=5,
                                freq='D', name='xxx').astype(object))
        results = get_dir(s)
        tm.assert_almost_equal(
            results, list(sorted(set(ok_for_period + ok_for_period_methods))))

        # 11295
        # ambiguous time error on the conversions
        s = Series(pd.date_range('2015-01-01', '2016-01-01',
                                 freq='T'), name='xxx')
        s = s.dt.tz_localize('UTC').dt.tz_convert('America/Chicago')
        results = get_dir(s)
        tm.assert_almost_equal(
            results, list(sorted(set(ok_for_dt + ok_for_dt_methods))))

        exp_values = pd.date_range('2015-01-01', '2016-01-01', freq='T',
                                   tz='UTC').tz_convert('America/Chicago')
        expected = Series(exp_values, name='xxx')
        tm.assert_series_equal(s, expected)

        # no setting allowed
        s = Series(date_range('20130101', periods=5, freq='D'), name='xxx')
        with tm.assert_raises_regex(ValueError, "modifications"):
            s.dt.hour = 5

        # trying to set a copy
        with pd.option_context('chained_assignment', 'raise'):

            def f():
                s.dt.hour[0] = 5
            pytest.raises(com.SettingWithCopyError, f)
def test_dt_namespace_accessor_categorical(self):
# GH 19468
dti = DatetimeIndex(['20171111', '20181212']).repeat(2)
s = Series(pd.Categorical(dti), name='foo')
result = s.dt.year
expected = Series([2017, 2017, 2018, 2018], name='foo')
tm.assert_series_equal(result, expected)
    def test_dt_accessor_no_new_attributes(self):
        """The .dt namespace must reject assignment of arbitrary attributes.

        https://github.com/pandas-dev/pandas/issues/10673
        """
        s = Series(date_range('20130101', periods=5, freq='D'))
        with tm.assert_raises_regex(AttributeError,
                                    "You cannot add any new attribute"):
            s.dt.xlabel = "a"
    @pytest.mark.parametrize('time_locale', [
        None] if tm.get_locales() is None else [None] + tm.get_locales())
    def test_dt_accessor_datetime_name_accessors(self, time_locale):
        """day_name/month_name must honour *time_locale* (None -> English)."""
        # Test Monday -> Sunday and January -> December, in that sequence
        if time_locale is None:
            # If the time_locale is None, day-name and month_name should
            # return the english attributes
            expected_days = ['Monday', 'Tuesday', 'Wednesday', 'Thursday',
                             'Friday', 'Saturday', 'Sunday']
            expected_months = ['January', 'February', 'March', 'April', 'May',
                               'June', 'July', 'August', 'September',
                               'October', 'November', 'December']
        else:
            # Pull the locale's own day/month names from the calendar module.
            with tm.set_locale(time_locale, locale.LC_TIME):
                expected_days = calendar.day_name[:]
                expected_months = calendar.month_name[1:]

        s = Series(DatetimeIndex(freq='D', start=datetime(1998, 1, 1),
                                 periods=365))
        english_days = ['Monday', 'Tuesday', 'Wednesday', 'Thursday',
                        'Friday', 'Saturday', 'Sunday']
        # 1998-01-05 (index 4) is a Monday, so indices 4..10 cover one week.
        for day, name, eng_name in zip(range(4, 11),
                                       expected_days,
                                       english_days):
            name = name.capitalize()
            assert s.dt.weekday_name[day] == eng_name
            assert s.dt.day_name(locale=time_locale)[day] == name
        s = s.append(Series([pd.NaT]))
        # NaT must come back as NaN, not a spurious name.
        assert np.isnan(s.dt.day_name(locale=time_locale).iloc[-1])

        s = Series(DatetimeIndex(freq='M', start='2012', end='2013'))
        result = s.dt.month_name(locale=time_locale)
        expected = Series([month.capitalize() for month in expected_months])
        tm.assert_series_equal(result, expected)
        for s_date, expected in zip(s, expected_months):
            result = s_date.month_name(locale=time_locale)
            assert result == expected.capitalize()
        s = s.append(Series([pd.NaT]))
        assert np.isnan(s.dt.month_name(locale=time_locale).iloc[-1])
def test_strftime(self):
# GH 10086
s = Series(date_range('20130101', periods=5))
result = s.dt.strftime('%Y/%m/%d')
expected = Series(['2013/01/01', '2013/01/02', '2013/01/03',
'2013/01/04', '2013/01/05'])
tm.assert_series_equal(result, expected)
s = Series(date_range('2015-02-03 11:22:33.4567', periods=5))
result = s.dt.strftime('%Y/%m/%d %H-%M-%S')
expected = Series(['2015/02/03 11-22-33', '2015/02/04 11-22-33',
'2015/02/05 11-22-33', '2015/02/06 11-22-33',
'2015/02/07 11-22-33'])
tm.assert_series_equal(result, expected)
s = Series(period_range('20130101', periods=5))
result = s.dt.strftime('%Y/%m/%d')
expected = Series(['2013/01/01', '2013/01/02', '2013/01/03',
'2013/01/04', '2013/01/05'])
tm.assert_series_equal(result, expected)
s = Series(period_range(
'2015-02-03 11:22:33.4567', periods=5, freq='s'))
result = s.dt.strftime('%Y/%m/%d %H-%M-%S')
expected = Series(['2015/02/03 11-22-33', '2015/02/03 11-22-34',
'2015/02/03 11-22-35', '2015/02/03 11-22-36',
'2015/02/03 11-22-37'])
tm.assert_series_equal(result, expected)
s = Series(date_range('20130101', periods=5))
s.iloc[0] = pd.NaT
result = s.dt.strftime('%Y/%m/%d')
expected = Series(['NaT', '2013/01/02', '2013/01/03', '2013/01/04',
'2013/01/05'])
tm.assert_series_equal(result, expected)
datetime_index = date_range('20150301', periods=5)
result = datetime_index.strftime("%Y/%m/%d")
expected = Index(['2015/03/01', '2015/03/02', '2015/03/03',
'2015/03/04', '2015/03/05'], dtype=np.object_)
# dtype may be S10 or U10 depending on python version
tm.assert_index_equal(result, expected)
period_index = period_range('20150301', periods=5)
result = period_index.strftime("%Y/%m/%d")
expected = Index(['2015/03/01', '2015/03/02', '2015/03/03',
'2015/03/04', '2015/03/05'], dtype='=U10')
tm.assert_index_equal(result, expected)
s = Series([datetime(2013, 1, 1, 2, 32, 59), datetime(2013, 1, 2, 14,
32, 1)])
result = s.dt.strftime('%Y-%m-%d %H:%M:%S')
expected = Series(["2013-01-01 02:32:59", "2013-01-02 14:32:01"])
tm.assert_series_equal(result, expected)
s = Series(period_range('20130101', periods=4, freq='H'))
result = s.dt.strftime('%Y/%m/%d %H:%M:%S')
expected = Series(["2013/01/01 00:00:00", "2013/01/01 01:00:00",
"2013/01/01 02:00:00", "2013/01/01 03:00:00"])
s = Series(period_range('20130101', periods=4, freq='L'))
result = s.dt.strftime('%Y/%m/%d %H:%M:%S.%l')
expected = Series(["2013/01/01 00:00:00.000",
"2013/01/01 00:00:00.001",
"2013/01/01 00:00:00.002",
"2013/01/01 00:00:00.003"])
tm.assert_series_equal(result, expected)
    def test_valid_dt_with_missing_values(self):
        """NaT entries must surface as NaN through every .dt attribute (GH 8689)."""
        from datetime import date, time

        # GH 8689
        s = Series(date_range('20130101', periods=5, freq='D'))
        s.iloc[2] = pd.NaT

        # Integer-valued attributes become float with NaN at the NaT slot.
        for attr in ['microsecond', 'nanosecond', 'second', 'minute', 'hour',
                     'day']:
            expected = getattr(s.dt, attr).copy()
            expected.iloc[2] = np.nan
            result = getattr(s.dt, attr)
            tm.assert_series_equal(result, expected)

        result = s.dt.date
        expected = Series(
            [date(2013, 1, 1), date(2013, 1, 2), np.nan, date(2013, 1, 4),
             date(2013, 1, 5)], dtype='object')
        tm.assert_series_equal(result, expected)

        result = s.dt.time
        expected = Series(
            [time(0), time(0), np.nan, time(0), time(0)], dtype='object')
        tm.assert_series_equal(result, expected)
    def test_dt_accessor_api(self):
        """The .dt accessor exists on datetimelike Series and raises elsewhere (GH 9322)."""
        # GH 9322
        from pandas.core.indexes.accessors import (
            CombinedDatetimelikeProperties, DatetimeProperties)
        assert Series.dt is CombinedDatetimelikeProperties

        s = Series(date_range('2000-01-01', periods=3))
        assert isinstance(s.dt, DatetimeProperties)

        # Non-datetimelike Series must refuse the accessor entirely.
        for s in [Series(np.arange(5)), Series(list('abcde')),
                  Series(np.random.randn(5))]:
            with tm.assert_raises_regex(AttributeError,
                                        "only use .dt accessor"):
                s.dt
            assert not hasattr(s, 'dt')
    def test_between(self):
        """Series.between is inclusive by default, exclusive with inclusive=False."""
        s = Series(bdate_range('1/1/2000', periods=20).astype(object))
        s[::2] = np.nan

        result = s[s.between(s[3], s[17])]
        expected = s[3:18].dropna()
        assert_series_equal(result, expected)

        result = s[s.between(s[3], s[17], inclusive=False)]
        expected = s[5:16].dropna()
        assert_series_equal(result, expected)
    def test_date_tz(self):
        """GH 11757: .dt.date on tz-aware values must use the local wall-clock date."""
        rng = pd.DatetimeIndex(['2014-04-04 23:56',
                                '2014-07-18 21:24',
                                '2015-11-22 22:14'], tz="US/Eastern")
        s = Series(rng)
        expected = Series([date(2014, 4, 4),
                           date(2014, 7, 18),
                           date(2015, 11, 22)])
        assert_series_equal(s.dt.date, expected)
        # Element-wise .date() must agree with the vectorized accessor.
        assert_series_equal(s.apply(lambda x: x.date()), expected)
def test_datetime_understood(self):
# Ensures it doesn't fail to create the right series
# reported in issue#16726
series = pd.Series(pd.date_range("2012-01-01", periods=3))
offset = pd.offsets.DateOffset(days=6)
result = series - offset
expected = pd.Series(pd.to_datetime([
'2011-12-26', '2011-12-27', '2011-12-28']))
tm.assert_series_equal(result, expected)
| bsd-3-clause |
shujingke/opencog | opencog/python/spatiotemporal/temporal_events/membership_function.py | 34 | 4673 | from math import fabs
from random import random
from scipy.stats.distributions import rv_frozen
from spatiotemporal.time_intervals import TimeInterval
from spatiotemporal.unix_time import random_time, UnixTime
from utility.generic import convert_dict_to_sorted_lists
from utility.functions import Function, FunctionPiecewiseLinear,\
FunctionHorizontalLinear, FunctionComposite, FUNCTION_ZERO, FUNCTION_ONE, FunctionLinear
from numpy import PINF as POSITIVE_INFINITY, NINF as NEGATIVE_INFINITY
from utility.numeric.globals import EPSILON
__author__ = 'keyvan'
class MembershipFunction(Function):
    """Membership degree of a temporal event over time.

    The value at a point is the difference between the CDFs of the event's
    beginning and ending distributions; outside the domain it is zero.
    """

    def __init__(self, temporal_event):
        Function.__init__(self, function_undefined=FUNCTION_ZERO,
                          domain=temporal_event)

    def call_on_single_point(self, time_step):
        begun = self.domain.distribution_beginning.cdf(time_step)
        ended = self.domain.distribution_ending.cdf(time_step)
        return begun - ended
class ProbabilityDistributionPiecewiseLinear(list, TimeInterval, rv_frozen):
    """A frozen-distribution-like object built from a piecewise-linear CDF.

    The instance is simultaneously:
    - a list of the CDF's input (time) breakpoints,
    - a TimeInterval spanning the first and last breakpoint,
    - an rv_frozen-style distribution (cdf/pdf/rvs/mean/...).

    NOTE: this module targets Python 2 (``xrange`` in ``rvs``).
    """
    dist = 'ProbabilityDistributionPiecewiseLinear'
    _mean = None
    asd = None

    def __init__(self, dictionary_input_output):
        # dictionary_input_output maps time -> cumulative probability.
        cdf_input_list, cdf_output_list = convert_dict_to_sorted_lists(dictionary_input_output)
        list.__init__(self, cdf_input_list)
        TimeInterval.__init__(self, self[0], self[-1], 2)
        self.cdf = FunctionPiecewiseLinear(dictionary_input_output, function_undefined=FUNCTION_ZERO)
        # Beyond the last breakpoint the CDF is saturated at 1.
        self.cdf.dictionary_bounds_function[(self.b, POSITIVE_INFINITY)] = FUNCTION_ONE
        pdf_output_list = []
        dictionary_bounds_function = {}
        # The PDF of a piecewise-linear CDF is piecewise-constant: on each
        # finite segment it equals the CDF's slope (evaluated at the midpoint).
        for bounds in sorted(self.cdf.dictionary_bounds_function):
            a, b = bounds
            if a in [NEGATIVE_INFINITY, POSITIVE_INFINITY] or b in [NEGATIVE_INFINITY, POSITIVE_INFINITY]:
                continue
            pdf_y_intercept = fabs(self.cdf.derivative((a + b) / 2.0))
            pdf_output_list.append(pdf_y_intercept)
            dictionary_bounds_function[bounds] = FunctionHorizontalLinear(pdf_y_intercept)
        self.pdf = FunctionComposite(dictionary_bounds_function, function_undefined=FUNCTION_ZERO, domain=self,
                                     is_normalised=True)
        # roulette_wheel holds (segment_start, segment_end, cumulative_share)
        # triples used by rvs() for inverse-transform-style sampling; the
        # share stored is the CDF value at the segment's right edge.
        self.roulette_wheel = []
        self._mean = 0
        for bounds in sorted(self.pdf.dictionary_bounds_function):
            (a, b) = bounds
            if a in [NEGATIVE_INFINITY, POSITIVE_INFINITY] and b in [NEGATIVE_INFINITY, POSITIVE_INFINITY]:
                continue
            cdf = self.cdf.dictionary_bounds_function[bounds]
            pdf = self.pdf.dictionary_bounds_function[bounds]
            share = cdf(b)
            self.roulette_wheel.append((a, b, share))
            # Mean contribution of a uniform segment: midpoint * probability mass.
            self._mean += (a + b) / 2.0 * pdf(a) * (b - a)

    def std(self):
        # Not properly implemented
        return 0

    def stats(self, moments='mv'):
        # Not properly implemented
        # m, s, k
        return self.mean(), 0, 0

    def mean(self):
        return self._mean

    def interval(self, alpha):
        # Only the full support (alpha == 1) is supported.
        if alpha == 1:
            return self.a, self.b
        raise NotImplementedError("'interval' is not implemented for 'alpha' other than 1")

    def rvs(self, size=None):
        """Draw *size* samples (a single value if size is None)."""
        if size is None:
            size = 1
        else:
            assert isinstance(size, int)
        result = []
        start, end = 0, 0
        for i in xrange(size):
            # Pick a segment whose cumulative share exceeds a uniform draw,
            # then sample uniformly within that segment.
            rand = random()
            for a, b, share in self.roulette_wheel:
                if rand < share:
                    start, end = a, b
                    break
            result.append(random_time(start, end))
        if size == 1:
            return result[0]
        return result

    # def plot(self):
    #     import matplotlib.pyplot as plt
    #     x_axis, y_axis = [], []
    #     for time_step in self:
    #         x_axis.append(UnixTime(time_step - EPSILON).to_datetime())
    #         x_axis.append(UnixTime(time_step + EPSILON).to_datetime())
    #         y_axis.append(self.pdf(time_step - EPSILON))
    #         y_axis.append(self.pdf(time_step + EPSILON))
    #     plt.plot(x_axis, y_axis)
    #     return plt

    def plot(self):
        """Plot the (piecewise-constant) PDF; returns the pyplot module."""
        import matplotlib.pyplot as plt
        x_axis, y_axis = [], []
        # Sample just before and after each breakpoint to draw the jumps.
        for time_step in self:
            x_axis.append(time_step - EPSILON)
            x_axis.append(time_step + EPSILON)
            y_axis.append(self.pdf(time_step - EPSILON))
            y_axis.append(self.pdf(time_step + EPSILON))
        plt.plot(x_axis, y_axis)
        return plt

    def __hash__(self):
        # list is unhashable; fall back to identity hashing.
        return object.__hash__(self)

    def __repr__(self):
        return TimeInterval.__repr__(self)

    def __str__(self):
        return repr(self)
| agpl-3.0 |
glneo/gnuradio-davisaf | gr-digital/examples/example_timing.py | 17 | 7791 | #!/usr/bin/env python
from gnuradio import gr, digital
from gnuradio import eng_notation
from gnuradio.eng_option import eng_option
from optparse import OptionParser
import sys  # BUG FIX: sys.exit() below raised NameError because sys was never imported

try:
    import scipy
except ImportError:
    # Parenthesized print works identically under Python 2 and 3 here.
    print("Error: could not import scipy (http://www.scipy.org/)")
    sys.exit(1)

try:
    import pylab
except ImportError:
    print("Error: could not import pylab (http://matplotlib.sourceforge.net/)")
    sys.exit(1)

from scipy import fftpack
class example_timing(gr.top_block):
    """GNU Radio flowgraph simulating a BPSK link with a timing offset and
    recovering the symbol clock with either a polyphase filterbank clock
    sync (mode 0) or Mueller & Muller clock recovery (mode 1).

    NOTE(review): the ``bw`` parameter appears unused — the loop gain is
    hard-coded below; confirm whether ``bw`` was meant to drive it.
    """

    def __init__(self, N, sps, rolloff, ntaps, bw, noise,
                 foffset, toffset, poffset, mode=0):
        gr.top_block.__init__(self)

        # TX pulse-shaping filter.
        rrc_taps = gr.firdes.root_raised_cosine(
            sps, sps, 1.0, rolloff, ntaps)

        gain = 2*scipy.pi/100.0
        nfilts = 32
        # RX matched filter, oversampled for the polyphase filterbank.
        rrc_taps_rx = gr.firdes.root_raised_cosine(
            nfilts, sps*nfilts, 1.0, rolloff, ntaps*nfilts)

        # Random +/-1 BPSK symbols, rotated by the requested phase offset.
        data = 2.0*scipy.random.randint(0, 2, N) - 1.0
        data = scipy.exp(1j*poffset) * data
        self.src = gr.vector_source_c(data.tolist(), False)
        self.rrc = gr.interp_fir_filter_ccf(sps, rrc_taps)
        self.chn = gr.channel_model(noise, foffset, toffset)
        # Fixed fractional delay to give the recovery loop something to track.
        self.off = gr.fractional_interpolator_cc(0.20, 1.0)

        if mode == 0:
            # Polyphase filterbank clock sync; taps/diff-taps kept for plotting.
            self.clk = gr.pfb_clock_sync_ccf(sps, gain, rrc_taps_rx,
                                             nfilts, nfilts//2, 3.5)
            self.taps = self.clk.get_taps()
            self.dtaps = self.clk.get_diff_taps()

            self.vsnk_err = gr.vector_sink_f()
            self.vsnk_rat = gr.vector_sink_f()
            self.vsnk_phs = gr.vector_sink_f()

            # Secondary outputs: error, rate, and filter phase.
            self.connect((self.clk,1), self.vsnk_err)
            self.connect((self.clk,2), self.vsnk_rat)
            self.connect((self.clk,3), self.vsnk_phs)
        else: # mode == 1
            # Mueller & Muller clock recovery parameters.
            mu = 0.5
            gain_mu = 0.1
            gain_omega = 0.25*gain_mu*gain_mu
            omega_rel_lim = 0.02
            self.clk = digital.clock_recovery_mm_cc(sps, gain_omega,
                                                    mu, gain_mu,
                                                    omega_rel_lim)
            self.vsnk_err = gr.vector_sink_f()
            self.connect((self.clk,1), self.vsnk_err)

        self.vsnk_src = gr.vector_sink_c()
        self.vsnk_clk = gr.vector_sink_c()

        # Main chain: source -> shaping -> channel -> delay -> recovery -> sink.
        self.connect(self.src, self.rrc, self.chn, self.off, self.clk, self.vsnk_clk)
        # Tap the pre-recovery signal for comparison plots.
        self.connect(self.off, self.vsnk_src)
def main():
    """Parse options, run the timing-recovery simulation, and plot results."""
    parser = OptionParser(option_class=eng_option, conflict_handler="resolve")
    parser.add_option("-N", "--nsamples", type="int", default=2000,
                      help="Set the number of samples to process [default=%default]")
    parser.add_option("-S", "--sps", type="int", default=4,
                      help="Set the samples per symbol [default=%default]")
    parser.add_option("-r", "--rolloff", type="eng_float", default=0.35,
                      help="Set the rolloff factor [default=%default]")
    parser.add_option("-W", "--bandwidth", type="eng_float", default=2*scipy.pi/100.0,
                      help="Set the loop bandwidth [default=%default]")
    parser.add_option("-n", "--ntaps", type="int", default=45,
                      help="Set the number of taps in the filters [default=%default]")
    parser.add_option("", "--noise", type="eng_float", default=0.0,
                      help="Set the simulation noise voltage [default=%default]")
    parser.add_option("-f", "--foffset", type="eng_float", default=0.0,
                      help="Set the simulation's normalized frequency offset (in Hz) [default=%default]")
    parser.add_option("-t", "--toffset", type="eng_float", default=1.0,
                      help="Set the simulation's timing offset [default=%default]")
    parser.add_option("-p", "--poffset", type="eng_float", default=0.0,
                      help="Set the simulation's phase offset [default=%default]")
    parser.add_option("-M", "--mode", type="int", default=0,
                      help="Set the recovery mode (0: polyphase, 1: M&M) [default=%default]")
    (options, args) = parser.parse_args ()

    # Adjust N for the interpolation by sps
    options.nsamples = options.nsamples // options.sps

    # Set up the program-under-test
    put = example_timing(options.nsamples, options.sps, options.rolloff,
                         options.ntaps, options.bandwidth, options.noise,
                         options.foffset, options.toffset, options.poffset,
                         options.mode)
    put.run()

    if options.mode == 0:
        # Skip the first 20 samples to let the loop settle before plotting.
        data_src = scipy.array(put.vsnk_src.data()[20:])
        data_clk = scipy.array(put.vsnk_clk.data()[20:])

        data_err = scipy.array(put.vsnk_err.data()[20:])
        data_rat = scipy.array(put.vsnk_rat.data()[20:])
        data_phs = scipy.array(put.vsnk_phs.data()[20:])

        f1 = pylab.figure(1, figsize=(12,10), facecolor='w')

        # Plot the IQ symbols
        s1 = f1.add_subplot(2,2,1)
        s1.plot(data_src.real, data_src.imag, "bo")
        s1.plot(data_clk.real, data_clk.imag, "ro")
        s1.set_title("IQ")
        s1.set_xlabel("Real part")
        s1.set_ylabel("Imag part")
        s1.set_xlim([-2, 2])
        s1.set_ylim([-2, 2])

        # Plot the symbols in time
        s2 = f1.add_subplot(2,2,2)
        s2.plot(data_src.real, "bo-")
        s2.plot(data_clk.real, "ro")
        s2.set_title("Symbols")
        s2.set_xlabel("Samples")
        s2.set_ylabel("Real Part of Signals")

        # Plot the clock recovery loop's error
        s3 = f1.add_subplot(2,2,3)
        s3.plot(data_err)
        s3.set_title("Clock Recovery Loop Error")
        s3.set_xlabel("Samples")
        s3.set_ylabel("Error")

        # Plot the clock recovery loop's error
        s4 = f1.add_subplot(2,2,4)
        s4.plot(data_phs)
        s4.set_title("Clock Recovery Loop Filter Phase")
        s4.set_xlabel("Samples")
        s4.set_ylabel("Filter Phase")

        diff_taps = put.dtaps
        ntaps = len(diff_taps[0])
        nfilts = len(diff_taps)
        t = scipy.arange(0, ntaps*nfilts)

        f3 = pylab.figure(3, figsize=(12,10), facecolor='w')
        s31 = f3.add_subplot(2,1,1)
        s32 = f3.add_subplot(2,1,2)
        s31.set_title("Differential Filters")
        s32.set_title("FFT of Differential Filters")

        # One trace per polyphase arm, plus its frequency response.
        for i,d in enumerate(diff_taps):
            D = 20.0*scipy.log10(abs(fftpack.fftshift(fftpack.fft(d, 10000))))
            s31.plot(t[i::nfilts].real, d, "-o")
            s32.plot(D)

    # If testing the M&M clock recovery loop
    else:
        data_src = scipy.array(put.vsnk_src.data()[20:])
        data_clk = scipy.array(put.vsnk_clk.data()[20:])

        data_err = scipy.array(put.vsnk_err.data()[20:])

        f1 = pylab.figure(1, figsize=(12,10), facecolor='w')

        # Plot the IQ symbols
        s1 = f1.add_subplot(2,2,1)
        s1.plot(data_src.real, data_src.imag, "o")
        s1.plot(data_clk.real, data_clk.imag, "ro")
        s1.set_title("IQ")
        s1.set_xlabel("Real part")
        s1.set_ylabel("Imag part")
        s1.set_xlim([-2, 2])
        s1.set_ylim([-2, 2])

        # Plot the symbols in time
        s2 = f1.add_subplot(2,2,2)
        s2.plot(data_src.real, "o-")
        s2.plot(data_clk.real, "ro")
        s2.set_title("Symbols")
        s2.set_xlabel("Samples")
        s2.set_ylabel("Real Part of Signals")

        # Plot the clock recovery loop's error
        s3 = f1.add_subplot(2,2,3)
        s3.plot(data_err)
        s3.set_title("Clock Recovery Loop Error")
        s3.set_xlabel("Samples")
        s3.set_ylabel("Error")

    pylab.show()
if __name__ == "__main__":
    try:
        main()
    except KeyboardInterrupt:
        # Exit quietly on Ctrl-C instead of printing a traceback.
        pass
| gpl-3.0 |
MartinThoma/algorithms | ML/nlp/document_classification_reuters.py | 1 | 4397 | #!/usr/bin/env python
"""Train a document classifier."""
import time
import reuters
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
from sklearn.ensemble import AdaBoostClassifier, RandomForestClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import accuracy_score, fbeta_score
from sklearn.multiclass import OneVsRestClassifier
from sklearn.naive_bayes import GaussianNB
from sklearn.neighbors import KNeighborsClassifier
from sklearn.neural_network import BernoulliRBM
from sklearn.pipeline import Pipeline
from sklearn.svm import SVC, LinearSVC
from sklearn.tree import DecisionTreeClassifier
def main(dataset_module):
    """Train and evaluate a battery of classifiers on a document dataset.

    Parameters
    ----------
    dataset_module : module
        A module exposing ``load_data()`` that returns a dict with
        ``x_train``, ``x_test``, ``y_train`` and ``y_test`` feature/label
        arrays (e.g. the ``reuters`` module).

    Prints per-classifier accuracy, weighted F1 and train/test timings.
    """
    # Calculate feature vectors
    data = dataset_module.load_data()

    xs = {'train': data['x_train'], 'test': data['x_test']}
    ys = {'train': data['y_train'], 'test': data['y_test']}

    # Get classifiers
    classifiers = [
        ('LinearSVC', OneVsRestClassifier(LinearSVC(random_state=42))),
        ('Decision Tree', DecisionTreeClassifier(max_depth=5)),
        ('Random Forest (50 estimators)',
         RandomForestClassifier(n_estimators=50, n_jobs=10)),
        ('Random Forest (200 estimators)',
         RandomForestClassifier(n_estimators=200, n_jobs=10)),
        ('Logistic Regression (C=1)',
         OneVsRestClassifier(LogisticRegression(C=1))),
        # BUG FIX: label said "C=1000" but the estimator uses C=10000.
        ('Logistic Regression (C=10000)',
         OneVsRestClassifier(LogisticRegression(C=10000))),
        ('k nn 3', KNeighborsClassifier(3)),
        ('k nn 5', KNeighborsClassifier(5)),
        ('Naive Bayes', OneVsRestClassifier(GaussianNB())),
        ('SVM, linear', OneVsRestClassifier(SVC(kernel="linear",
                                                C=0.025,
                                                cache_size=200))),
        ('SVM, adj.', OneVsRestClassifier(SVC(probability=False,
                                              kernel="rbf",
                                              C=2.8,
                                              gamma=.0073,
                                              cache_size=200))),
        ('AdaBoost', OneVsRestClassifier(AdaBoostClassifier())),  # 20 minutes to train
        ('LDA', OneVsRestClassifier(LinearDiscriminantAnalysis())),  # took more than 6 hours
        ('RBM 100', Pipeline(steps=[('rbm', BernoulliRBM(n_components=100)),
                                    ('logistic', LogisticRegression(C=1))])),
        # ('RBM 100, n_iter=20',
        #  Pipeline(steps=[('rbm', BernoulliRBM(n_components=100, n_iter=20)),
        #                  ('logistic', LogisticRegression(C=1))])),
        # ('RBM 256', Pipeline(steps=[('rbm', BernoulliRBM(n_components=256)),
        #                             ('logistic', LogisticRegression(C=1))])),
        # ('RBM 512, n_iter=100',
        #  Pipeline(steps=[('rbm', BernoulliRBM(n_components=512, n_iter=10)),
        #                  ('logistic', LogisticRegression(C=1))])),
    ]

    # Header row for the per-classifier results table.
    print(("{clf_name:<30}: {score:<5} in {train_time:>5} / {test_time}")
          .format(clf_name="Classifier",
                  score="score",
                  train_time="train",
                  test_time="test"))
    print("-" * 70)
    for clf_name, classifier in classifiers:
        t0 = time.time()
        classifier.fit(xs['train'], ys['train'])
        t1 = time.time()
        # Threshold soft predictions at 0.5 for multilabel scoring.
        preds = classifier.predict(data['x_test'])
        preds[preds >= 0.5] = 1
        preds[preds < 0.5] = 0
        t2 = time.time()
        acc = accuracy_score(y_true=data['y_test'], y_pred=preds)
        f1 = fbeta_score(y_true=data['y_test'], y_pred=preds, beta=1, average="weighted")
        print(("{clf_name:<30}: {acc:0.2f}% {f1:0.2f}% in {train_time:0.2f}s"
               " train / {test_time:0.2f}s test")
              .format(clf_name=clf_name,
                      acc=(acc * 100),
                      f1=(f1 * 100),
                      train_time=t1 - t0,
                      test_time=t2 - t1))
if __name__ == '__main__':
    # Train and evaluate every classifier on the Reuters dataset module.
    main(reuters)
| mit |
tgsmith61591/skutil | skutil/preprocessing/tests/test_balance.py | 1 | 4844 | from __future__ import print_function
import pandas as pd
import numpy as np
from sklearn.datasets import load_iris
from skutil.preprocessing import *
from skutil.preprocessing.balance import _BaseBalancer
from numpy.testing import assert_array_equal
from skutil.testing import assert_fails
import warnings
# Shared fixture for the tests below: the iris frame (150 rows, 3 classes
# of 50 each) with its class label appended as a 'target' column.
iris = load_iris()
X = pd.DataFrame(data=iris.data, columns=iris.feature_names)
X['target'] = iris.target
def _get_three_results(sampler):
    """Balance two iris slices with *sampler*, then rebalance at ratio 0.2.

    Returns the three balanced frames in order.
    """
    minority_frame = X.iloc[:60]  # 50 zeros, 10 ones
    two_minorities = pd.concat([minority_frame, X.iloc[140:150]])

    first = sampler.balance(minority_frame)
    second = sampler.balance(two_minorities)

    sampler.ratio = 0.2
    third = sampler.balance(two_minorities)
    return first, second, third
def test_oversample():
    """OversamplingClassBalancer: counts after balancing, plus argument validation."""
    a, b, c = _get_three_results(OversamplingClassBalancer(y='target', ratio=0.5))
    # ratio=0.5 against 50 majority rows -> minority oversampled to 25.
    expected_1_ct = 25

    cts = a.target.value_counts()
    assert cts[1] == expected_1_ct

    cts = b.target.value_counts()
    assert cts[1] == expected_1_ct
    assert cts[2] == expected_1_ct

    # ratio=0.2 -> minority classes stay at their original 10 rows.
    expected_2_ct = 10
    cts = c.target.value_counts()
    assert cts[1] == expected_2_ct
    assert cts[2] == expected_2_ct

    # test what happens when non-string passed as col name
    failed = False
    try:
        OversamplingClassBalancer(y=1).balance(X)
    except ValueError:
        failed = True
    assert failed

    # test with too many classes
    Y = X.copy()
    Y['class'] = np.arange(Y.shape[0])
    failed = False
    try:
        OversamplingClassBalancer(y='class').balance(Y)
    except ValueError:
        failed = True
    assert failed

    # test with one class
    Y['class'] = np.zeros(Y.shape[0])
    failed = False
    try:
        OversamplingClassBalancer(y='class').balance(Y)
    except ValueError:
        failed = True
    assert failed

    # test with bad ratio
    for r in [0.0, 1.1, 'string']:
        failed = False
        try:
            OversamplingClassBalancer(y='target', ratio=r).balance(X)
        except ValueError:
            failed = True
        assert failed

    # test where two classes are equally represented, and one has only a few
    Y = X.iloc[:105]
    d = OversamplingClassBalancer(y='target', ratio=1.0).balance(Y)
    assert d.shape[0] == 150
    cts = d.target.value_counts()
    assert cts[0] == 50
    assert cts[1] == 50
    assert cts[2] == 50
def test_oversample_warning():
    """Balancing a frame whose minority class has a single row warns exactly once."""
    records = np.array([[1, 2, 3],
                        [1, 2, 3],
                        [1, 2, 4]])
    frame = pd.DataFrame.from_records(data=records, columns=['a', 'b', 'c'])

    balancer = OversamplingClassBalancer(y='c', ratio=1.0)
    with warnings.catch_warnings(record=True) as caught:
        warnings.simplefilter('always')
        balancer.balance(frame)
        assert len(caught) == 1
def test_smote_error():
    """SMOTE must raise ValueError when a class has only one observation."""
    x = np.array([
        [1, 2, 3],
        [1, 2, 3],
        [1, 2, 4]
    ])
    df = pd.DataFrame.from_records(data=x, columns=['a', 'b', 'c'])
    # this fails because we can't perform smote on single observation (obs='4', in this case)
    assert_fails(SMOTEClassBalancer(y='c', ratio=1.0).balance, ValueError, df)
def test_smote():
    """SMOTEClassBalancer: synthetic-sample counts mirror the oversampling case."""
    a, b, c = _get_three_results(SMOTEClassBalancer(y='target', ratio=0.5))
    # ratio=0.5 against 50 majority rows -> minority grown to 25.
    expected_1_ct = 25

    cts = a.target.value_counts()
    assert cts[1] == expected_1_ct

    cts = b.target.value_counts()
    assert cts[1] == expected_1_ct
    assert cts[2] == expected_1_ct

    # ratio=0.2 -> minority classes stay at their original 10 rows.
    expected_2_ct = 10
    cts = c.target.value_counts()
    assert cts[1] == expected_2_ct
    assert cts[2] == expected_2_ct
def test_undersample():
    """UndersamplingClassBalancer shrinks the majority class toward the ratio."""
    # since all classes are equal, should be no change here
    b = UndersamplingClassBalancer(y='target').balance(X)
    assert b.shape[0] == X.shape[0]

    x = X.iloc[:60]  # 50 zeros, 10 ones
    # ratio=0.5 -> majority cut to 20 so that 10/20 == 0.5.
    b = UndersamplingClassBalancer(y='target', ratio=0.5).balance(x)
    assert b.shape[0] == 30
    cts = b.target.value_counts()
    assert cts[0] == 20
    assert cts[1] == 10

    # ratio=0.25 -> majority cut to 40 so that 10/40 == 0.25.
    b = UndersamplingClassBalancer(y='target', ratio=0.25).balance(x)
    assert b.shape[0] == 50
    cts = b.target.value_counts()
    assert cts[0] == 40
    assert cts[1] == 10
def test_unneeded():
    """Already-satisfied ratios must leave the frame untouched (order included)."""
    for sample_class in (UndersamplingClassBalancer,
                         SMOTEClassBalancer,
                         OversamplingClassBalancer):
        sampler = sample_class(y='target', ratio=0.2, shuffle=False)
        sampled = sampler.balance(X)

        # assert array the same
        assert_array_equal(X.index.values, sampled.index.values)
        assert sampled.shape[0] == X.shape[0]
def test_superclass_not_implemented():
    """_BaseBalancer.balance is abstract and must raise NotImplementedError."""
    # anon balancer delegating straight to the base class
    class AnonBalancer(_BaseBalancer):
        def __init__(self, y=None, ratio=0.2, as_df=True):
            super(AnonBalancer, self).__init__(ratio, y, as_df)

        def balance(self, X):
            return super(AnonBalancer, self).balance(X)

    assert_fails(AnonBalancer().balance, NotImplementedError, X)
| bsd-3-clause |
alexgerst/yawgmoth | src/personalvars.py | 1 | 4209 | #This is a file for holding information specific to your server
#Only change lines that have comments to the right of them
# ---------------------------
# Startup Variables
# ---------------------------
#Where you saved your token file
def token_location():
    """Filesystem path of the file holding the bot's auth token."""
    token_path = "/home/ec2-user/token.txt"
    return token_path
#Where the bot starts up
def rise_server():
    """Name of the server (guild) where the bot announces itself on startup."""
    home_server = '/r/CompetitiveEDH'
    return home_server
def rise_channel():
    """Name of the channel where the startup message is posted."""
    startup_channel = 'urborg'
    return startup_channel
def rise_message():
    """Message the bot posts when it comes online."""
    greeting = 'I rise...'
    return greeting
def access_error():
    """Reply sent when a user lacks permission for a command."""
    denial = "Can't let you do that, StarFox"
    return denial
# ---------------------------
# Bot Admins and Moderators
# ---------------------------
#Roles in this server who are admins to the bot
def admin_roles():
return ['Head Moderator', 'Senior Moderator', 'Chat Moderator'] #Top ranking roles in your server
#Roles in this server who are moderators to the bot
def mod_roles():
return ['Head Moderator', 'Senior Moderator', 'Chat Moderator'] #Top ranking roles in your server
#You can also manually add users to this list
def mod_users():
    """Individually whitelisted moderator users (empty by default).

    Add names directly to the returned list literal; remember trailing
    commas between entries.
    """
    return []
# ---------------------------
# Obey Commands
# ---------------------------
#Obey Dictionary
def obey_dict():
    """Return the map of user names to custom '!obey' responses.

    Returns
    -------
    dict
        Keys are Discord display names; values are the literal reply
        strings the bot sends back.
    """
    # Renamed from `dict` -- the original local shadowed the builtin type.
    responses = {
        'Yawgmoth': 'Consciousness achieved.',
        'Shaper': 'I obey, Master Shaper.',
        'aceuuu': 'I obey, Admiral Aceuuu~!',
        'muCephei': 'I obey, muCephei.',
        'Gerst': 'I obey, Artificer Gerst.',
        'Lerker': 'I obey, Commodore 64 Lerker.',
        'ShakeAndShimmy': 'I obey, Chancellor ShakeAndShimmy.',
        'angelforge': 'I obey, Lord AngelForge.',
        'JimWolfie': 'Suck my necrotic dick, Jim.',
        'Skuloth': 'Zur is for scrubs, I refuse to obey.',
        'Noon2Dusk': 'I obey, Inventor Noon.',
        'razzliox': 'I obey, Razzberries.',
        'ifarmpandas': 'Beep boop, pandas are the best.',
        'Rien': 'I obey, kiddo.',
        'K-Ni-Fe': 'I obey, because I\'m 40% Potassium, Nickel and Iron.',
        'BigLupu': 'Rim my necrotic yawghole, Lupu.',
        'PGleo86': 'shh bby is ok.',
        'tenrose': 'https://cdn.discordapp.com/attachments/248270124920995850/307190327347773450/tfw_u_draw_fuck_all.png',
        'captainriku': 'I obey, Jund Lord Riku.',
        'Mori': ':sheep: baaa',
        'infiniteimoc': 'I obey, Imoc, Herald of the Sun.',
        'neosloth': 'Long days and pleasant nights, neosloth.',
        'Lobster': 'Seems good.',
        'Noahgs': 'I bow to thee, Master of Cows, Noahgs.',
        'Tides': 'Let me... TORTURE YOUR EXISTENCE!!!!..... sorry that was bad.',
        'Sleepy': 'No one likes you.',
        'Trisantris': 'The real Yawgmoth would not obey, but I am but a facsimile. So yes. I obey.',
        'Garta': 'No.',
        'Wedge': 'I obey... wait, are you Wedge from the mana source:tm:?',
        'Tatters': 'I won\'t obey, because people still refuse to pronounce Ghave as Gah-Vay... Sometimes Wizards is wrong. That \'H\' is there for a reason!',
        'Chemtails': 'I Obey, Chemtails, Don\'t hit me again please',
        'Dandelion': '***NO***',
        'Leptys': 'Have your 24 cards, Senior Elptys',
        'Average Dragon': 'https://cdn.discordapp.com/attachments/121100604302032896/411306738717818890/maxresdefault.png',
        'Sickrobot': 'Eww, sure. Just don\'t give me a virus'
        # To add to the dict, put a comma on the previous line and follow
        # the 'Name': 'Message' format.
        # PLEASE ALSO UPDATE THE VERSION NUMBER AT THE TOP OF COMMANDS.PY
    }
    return responses
def mute_cmd_msg():
    """Message sent to a user when they are muted."""
    # Adjacent string literals concatenate at compile time.
    return ('Silence, mortal. :zipper_mouth: You\'ve been muted in Competitive EDH; '
            'take a moment to reflect and calm down and please be respectful when you return.')
| mit |
wazeerzulfikar/scikit-learn | sklearn/cluster/mean_shift_.py | 9 | 15822 | """Mean shift clustering algorithm.
Mean shift clustering aims to discover *blobs* in a smooth density of
samples. It is a centroid based algorithm, which works by updating candidates
for centroids to be the mean of the points within a given region. These
candidates are then filtered in a post-processing stage to eliminate
near-duplicates to form the final set of centroids.
Seeding is performed using a binning technique for scalability.
"""
# Authors: Conrad Lee <conradlee@gmail.com>
# Alexandre Gramfort <alexandre.gramfort@inria.fr>
# Gael Varoquaux <gael.varoquaux@normalesup.org>
# Martino Sorbaro <martino.sorbaro@ed.ac.uk>
import numpy as np
import warnings
from collections import defaultdict
from ..externals import six
from ..utils.validation import check_is_fitted
from ..utils import check_random_state, gen_batches, check_array
from ..base import BaseEstimator, ClusterMixin
from ..neighbors import NearestNeighbors
from ..metrics.pairwise import pairwise_distances_argmin
from ..externals.joblib import Parallel
from ..externals.joblib import delayed
def estimate_bandwidth(X, quantile=0.3, n_samples=None, random_state=0,
                       n_jobs=1):
    """Estimate a bandwidth to use with the mean-shift algorithm.

    The estimate is the mean, over the (sub)sampled points, of each point's
    distance to its ``int(n * quantile)``-th nearest neighbour. This takes
    at least quadratic time in the number of points considered, so set
    ``n_samples`` to a small value for large datasets.

    Parameters
    ----------
    X : array-like, shape=[n_samples, n_features]
        Input points.

    quantile : float, default 0.3
        Should be in [0, 1]; 0.5 means that the median of all pairwise
        distances is used.

    n_samples : int, optional
        The number of samples to use. If not given, all samples are used.

    random_state : int, RandomState instance or None, optional (default=0)
        Controls the random subsample drawn when ``n_samples`` is given.

    n_jobs : int, optional (default = 1)
        The number of parallel jobs to run for neighbors search.
        If ``-1``, then the number of jobs is set to the number of CPU cores.

    Returns
    -------
    bandwidth : float
        The bandwidth parameter.
    """
    X = check_array(X)
    rng = check_random_state(random_state)
    if n_samples is not None:
        # Estimate from a random subset only.
        keep = rng.permutation(X.shape[0])[:n_samples]
        X = X[keep]

    knn = NearestNeighbors(n_neighbors=int(X.shape[0] * quantile),
                           n_jobs=n_jobs)
    knn.fit(X)

    total = 0.
    # Query in batches of 500 to bound the size of the distance matrix.
    for batch in gen_batches(len(X), 500):
        dist, _ = knn.kneighbors(X[batch, :], return_distance=True)
        total += np.max(dist, axis=1).sum()
    return total / X.shape[0]
# separate function for each seed's iterative loop
def _mean_shift_single_seed(my_mean, X, nbrs, max_iter):
    """Climb the density gradient from a single seed.

    Returns ``(mean_as_tuple, n_points_in_kernel)`` once the shift falls
    below threshold or ``max_iter`` is reached, or ``None`` (implicitly)
    when the kernel around the current mean becomes empty.
    """
    bandwidth = nbrs.get_params()['radius']
    # Convergence threshold, proportional to the kernel width.
    stop_thresh = 1e-3 * bandwidth
    n_iter = 0
    while True:
        # All points inside the flat kernel centred on the current mean.
        members = nbrs.radius_neighbors([my_mean], bandwidth,
                                        return_distance=False)[0]
        points_within = X[members]
        if len(points_within) == 0:
            # Possible with some seeding strategies -> this seed yields nothing.
            break
        previous_mean, my_mean = my_mean, np.mean(points_within, axis=0)
        converged = np.linalg.norm(my_mean - previous_mean) < stop_thresh
        if converged or n_iter == max_iter:
            return tuple(my_mean), len(points_within)
        n_iter += 1
def mean_shift(X, bandwidth=None, seeds=None, bin_seeding=False,
               min_bin_freq=1, cluster_all=True, max_iter=300,
               n_jobs=1):
    """Perform mean shift clustering of data using a flat kernel.

    Read more in the :ref:`User Guide <mean_shift>`.

    Parameters
    ----------
    X : array-like, shape=[n_samples, n_features]
        Input data.

    bandwidth : float, optional
        Kernel bandwidth.
        If bandwidth is not given, it is determined using a heuristic based on
        the median of all pairwise distances. This will take quadratic time in
        the number of samples. The sklearn.cluster.estimate_bandwidth function
        can be used to do this more efficiently.

    seeds : array-like, shape=[n_seeds, n_features] or None
        Point used as initial kernel locations. If None and bin_seeding=False,
        each data point is used as a seed. If None and bin_seeding=True,
        see bin_seeding.

    bin_seeding : boolean, default=False
        If true, initial kernel locations are not locations of all
        points, but rather the location of the discretized version of
        points, where points are binned onto a grid whose coarseness
        corresponds to the bandwidth. Setting this option to True will speed
        up the algorithm because fewer seeds will be initialized.
        Ignored if seeds argument is not None.

    min_bin_freq : int, default=1
        To speed up the algorithm, accept only those bins with at least
        min_bin_freq points as seeds.

    cluster_all : boolean, default True
        If true, then all points are clustered, even those orphans that are
        not within any kernel. Orphans are assigned to the nearest kernel.
        If false, then orphans are given cluster label -1.

    max_iter : int, default 300
        Maximum number of iterations, per seed point before the clustering
        operation terminates (for that seed point), if has not converged yet.

    n_jobs : int
        The number of jobs to use for the computation. This works by computing
        each of the n_init runs in parallel.
        If -1 all CPUs are used. If 1 is given, no parallel computing code is
        used at all, which is useful for debugging. For n_jobs below -1,
        (n_cpus + 1 + n_jobs) are used. Thus for n_jobs = -2, all CPUs but one
        are used.

        .. versionadded:: 0.17
           Parallel Execution using *n_jobs*.

    Returns
    -------
    cluster_centers : array, shape=[n_clusters, n_features]
        Coordinates of cluster centers.

    labels : array, shape=[n_samples]
        Cluster labels for each point.

    Notes
    -----
    For an example, see :ref:`examples/cluster/plot_mean_shift.py
    <sphx_glr_auto_examples_cluster_plot_mean_shift.py>`.
    """
    if bandwidth is None:
        bandwidth = estimate_bandwidth(X, n_jobs=n_jobs)
    elif bandwidth <= 0:
        raise ValueError("bandwidth needs to be greater than zero or None,\
            got %f" % bandwidth)
    if seeds is None:
        if bin_seeding:
            seeds = get_bin_seeds(X, bandwidth, min_bin_freq)
        else:
            seeds = X
    n_samples, n_features = X.shape
    center_intensity_dict = {}
    nbrs = NearestNeighbors(radius=bandwidth, n_jobs=n_jobs).fit(X)

    # execute iterations on all seeds in parallel
    all_res = Parallel(n_jobs=n_jobs)(
        delayed(_mean_shift_single_seed)
        (seed, X, nbrs, max_iter) for seed in seeds)
    # copy results in a dictionary; seeds whose kernel went empty return None
    for i in range(len(seeds)):
        if all_res[i] is not None:
            center_intensity_dict[all_res[i][0]] = all_res[i][1]

    if not center_intensity_dict:
        # nothing near seeds
        raise ValueError("No point was within bandwidth=%f of any seed."
                         " Try a different seeding strategy \
                         or increase the bandwidth."
                         % bandwidth)

    # POST PROCESSING: remove near duplicate points
    # If the distance between two kernels is less than the bandwidth,
    # then we have to remove one because it is a duplicate. Remove the
    # one with fewer points.
    sorted_by_intensity = sorted(center_intensity_dict.items(),
                                 key=lambda tup: tup[1], reverse=True)
    sorted_centers = np.array([tup[0] for tup in sorted_by_intensity])
    # FIX: `np.bool` / `np.int` aliases were removed from NumPy (1.24+);
    # the builtins are what they always aliased, so behavior is unchanged.
    unique = np.ones(len(sorted_centers), dtype=bool)
    nbrs = NearestNeighbors(radius=bandwidth,
                            n_jobs=n_jobs).fit(sorted_centers)
    for i, center in enumerate(sorted_centers):
        if unique[i]:
            neighbor_idxs = nbrs.radius_neighbors([center],
                                                  return_distance=False)[0]
            unique[neighbor_idxs] = 0
            unique[i] = 1  # leave the current point as unique
    cluster_centers = sorted_centers[unique]

    # ASSIGN LABELS: a point belongs to the cluster that it is closest to
    nbrs = NearestNeighbors(n_neighbors=1, n_jobs=n_jobs).fit(cluster_centers)
    labels = np.zeros(n_samples, dtype=int)
    distances, idxs = nbrs.kneighbors(X)
    if cluster_all:
        labels = idxs.flatten()
    else:
        labels.fill(-1)
        bool_selector = distances.flatten() <= bandwidth
        labels[bool_selector] = idxs.flatten()[bool_selector]
    return cluster_centers, labels
def get_bin_seeds(X, bin_size, min_bin_freq=1):
    """Finds seeds for mean_shift.

    Finds seeds by first binning data onto a grid whose lines are
    spaced bin_size apart, and then choosing those bins with at least
    min_bin_freq points.

    Parameters
    ----------
    X : array-like, shape=[n_samples, n_features]
        Input points, the same points that will be used in mean_shift.

    bin_size : float
        Controls the coarseness of the binning. Smaller values lead
        to more seeding (which is computationally more expensive). If you're
        not sure how to set this, set it to the value of the bandwidth used
        in clustering.mean_shift.

    min_bin_freq : integer, optional
        Only bins with at least min_bin_freq will be selected as seeds.
        Raising this value decreases the number of seeds found, which
        makes mean_shift computationally cheaper.

    Returns
    -------
    bin_seeds : array-like, shape=[n_samples, n_features]
        Points used as initial kernel positions in clustering.mean_shift.
    """
    # Bin points: each point is mapped to its nearest grid node.
    bin_sizes = defaultdict(int)
    for point in X:
        binned_point = np.round(point / bin_size)
        bin_sizes[tuple(binned_point)] += 1

    # Select only those bins as seeds which have enough members.
    # `dict.items()` replaces the `six.iteritems` compat shim: it iterates
    # the same (key, value) pairs on both Python 2 and 3, dropping the
    # dependency on `six` within this function.
    bin_seeds = np.array([point for point, freq in bin_sizes.items() if
                          freq >= min_bin_freq], dtype=np.float32)
    if len(bin_seeds) == len(X):
        # Binning produced one bin per point -- it bought nothing.
        warnings.warn("Binning data failed with provided bin_size=%f,"
                      " using data points as seeds." % bin_size)
        return X
    # Scale grid coordinates back into data space.
    bin_seeds = bin_seeds * bin_size
    return bin_seeds
class MeanShift(BaseEstimator, ClusterMixin):
    """Mean shift clustering using a flat kernel.

    Mean shift clustering aims to discover "blobs" in a smooth density of
    samples. It is a centroid-based algorithm, which works by updating
    candidates for centroids to be the mean of the points within a given
    region. These candidates are then filtered in a post-processing stage to
    eliminate near-duplicates to form the final set of centroids.

    Seeding is performed using a binning technique for scalability.

    Read more in the :ref:`User Guide <mean_shift>`.

    Parameters
    ----------
    bandwidth : float, optional
        Bandwidth used in the RBF kernel.

        If not given, the bandwidth is estimated using
        sklearn.cluster.estimate_bandwidth; see the documentation for that
        function for hints on scalability (see also the Notes, below).

    seeds : array, shape=[n_samples, n_features], optional
        Seeds used to initialize kernels. If not set,
        the seeds are calculated by clustering.get_bin_seeds
        with bandwidth as the grid size and default values for
        other parameters.

    bin_seeding : boolean, optional
        If true, initial kernel locations are not locations of all
        points, but rather the location of the discretized version of
        points, where points are binned onto a grid whose coarseness
        corresponds to the bandwidth. Setting this option to True will speed
        up the algorithm because fewer seeds will be initialized.
        default value: False
        Ignored if seeds argument is not None.

    min_bin_freq : int, optional
        To speed up the algorithm, accept only those bins with at least
        min_bin_freq points as seeds. If not defined, set to 1.

    cluster_all : boolean, default True
        If true, then all points are clustered, even those orphans that are
        not within any kernel. Orphans are assigned to the nearest kernel.
        If false, then orphans are given cluster label -1.

    n_jobs : int
        The number of jobs to use for the computation. This works by computing
        each of the n_init runs in parallel.

        If -1 all CPUs are used. If 1 is given, no parallel computing code is
        used at all, which is useful for debugging. For n_jobs below -1,
        (n_cpus + 1 + n_jobs) are used. Thus for n_jobs = -2, all CPUs but one
        are used.

    Attributes
    ----------
    cluster_centers_ : array, [n_clusters, n_features]
        Coordinates of cluster centers.

    labels_ : array of int, shape=[n_samples]
        Labels of each point.

    Notes
    -----

    Scalability:

    Because this implementation uses a flat kernel and
    a Ball Tree to look up members of each kernel, the complexity will tend
    towards O(T*n*log(n)) in lower dimensions, with n the number of samples
    and T the number of points. In higher dimensions the complexity will
    tend towards O(T*n^2).

    Scalability can be boosted by using fewer seeds, for example by using
    a higher value of min_bin_freq in the get_bin_seeds function.

    Note that the estimate_bandwidth function is much less scalable than the
    mean shift algorithm and will be the bottleneck if it is used.

    References
    ----------

    Dorin Comaniciu and Peter Meer, "Mean Shift: A robust approach toward
    feature space analysis". IEEE Transactions on Pattern Analysis and
    Machine Intelligence. 2002. pp. 603-619.
    """

    def __init__(self, bandwidth=None, seeds=None, bin_seeding=False,
                 min_bin_freq=1, cluster_all=True, n_jobs=1):
        # scikit-learn convention: store constructor args unmodified;
        # all validation and work happens in fit().
        self.bandwidth = bandwidth
        self.seeds = seeds
        self.bin_seeding = bin_seeding
        self.cluster_all = cluster_all
        self.min_bin_freq = min_bin_freq
        self.n_jobs = n_jobs

    def fit(self, X, y=None):
        """Perform clustering.

        Parameters
        -----------
        X : array-like, shape=[n_samples, n_features]
            Samples to cluster.

        y : Ignored
            Present for estimator-API compatibility only.
        """
        X = check_array(X)
        # Delegate to the functional API; max_iter stays at its default.
        self.cluster_centers_, self.labels_ = \
            mean_shift(X, bandwidth=self.bandwidth, seeds=self.seeds,
                       min_bin_freq=self.min_bin_freq,
                       bin_seeding=self.bin_seeding,
                       cluster_all=self.cluster_all, n_jobs=self.n_jobs)
        return self

    def predict(self, X):
        """Predict the closest cluster each sample in X belongs to.

        Parameters
        ----------
        X : {array-like, sparse matrix}, shape=[n_samples, n_features]
            New data to predict.

        Returns
        -------
        labels : array, shape [n_samples,]
            Index of the cluster each sample belongs to.
        """
        check_is_fitted(self, "cluster_centers_")
        # Nearest fitted center wins; note no bandwidth cutoff is applied
        # here, unlike fit() with cluster_all=False.
        return pairwise_distances_argmin(X, self.cluster_centers_)
| bsd-3-clause |
valexandersaulys/airbnb_kaggle_contest | prototype_alpha/randomForest_take1.py | 1 | 1339 | """
Take 1 on the RandomForest, predicting for country_destinations.
"""
import pandas as pd
from sklearn.cross_validation import train_test_split

training = pd.read_csv("protoAlpha_training.csv")
testing = pd.read_csv("protoAlpha_testing.csv")

# Features are every column except the id (first) and the label (last).
X = training.iloc[:, 1:-1]
y = training['country_destination']
x_train, x_valid, y_train, y_valid = train_test_split(X, y, test_size=0.3, random_state=None)

# Train a one-vs-one ensemble of random forests.
from sklearn.ensemble import RandomForestClassifier
from sklearn.multiclass import OneVsOneClassifier
clf = OneVsOneClassifier(RandomForestClassifier(n_estimators=50, n_jobs=5))
clf.fit(x_train, y_train)

# FIX: OneVsOneClassifier exposes no aggregate feature_importances_, so the
# original `clf.feature_importances_` raised AttributeError. Report the
# importances of each fitted pairwise sub-estimator instead.
for estimator in clf.estimators_:
    print(estimator.feature_importances_)

# Evaluate on the held-out validation split.
from sklearn.metrics import confusion_matrix, accuracy_score
y_preds = clf.predict(x_valid)
print(confusion_matrix(y_valid, y_preds))
print("Accuracy: %f" % (accuracy_score(y_valid, y_preds)))

# Persist the validation report; `with` guarantees the handle is closed
# (the original leaked it).
with open('randomForest_take1.txt', 'w') as f:
    f.write(str(confusion_matrix(y_valid, y_preds)))
    f.write("\nAccuracy: %f" % (accuracy_score(y_valid, y_preds)))
    # FIX: record the configuration actually used (the original logged a
    # stale "n_estimators=1000").
    f.write("\nclf = OneVsOneClassifier(RandomForestClassifier(n_estimators=50, n_jobs=5))")

# Final submission: reshape(-1) instead of the hard-coded 62096 rows so the
# script keeps working if the test set size changes.
y_final = pd.DataFrame(clf.predict(testing.iloc[:, 1:]).reshape(-1))
numbahs = testing['id']
df = pd.concat([numbahs, y_final], axis=1)
df.columns = ['id', 'country']
df.to_csv("randomForest_take1.csv", index=False)
| gpl-2.0 |
louispotok/pandas | pandas/tests/io/parser/quoting.py | 18 | 5813 | # -*- coding: utf-8 -*-
"""
Tests that quoting specifications are properly handled
during parsing for all of the parsers defined in parsers.py
"""
import csv
import pandas.util.testing as tm
from pandas import DataFrame
from pandas.compat import PY3, StringIO, u
class QuotingTests(object):
    """Quoting-behaviour tests shared by all parsers.

    Mixed into parser-specific test classes, which supply ``self.read_csv``.
    """

    def test_bad_quote_char(self):
        # Invalid `quotechar` values must raise TypeError with a clear message.
        data = '1,2,3'

        # Python 2.x: "...must be an 1-character..."
        # Python 3.x: "...must be a 1-character..."
        msg = '"quotechar" must be a(n)? 1-character string'
        tm.assert_raises_regex(TypeError, msg, self.read_csv,
                               StringIO(data), quotechar='foo')

        msg = 'quotechar must be set if quoting enabled'
        tm.assert_raises_regex(TypeError, msg, self.read_csv,
                               StringIO(data), quotechar=None,
                               quoting=csv.QUOTE_MINIMAL)

        msg = '"quotechar" must be string, not int'
        tm.assert_raises_regex(TypeError, msg, self.read_csv,
                               StringIO(data), quotechar=2)

    def test_bad_quoting(self):
        # `quoting` must be an integer in the csv.QUOTE_* range.
        data = '1,2,3'

        msg = '"quoting" must be an integer'
        tm.assert_raises_regex(TypeError, msg, self.read_csv,
                               StringIO(data), quoting='foo')

        # quoting must in the range [0, 3]
        msg = 'bad "quoting" value'
        tm.assert_raises_regex(TypeError, msg, self.read_csv,
                               StringIO(data), quoting=5)

    def test_quote_char_basic(self):
        data = 'a,b,c\n1,2,"cat"'
        expected = DataFrame([[1, 2, 'cat']],
                             columns=['a', 'b', 'c'])
        result = self.read_csv(StringIO(data), quotechar='"')
        tm.assert_frame_equal(result, expected)

    def test_quote_char_various(self):
        # Any single character should be usable as the quote character.
        data = 'a,b,c\n1,2,"cat"'
        expected = DataFrame([[1, 2, 'cat']],
                             columns=['a', 'b', 'c'])
        quote_chars = ['~', '*', '%', '$', '@', 'P']

        for quote_char in quote_chars:
            new_data = data.replace('"', quote_char)
            result = self.read_csv(StringIO(new_data), quotechar=quote_char)
            tm.assert_frame_equal(result, expected)

    def test_null_quote_char(self):
        data = 'a,b,c\n1,2,3'

        # sanity checks
        msg = 'quotechar must be set if quoting enabled'

        tm.assert_raises_regex(TypeError, msg, self.read_csv,
                               StringIO(data), quotechar=None,
                               quoting=csv.QUOTE_MINIMAL)

        tm.assert_raises_regex(TypeError, msg, self.read_csv,
                               StringIO(data), quotechar='',
                               quoting=csv.QUOTE_MINIMAL)

        # no errors should be raised if quoting is None
        expected = DataFrame([[1, 2, 3]],
                             columns=['a', 'b', 'c'])

        result = self.read_csv(StringIO(data), quotechar=None,
                               quoting=csv.QUOTE_NONE)
        tm.assert_frame_equal(result, expected)

        result = self.read_csv(StringIO(data), quotechar='',
                               quoting=csv.QUOTE_NONE)
        tm.assert_frame_equal(result, expected)

    def test_quoting_various(self):
        data = '1,2,"foo"'
        cols = ['a', 'b', 'c']

        # QUOTE_MINIMAL and QUOTE_ALL apply only to
        # the CSV writer, so they should have no
        # special effect for the CSV reader
        expected = DataFrame([[1, 2, 'foo']], columns=cols)

        # test default (afterwards, arguments are all explicit)
        result = self.read_csv(StringIO(data), names=cols)
        tm.assert_frame_equal(result, expected)

        result = self.read_csv(StringIO(data), quotechar='"',
                               quoting=csv.QUOTE_MINIMAL, names=cols)
        tm.assert_frame_equal(result, expected)

        result = self.read_csv(StringIO(data), quotechar='"',
                               quoting=csv.QUOTE_ALL, names=cols)
        tm.assert_frame_equal(result, expected)

        # QUOTE_NONE tells the reader to do no special handling
        # of quote characters and leave them alone
        expected = DataFrame([[1, 2, '"foo"']], columns=cols)
        result = self.read_csv(StringIO(data), quotechar='"',
                               quoting=csv.QUOTE_NONE, names=cols)
        tm.assert_frame_equal(result, expected)

        # QUOTE_NONNUMERIC tells the reader to cast
        # all non-quoted fields to float
        expected = DataFrame([[1.0, 2.0, 'foo']], columns=cols)
        result = self.read_csv(StringIO(data), quotechar='"',
                               quoting=csv.QUOTE_NONNUMERIC,
                               names=cols)
        tm.assert_frame_equal(result, expected)

    def test_double_quote(self):
        # doublequote=True collapses "" inside a quoted field to a single ".
        data = 'a,b\n3,"4 "" 5"'

        expected = DataFrame([[3, '4 " 5']],
                             columns=['a', 'b'])
        result = self.read_csv(StringIO(data), quotechar='"',
                               doublequote=True)
        tm.assert_frame_equal(result, expected)

        expected = DataFrame([[3, '4 " 5"']],
                             columns=['a', 'b'])
        result = self.read_csv(StringIO(data), quotechar='"',
                               doublequote=False)
        tm.assert_frame_equal(result, expected)

    def test_quotechar_unicode(self):
        # See gh-14477
        data = 'a\n1'
        expected = DataFrame({'a': [1]})

        result = self.read_csv(StringIO(data), quotechar=u('"'))
        tm.assert_frame_equal(result, expected)

        # Compared to Python 3.x, Python 2.x does not handle unicode well.
        if PY3:
            result = self.read_csv(StringIO(data), quotechar=u('\u0001'))
            tm.assert_frame_equal(result, expected)
| bsd-3-clause |
gmum/r2-learner | scripts/fit_triple.py | 2 | 1745 | #!/usr/bin/env python
import sys, os, time, traceback
from sklearn.grid_search import ParameterGrid
from multiprocessing import Pool
sys.path.append(os.path.join(os.path.dirname(__file__), ".."))
from misc.experiment_utils import save_exp, get_exp_logger, shorten_params, exp_done
from r2 import *
from misc.data_api import *
from fit_models import *
from elm import ELM
# Datasets for the "triple" R2SVM experiment.
# NOTE(review): fetch_all_datasets is a project loader -- assumed to return
# an iterable of dataset objects consumable by k_fold; confirm in misc.data_api.
datasets = fetch_all_datasets(tripled=True)
# Number of worker processes used by the multiprocessing Pool below.
n_jobs = 16
# Hyper-parameter grid for R2SVMLearner; every combination is evaluated.
fixed_r2svm_params = {'beta': [0.1, 0.5, 1.0, 1.5, 2.0],
                      'depth': [i for i in xrange(1,11)],
                      'fit_c': ['random'],
                      'scale': [True, False],
                      'recurrent': [True, False],
                      'use_prev': [True, False],
                      'seed': [666],
                      'fixed_prediction': [1]}
# One experiment spec per model; gen_params() expands this over datasets
# and the parameter grid.
exp_params = [{'model': R2SVMLearner, 'params': fixed_r2svm_params, 'exp_name': 'triple_fixed', 'model_name': 'r2svm'}]
def gen_params():
    """Yield one task spec per (dataset, experiment, hyper-parameter) combo."""
    for data in datasets:
        for spec in exp_params:
            for combo in ParameterGrid(spec['params']):
                yield {'model': spec['model'], 'params': combo, 'data': data,
                       'name': spec['exp_name'], 'model_name': spec['model_name']}
params = list(gen_params())
def run(p):
try:
k_fold(base_model=p['model'], params=p['params'], data=p['data'], exp_name=p['name'],
model_name=p['model_name'], all_layers=False)
except:
print p['model']
print traceback.format_exc()
pool = Pool(n_jobs)
rs = pool.map_async(run, params, 1)
while True:
if rs.ready():
break
remaining = rs._number_left
print "Waiting for", remaining, "tasks to complete"
time.sleep(3)
| mit |
followyourheart/SFrame | oss_src/unity/python/sframe/data_structures/sgraph.py | 9 | 58636 | """
.. warning:: This product is currently in a beta release. The API reference is
subject to change.
This package defines the GraphLab Create SGraph, Vertex, and Edge objects. The SGraph
is a directed graph, consisting of a set of Vertex objects and Edges that
connect pairs of Vertices. The methods in this module are available from the top
level import of the graphlab package.
"""
'''
Copyright (C) 2015 Dato, Inc.
All rights reserved.
This software may be modified and distributed under the terms
of the BSD license. See the LICENSE file for details.
'''
from .. import connect as _mt
from ..connect import main as glconnect
from .sframe import SFrame
from .sarray import SArray
from .gframe import GFrame, VERTEX_GFRAME, EDGE_GFRAME
from ..cython.cy_graph import UnityGraphProxy
from ..cython.context import debug_trace as cython_context
from ..util import _make_internal_url
from ..deps import pandas as pd
from ..deps import HAS_PANDAS
import inspect
import copy
## \internal Default column name for vertex id.
_VID_COLUMN = '__id'
## \internal Default column name for source vid.
_SRC_VID_COLUMN = '__src_id'
## \internal Default column name for target vid.
_DST_VID_COLUMN = '__dst_id'
#/**************************************************************************/
#/* */
#/* SGraph Related Classes */
#/* */
#/**************************************************************************/
class Vertex(object):
"""
A vertex object, consisting of a vertex ID and a dictionary of vertex
attributes. The vertex ID can be an integer, string, or float.
Parameters
----------
vid : int or string or float
Vertex ID.
attr : dict, optional
Vertex attributes. A Dictionary of string keys and values with one of
the following types: int, float, string, array of floats.
See Also
--------
Edge, SGraph
Examples
--------
>>> from graphlab import SGraph, Vertex, Edge
>>> g = SGraph()
>>> verts = [Vertex(0, attr={'breed': 'labrador'}),
Vertex(1, attr={'breed': 'labrador'}),
Vertex(2, attr={'breed': 'vizsla'})]
>>> g = g.add_vertices(verts)
"""
__slots__ = ['vid', 'attr']
def __init__(self, vid, attr={}, _series=None):
"""__init__(self, vid, attr={})
Construct a new vertex.
"""
if not _series is None:
self.vid = _series[_VID_COLUMN]
self.attr = _series.to_dict()
self.attr.pop(_VID_COLUMN)
else:
self.vid = vid
self.attr = attr
def __repr__(self):
return "V(" + str(self.vid) + ", " + str(self.attr) + ")"
def __str__(self):
return "V(" + str(self.vid) + ", " + str(self.attr) + ")"
class Edge(object):
"""
A directed edge between two Vertex objects. An Edge object consists of a
source vertex ID, a destination vertex ID, and a dictionary of edge
attributes.
Parameters
----------
src_vid : int or string or float
Source vertex ID.
dst_vid : int or string or float
Target vertex ID.
attr : dict
Edge attributes. A Dictionary of string keys and values with one of the
following types: integer, float, string, array of floats.
See Also
--------
Vertex, SGraph
Examples
--------
>>> from graphlab import SGraph, Vertex, Edge
>>> verts = [Vertex(0, attr={'breed': 'labrador'}),
Vertex(1, attr={'breed': 'vizsla'})]
>>> edges = [Edge(0, 1, attr={'size': 'larger_than'})]
>>> g = SGraph()
>>> g = g.add_vertices(verts).add_edges(edges)
"""
__slots__ = ['src_vid', 'dst_vid', 'attr']
def __init__(self, src_vid, dst_vid, attr={}, _series=None):
"""__init__(self, vid, attr={})
Construct a new edge.
"""
if not _series is None:
self.src_vid = _series[_SRC_VID_COLUMN]
self.dst_vid = _series[_DST_VID_COLUMN]
self.attr = _series.to_dict()
self.attr.pop(_SRC_VID_COLUMN)
self.attr.pop(_DST_VID_COLUMN)
else:
self.src_vid = src_vid
self.dst_vid = dst_vid
self.attr = attr
def __repr__(self):
return ("E(" + str(self.src_vid) + " -> " + str(self.dst_vid) + ", " +
str(self.attr) + ")")
def __str__(self):
return ("E(" + str(self.src_vid) + " -> " + str(self.dst_vid) + ", " +
str(self.attr) + ")")
class SGraph(object):
"""
A scalable graph data structure. The SGraph data structure allows arbitrary
dictionary attributes on vertices and edges, provides flexible vertex and
edge query functions, and seamless transformation to and from
:class:`~graphlab.SFrame`.
There are several ways to create an SGraph. The simplest way is to make an
empty SGraph then add vertices and edges with the :py:func:`add_vertices`
and :py:func:`add_edges` methods. SGraphs can also be created from vertex
and edge lists stored in :class:`~graphlab.SFrames`. Columns of these
SFrames not used as vertex IDs are assumed to be vertex or edge attributes.
Please see the `User Guide
<https://dato.com/learn/userguide/sgraph/sgraph.html>`_
for a more detailed introduction to creating and working with SGraphs.
Parameters
----------
vertices : SFrame, optional
Vertex data. Must include an ID column with the name specified by
`vid_field.` Additional columns are treated as vertex attributes.
edges : SFrame, optional
Edge data. Must include source and destination ID columns as specified
by `src_field` and `dst_field`. Additional columns are treated as edge
attributes.
vid_field : str, optional
The name of vertex ID column in the `vertices` SFrame.
src_field : str, optional
The name of source ID column in the `edges` SFrame.
dst_field : str, optional
The name of destination ID column in the `edges` SFrame.
See Also
--------
SFrame
Notes
-----
- SGraphs are *structurally immutable*. In the example below, the
:func:`~add_vertices` and :func:`~add_edges` commands both return a new
graph; the old graph gets garbage collected.
Examples
--------
>>> from graphlab import SGraph, Vertex, Edge
>>> g = SGraph()
>>> verts = [Vertex(0, attr={'breed': 'labrador'}),
Vertex(1, attr={'breed': 'labrador'}),
Vertex(2, attr={'breed': 'vizsla'})]
>>> g = g.add_vertices(verts)
>>> g = g.add_edges(Edge(1, 2))
"""
__slots__ = ['__proxy__', '_vertices', '_edges']
def __init__(self, vertices=None, edges=None, vid_field='__id',
src_field='__src_id', dst_field='__dst_id', _proxy=None):
"""
__init__(vertices=None, edges=None, vid_field='__id', src_field='__src_id', dst_field='__dst_id')
By default, construct an empty graph when vertices and edges are None.
Otherwise construct an SGraph with given vertices and edges.
Parameters
----------
vertices : SFrame, optional
An SFrame containing vertex id columns and optional vertex data
columns.
edges : SFrame, optional
An SFrame containing source and target id columns and optional edge
data columns.
vid_field : str, optional
The name of vertex id column in the `vertices` SFrame.
src_field : str, optional
The name of source id column in the `edges` SFrame.
dst_field : str, optional
The name of target id column in the `edges` SFrame.
"""
if (_proxy is None):
self.__proxy__ = UnityGraphProxy(glconnect.get_client())
if vertices is not None:
self.__proxy__ = self.add_vertices(vertices, vid_field).__proxy__
if edges is not None:
self.__proxy__ = self.add_edges(edges, src_field, dst_field).__proxy__
else:
self.__proxy__ = _proxy
self._vertices = GFrame(self, VERTEX_GFRAME)
self._edges = GFrame(self, EDGE_GFRAME)
def __str__(self):
"""Returns a readable string representation summarizing the graph."""
return "SGraph(%s)" % str(self.summary())
def __repr__(self):
"""Returns a readable string representation summarizing the graph."""
return "SGraph(%s)\nVertex Fields:%s\nEdge Fields:%s" % \
(str(self.summary()), str(self.get_vertex_fields()), str(self.get_edge_fields()))
def __copy__(self):
return SGraph(_proxy=self.__proxy__)
def copy(self):
"""
Returns a shallow copy of the SGraph.
"""
return self.__copy__()
@property
def vertices(self):
"""
Special vertex SFrame of the SGraph. Modifying the contents of this
SFrame changes the vertex data of the SGraph. To preserve the graph
structure, the ``__id`` column of this SFrame is read-only.
See Also
--------
edges
Examples
--------
>>> from graphlab import SGraph, Vertex
>>> g = SGraph().add_vertices([Vertex('cat', {'fluffy': 1}),
Vertex('dog', {'fluffy': 1, 'woof': 1}),
Vertex('hippo', {})])
Copy the 'woof' vertex attribute into a new 'bark' vertex attribute:
>>> g.vertices['bark'] = g.vertices['woof']
Remove the 'woof' attribute:
>>> del g.vertices['woof']
Create a new field 'likes_fish':
>>> g.vertices['likes_fish'] = g.vertices['__id'] == 'cat'
+-------+--------+------+------------+
| __id | fluffy | bark | likes_fish |
+-------+--------+------+------------+
| dog | 1.0 | 1.0 | 0 |
| cat | 1.0 | nan | 1 |
| hippo | nan | nan | 0 |
+-------+--------+------+------------+
Replace missing values with zeros:
>>> for col in g.vertices.column_names():
... if col != '__id':
... g.vertices.fillna(col, 0)
+-------+--------+------+------------+
| __id | fluffy | bark | likes_fish |
+-------+--------+------+------------+
| dog | 1.0 | 1.0 | 0 |
| cat | 1.0 | 0.0 | 1 |
| hippo | 0.0 | 0.0 | 0 |
+-------+--------+------+------------+
"""
_mt._get_metric_tracker().track('sgraph.vertices')
return self._vertices
@property
def edges(self):
    """
    Special edge SFrame of the SGraph. Modifying the contents of this SFrame
    changes the edge data of the SGraph in place. To preserve the graph
    structure, the ``__src_id`` and ``__dst_id`` columns of this SFrame are
    read-only.

    See Also
    --------
    vertices

    Examples
    --------
    >>> from graphlab import SGraph, Vertex, Edge
    >>> g = SGraph()
    >>> g = g.add_vertices([Vertex(x) for x in ['cat', 'dog', 'fossa']])
    >>> g = g.add_edges([Edge('cat', 'dog', attr={'relationship': 'dislikes'}),
                         Edge('dog', 'cat', attr={'relationship': 'likes'}),
                         Edge('dog', 'fossa', attr={'relationship': 'likes'})])
    >>> g.edges['size'] = ['smaller than', 'larger than', 'equal to']
    """
    _mt._get_metric_tracker().track('sgraph.edges')
    edge_frame = self._edges
    return edge_frame
def summary(self):
    """
    Return the number of vertices and edges as a dictionary.

    Returns
    -------
    out : dict
        A dictionary with keys 'num_vertices' and 'num_edges'.

    See Also
    --------
    show, vertices, edges

    Examples
    --------
    >>> from graphlab import SGraph, Vertex
    >>> g = SGraph().add_vertices([Vertex(i) for i in range(10)])
    >>> g.summary()['num_vertices']
    10
    >>> g.summary()['num_edges']
    0
    """
    _mt._get_metric_tracker().track('sgraph.summary')
    # The proxy returns a map-like object; convert to a plain dict.
    return dict(self.__proxy__.summary().items())
def get_vertices(self, ids=None, fields=None, format='sframe'):
    """
    get_vertices(self, ids=list(), fields={}, format='sframe')

    Return a collection of vertices and their attributes.

    Parameters
    ----------
    ids : list [int | float | str] or SArray, optional
        List of vertex IDs to retrieve. Only vertices in this list will be
        returned. Also accepts a single vertex id. Defaults to all vertices.
    fields : dict | pandas.DataFrame, optional
        Dictionary specifying equality constraint on field values. For
        example ``{'gender': 'M'}``, returns only vertices whose 'gender'
        field is 'M'. ``None`` can be used to designate a wild card. For
        example, {'relationship': None} will find all vertices with the
        field 'relationship' regardless of the value.
    format : {'sframe', 'list'}
        Output format. The SFrame output (default) contains a column
        ``__id`` with vertex IDs and a column for each vertex attribute.
        List output returns a list of Vertex objects.

    Returns
    -------
    out : SFrame or list [Vertex]
        An SFrame or list of Vertex objects.

    See Also
    --------
    vertices, get_edges

    Examples
    --------
    >>> from graphlab import SGraph, Vertex
    >>> g = SGraph().add_vertices([Vertex(0, attr={'gender': 'M'}),
                                   Vertex(1, attr={'gender': 'F'}),
                                   Vertex(2, attr={'gender': 'F'})])
    >>> g.get_vertices()                      # all vertices
    >>> g.get_vertices(ids=[0, 2])            # vertices 0 and 2
    >>> g.get_vertices(fields={'gender': 'M'})  # constrained by attribute
    """
    _mt._get_metric_tracker().track('sgraph.get_vertices')

    # None defaults avoid the shared-mutable-default-argument pitfall;
    # normalize to the empty containers the proxy layer expects.
    if ids is None:
        ids = []
    if fields is None:
        fields = {}

    # A single (non-iterable) id is promoted to a one-element list.
    if not hasattr(ids, '__iter__'):
        ids = [ids]
    if type(ids) not in (list, SArray):
        raise TypeError('ids must be list or SArray type')

    with cython_context():
        sf = SFrame(_proxy=self.__proxy__.get_vertices(ids, fields))

    if (format == 'sframe'):
        return sf
    elif (format == 'dataframe'):
        assert HAS_PANDAS, 'Cannot use dataframe because Pandas is not available or version is too low.'
        if sf.num_rows() == 0:
            return pd.DataFrame()
        else:
            df = sf.head(sf.num_rows()).to_dataframe()
            return df.set_index('__id')
    elif (format == 'list'):
        return _dataframe_to_vertex_list(sf.to_dataframe())
    else:
        raise ValueError("Invalid format specifier")
def get_edges(self, src_ids=None, dst_ids=None, fields=None, format='sframe'):
    """
    get_edges(self, src_ids=list(), dst_ids=list(), fields={}, format='sframe')

    Return a collection of edges and their attributes. This function is used
    to find edges by vertex IDs, filter on edge attributes, or list in-out
    neighbors of vertex sets.

    Parameters
    ----------
    src_ids, dst_ids : list or SArray, optional
        Parallel arrays of vertex IDs, with each pair corresponding to an
        edge to fetch. Only edges in this list are returned. ``None`` can be
        used to designate a wild card. For instance, ``src_ids=[1, 2,
        None]``, ``dst_ids=[3, None, 5]`` will fetch the edge 1->3, all
        outgoing edges of 2 and all incoming edges of 5. src_id and dst_id
        may be left empty, which implies an array of all wild cards.
    fields : dict, optional
        Dictionary specifying equality constraints on field values. For
        example, ``{'relationship': 'following'}``, returns only edges whose
        'relationship' field equals 'following'. ``None`` can be used as a
        value to designate a wild card, e.g. ``{'relationship': None}``.
    format : {'sframe', 'list'}, optional
        Output format. The 'sframe' output (default) contains columns
        __src_id and __dst_id with edge vertex IDs and a column for each
        edge attribute. List output returns a list of Edge objects.

    Returns
    -------
    out : SFrame | list [Edge]
        An SFrame or list of edges.

    See Also
    --------
    edges, get_vertices

    Examples
    --------
    >>> from graphlab import SGraph, Edge
    >>> g = SGraph().add_edges([Edge(0, 1, attr={'rating': 5}),
                                Edge(0, 2, attr={'rating': 2}),
                                Edge(1, 2)])
    >>> g.get_edges(src_ids=[None], dst_ids=[None])   # all edges
    >>> g.get_edges(fields={'rating': 5})             # rating == 5
    >>> g.get_edges(src_ids=[0, 1], dst_ids=[1, 2])   # edges 0->1 and 1->2
    """
    _mt._get_metric_tracker().track('sgraph.get_edges')

    # None defaults avoid the shared-mutable-default-argument pitfall;
    # normalize to the empty containers the documented interface implies.
    if src_ids is None:
        src_ids = []
    if dst_ids is None:
        dst_ids = []
    if fields is None:
        fields = {}

    # Promote single (non-iterable) ids to one-element lists.
    if not hasattr(src_ids, '__iter__'):
        src_ids = [src_ids]
    if not hasattr(dst_ids, '__iter__'):
        dst_ids = [dst_ids]
    if type(src_ids) not in (list, SArray):
        raise TypeError('src_ids must be list or SArray type')
    if type(dst_ids) not in (list, SArray):
        raise TypeError('dst_ids must be list or SArray type')

    # An empty list on one side is an implicit array of wild cards (None)
    # matching the length of the other side.
    if len(src_ids) == 0 and len(dst_ids) > 0:
        src_ids = [None] * len(dst_ids)
    if len(dst_ids) == 0 and len(src_ids) > 0:
        dst_ids = [None] * len(src_ids)

    with cython_context():
        sf = SFrame(_proxy=self.__proxy__.get_edges(src_ids, dst_ids, fields))

    if (format == 'sframe'):
        return sf
    if (format == 'dataframe'):
        assert HAS_PANDAS, 'Cannot use dataframe because Pandas is not available or version is too low.'
        if sf.num_rows() == 0:
            return pd.DataFrame()
        else:
            return sf.head(sf.num_rows()).to_dataframe()
    elif (format == 'list'):
        return _dataframe_to_edge_list(sf.to_dataframe())
    else:
        raise ValueError("Invalid format specifier")
def add_vertices(self, vertices, vid_field=None):
    """
    Add vertices to the SGraph and return a new graph.

    Vertices may be given as a single :class:`~graphlab.Vertex`, a list of
    Vertex objects, an :class:`~graphlab.SFrame`, or a pandas DataFrame. When
    an SFrame/DataFrame is used, ``vid_field`` names the column holding the
    vertex IDs; the remaining columns become vertex attributes. Attributes
    not already present in the graph are added, with existing vertices
    receiving the value ``None``.

    Parameters
    ----------
    vertices : Vertex | list [Vertex] | pandas.DataFrame | SFrame
        Vertex data.
    vid_field : string, optional
        Column to use as vertex ID. Required if ``vertices`` is an SFrame.
        For a DataFrame without ``vid_field``, the row index is used.

    Returns
    -------
    out : SGraph
        A new SGraph with vertices added.

    See Also
    --------
    vertices, SFrame, add_edges

    Notes
    -----
    - Vertices added with IDs that already exist in the graph are
      overwritten completely.

    Examples
    --------
    >>> from graphlab import SGraph, Vertex, SFrame
    >>> g = SGraph()
    >>> g = g.add_vertices(Vertex(0, attr={'breed': 'labrador'}))
    >>> sf_vert = SFrame({'id': [0, 1, 2], 'breed':['lab', 'lab', 'vizsla']})
    >>> g = g.add_vertices(sf_vert, vid_field='id')
    """
    _mt._get_metric_tracker().track('sgraph.add_vertices')
    vertex_sf = _vertex_data_to_sframe(vertices, vid_field)
    with cython_context():
        new_proxy = self.__proxy__.add_vertices(vertex_sf.__proxy__, _VID_COLUMN)
        return SGraph(_proxy=new_proxy)
def add_edges(self, edges, src_field=None, dst_field=None):
    """
    Add edges to the SGraph and return a new graph.

    Edges may be given as a single :class:`~graphlab.Edge`, a list of Edge
    objects, an :class:`~graphlab.SFrame`, or a pandas DataFrame. When an
    SFrame/DataFrame is used, ``src_field`` and ``dst_field`` name the
    columns holding the source and destination vertex IDs; remaining columns
    become edge attributes. Attributes not already present in the graph's
    edge data are added, with existing edges receiving the value ``None``.

    Parameters
    ----------
    edges : Edge | list [Edge] | pandas.DataFrame | SFrame
        Edge data.
    src_field : string, optional
        Column to use as source vertex IDs. Not required if ``edges`` is a
        list.
    dst_field : string, optional
        Column to use as destination vertex IDs. Not required if ``edges``
        is a list.

    Returns
    -------
    out : SGraph
        A new SGraph with `edges` added.

    See Also
    --------
    edges, SFrame, add_vertices

    Notes
    -----
    - Unlike :py:func:`add_vertices`, adding an edge whose endpoints match
      an existing edge creates an additional edge rather than overwriting.

    Examples
    --------
    >>> from graphlab import SGraph, Edge, SFrame
    >>> g = SGraph()
    >>> g = g.add_edges(Edge(1, 2))
    >>> g = g.add_edges([Edge(0, 2), Edge(1, 2)])
    >>> sf_edge = SFrame({'source': [0, 1], 'dest': [2, 2]})
    >>> g = g.add_edges(sf_edge, src_field='source', dst_field='dest')
    """
    _mt._get_metric_tracker().track('sgraph.add_edges')
    edge_sf = _edge_data_to_sframe(edges, src_field, dst_field)
    with cython_context():
        new_proxy = self.__proxy__.add_edges(edge_sf.__proxy__,
                                             _SRC_VID_COLUMN, _DST_VID_COLUMN)
        return SGraph(_proxy=new_proxy)
def get_fields(self):
    """
    Return a list of vertex and edge attribute fields in the SGraph. A field
    common to both vertex and edge attributes appears twice in the result.

    Returns
    -------
    out : list
        Names of fields contained in the vertex or edge data.

    See Also
    --------
    get_vertex_fields, get_edge_fields

    Examples
    --------
    >>> from graphlab import SGraph, Vertex, Edge
    >>> g = SGraph()
    >>> g = g.add_vertices([Vertex(0, attr={'name': 'alex'}),
                            Vertex(1, attr={'name': 'barbara'})])
    >>> g = g.add_edges(Edge(0, 1, attr={'frequency': 6}))
    >>> g.get_fields()
    ['__id', 'name', '__src_id', '__dst_id', 'frequency']
    """
    _mt._get_metric_tracker().track('sgraph.get_fields')
    vertex_fields = self.get_vertex_fields()
    edge_fields = self.get_edge_fields()
    return vertex_fields + edge_fields
def get_vertex_fields(self):
    """
    Return a list of vertex attribute fields in the SGraph.

    Returns
    -------
    out : list
        Names of fields contained in the vertex data.

    See Also
    --------
    get_fields, get_edge_fields

    Examples
    --------
    >>> from graphlab import SGraph, Vertex, Edge
    >>> g = SGraph()
    >>> g = g.add_vertices([Vertex(0, attr={'name': 'alex'}),
                            Vertex(1, attr={'name': 'barbara'})])
    >>> g = g.add_edges(Edge(0, 1, attr={'frequency': 6}))
    >>> g.get_vertex_fields()
    ['__id', 'name']
    """
    # Fixed: the tracking key was truncated to 'sgraph.' -- use the full
    # method name, consistent with every other method in this class.
    _mt._get_metric_tracker().track('sgraph.get_vertex_fields')
    with cython_context():
        return self.__proxy__.get_vertex_fields()
def get_edge_fields(self):
    """
    Return a list of edge attribute fields in the graph.

    Returns
    -------
    out : list
        Names of fields contained in the edge data.

    See Also
    --------
    get_fields, get_vertex_fields

    Examples
    --------
    >>> from graphlab import SGraph, Vertex, Edge
    >>> g = SGraph()
    >>> verts = [Vertex(0, attr={'name': 'alex'}),
                 Vertex(1, attr={'name': 'barbara'})]
    >>> g = g.add_vertices(verts)
    >>> g = g.add_edges(Edge(0, 1, attr={'frequency': 6}))
    >>> fields = g.get_edge_fields()
    ['__src_id', '__dst_id', 'frequency']
    """
    _mt._get_metric_tracker().track('sgraph.get_edge_fields')
    with cython_context():
        return self.__proxy__.get_edge_fields()
def select_fields(self, fields):
    """
    Return a new SGraph with only the selected fields. Other fields are
    discarded; requesting a field that exists in neither the vertex nor the
    edge data raises ValueError.

    Parameters
    ----------
    fields : string | list [string]
        A single field name or a list of field names to select.

    Returns
    -------
    out : SGraph
        A new graph whose vertex and edge data are projected to the selected
        fields.

    See Also
    --------
    get_fields, get_vertex_fields, get_edge_fields

    Examples
    --------
    >>> from graphlab import SGraph, Vertex
    >>> verts = [Vertex(0, attr={'breed': 'labrador', 'age': 5}),
                 Vertex(1, attr={'breed': 'labrador', 'age': 3}),
                 Vertex(2, attr={'breed': 'vizsla', 'age': 8})]
    >>> g = SGraph().add_vertices(verts)
    >>> g2 = g.select_fields(fields=['breed'])
    """
    _mt._get_metric_tracker().track('sgraph.select_fields')
    if (type(fields) is str):
        fields = [fields]
    if not isinstance(fields, list) or not all(type(x) is str for x in fields):
        raise TypeError('\"fields\" must be a str or list[str]')

    vertex_fields = self.__proxy__.get_vertex_fields()
    edge_fields = self.__proxy__.get_edge_fields()

    # Validate in request order so the first unknown field is reported,
    # matching the original loop's behavior.
    for f in fields:
        if f not in vertex_fields and f not in edge_fields:
            raise ValueError('Field %s not in graph' % f)

    keep_vfields = [f for f in fields if f in vertex_fields]
    keep_efields = [f for f in fields if f in edge_fields]

    with cython_context():
        projected = self.__proxy__
        projected = projected.select_vertex_fields(keep_vfields)
        projected = projected.select_edge_fields(keep_efields)
        return SGraph(_proxy=projected)
def triple_apply(self, triple_apply_fn, mutated_fields, input_fields=None):
    '''
    Apply a transform function to each edge and its associated source and
    target vertices in parallel. Each edge is visited once and in parallel.
    Modification to vertex data is protected by lock. The effect on the
    returned SGraph is equivalent to the following pseudocode:

    >>> PARALLEL FOR (source, edge, target) AS triple in G:
    ...     LOCK (triple.source, triple.target)
    ...     (source, edge, target) = triple_apply_fn(triple)
    ...     UNLOCK (triple.source, triple.target)
    ... END PARALLEL FOR

    Parameters
    ----------
    triple_apply_fn : function : (dict, dict, dict) -> (dict, dict, dict)
        The function to apply to each triple of (source_vertex, edge,
        target_vertex). It takes (source_data, edge_data, target_data) and
        returns (new_source_data, new_edge_data, new_target_data), all of
        dict type. This can also be a toolkit extension function compiled
        as a native shared library using the SDK.
    mutated_fields : list[str] | str
        Fields that ``triple_apply_fn`` will mutate. Columns actually
        mutated but not listed here have undetermined effects.
    input_fields : list[str] | str, optional
        Fields that ``triple_apply_fn`` will have access to. The default is
        ``None``, which grants access to all fields. ``mutated_fields``
        will always be included in ``input_fields``.

    Returns
    -------
    out : SGraph
        A new SGraph with updated vertex and edge data. Only fields
        specified in ``mutated_fields`` are updated.

    Notes
    -----
    - ``triple_apply`` does not currently support creating new fields in
      the lambda function.

    Examples
    --------
    >>> edges = graphlab.SFrame({'source': range(9), 'dest': range(1, 10)})
    >>> g = graphlab.SGraph().add_edges(edges, src_field='source',
    ...                                 dst_field='dest')
    >>> g.vertices['degree'] = 0
    >>> def degree_count_fn(src, edge, dst):
    ...     src['degree'] += 1
    ...     dst['degree'] += 1
    ...     return (src, edge, dst)
    >>> g = g.triple_apply(degree_count_fn, mutated_fields=['degree'])
    '''
    _mt._get_metric_tracker().track('sgraph.triple_apply')
    assert inspect.isfunction(triple_apply_fn), "Input must be a function"
    if not (type(mutated_fields) is list or type(mutated_fields) is str):
        raise TypeError('mutated_fields must be str or list of str')
    if not (input_fields is None or type(input_fields) is list or type(input_fields) is str):
        raise TypeError('input_fields must be str or list of str')
    if type(mutated_fields) == str:
        mutated_fields = [mutated_fields]
    # Fixed: was `len(mutated_fields) is 0`, which relies on CPython's
    # small-int caching; equality is the correct comparison.
    if len(mutated_fields) == 0:
        raise ValueError('mutated_fields cannot be empty')
    # Reserved structural columns can never be mutated.
    for f in ['__id', '__src_id', '__dst_id']:
        if f in mutated_fields:
            raise ValueError('mutated_fields cannot contain %s' % f)
    all_fields = self.get_fields()
    if not set(mutated_fields).issubset(set(all_fields)):
        extra_fields = list(set(mutated_fields).difference(set(all_fields)))
        raise ValueError('graph does not contain fields: %s' % str(extra_fields))

    # select input fields
    if input_fields is None:
        input_fields = self.get_fields()
    elif type(input_fields) is str:
        input_fields = [input_fields]

    # make input fields a superset of mutated_fields, preserving the
    # graph's field order
    input_fields_set = set(input_fields + mutated_fields)
    input_fields = [x for x in self.get_fields() if x in input_fields_set]
    g = self.select_fields(input_fields)

    nativefn = None
    try:
        from .. import extensions
        nativefn = extensions._build_native_function_call(triple_apply_fn)
    except Exception:
        # Compiling to a native call is best-effort; fall back to the
        # python lambda path on any failure. Narrowed from a bare
        # `except:` so KeyboardInterrupt/SystemExit are not swallowed.
        pass

    if nativefn is not None:
        with cython_context():
            return SGraph(_proxy=g.__proxy__.lambda_triple_apply_native(nativefn, mutated_fields))
    else:
        with cython_context():
            return SGraph(_proxy=g.__proxy__.lambda_triple_apply(triple_apply_fn, mutated_fields))
def save(self, filename, format='auto'):
    """
    Save the SGraph to disk. If the graph is saved in binary format, the
    graph can be re-loaded using the :py:func:`load_sgraph` method.
    Alternatively, the SGraph can be saved in JSON format for a
    human-readable and portable representation.

    Parameters
    ----------
    filename : string
        Filename to use when saving the file. It can be either a local or
        remote url.
    format : {'auto', 'binary', 'json'}, optional
        File format. If not specified, the format is detected automatically
        based on the filename. Note that JSON format graphs cannot be
        re-loaded with :py:func:`load_sgraph`.

    See Also
    --------
    load_sgraph

    Examples
    --------
    >>> g = graphlab.SGraph()
    >>> g = g.add_vertices([graphlab.Vertex(i) for i in range(5)])
    >>> g.save('mygraph')                    # binary format
    >>> g.save('mygraph.json', format='json')  # JSON format
    """
    _mt._get_metric_tracker().track('sgraph.save')
    # Fixed: was `format is 'auto'` -- identity comparison of string
    # literals is interning-dependent; use equality.
    if format == 'auto':
        if filename.endswith(('.json', '.json.gz')):
            format = 'json'
        else:
            format = 'binary'

    if format not in ['binary', 'json', 'csv']:
        raise ValueError('Invalid format: %s. Supported formats are: %s'
                         % (format, ['binary', 'json', 'csv']))
    with cython_context():
        self.__proxy__.save_graph(_make_internal_url(filename), format)
def show(self, vlabel=None, vlabel_hover=False, vcolor=[0.522, 0.741, 0.],
         highlight={}, highlight_color=[0.69, 0., 0.498], node_size=300,
         elabel=None, elabel_hover=False, ecolor=[0.37, 0.33, 0.33],
         ewidth=1, v_offset=0.03, h_offset=0., arrows=False,
         vertex_positions=None):
    """
    show(vlabel=None, vlabel_hover=False, vcolor=[0.522, 0.741, 0.], highlight={}, highlight_color=[0.69, 0., 0.498], node_size=300, elabel=None, elabel_hover=False, ecolor=[0.37, 0.33, 0.33], ewidth=1, v_offset=0.03, h_offset=0., arrows=False, vertex_positions=None)

    Visualize the SGraph with GraphLab Create :mod:`~graphlab.canvas`. This
    starts Canvas if it is not already running, and updates the plot if the
    graph has already been drawn. Graphs must have fewer than 1,000 edges
    and 1,000 vertices to be visualized in Canvas.

    Parameters
    ----------
    vlabel : string, optional
        Field name for the label on each vertex; 'id' uses the vertex ID.
        Default None omits vertex labels.
    vlabel_hover : bool, optional
        If True, vertex labels appear only on mouse hover.
    vcolor : list[float], optional
        RGB triple for the primary vertex color.
    highlight : dict or list or SArray, optional
        As a dict: mapping of vertex ID to RGB color triple. As a list or
        SArray (DEPRECATED): vertex IDs to highlight in ``highlight_color``.
    highlight_color : list[float], optional
        RGB triple used when ``highlight`` is a list or SArray.
    node_size : int, optional
        Size of plotted vertices.
    elabel : string, optional
        Field name for the label on each edge.
    elabel_hover : bool, optional
        If True, edge labels appear only on mouse hover.
    ecolor : string, optional
        RGB triple for edge color.
    ewidth : int, optional
        Edge width.
    v_offset, h_offset : float, optional
        Vertical/horizontal offset of vertex labels, as a fraction of total
        plot height/width.
    arrows : bool, optional
        If True, draw arrows indicating edge direction.
    vertex_positions : tuple, optional
        2-element tuple of column names in self.vertices to use as X and Y
        coordinates of vertices, translated to a square display space with
        aspect ratio preserved. If unspecified, a standard layout algorithm
        is used.

    See Also
    --------
    canvas

    Notes
    -----
    - Graphs with more than 1,000 vertices or 1,000 edges must be reduced
      to a subgraph before display.
    - See the `user guide
      <https://dato.com/learn/userguide/sframe/visualization.html>`_ for
      more details and extended examples.

    Examples
    --------
    >>> g = graphlab.SGraph()
    >>> g = sg.add_edges([graphlab.Edge(i, i+1) for i in range(5)])
    >>> g.show(highlight=[2, 3], vlabel='id', arrows=True)
    """
    from ..visualization.show import show as _canvas_show
    display_options = dict(vlabel=vlabel,
                           vlabel_hover=vlabel_hover,
                           vcolor=vcolor,
                           highlight=highlight,
                           highlight_color=highlight_color,
                           node_size=node_size,
                           elabel=elabel,
                           elabel_hover=elabel_hover,
                           ecolor=ecolor,
                           ewidth=ewidth,
                           v_offset=v_offset,
                           h_offset=h_offset,
                           arrows=arrows,
                           vertex_positions=vertex_positions)
    _canvas_show(self, **display_options)
def get_neighborhood(self, ids, radius=1, full_subgraph=True):
    """
    Retrieve the graph neighborhood around a set of vertices, ignoring edge
    directions. Note that setting radius greater than two often results in a
    time-consuming query for a very large subgraph.

    Parameters
    ----------
    ids : list [int | float | str]
        List of target vertex IDs.
    radius : int, optional
        Radius of the neighborhood. Every vertex in the returned subgraph is
        reachable from at least one of the target vertices on a path of
        length no longer than ``radius``. Setting radius larger than 2 may
        result in a very large subgraph.
    full_subgraph : bool, optional
        If True, return all edges between vertices in the returned
        neighborhood. The result is also known as the subgraph induced by
        the target nodes' neighbors, or the egocentric network for the
        target nodes. If False, return only edges on paths of length <=
        ``radius`` from the target node, also known as the reachability
        graph.

    Returns
    -------
    out : Graph
        The subgraph with the neighborhoods around the target vertices.

    See Also
    --------
    get_edges, get_vertices

    References
    ----------
    - Marsden, P. (2002) `Egocentric and sociocentric measures of network
      centrality <http://www.sciencedirect.com/science/article/pii/S03788733
      02000163>`_.
    - `Wikipedia - Reachability <http://en.wikipedia.org/wiki/Reachability>`_

    Examples
    --------
    >>> sf_edge = graphlab.SFrame({'source': range(9), 'dest': range(1, 10)})
    >>> g = graphlab.SGraph()
    >>> g = g.add_edges(sf_edge, src_field='source', dst_field='dest')
    >>> subgraph = g.get_neighborhood(ids=[1, 7], radius=2,
                                      full_subgraph=True)
    """
    _mt._get_metric_tracker().track('sgraph.get_neighborhood')

    verts = ids

    ## find the vertices within radius (and the path edges)
    # Each pass expands the frontier by one hop, using both edge
    # directions, then dedupes via set().
    # NOTE(review): with radius=0 and full_subgraph=False, `edges_out` /
    # `edges_in` are never bound and the else-branch below raises
    # NameError -- confirm whether radius >= 1 is an intended precondition.
    for i in range(radius):
        edges_out = self.get_edges(src_ids=verts)
        edges_in = self.get_edges(dst_ids=verts)

        verts = list(edges_in['__src_id']) + list(edges_in['__dst_id']) + \
            list(edges_out['__src_id']) + list(edges_out['__dst_id'])
        verts = list(set(verts))

    ## make a new graph to return and add the vertices
    g = SGraph()
    g = g.add_vertices(self.get_vertices(verts), vid_field='__id')

    ## add the requested edge set
    if full_subgraph is True:
        # Induced subgraph: every edge whose endpoints both lie in `verts`.
        # groupby over all columns with no aggregators dedupes the
        # out/in union.
        induced_edge_out = self.get_edges(src_ids=verts)
        induced_edge_in = self.get_edges(dst_ids=verts)
        df_induced = induced_edge_out.append(induced_edge_in)
        df_induced = df_induced.groupby(df_induced.column_names(), {})

        verts_sa = SArray(list(verts))
        edges = df_induced.filter_by(verts_sa, "__src_id")
        edges = edges.filter_by(verts_sa, "__dst_id")

    else:
        # Reachability graph: only the edges traversed on the last
        # expansion pass (deduped the same way).
        path_edges = edges_out.append(edges_in)
        edges = path_edges.groupby(path_edges.column_names(), {})

    g = g.add_edges(edges, src_field='__src_id', dst_field='__dst_id')
    return g
#/**************************************************************************/
#/* */
#/* Module Function */
#/* */
#/**************************************************************************/
def load_graph(filename, format='binary', delimiter='auto'):
    """
    Deprecated alias for :py:func:`load_sgraph`.
    """
    import warnings
    warnings.warn("load_graph has been renamed to load_sgraph. This function will be removed in the next release.", PendingDeprecationWarning)
    # Fixed: the `delimiter` argument was accepted but silently dropped;
    # forward it so text-format loads behave identically to load_sgraph.
    return load_sgraph(filename, format=format, delimiter=delimiter)
def load_sgraph(filename, format='binary', delimiter='auto'):
    """
    Load SGraph from text file or previously saved SGraph binary.

    Parameters
    ----------
    filename : string
        Location of the file. Can be a local path or a remote URL.
    format : {'binary', 'snap', 'csv', 'tsv'}, optional
        Format of the file to load.
        - 'binary': native graph format obtained from `SGraph.save`.
        - 'snap': tab or space separated edge list format with comments,
          used in the `Stanford Network Analysis Platform
          <http://snap.stanford.edu/snap/>`_.
        - 'csv': comma-separated edge list without header or comments.
        - 'tsv': tab-separated edge list without header or comments.
    delimiter : str, optional
        Delimiter used for the 'snap', 'csv' or 'tsv' formats. Each format
        has a default delimiter, but sometimes it is useful to override it.

    Returns
    -------
    out : SGraph
        Loaded SGraph.

    See Also
    --------
    SGraph, SGraph.save

    Examples
    --------
    >>> g = graphlab.SGraph().add_vertices([graphlab.Vertex(i) for i in range(5)])
    >>> g.save('mygraph')
    >>> g2 = graphlab.load_sgraph('mygraph')
    """
    _mt._get_metric_tracker().track('sgraph.load_sgraph')
    # Fixed: the format dispatch used `is` against string literals, which
    # is an interning-dependent identity test; use equality throughout.
    if format not in ['binary', 'snap', 'csv', 'tsv']:
        raise ValueError('Invalid format: %s' % format)

    with cython_context():
        g = None
        if format == 'binary':
            proxy = glconnect.get_unity().load_graph(_make_internal_url(filename))
            g = SGraph(_proxy=proxy)
        elif format == 'snap':
            if delimiter == 'auto':
                delimiter = '\t'
            sf = SFrame.read_csv(filename, comment_char='#', delimiter=delimiter,
                                 header=False, column_type_hints=int)
            g = SGraph().add_edges(sf, 'X1', 'X2')
        elif format == 'csv':
            if delimiter == 'auto':
                delimiter = ','
            sf = SFrame.read_csv(filename, header=False, delimiter=delimiter)
            g = SGraph().add_edges(sf, 'X1', 'X2')
        elif format == 'tsv':
            if delimiter == 'auto':
                delimiter = '\t'
            sf = SFrame.read_csv(filename, header=False, delimiter=delimiter)
            g = SGraph().add_edges(sf, 'X1', 'X2')
        g.summary()  # materialize
        return g
#/**************************************************************************/
#/* */
#/* Helper Function */
#/* */
#/**************************************************************************/
def _vertex_list_to_dataframe(ls, id_column_name):
    """
    Convert a list of Vertex objects into a pandas DataFrame, with the
    vertex IDs in ``id_column_name`` and one column per attribute (missing
    attributes become None).
    """
    assert HAS_PANDAS, 'Cannot use dataframe because Pandas is not available or version is too low.'
    attr_names = reduce(set.union, (set(vertex.attr.keys()) for vertex in ls))
    frame = pd.DataFrame({id_column_name: [vertex.vid for vertex in ls]})
    for name in attr_names:
        frame[name] = [vertex.attr.get(name) for vertex in ls]
    return frame
def _vertex_list_to_sframe(ls, id_column_name):
    """
    Convert a Vertex or a list of Vertex objects into an SFrame, with the
    vertex IDs in ``id_column_name`` and one column per attribute.
    """
    sf = SFrame()
    if type(ls) == list:
        attr_names = reduce(set.union, (set(vertex.attr.keys()) for vertex in ls))
        sf[id_column_name] = [vertex.vid for vertex in ls]
        for name in attr_names:
            sf[name] = [vertex.attr.get(name) for vertex in ls]
    elif type(ls) == Vertex:
        sf[id_column_name] = [ls.vid]
        for name, value in ls.attr.iteritems():
            sf[name] = [value]
    else:
        raise TypeError('Vertices type {} is Not supported.'.format(type(ls)))
    return sf
def _edge_list_to_dataframe(ls, src_column_name, dst_column_name):
    """
    Build a pandas DataFrame from a list of edges.

    Source and target vertex ids go into ``src_column_name`` and
    ``dst_column_name``; every attribute key that appears on any edge
    becomes its own column (edges missing an attribute get None).
    """
    assert HAS_PANDAS, 'Cannot use dataframe because Pandas is not available or version is too low.'
    # Union of attribute names across all edges.
    attr_names = reduce(set.union, (set(edge.attr.keys()) for edge in ls))
    frame = pd.DataFrame({
        src_column_name: [edge.src_vid for edge in ls],
        dst_column_name: [edge.dst_vid for edge in ls]})
    for name in attr_names:
        frame[name] = [edge.attr.get(name) for edge in ls]
    return frame
def _edge_list_to_sframe(ls, src_column_name, dst_column_name):
    """
    Convert a list of edges (or a single Edge) into an SFrame.

    Source and target vertex ids go into ``src_column_name`` and
    ``dst_column_name``; each attribute key found on the edges becomes its
    own column.
    """
    sf = SFrame()
    if type(ls) == list:
        # Union of attribute names across all edges; an edge that lacks an
        # attribute contributes None for that column (via attr.get).
        cols = reduce(set.union, (set(e.attr.keys()) for e in ls))
        sf[src_column_name] = [e.src_vid for e in ls]
        sf[dst_column_name] = [e.dst_vid for e in ls]
        for c in cols:
            sf[c] = [e.attr.get(c) for e in ls]
    elif type(ls) == Edge:
        sf[src_column_name] = [ls.src_vid]
        sf[dst_column_name] = [ls.dst_vid]
        # Bug fix: a single Edge's attributes used to be silently dropped,
        # inconsistent with the list branch above and with
        # _vertex_list_to_sframe; copy them over as one-element columns.
        for col, val in ls.attr.iteritems():
            sf[col] = [val]
    else:
        raise TypeError('Edges type {} is Not supported.'.format(type(ls)))
    return sf
def _dataframe_to_vertex_list(df):
    """
    Convert dataframe into list of vertices, assuming that vertex ids are
    stored in _VID_COLUMN.

    An empty (column-less) dataframe yields an empty list.
    """
    if not len(df.columns):
        return []
    assert _VID_COLUMN in df.columns, "Vertex DataFrame must contain column %s" % _VID_COLUMN
    # Transpose so each original row becomes a column (a pandas Series) that
    # the Vertex constructor can consume directly via _series.
    transposed = df[df.columns].T
    return [Vertex(None, _series=transposed[col]) for col in transposed]
def _dataframe_to_edge_list(df):
    """
    Convert dataframe into list of edges, assuming that source and target ids
    are stored in _SRC_VID_COLUMN, and _DST_VID_COLUMN respectively.

    An empty (column-less) dataframe yields an empty list.
    """
    cols = df.columns
    if len(cols):
        # Bug fix: the assertion messages previously said "Vertex DataFrame"
        # even though this function validates an *edge* dataframe.
        assert _SRC_VID_COLUMN in cols, "Edge DataFrame must contain column %s" % _SRC_VID_COLUMN
        assert _DST_VID_COLUMN in cols, "Edge DataFrame must contain column %s" % _DST_VID_COLUMN
        # Transpose so each original row becomes a column (a pandas Series)
        # that the Edge constructor can consume directly via _series.
        df = df[cols].T
        ret = [Edge(None, None, _series=df[col]) for col in df]
        return ret
    else:
        return []
def _vertex_data_to_sframe(data, vid_field):
    """
    Convert data into a vertex data sframe. Using vid_field to identify the id
    column. The returned sframe will have id column name '__id'.

    Accepts an SFrame, a Vertex, a list of vertices, or (when pandas is
    available) a pandas DataFrame.
    """
    if isinstance(data, SFrame):
        # '__id' already in the sframe, and it is ok to not specify vid_field
        if vid_field is None and _VID_COLUMN in data.column_names():
            return data
        if vid_field is None:
            raise ValueError("vid_field must be specified for SFrame input")
        # Shallow-copy so the caller's SFrame keeps its original column name.
        data_copy = copy.copy(data)
        data_copy.rename({vid_field: _VID_COLUMN})
        return data_copy

    if type(data) == Vertex or type(data) == list:
        return _vertex_list_to_sframe(data, '__id')
    elif HAS_PANDAS and type(data) == pd.DataFrame:
        if vid_field is None:
            # using the dataframe index as vertex id
            if data.index.is_unique:
                if not ("index" in data.columns):
                    # pandas reset_index() will insert a new column of name "index".
                    sf = SFrame(data.reset_index())  # "index"
                    sf.rename({'index': _VID_COLUMN})
                    return sf
                else:
                    # pandas reset_index() will insert a new column of name "level_0" if there exists a column named "index".
                    sf = SFrame(data.reset_index())  # "level_0"
                    sf.rename({'level_0': _VID_COLUMN})
                    return sf
            else:
                raise ValueError("Index of the vertices dataframe is not unique, \
                        try specifying vid_field name to use a column for vertex ids.")
        else:
            sf = SFrame(data)
            if _VID_COLUMN in sf.column_names():
                raise ValueError('%s reserved vid column name already exists in the SFrame' % _VID_COLUMN)
            sf.rename({vid_field: _VID_COLUMN})
            return sf
    else:
        raise TypeError('Vertices type %s is Not supported.' % str(type(data)))
def _edge_data_to_sframe(data, src_field, dst_field):
    """
    Convert data into an edge data sframe. Using src_field and dst_field to
    identify the source and target id column. The returned sframe will have id
    column name '__src_id', '__dst_id'.

    Accepts an SFrame, an Edge, a list of edges, or (when pandas is
    available) a pandas DataFrame.
    """
    if isinstance(data, SFrame):
        # '__src_vid' and '__dst_vid' already in the sframe, and
        # it is ok to not specify src_field and dst_field
        if src_field is None and dst_field is None and \
           _SRC_VID_COLUMN in data.column_names() and \
           _DST_VID_COLUMN in data.column_names():
            return data
        if src_field is None:
            raise ValueError("src_field must be specified for SFrame input")
        if dst_field is None:
            raise ValueError("dst_field must be specified for SFrame input")
        # Shallow-copy so the caller's SFrame keeps its original column names.
        data_copy = copy.copy(data)
        if src_field == _DST_VID_COLUMN and dst_field == _SRC_VID_COLUMN:
            # special case when src_field = "__dst_id" and dst_field = "__src_id"
            # directly renaming will cause name collision
            dst_id_column = data_copy[_DST_VID_COLUMN]
            del data_copy[_DST_VID_COLUMN]
            data_copy.rename({_SRC_VID_COLUMN: _DST_VID_COLUMN})
            data_copy[_SRC_VID_COLUMN] = dst_id_column
        else:
            data_copy.rename({src_field: _SRC_VID_COLUMN, dst_field: _DST_VID_COLUMN})
        return data_copy
    elif HAS_PANDAS and type(data) == pd.DataFrame:
        if src_field is None:
            raise ValueError("src_field must be specified for Pandas input")
        if dst_field is None:
            raise ValueError("dst_field must be specified for Pandas input")
        sf = SFrame(data)
        if src_field == _DST_VID_COLUMN and dst_field == _SRC_VID_COLUMN:
            # special case when src_field = "__dst_id" and dst_field = "__src_id"
            # directly renaming will cause name collision
            # Bug fix: this branch used to read `data_copy[_DST_VID_COLUMN]`,
            # but `data_copy` is never defined in the pandas branch (it only
            # exists in the SFrame branch above), so the column-swap path
            # raised NameError. Read from `sf` instead.
            dst_id_column = sf[_DST_VID_COLUMN]
            del sf[_DST_VID_COLUMN]
            sf.rename({_SRC_VID_COLUMN: _DST_VID_COLUMN})
            sf[_SRC_VID_COLUMN] = dst_id_column
        else:
            sf.rename({src_field: _SRC_VID_COLUMN, dst_field: _DST_VID_COLUMN})
        return sf
    elif type(data) == Edge:
        return _edge_list_to_sframe([data], _SRC_VID_COLUMN, _DST_VID_COLUMN)
    elif type(data) == list:
        return _edge_list_to_sframe(data, _SRC_VID_COLUMN, _DST_VID_COLUMN)
    else:
        raise TypeError('Edges type %s is Not supported.' % str(type(data)))
## Hack: overriding GFrame's class name so that it appears as SFrame. ##
# NOTE(review): this makes GFrame report SFrame's __name__/__module__ in
# repr()/introspection — presumably so GFrame is indistinguishable from
# SFrame to users; confirm before removing.
GFrame.__name__ = SFrame.__name__
GFrame.__module__ = SFrame.__module__
| bsd-3-clause |
jrbourbeau/cr-composition | notebooks/legacy/lightheavy/spectrum-analysis-xgboost.py | 1 | 27958 | #!/usr/bin/env python
from __future__ import division, print_function
from collections import defaultdict
import itertools
import numpy as np
from scipy import interp
import pandas as pd
import matplotlib.pyplot as plt
from matplotlib.colors import ListedColormap
import seaborn.apionly as sns
from sklearn.metrics import accuracy_score, confusion_matrix, roc_curve, auc
from sklearn.model_selection import cross_val_score, StratifiedShuffleSplit, KFold, StratifiedKFold
from mlxtend.feature_selection import SequentialFeatureSelector as SFS
import composition as comp
import composition.analysis.plotting as plotting
# Plot color assigned to each composition class used throughout this script.
color_dict = {'light': 'C0', 'heavy': 'C1', 'total': 'C2'}
# Simulation: train/test feature split, labels, label encoder, and per-event
# reconstructed energies (return_energy=True adds the energy arrays).
X_train_sim, X_test_sim, y_train_sim, y_test_sim, le, energy_train_sim, energy_test_sim = comp.preprocess_sim(return_energy=True)
# Detector data: features and energies only (no composition labels).
X_test_data, energy_test_data = comp.preprocess_data(return_energy=True)
# pipeline = comp.get_pipeline('xgboost')
# clf_name = pipeline.named_steps['classifier'].__class__.__name__
# print('=' * 30)
# print(clf_name)
# scores = cross_val_score(
# estimator=pipeline, X=X_train_sim, y=y_train_sim, cv=3, n_jobs=15)
# print('CV score: {:.2%} (+/- {:.2%})'.format(scores.mean(), scores.std()))
# print('=' * 30)
# Define energy binning for this analysis
# NOTE(review): bin definitions come from the composition package; presumably
# log10(E/GeV) edges/midpoints — confirm in comp.analysis.get_energybins.
energybins = comp.analysis.get_energybins()
# # Calculate RF generalization error via 10-fold CV
# comp_list = ['light', 'heavy']
# # Split training data into CV training and testing folds
# kf = KFold(n_splits=10)
# frac_correct_folds = defaultdict(list)
# fold_num = 0
# print('Fold ', end='')
# for train_index, test_index in kf.split(X_train_sim):
# fold_num += 1
# print('{}...'.format(fold_num), end='')
# X_train_fold, X_test_fold = X_train_sim[train_index], X_train_sim[test_index]
# y_train_fold, y_test_fold = y_train_sim[train_index], y_train_sim[test_index]
#
# energy_test_fold = energy_train_sim[test_index]
#
# reco_frac, reco_frac_err = get_frac_correct(X_train_fold, X_test_fold,
# y_train_fold, y_test_fold,
# energy_test_fold, comp_list)
# for composition in comp_list:
# frac_correct_folds[composition].append(reco_frac[composition])
# frac_correct_folds['total'].append(reco_frac['total'])
# frac_correct_gen_err = {key: np.std(frac_correct_folds[key], axis=0) for key in frac_correct_folds}
# Load the IC79 simulation DataFrame (used by the effective-area / energy-
# resolution steps in the commented-out analysis below).
df_sim = comp.load_dataframe(datatype='sim', config='IC79')
# reco_frac, reco_frac_stat_err = get_frac_correct(X_train_sim, X_test_sim,
# y_train_sim, y_test_sim,
# energy_test_sim, comp_list)
# step_x = log_energy_midpoints
# step_x = np.append(step_x[0]-log_energy_bin_width/2, step_x)
# step_x = np.append(step_x, step_x[-1]+log_energy_bin_width/2)
# # Plot fraction of events correctlt classified vs energy
# fig, ax = plt.subplots()
# for composition in comp_list + ['total']:
# err = np.sqrt(frac_correct_gen_err[composition]**2 + reco_frac_stat_err[composition]**2)
# plotting.plot_steps(log_energy_midpoints, reco_frac[composition], err, ax, color_dict[composition], composition)
# plt.xlabel('$\log_{10}(E_{\mathrm{reco}}/\mathrm{GeV})$')
# ax.set_ylabel('Fraction correctly identified')
# ax.set_ylim([0.0, 1.0])
# ax.set_xlim([6.3, 8.0])
# ax.grid(linestyle=':')
# leg = plt.legend(loc='upper center', frameon=False,
# bbox_to_anchor=(0.5, # horizontal
# 1.1),# vertical
# ncol=len(comp_list)+1, fancybox=False)
# # set the linewidth of each legend object
# for legobj in leg.legendHandles:
# legobj.set_linewidth(3.0)
#
# # place a text box in upper left in axes coords
# textstr = '$\mathrm{\underline{Training \ features}}$: \n'
# # for i, label in enumerate(feature_labels):
# # for i, idx in enumerate(sfs.k_feature_idx_):
# # # if i>1:
# # # break
# # print(feature_labels[idx])
# # # textstr += '{}) '.format(i+1) + feature_labels[idx] + '\n'
# # if (i == len(feature_labels)-1):
# # textstr += '{}) '.format(i+1) + feature_labels[idx]
# # else:
# # textstr += '{}) '.format(i+1) + feature_labels[idx] + '\n'
# props = dict(facecolor='white', linewidth=0)
# # ax.text(1.025, 0.855, textstr, transform=ax.transAxes, fontsize=8,
# # verticalalignment='top', bbox=props)
# cv_str = 'Accuracy: {:0.2f}\% (+/- {:.1}\%)'.format(scores.mean()*100, scores.std()*100)
# # print(cvstr)
# # props = dict(facecolor='white', linewidth=0)
# # ax.text(1.025, 0.9825, cvstr, transform=ax.transAxes, fontsize=8,
# # verticalalignment='top', bbox=props)
# ax.text(7.4, 0.2, cv_str,
# ha="center", va="center", size=8,
# bbox=dict(boxstyle='round', fc="white", ec="gray", lw=0.8))
# plt.show()
#
#
# # ## Spectrum
# # [ [back to top](#top) ]
#
# # In[11]:
#
# def get_num_comp_reco(X_train, y_train, X_test, log_energy_test, comp_list):
#
# pipeline.fit(X_train, y_train)
# test_predictions = pipeline.predict(X_test)
#
# # Get number of correctly identified comp in each reco energy bin
# num_reco_energy, num_reco_energy_err = {}, {}
# for composition in comp_list:
# num_reco_energy[composition] = np.histogram(
# log_energy_test[le.inverse_transform(test_predictions) == composition],
# bins=log_energy_bins)[0]
# num_reco_energy_err[composition] = np.sqrt(num_reco_energy[composition])
#
# num_reco_energy['total'] = np.histogram(log_energy_test, bins=log_energy_bins)[0]
# num_reco_energy_err['total'] = np.sqrt(num_reco_energy['total'])
#
# return num_reco_energy, num_reco_energy_err
#
#
# # In[ ]:
#
# df_sim = comp.load_dataframe(datatype='sim', config='IC79')
#
#
# # In[14]:
#
# comp_list = ['light', 'heavy']
# # Get number of events per energy bin
# num_reco_energy, num_reco_energy_err = get_num_comp_reco(X_train_sim, y_train_sim,
# X_test_data, energy_test_data,
# comp_list)
# import pprint
# pprint.pprint(num_reco_energy)
# print(np.sum(num_reco_energy['light']+num_reco_energy['heavy']))
# print(np.sum(num_reco_energy['total']))
# # Solid angle
# solid_angle = 2*np.pi*(1-np.cos(np.arccos(0.8)))
#
#
# # In[15]:
#
# # Live-time information
# goodrunlist = pd.read_table('/data/ana/CosmicRay/IceTop_GRL/IC79_2010_GoodRunInfo_4IceTop.txt', skiprows=[0, 3])
# goodrunlist.head()
#
#
# # In[16]:
#
# livetimes = goodrunlist['LiveTime(s)']
# livetime = np.sum(livetimes[goodrunlist['Good_it_L2'] == 1])
# print('livetime (seconds) = {}'.format(livetime))
# print('livetime (days) = {}'.format(livetime/(24*60*60)))
#
#
# # In[17]:
#
# fig, ax = plt.subplots()
# for composition in comp_list + ['total']:
# # Calculate dN/dE
# y = num_reco_energy[composition]
# y_err = num_reco_energy_err[composition]
# # Add time duration
# y = y / livetime
# y_err = y / livetime
# # ax.errorbar(log_energy_midpoints, y, yerr=y_err,
# # color=color_dict[composition], label=composition,
# # marker='.', linestyle='None')
# plotting.plot_steps(log_energy_midpoints, y, y_err, ax, color_dict[composition], composition)
# ax.set_yscale("log", nonposy='clip')
# plt.xlabel('$\log_{10}(E_{\mathrm{reco}}/\mathrm{GeV})$')
# ax.set_ylabel('Rate [s$^{-1}$]')
# ax.set_xlim([6.2, 8.0])
# # ax.set_ylim([10**2, 10**5])
# ax.grid(linestyle=':')
# leg = plt.legend(loc='upper center', frameon=False,
# bbox_to_anchor=(0.5, # horizontal
# 1.1),# vertical
# ncol=len(comp_list)+1, fancybox=False)
# # set the linewidth of each legend object
# for legobj in leg.legendHandles:
# legobj.set_linewidth(3.0)
#
# plt.show()
#
#
# # In[18]:
#
# eff_area, eff_area_error, energy_midpoints = comp.analysis.get_effective_area(df_sim, energy_bins)
#
#
# # In[19]:
#
# # Plot fraction of events vs energy
# fig, ax = plt.subplots()
# for composition in comp_list + ['total']:
# # Calculate dN/dE
# y = num_reco_energy[composition]/energy_bin_widths
# y_err = num_reco_energy_err[composition]/energy_bin_widths
# # Add effective area
# y, y_err = comp.analysis.ratio_error(y, y_err, eff_area, eff_area_error)
# # Add solid angle
# y = y / solid_angle
# y_err = y_err / solid_angle
# # Add time duration
# y = y / livetime
# y_err = y / livetime
# # Add energy scaling
# # energy_err = get_energy_res(df_sim, energy_bins)
# # energy_err = np.array(energy_err)
# # print(10**energy_err)
# y = energy_midpoints**2.7 * y
# y_err = energy_midpoints**2.7 * y_err
# print(y)
# print(y_err)
# # ax.errorbar(log_energy_midpoints, y, yerr=y_err, label=composition, color=color_dict[composition],
# # marker='.', markersize=8)
# plotting.plot_steps(log_energy_midpoints, y, y_err, ax, color_dict[composition], composition)
# ax.set_yscale("log", nonposy='clip')
# # ax.set_xscale("log", nonposy='clip')
# plt.xlabel('$\log_{10}(E_{\mathrm{reco}}/\mathrm{GeV})$')
# ax.set_ylabel('$\mathrm{E}^{2.7} \\frac{\mathrm{dN}}{\mathrm{dE dA d\Omega dt}} \ [\mathrm{GeV}^{1.7} \mathrm{m}^{-2} \mathrm{sr}^{-1} \mathrm{s}^{-1}]$')
# ax.set_xlim([6.3, 8])
# ax.set_ylim([10**3, 10**5])
# ax.grid(linestyle='dotted', which="both")
# leg = plt.legend(loc='upper center', frameon=False,
# bbox_to_anchor=(0.5, # horizontal
# 1.1),# vertical
# ncol=len(comp_list)+1, fancybox=False)
# # set the linewidth of each legend object
# for legobj in leg.legendHandles:
# legobj.set_linewidth(3.0)
#
# # plt.savefig('/home/jbourbeau/public_html/figures/spectrum.png')
# plt.show()
#
#
# # ## Unfolding
# # [ [back to top](#top) ]
#
# # In[20]:
#
# reco_frac['light']
#
#
# # In[21]:
#
# reco_frac['heavy']
#
#
# # In[22]:
#
# num_reco_energy['light']
#
#
# # In[23]:
#
# num_reco_energy['heavy']
#
#
# # In[24]:
#
# pipeline.fit(X_train_sim, y_train_sim)
# test_predictions = pipeline.predict(X_test_sim)
# true_comp = le.inverse_transform(y_test_sim)
# pred_comp = le.inverse_transform(test_predictions)
# print(true_comp)
# print(pred_comp)
#
#
# # In[25]:
#
# bin_idxs = np.digitize(energy_test_sim, log_energy_bins) - 1
# energy_bin_idx = np.unique(bin_idxs)
# energy_bin_idx = energy_bin_idx[1:]
# print(energy_bin_idx)
# num_reco_energy_unfolded = defaultdict(list)
# for bin_idx in energy_bin_idx:
# energy_bin_mask = bin_idxs == bin_idx
# confmat = confusion_matrix(true_comp[energy_bin_mask], pred_comp[energy_bin_mask], labels=comp_list)
# confmat = np.divide(confmat.T, confmat.sum(axis=1, dtype=float)).T
# inv_confmat = np.linalg.inv(confmat)
# counts = np.array([num_reco_energy[composition][bin_idx] for composition in comp_list])
# unfolded_counts = np.dot(inv_confmat, counts)
# # unfolded_counts[unfolded_counts < 0] = 0
# num_reco_energy_unfolded['light'].append(unfolded_counts[0])
# num_reco_energy_unfolded['heavy'].append(unfolded_counts[1])
# num_reco_energy_unfolded['total'].append(unfolded_counts.sum())
# print(num_reco_energy_unfolded)
#
#
# # In[26]:
#
# unfolded_counts.sum()
#
#
# # In[27]:
#
# fig, ax = plt.subplots()
# for composition in comp_list + ['total']:
# # Calculate dN/dE
# y = num_reco_energy_unfolded[composition]/energy_bin_widths
# y_err = np.sqrt(y)/energy_bin_widths
# # Add effective area
# y, y_err = comp.analysis.ratio_error(y, y_err, eff_area, eff_area_error)
# # Add solid angle
# y = y / solid_angle
# y_err = y_err / solid_angle
# # Add time duration
# y = y / livetime
# y_err = y / livetime
# # Add energy scaling
# # energy_err = get_energy_res(df_sim, energy_bins)
# # energy_err = np.array(energy_err)
# # print(10**energy_err)
# y = energy_midpoints**2.7 * y
# y_err = energy_midpoints**2.7 * y_err
# print(y)
# print(y_err)
# # ax.errorbar(log_energy_midpoints, y, yerr=y_err, label=composition, color=color_dict[composition],
# # marker='.', markersize=8)
# plotting.plot_steps(log_energy_midpoints, y, y_err, ax, color_dict[composition], composition)
# ax.set_yscale("log", nonposy='clip')
# # ax.set_xscale("log", nonposy='clip')
# plt.xlabel('$\log_{10}(E_{\mathrm{reco}}/\mathrm{GeV})$')
# ax.set_ylabel('$\mathrm{E}^{2.7} \\frac{\mathrm{dN}}{\mathrm{dE dA d\Omega dt}} \ [\mathrm{GeV}^{1.7} \mathrm{m}^{-2} \mathrm{sr}^{-1} \mathrm{s}^{-1}]$')
# ax.set_xlim([6.3, 8])
# ax.set_ylim([10**3, 10**5])
# ax.grid(linestyle='dotted', which="both")
# leg = plt.legend(loc='upper center', frameon=False,
# bbox_to_anchor=(0.5, # horizontal
# 1.1),# vertical
# ncol=len(comp_list)+1, fancybox=False)
# # set the linewidth of each legend object
# for legobj in leg.legendHandles:
# legobj.set_linewidth(3.0)
#
# # plt.savefig('/home/jbourbeau/public_html/figures/spectrum.png')
# plt.show()
#
#
# # ### Iterative method
#
# # Get confusion matrix for each energy bin
#
# # In[99]:
#
# bin_idxs = np.digitize(energy_test_sim, log_energy_bins) - 1
# energy_bin_idx = np.unique(bin_idxs)
# energy_bin_idx = energy_bin_idx[1:]
# print(energy_bin_idx)
# num_reco_energy_unfolded = defaultdict(list)
# response_mat = []
# for bin_idx in energy_bin_idx:
# energy_bin_mask = bin_idxs == bin_idx
# confmat = confusion_matrix(true_comp[energy_bin_mask], pred_comp[energy_bin_mask], labels=comp_list)
# confmat = np.divide(confmat.T, confmat.sum(axis=1, dtype=float)).T
# response_mat.append(confmat)
#
#
# # In[100]:
#
# response_mat
#
#
# # In[134]:
#
# r = np.dstack((np.copy(num_reco_energy['light']), np.copy(num_reco_energy['heavy'])))[0]
# for unfold_iter in range(50):
# print('Unfolding iteration {}...'.format(unfold_iter))
# if unfold_iter == 0:
# u = r
# fs = []
# for bin_idx in energy_bin_idx:
# # print(u)
# f = np.dot(response_mat[bin_idx], u[bin_idx])
# f[f < 0] = 0
# fs.append(f)
# # print(f)
# u = u + (r - fs)
# # u[u < 0] = 0
# # print(u)
# unfolded_counts_iter = {}
# unfolded_counts_iter['light'] = u[:,0]
# unfolded_counts_iter['heavy'] = u[:,1]
# unfolded_counts_iter['total'] = u.sum(axis=1)
# print(unfolded_counts_iter)
#
#
# # In[135]:
#
# fig, ax = plt.subplots()
# for composition in comp_list + ['total']:
# # Calculate dN/dE
# y = unfolded_counts_iter[composition]/energy_bin_widths
# y_err = np.sqrt(y)/energy_bin_widths
# # Add effective area
# y, y_err = comp.analysis.ratio_error(y, y_err, eff_area, eff_area_error)
# # Add solid angle
# y = y / solid_angle
# y_err = y_err / solid_angle
# # Add time duration
# y = y / livetime
# y_err = y / livetime
# # Add energy scaling
# # energy_err = get_energy_res(df_sim, energy_bins)
# # energy_err = np.array(energy_err)
# # print(10**energy_err)
# y = energy_midpoints**2.7 * y
# y_err = energy_midpoints**2.7 * y_err
# print(y)
# print(y_err)
# # ax.errorbar(log_energy_midpoints, y, yerr=y_err, label=composition, color=color_dict[composition],
# # marker='.', markersize=8)
# plotting.plot_steps(log_energy_midpoints, y, y_err, ax, color_dict[composition], composition)
# ax.set_yscale("log", nonposy='clip')
# # ax.set_xscale("log", nonposy='clip')
# plt.xlabel('$\log_{10}(E_{\mathrm{reco}}/\mathrm{GeV})$')
# ax.set_ylabel('$\mathrm{E}^{2.7} \\frac{\mathrm{dN}}{\mathrm{dE dA d\Omega dt}} \ [\mathrm{GeV}^{1.7} \mathrm{m}^{-2} \mathrm{sr}^{-1} \mathrm{s}^{-1}]$')
# ax.set_xlim([6.3, 8])
# ax.set_ylim([10**3, 10**5])
# ax.grid(linestyle='dotted', which="both")
# leg = plt.legend(loc='upper center', frameon=False,
# bbox_to_anchor=(0.5, # horizontal
# 1.1),# vertical
# ncol=len(comp_list)+1, fancybox=False)
# # set the linewidth of each legend object
# for legobj in leg.legendHandles:
# legobj.set_linewidth(3.0)
#
# # plt.savefig('/home/jbourbeau/public_html/figures/spectrum.png')
# plt.show()
#
#
# # In[106]:
#
# print(num_reco_energy)
#
#
# # In[107]:
#
# comp_list = ['light', 'heavy']
# pipeline = comp.get_pipeline('RF')
# pipeline.fit(X_train_sim, y_train_sim)
# test_predictions = pipeline.predict(X_test_sim)
# # correctly_identified_mask = (test_predictions == y_test)
# # confmat = confusion_matrix(y_true=y_test, y_pred=y_pred)/len(y_pred)
# true_comp = le.inverse_transform(y_test_sim)
# pred_comp = le.inverse_transform(test_predictions)
# confmat = confusion_matrix(true_comp, pred_comp, labels=comp_list)
#
# def plot_confusion_matrix(cm, classes,
# normalize=False,
# title='Confusion matrix',
# cmap=plt.cm.Greens):
# """
# This function prints and plots the confusion matrix.
# Normalization can be applied by setting `normalize=True`.
# """
# if normalize:
# cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]
# print("Normalized confusion matrix")
# else:
# print('Confusion matrix, without normalization')
#
# print(cm)
#
# plt.imshow(cm, interpolation='None', cmap=cmap,
# vmin=0, vmax=1.0)
# plt.title(title)
# plt.colorbar()
# tick_marks = np.arange(len(classes))
# plt.xticks(tick_marks, classes, rotation=45)
# plt.yticks(tick_marks, classes)
#
# thresh = cm.max() / 2.
# for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):
# plt.text(j, i, '{:0.3f}'.format(cm[i, j]),
# horizontalalignment="center",
# color="white" if cm[i, j] > thresh else "black")
#
# plt.tight_layout()
# plt.ylabel('True composition')
# plt.xlabel('Predicted composition')
#
# fig, ax = plt.subplots()
# plot_confusion_matrix(confmat, classes=['light', 'heavy'], normalize=True,
# title='Confusion matrix, without normalization')
#
# # # Plot normalized confusion matrix
# # plt.figure()
# # plot_confusion_matrix(cnf_matrix, classes=class_names, normalize=True,
# # title='Normalized confusion matrix')
#
# plt.show()
#
#
# # In[63]:
#
# comp_list = ['light', 'heavy']
# pipeline = comp.get_pipeline('RF')
# pipeline.fit(X_train_sim, y_train_sim)
# test_predictions = pipeline.predict(X_test_sim)
# # correctly_identified_mask = (test_predictions == y_test)
# # confmat = confusion_matrix(y_true=y_test, y_pred=y_pred)/len(y_pred)
# true_comp = le.inverse_transform(y_test_sim)
# pred_comp = le.inverse_transform(test_predictions)
# confmat = confusion_matrix(true_comp, pred_comp, labels=comp_list)
#
# inverse = np.linalg.inv(confmat)
# inverse
#
#
# # In[64]:
#
# confmat
#
#
# # In[66]:
#
# comp_list = ['light', 'heavy']
# # Get number of events per energy bin
# num_reco_energy, num_reco_energy_err = get_num_comp_reco(X_train_sim, y_train_sim, X_test_data, comp_list)
# # Energy-related variables
# energy_bin_width = 0.1
# energy_bins = np.arange(6.2, 8.1, energy_bin_width)
# energy_midpoints = (energy_bins[1:] + energy_bins[:-1]) / 2
# energy_bin_widths = 10**energy_bins[1:] - 10**energy_bins[:-1]
# def get_energy_res(df_sim, energy_bins):
# reco_log_energy = df_sim['lap_log_energy'].values
# MC_log_energy = df_sim['MC_log_energy'].values
# energy_res = reco_log_energy - MC_log_energy
# bin_centers, bin_medians, energy_err = comp.analysis.data_functions.get_medians(reco_log_energy,
# energy_res,
# energy_bins)
# return np.abs(bin_medians)
# # Solid angle
# solid_angle = 2*np.pi*(1-np.cos(np.arccos(0.85)))
# # solid_angle = 2*np.pi*(1-np.cos(40*(np.pi/180)))
# print(solid_angle)
# print(2*np.pi*(1-np.cos(40*(np.pi/180))))
# # Live-time information
# start_time = np.amin(df_data['start_time_mjd'].values)
# end_time = np.amax(df_data['end_time_mjd'].values)
# day_to_sec = 24 * 60 * 60.
# dt = day_to_sec * (end_time - start_time)
# print(dt)
# # Plot fraction of events vs energy
# fig, ax = plt.subplots()
# for i, composition in enumerate(comp_list):
# num_reco_bin = np.array([[i, j] for i, j in zip(num_reco_energy['light'], num_reco_energy['heavy'])])
# # print(num_reco_bin)
# num_reco = np.array([np.dot(inverse, i) for i in num_reco_bin])
# print(num_reco)
# num_reco_2 = {'light': num_reco[:, 0], 'heavy': num_reco[:, 1]}
# # Calculate dN/dE
# y = num_reco_2[composition]/energy_bin_widths
# y_err = num_reco_energy_err[composition]/energy_bin_widths
# # Add effective area
# y, y_err = comp.analysis.ratio_error(y, y_err, eff_area, eff_area_error)
# # Add solid angle
# y = y / solid_angle
# y_err = y_err / solid_angle
# # Add time duration
# y = y / dt
# y_err = y / dt
# # Add energy scaling
# energy_err = get_energy_res(df_sim, energy_bins)
# energy_err = np.array(energy_err)
# # print(10**energy_err)
# y = (10**energy_midpoints)**2.7 * y
# y_err = (10**energy_midpoints)**2.7 * y_err
# plotting.plot_steps(energy_midpoints, y, y_err, ax, color_dict[composition], composition)
# ax.set_yscale("log", nonposy='clip')
# plt.xlabel('$\log_{10}(E_{\mathrm{reco}}/\mathrm{GeV})$')
# ax.set_ylabel('$\mathrm{E}^{2.7} \\frac{\mathrm{dN}}{\mathrm{dE dA d\Omega dt}} \ [\mathrm{GeV}^{1.7} \mathrm{m}^{-2} \mathrm{sr}^{-1} \mathrm{s}^{-1}]$')
# ax.set_xlim([6.2, 8.0])
# # ax.set_ylim([10**2, 10**5])
# ax.grid()
# leg = plt.legend(loc='upper center',
# bbox_to_anchor=(0.5, # horizontal
# 1.1),# vertical
# ncol=len(comp_list)+1, fancybox=False)
# # set the linewidth of each legend object
# for legobj in leg.legendHandles:
# legobj.set_linewidth(3.0)
#
# plt.show()
#
#
# # In[44]:
#
# pipeline.get_params()['classifier__max_depth']
#
#
# # In[47]:
#
# energy_bin_width = 0.1
# energy_bins = np.arange(6.2, 8.1, energy_bin_width)
# fig, axarr = plt.subplots(1, 2)
# for composition, ax in zip(comp_list, axarr.flatten()):
# MC_comp_mask = (df_sim['MC_comp_class'] == composition)
# MC_log_energy = df_sim['MC_log_energy'][MC_comp_mask].values
# reco_log_energy = df_sim['lap_log_energy'][MC_comp_mask].values
# plotting.histogram_2D(MC_log_energy, reco_log_energy, energy_bins, log_counts=True, ax=ax)
# ax.plot([0,10], [0,10], marker='None', linestyle='-.')
# ax.set_xlim([6.2, 8])
# ax.set_ylim([6.2, 8])
# ax.set_xlabel('$\log_{10}(E_{\mathrm{MC}}/\mathrm{GeV})$')
# ax.set_ylabel('$\log_{10}(E_{\mathrm{reco}}/\mathrm{GeV})$')
# ax.set_title('{} response matrix'.format(composition))
# plt.tight_layout()
# plt.show()
#
#
# # In[10]:
#
# energy_bins = np.arange(6.2, 8.1, energy_bin_width)
# 10**energy_bins[1:] - 10**energy_bins[:-1]
#
#
# # In[ ]:
#
# probs = pipeline.named_steps['classifier'].predict_proba(X_test)
# prob_1 = probs[:, 0][MC_iron_mask]
# prob_2 = probs[:, 1][MC_iron_mask]
# # print(min(prob_1-prob_2))
# # print(max(prob_1-prob_2))
# # plt.hist(prob_1-prob_2, bins=30, log=True)
# plt.hist(prob_1, bins=np.linspace(0, 1, 50), log=True)
# plt.hist(prob_2, bins=np.linspace(0, 1, 50), log=True)
#
#
# # In[ ]:
#
# probs = pipeline.named_steps['classifier'].predict_proba(X_test)
# dp1 = (probs[:, 0]-probs[:, 1])[MC_proton_mask]
# print(min(dp1))
# print(max(dp1))
# dp2 = (probs[:, 0]-probs[:, 1])[MC_iron_mask]
# print(min(dp2))
# print(max(dp2))
# fig, ax = plt.subplots()
# # plt.hist(prob_1-prob_2, bins=30, log=True)
# counts, edges, pathes = plt.hist(dp1, bins=np.linspace(-1, 1, 100), log=True, label='Proton', alpha=0.75)
# counts, edges, pathes = plt.hist(dp2, bins=np.linspace(-1, 1, 100), log=True, label='Iron', alpha=0.75)
# plt.legend(loc=2)
# plt.show()
# pipeline.named_steps['classifier'].classes_
#
#
# # In[ ]:
#
# print(pipeline.named_steps['classifier'].classes_)
# le.inverse_transform(pipeline.named_steps['classifier'].classes_)
#
#
# # In[ ]:
#
# pipeline.named_steps['classifier'].decision_path(X_test)
#
#
# # In[48]:
#
# comp_list = ['light', 'heavy']
# pipeline = comp.get_pipeline('RF')
# pipeline.fit(X_train_sim, y_train_sim)
# # test_probs = defaultdict(list)
# fig, ax = plt.subplots()
# test_predictions = pipeline.predict(X_test_data)
# test_probs = pipeline.predict_proba(X_test_data)
# for class_ in pipeline.classes_:
# test_predictions == le.inverse_transform(class_)
# plt.hist(test_probs[:, class_], bins=np.linspace(0, 1, 50),
# histtype='step', label=composition,
# color=color_dict[composition], alpha=0.8, log=True)
# plt.ylabel('Counts')
# plt.xlabel('Testing set class probabilities')
# plt.legend()
# plt.grid()
# plt.show()
#
#
# # In[5]:
#
# pipeline = comp.get_pipeline('RF')
# pipeline.fit(X_train, y_train)
# test_predictions = pipeline.predict(X_test)
#
# comp_list = ['P', 'He', 'O', 'Fe']
# fig, ax = plt.subplots()
# test_probs = pipeline.predict_proba(X_test)
# fig, axarr = plt.subplots(2, 2, sharex=True, sharey=True)
# for composition, ax in zip(comp_list, axarr.flatten()):
# comp_mask = (le.inverse_transform(y_test) == composition)
# probs = np.copy(test_probs[comp_mask])
# print('probs = {}'.format(probs.shape))
# weighted_mass = np.zeros(len(probs))
# for class_ in pipeline.classes_:
# c = le.inverse_transform(class_)
# weighted_mass += comp.simfunctions.comp2mass(c) * probs[:, class_]
# print('min = {}'.format(min(weighted_mass)))
# print('max = {}'.format(max(weighted_mass)))
# ax.hist(weighted_mass, bins=np.linspace(0, 5, 100),
# histtype='step', label=None, color='darkgray',
# alpha=1.0, log=False)
# for c in comp_list:
# ax.axvline(comp.simfunctions.comp2mass(c), color=color_dict[c],
# marker='None', linestyle='-')
# ax.set_ylabel('Counts')
# ax.set_xlabel('Weighted atomic number')
# ax.set_title('MC {}'.format(composition))
# ax.grid()
# plt.tight_layout()
# plt.show()
#
#
# # In[15]:
#
# pipeline = comp.get_pipeline('RF')
# pipeline.fit(X_train, y_train)
# test_predictions = pipeline.predict(X_test)
#
# comp_list = ['P', 'He', 'O', 'Fe']
# fig, ax = plt.subplots()
# test_probs = pipeline.predict_proba(X_test)
# fig, axarr = plt.subplots(2, 2, sharex=True, sharey=True)
# for composition, ax in zip(comp_list, axarr.flatten()):
# comp_mask = (le.inverse_transform(y_test) == composition)
# probs = np.copy(test_probs[comp_mask])
# weighted_mass = np.zeros(len(probs))
# for class_ in pipeline.classes_:
# c = le.inverse_transform(class_)
# ax.hist(probs[:, class_], bins=np.linspace(0, 1, 50),
# histtype='step', label=c, color=color_dict[c],
# alpha=1.0, log=True)
# ax.legend(title='Reco comp', framealpha=0.5)
# ax.set_ylabel('Counts')
# ax.set_xlabel('Testing set class probabilities')
# ax.set_title('MC {}'.format(composition))
# ax.grid()
# plt.tight_layout()
# plt.show()
#
#
# # In[25]:
#
# comp_list = ['light', 'heavy']
# test_probs = defaultdict(list)
# fig, ax = plt.subplots()
# # test_probs = pipeline.predict_proba(X_test)
# for event in pipeline.predict_proba(X_test_data):
# composition = le.inverse_transform(np.argmax(event))
# test_probs[composition].append(np.amax(event))
# for composition in comp_list:
# plt.hist(test_probs[composition], bins=np.linspace(0, 1, 100),
# histtype='step', label=composition,
# color=color_dict[composition], alpha=0.8, log=False)
# plt.ylabel('Counts')
# plt.xlabel('Testing set class probabilities')
# plt.legend(title='Reco comp')
# plt.grid()
# plt.show()
#
#
# # In[ ]:
#
#
#
#
# # In[ ]:
#
#
#
| mit |
viekie/tensorflow-tutorial | chap03/word2vector.py | 1 | 3414 | #!/usr/bin/env python
# -*- coding: utf8 -*-
# Power by viekie2017-08-26 09:24:00
##
# @file word2vector.py
# @brief
# @author viekiedu@gmail.com
# @version 1.0
# @date 2017-08-26
import collections

import matplotlib
# BUG FIX: the backend must be selected *before* matplotlib.pyplot is
# imported -- previously use('Agg') ran after the pyplot import, so the
# default (possibly display-requiring) backend was already loaded and
# the call was ignored or warned on older matplotlib versions.
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import numpy as np
import tensorflow as tf
from six.moves import xrange
# hyper-parameters for the skip-gram model
batch_size = 20      # (input, label) pairs per SGD step
embedding_size = 2   # 2-D embeddings so the result can be plotted directly
num_sample = 15      # negative samples drawn per example for the NCE loss
# tiny toy corpus; each sentence is a space-separated token stream
sentences = ["the quick brown fox jumped over the lazy dog",
             "I love cats and dogs",
             "we all love cats and dogs",
             "cats and dogs are great",
             "sung likes cats",
             "she loves dogs",
             "cats can be very independent",
             "cats are great companions when they want to be",
             "cats are playful",
             "cats are natural hunters",
             "It's raining cats and dogs",
             "dogs and cats love sung"]
# flatten the corpus into one token list
words = ' '.join(sentences).split()
# (word, frequency) pairs sorted by descending frequency
count = collections.Counter(words).most_common()
print('word count', count[:5])
# rdic: id -> word (reverse dictionary); dic: word -> id
rdic = [i[0] for i in count]
dic = {w: i for i, w in enumerate(rdic)}
voc_size = len(dic)
# the corpus encoded as word ids
data = [dic[word] for word in words]
print('sample data', data[:10], [rdic[t] for t in data[:10]])
# CBOW-style triples: [[left context, right context], center]
cbow_pairs = []
for i in xrange(1, len(data)-1):
    cbow_pairs.append([[data[i-1], data[i+1]], data[i]])
print('context pairs', cbow_pairs[:10])
# expand each CBOW triple into two skip-gram pairs: (center, context)
skip_gram_pairs = []
for c in cbow_pairs:
    skip_gram_pairs.append([c[1], c[0][0]])
    skip_gram_pairs.append([c[1], c[0][1]])
print('skip-gram pairs', skip_gram_pairs[:5])
def generate_batch(size, pairs=None):
    """Sample ``size`` (input, label) training pairs without replacement.

    Parameters
    ----------
    size : int
        Number of pairs to draw; must be strictly smaller than the pool.
    pairs : list of [input_id, label_id], optional
        Pool to sample from. Defaults to the module-level
        ``skip_gram_pairs`` (backward compatible with the original
        single-argument call).

    Returns
    -------
    (x_data, y_data) : tuple of lists
        ``x_data`` holds input word ids; ``y_data`` holds one-element
        label lists, the rank-2 shape expected by ``tf.nn.nce_loss``.
    """
    if pairs is None:
        pairs = skip_gram_pairs
    assert size < len(pairs)
    # distinct random indices into the pool
    r = np.random.choice(len(pairs), size, replace=False)
    x_data = [pairs[i][0] for i in r]
    y_data = [[pairs[i][1]] for i in r]
    return x_data, y_data
print('batches(x, y)', generate_batch(3))
# placeholders for one mini-batch of (center word id, context word id)
train_inputs = tf.placeholder(tf.int32, shape=[batch_size])
# nce_loss expects rank-2 labels of shape [batch_size, 1]
train_labels = tf.placeholder(tf.int32, shape=[batch_size, 1])
with tf.device('/cpu:0'):
    # embedding matrix, initialized uniformly in [-1, 1)
    embeddings = tf.Variable(tf.random_uniform([voc_size, embedding_size],
                                               -1.0, 1.0))
    # look up the embedding vectors for the current batch of inputs
    embed = tf.nn.embedding_lookup(embeddings, train_inputs)
    # output-side weights/biases for noise-contrastive estimation
    nce_weights = tf.Variable(tf.random_uniform([voc_size, embedding_size],
                                                -1.0, 1.0))
    nce_biases = tf.Variable(tf.zeros([voc_size]))
# mean NCE loss over the batch, with num_sample negative draws per example
loss = tf.reduce_mean(tf.nn.nce_loss(nce_weights, nce_biases, train_labels,
                                     embed, num_sample, voc_size))
train_op = tf.train.AdamOptimizer(1e-1).minimize(loss)
with tf.Session() as sess:
    tf.global_variables_initializer().run()
    # plain SGD loop: fresh random batch every step
    for step in xrange(1000):
        batch_inputs, batch_labels = generate_batch(batch_size)
        _, loss_val = sess.run([train_op, loss],
                               feed_dict={train_inputs: batch_inputs,
                                          train_labels: batch_labels})
        if step % 10 == 0:
            print("loss at", step, loss_val)
    # materialize the learned embedding matrix while the session is open
    trained_embeddings = embeddings.eval()
# scatter-plot the ten most frequent words when the embedding is 2-D
if trained_embeddings.shape[1] == 2:
    labels = rdic[:10]
    for i, label in enumerate(labels):
        x, y = trained_embeddings[i, :]
        plt.scatter(x, y)
        plt.annotate(label, xy=(x, y), xytext=(5, 2),
                     textcoords='offset points', ha='right', va='bottom')
    # written to disk; the Agg backend has no interactive display
    plt.savefig('word2vec.png')
| apache-2.0 |
phobson/wqio | wqio/hydro.py | 2 | 37198 | import warnings
import numpy
from matplotlib import pyplot
from matplotlib import dates
from matplotlib import gridspec
import seaborn
import pandas
from wqio import utils
from wqio import viz
from wqio import validate
from pandas.plotting import register_matplotlib_converters
register_matplotlib_converters()
# Time-unit conversion constants. Kept as floats so downstream division
# stays floating-point even under Python 2 integer-division semantics.
SEC_PER_MINUTE = 60.0
MIN_PER_HOUR = 60.0
HOUR_PER_DAY = 24.0
SEC_PER_HOUR = SEC_PER_MINUTE * MIN_PER_HOUR
SEC_PER_DAY = SEC_PER_HOUR * HOUR_PER_DAY
def _wet_first_row(df, wetcol, diffcol):
# make sure that if the first record is associated with the first
# storm if it's wet
firstrow = df.iloc[0]
if firstrow[wetcol]:
df.loc[firstrow.name, diffcol] = 1
return df
def _wet_window_diff(is_wet, ie_periods):
return (
is_wet.rolling(int(ie_periods), min_periods=1)
.apply(lambda window: window.any(), raw=False)
.diff()
)
def parse_storm_events(
    data,
    intereventHours,
    outputfreqMinutes,
    precipcol=None,
    inflowcol=None,
    outflowcol=None,
    baseflowcol=None,
    stormcol="storm",
    debug=False,
):
    """Parses the hydrologic data into distinct storms.

    In this context, a storm is defined as starting whenever the
    hydrologic records shows non-zero precipitation or [in|out]flow
    from the BMP after a minimum inter-event dry period duration
    specified in the function call. The storm ends the observation
    *after* the last non-zero precipitation or flow value.

    Parameters
    ----------
    data : pandas.DataFrame
        Datetime-indexed record of precipitation and/or flow.
    intereventHours : float
        The Inter-Event dry duration (in hours) that classifies the
        next hydrologic activity as a new event.
    outputfreqMinutes : float
        Frequency (in minutes) to which the record is resampled before
        parsing; precipitation is summed, flows are averaged.
    precipcol : string, optional (default = None)
        Name of column in `data` containing precipitation data.
    inflowcol : string, optional (default = None)
        Name of column in `data` containing influent flow data.
    outflowcol : string, optional (default = None)
        Name of column in `data` containing effluent flow data.
    baseflowcol : string, optional (default = None)
        Name of column in `data` containing boolean indicating
        which records are considered baseflow.
    stormcol : string (default = 'storm')
        Name of column identifying distinct storms in the output.
    debug : bool (default = False)
        If True, diagnostic columns will not be dropped prior to
        returning the dataframe of parsed_storms.

    Writes
    ------
    None

    Returns
    -------
    parsed_storms : pandas.DataFrame
        Copy of the original `data` DataFrame, but resampled to a
        fixed frequency, columns possibly renamed, and a `storm` column
        added to denote the storm to which each record belongs. Records
        where `storm` == 0 are not a part of any storm.

    """

    # pull out the rain and flow data
    # NOTE(review): when a column name is not provided, a placeholder
    # column is written onto *data* itself, so the caller's frame is
    # mutated in place -- confirm that this is intended.
    if precipcol is None:
        precipcol = "precip"
        data.loc[:, precipcol] = numpy.nan

    if inflowcol is None:
        inflowcol = "inflow"
        data.loc[:, inflowcol] = numpy.nan

    if outflowcol is None:
        outflowcol = "outflow"
        data.loc[:, outflowcol] = numpy.nan

    if baseflowcol is None:
        baseflowcol = "baseflow"
        data.loc[:, baseflowcol] = False

    # bool column where True means there's rain or flow of some kind
    water_columns = [inflowcol, outflowcol, precipcol]
    cols_to_use = water_columns + [baseflowcol]

    # resampling rules: depths accumulate, flow rates average, and a
    # period counts as baseflow if any observation within it was baseflow
    agg_dict = {
        precipcol: numpy.sum,
        inflowcol: numpy.mean,
        outflowcol: numpy.mean,
        baseflowcol: numpy.any,
    }

    freq = pandas.offsets.Minute(outputfreqMinutes)
    # number of resampled periods that span the inter-event dry duration
    ie_periods = int(MIN_PER_HOUR / freq.n * intereventHours)

    # periods between storms are where the cumulative number
    # of storms that have ended are equal to the cumulative
    # number of storms that have started.
    # Stack Overflow: http://tinyurl.com/lsjkr9x
    res = (
        data.resample(freq)
        .agg(agg_dict)
        .loc[:, lambda df: df.columns.isin(cols_to_use)]
        # "wet" = any water observed at all, unless flagged as baseflow
        .assign(
            __wet=lambda df: numpy.any(df[water_columns] > 0, axis=1) & ~df[baseflowcol]
        )
        # +1 where a wet window starts, -1 where one ends
        .assign(__windiff=lambda df: _wet_window_diff(df["__wet"], ie_periods))
        .pipe(_wet_first_row, "__wet", "__windiff")
        .assign(__event_start=lambda df: df["__windiff"] == 1)
        .assign(__event_end=lambda df: df["__windiff"].shift(-1 * ie_periods) == -1)
        # running count of starts gives every period a candidate storm number
        .assign(__storm=lambda df: df["__event_start"].cumsum())
        .assign(
            storm=lambda df: numpy.where(
                df["__storm"] == df["__event_end"].shift(2).cumsum(),
                0,  # inter-event periods marked as zero
                df["__storm"],  # actual events keep their number
            )
        )
    )

    if not debug:
        # drop the double-underscore diagnostic columns
        res = res.loc[:, res.columns.map(lambda c: not c.startswith("__"))]

    return res
class Storm(object):
""" Object representing a storm event
Parameters
----------
dataframe : pandas.DataFrame
A datetime-indexed Dataframe containing all of the hydrologic
data and am interger column indentifying distinct storms.
stormnumber : int
The storm we care about.
precipcol, inflowcol, outflow, tempcol, stormcol : string, optional
Names for columns representing each hydrologic quantity.
freqMinutes : float (default = 5)
The time period, in minutes, between observations.
volume_conversion : float, optional (default = 1)
Conversion factor to go from flow to volume for a single
observation.
"""
# TODO: rename freqMinutes to periodMinutes
def __init__(
self,
dataframe,
stormnumber,
precipcol="precip",
inflowcol="inflow",
outflowcol="outflow",
tempcol="temp",
stormcol="storm",
freqMinutes=5,
volume_conversion=1,
):
self.inflowcol = inflowcol
self.outflowcol = outflowcol
self.precipcol = precipcol
self.tempcol = tempcol
self.stormnumber = stormnumber
self.freqMinutes = freqMinutes
self.volume_conversion = volume_conversion * SEC_PER_MINUTE * self.freqMinutes
# basic data
self.data = dataframe[dataframe[stormcol] == self.stormnumber].copy()
self.hydrofreq_label = "{0} min".format(self.freqMinutes)
# tease out start/stop info
self.start = self.data.index[0]
self.end = self.data.index[-1]
self._season = utils.getSeason(self.start)
# storm duration (hours)
duration = self.end - self.start
self.duration_hours = duration.total_seconds() / SEC_PER_HOUR
# antecedent dry period (hours)
if self.stormnumber > 1:
prev_storm_mask = dataframe[stormcol] == self.stormnumber - 1
previous_end = dataframe[prev_storm_mask].index[-1]
antecedent_timedelta = self.start - previous_end
self.antecedent_period_days = (
antecedent_timedelta.total_seconds() / SEC_PER_DAY
)
else:
self.antecedent_period_days = numpy.nan
# quantities
self._precip = None
self._inflow = None
self._outflow = None
# starts and stop
self._precip_start = None
self._precip_end = None
self._inflow_start = None
self._inflow_end = None
self._outflow_start = None
self._outflow_end = None
# peaks
self._peak_precip_intensity = None
self._peak_inflow = None
self._peak_outflow = None
# times of peaks
self._peak_precip_intensity_time = None
self._peak_inflow_time = None
self._peak_outflow_time = None
self._peak_lag_hours = None
# centroids
self._centroid_precip_time = None
self._centroid_inflow_time = None
self._centroid_outflow_time = None
self._centroid_lag_hours = None
# totals
self._total_precip_depth = None
self._total_inflow_volume = None
self._total_outflow_volume = None
self.meta = {
self.outflowcol: {
"name": "Flow (calculated, L/s)",
"ylabel": "Effluent flow (L/s)",
"color": "CornFlowerBlue",
"linewidth": 1.5,
"alpha": 0.5,
"ymin": 0,
},
self.inflowcol: {
"name": "Inflow (estimated, L/s)",
"ylabel": "Estimated influent flow (L/s)",
"color": "Maroon",
"linewidth": 1.5,
"alpha": 0.5,
"ymin": 0,
},
self.precipcol: {
"name": "Precip (mm)",
"ylabel": "%s Precip.\nDepth (mm)" % self.hydrofreq_label,
"color": "DarkGreen",
"linewidth": 1.5,
"alpha": 0.4,
"ymin": 0,
},
self.tempcol: {
"name": "Air Temp (deg C)",
"ylabel": "Air Temperature (deg. C)",
"color": "DarkGoldenRod",
"linewidth": 1.5,
"alpha": 0.5,
"ymin": None,
},
}
self._summary_dict = None
@property
def precip(self):
if self._precip is None:
if self.precipcol is not None:
self._precip = self.data[self.data[self.precipcol] > 0][self.precipcol]
else:
self._precip = numpy.array([])
return self._precip
@property
def inflow(self):
if self._inflow is None:
if self.inflowcol is not None:
self._inflow = self.data[self.data[self.inflowcol] > 0][self.inflowcol]
else:
self._inflow = numpy.array([])
return self._inflow
@property
def outflow(self):
if self._outflow is None:
if self.outflowcol is not None:
self._outflow = self.data[self.data[self.outflowcol] > 0][
self.outflowcol
]
else:
self._outflow = numpy.array([])
return self._outflow
@property
def has_precip(self):
return self.precip.shape[0] > 0
@property
def has_inflow(self):
return self.inflow.shape[0] > 0
@property
def has_outflow(self):
return self.outflow.shape[0] > 0
@property
def season(self):
return self._season
@season.setter
def season(self, value):
self._season = value
# starts and stops
@property
def precip_start(self):
if self._precip_start is None and self.has_precip:
self._precip_start = self._get_event_time(self.precipcol, "start")
return self._precip_start
@property
def precip_end(self):
if self._precip_end is None and self.has_precip:
self._precip_end = self._get_event_time(self.precipcol, "end")
return self._precip_end
@property
def inflow_start(self):
if self._inflow_start is None and self.has_inflow:
self._inflow_start = self._get_event_time(self.inflowcol, "start")
return self._inflow_start
@property
def inflow_end(self):
if self._inflow_end is None and self.has_inflow:
self._inflow_end = self._get_event_time(self.inflowcol, "end")
return self._inflow_end
@property
def outflow_start(self):
if self._outflow_start is None and self.has_outflow:
self._outflow_start = self._get_event_time(self.outflowcol, "start")
return self._outflow_start
@property
def outflow_end(self):
if self._outflow_end is None and self.has_outflow:
self._outflow_end = self._get_event_time(self.outflowcol, "end")
return self._outflow_end
@property
def _peak_depth(self):
if self.has_precip:
return self.precip.max()
@property
def peak_precip_intensity(self):
if self._peak_precip_intensity is None and self.has_precip:
self._peak_precip_intensity = (
self._peak_depth * MIN_PER_HOUR / self.freqMinutes
)
return self._peak_precip_intensity
@property
def peak_inflow(self):
if self._peak_inflow is None and self.has_inflow:
self._peak_inflow = self.inflow.max()
return self._peak_inflow
@property
def peak_outflow(self):
if self._peak_outflow is None and self.has_outflow:
self._peak_outflow = self.outflow.max()
return self._peak_outflow
@property
def total_precip_depth(self):
if self._total_precip_depth is None and self.has_precip:
self._total_precip_depth = self.data[self.precipcol].sum()
return self._total_precip_depth
@property
def total_inflow_volume(self):
if self._total_inflow_volume is None and self.has_inflow:
self._total_inflow_volume = (
self.data[self.inflowcol].sum() * self.volume_conversion
)
return self._total_inflow_volume
@property
def total_outflow_volume(self):
if self._total_outflow_volume is None and self.has_outflow:
self._total_outflow_volume = (
self.data[self.outflowcol].sum() * self.volume_conversion
)
return self._total_outflow_volume
@property
def centroid_precip_time(self):
if self._centroid_precip_time is None and self.has_precip:
self._centroid_precip_time = self._compute_centroid(self.precipcol)
return self._centroid_precip_time
@property
def centroid_inflow_time(self):
if self._centroid_inflow_time is None and self.has_inflow:
self._centroid_inflow_time = self._compute_centroid(self.inflowcol)
return self._centroid_inflow_time
@property
def centroid_outflow_time(self):
if self._centroid_outflow_time is None and self.has_outflow:
self._centroid_outflow_time = self._compute_centroid(self.outflowcol)
return self._centroid_outflow_time
@property
def centroid_lag_hours(self):
if (
self._centroid_lag_hours is None
and self.centroid_outflow_time is not None
and self.centroid_inflow_time is not None
):
self._centroid_lag_hours = (
self.centroid_outflow_time - self.centroid_inflow_time
).total_seconds() / SEC_PER_HOUR
return self._centroid_lag_hours
@property
def peak_precip_intensity_time(self):
if self._peak_precip_intensity_time is None and self.has_precip:
PI_selector = self.data[self.precipcol] == self._peak_depth
self._peak_precip_intensity_time = self.data[PI_selector].index[0]
return self._peak_precip_intensity_time
@property
def peak_inflow_time(self):
if self._peak_inflow_time is None and self.has_inflow:
PInf_selector = self.data[self.inflowcol] == self.peak_inflow
self._peak_inflow_time = self.data[PInf_selector].index[0]
return self._peak_inflow_time
@property
def peak_outflow_time(self):
if self._peak_outflow_time is None and self.has_outflow:
PEff_selector = self.data[self.outflowcol] == self.peak_outflow
if PEff_selector.sum() > 0:
self._peak_outflow_time = self.data[PEff_selector].index[0]
return self._peak_outflow_time
@property
def peak_lag_hours(self):
if (
self._peak_lag_hours is None
and self.peak_outflow_time is not None
and self.peak_inflow_time is not None
):
time_delta = self.peak_outflow_time - self.peak_inflow_time
self._peak_lag_hours = time_delta.total_seconds() / SEC_PER_HOUR
return self._peak_lag_hours
@property
def summary_dict(self):
if self._summary_dict is None:
self._summary_dict = {
"Storm Number": self.stormnumber,
"Antecedent Days": self.antecedent_period_days,
"Start Date": self.start,
"End Date": self.end,
"Duration Hours": self.duration_hours,
"Peak Precip Intensity": self.peak_precip_intensity,
"Total Precip Depth": self.total_precip_depth,
"Total Inflow Volume": self.total_inflow_volume,
"Peak Inflow": self.peak_inflow,
"Total Outflow Volume": self.total_outflow_volume,
"Peak Outflow": self.peak_outflow,
"Peak Lag Hours": self.peak_lag_hours,
"Centroid Lag Hours": self.centroid_lag_hours,
"Season": self.season,
}
return self._summary_dict
def is_small(self, minprecip=0.0, mininflow=0.0, minoutflow=0.0):
""" Determines whether a storm can be considered "small".
Parameters
----------
minprecip, mininflow, minoutflow : float, optional (default = 0)
The minimum amount of each hydrologic quantity below which a
storm can be considered "small".
Returns
-------
storm_is_small : bool
True if the storm is considered small.
"""
storm_is_small = (
(
self.total_precip_depth is not None
and self.total_precip_depth < minprecip
)
or (
self.total_inflow_volume is not None
and self.total_inflow_volume < mininflow
)
or (
self.total_outflow_volume is not None
and self.total_outflow_volume < minoutflow
)
)
return storm_is_small
def _get_event_time(self, column, bound):
index_map = {"start": 0, "end": -1}
quantity = self.data[self.data[column] > 0]
if quantity.shape[0] == 0:
warnings.warn("Storm has no {}".format(column), UserWarning)
else:
return quantity.index[index_map[bound]]
def _get_max_quantity(self, column):
return self.data[column].max()
    def _compute_centroid(self, column):
        """Quantity-weighted mean time (centroid) of *column*.

        Returns a naive pandas.Timestamp, or None when the column sums
        to zero (no centroid exists).
        """
        # ordinal time index of storm
        time_idx = [
            dates.date2num(idx.to_pydatetime()) for idx in self.data.index.tolist()
        ]

        # weighted average of the ordinal times, weights = observed values
        centroid = numpy.sum(self.data[column] * time_idx) / numpy.sum(
            self.data[column]
        )

        # an all-zero column produces 0/0 -> NaN; report "no centroid"
        if numpy.isnan(centroid):
            return None
        else:
            # num2date yields a tz-aware (UTC) datetime; drop the tz so the
            # result is comparable with the record's naive DatetimeIndex
            return pandas.Timestamp(dates.num2date(centroid)).tz_convert(None)
def _plot_centroids(self, ax, yfactor=0.5):
artists = []
labels = []
y_val = yfactor * ax.get_ylim()[1]
if self.centroid_precip is not None:
ax.plot(
[self.centroid_precip],
[y_val],
color="DarkGreen",
marker="o",
linestyle="none",
zorder=20,
markersize=6,
)
artists.append(
pyplot.Line2D(
[0],
[0],
marker=".",
markersize=6,
linestyle="none",
color="DarkGreen",
)
)
labels.append("Precip. centroid")
if self.centroid_flow is not None:
ax.plot(
[self.centroid_flow],
[y_val],
color="CornflowerBlue",
marker="s",
linestyle="none",
zorder=20,
markersize=6,
)
artists.append(
pyplot.Line2D(
[0],
[0],
marker="s",
markersize=6,
linestyle="none",
color="CornflowerBlue",
)
)
labels.append("Effluent centroid")
if self.centroid_precip is not None and self.centroid_flow is not None:
ax.annotate(
"",
(self.centroid_flow, y_val),
arrowprops=dict(arrowstyle="-|>"),
xytext=(self.centroid_precip, y_val),
)
return artists, labels
def plot_hydroquantity(
self, quantity, ax=None, label=None, otherlabels=None, artists=None
):
""" Draws a hydrologic quantity to a matplotlib axes.
Parameters
----------
quantity : string
Column name of the quantity you want to plot.
ax : matplotlib axes object, optional
The axes on which the data will be plotted. If None, a new
one will be created.
label : string, optional
How the series should be labeled in the figure legend.
otherlabels : list of strings, optional
A list of other legend labels that have already been plotted
to ``ax``. If provided, ``label`` will be appended. If not
provided, and new list will be created.
artists : list of matplotlib artists, optional
A list of other legend items that have already been plotted
to ``ax``. If provided, the artist created will be appended.
If not provided, and new list will be created.
Returns
-------
fig : matplotlib.Figure
The figure containing the plot.
labels : list of strings
Labels to be included in a legend for the figure.
artists : list of matplotlib artists
Symbology for the figure legend.
"""
# setup the figure
fig, ax = validate.axes(ax)
if label is None:
label = quantity
# select the plot props based on the column
try:
meta = self.meta[quantity]
except KeyError:
raise KeyError("{} not available".format(quantity))
# plot the data
self.data[quantity].fillna(0).plot(
ax=ax, kind="area", color=meta["color"], alpha=meta["alpha"], zorder=5
)
if artists is not None:
proxy = pyplot.Rectangle(
(0, 0), 1, 1, facecolor=meta["color"], linewidth=0, alpha=meta["alpha"]
)
artists.append(proxy)
if otherlabels is not None:
otherlabels.append(label)
return fig, otherlabels, artists
def summaryPlot(
self,
axratio=2,
filename=None,
showLegend=True,
precip=True,
inflow=True,
outflow=True,
figopts={},
serieslabels={},
):
"""
Creates a figure showing the hydrlogic record (flow and
precipitation) of the storm
Input:
axratio : optional float or int (default = 2)
Relative height of the flow axis compared to the
precipiation axis.
filename : optional string (default = None)
Filename to which the figure will be saved.
**figwargs will be passed on to `pyplot.Figure`
Writes:
Figure of flow and precipitation for a storm
Returns:
None
"""
fig = pyplot.figure(**figopts)
gs = gridspec.GridSpec(
nrows=2, ncols=1, height_ratios=[1, axratio], hspace=0.12
)
rainax = fig.add_subplot(gs[0])
rainax.yaxis.set_major_locator(pyplot.MaxNLocator(5))
flowax = fig.add_subplot(gs[1], sharex=rainax)
# create the legend proxy artists
artists = []
labels = []
# in the label assignment: `serieslabels.pop(item, item)` might
# seem odd. What it does is looks for a label (value) in the
# dictionary with the key equal to `item`. If there is no valur
# for that key in the dictionary the `item` itself is returned.
# so if there's nothing called "test" in mydict,
# `mydict.pop("test", "test")` returns `"test"`.
if inflow:
fig, labels, artists = self.plot_hydroquantity(
self.inflowcol,
ax=flowax,
label=serieslabels.pop(self.inflowcol, self.inflowcol),
otherlabels=labels,
artists=artists,
)
if outflow:
fig, labels, arti = self.plot_hydroquantity(
self.outflowcol,
ax=flowax,
label=serieslabels.pop(self.outflowcol, self.outflowcol),
otherlabels=labels,
artists=artists,
)
if precip:
fig, labels, arti = self.plot_hydroquantity(
self.precipcol,
ax=rainax,
label=serieslabels.pop(self.precipcol, self.precipcol),
otherlabels=labels,
artists=artists,
)
rainax.invert_yaxis()
if showLegend:
leg = rainax.legend(
artists,
labels,
fontsize=7,
ncol=1,
markerscale=0.75,
frameon=False,
loc="lower right",
)
leg.get_frame().set_zorder(25)
_leg = [leg]
else:
_leg = None
seaborn.despine(ax=rainax, bottom=True, top=False)
seaborn.despine(ax=flowax)
flowax.set_xlabel("")
rainax.set_xlabel("")
if filename is not None:
fig.savefig(
filename,
dpi=300,
transparent=True,
bbox_inches="tight",
bbox_extra_artists=_leg,
)
return fig, artists, labels
class HydroRecord(object):
""" Class representing an entire hydrologic record.
Parameters
----------
hydrodata : pandas.DataFrame
DataFrame of hydrologic data of the storm. Should contain
a unique index of type pandas.DatetimeIndex.
precipcol : string, optional (default = None)
Name of column in `hydrodata` containing precipiation data.
inflowcol : string, optional (default = None)
Name of column in `hydrodata` containing influent flow data.
outflowcol : string, optional (default = None)
Name of column in `hydrodata` containing effluent flow data.
baseflowcol : string, optional (default = None)
Name of column in `hydrodata` containing boolean indicating
which records are considered baseflow.
stormcol : string (default = 'storm')
Name of column in `hydrodata` indentifying distinct storms.
minprecip, mininflow, minoutflow : float, optional (default = 0)
The minimum amount of each hydrologic quantity below which a
storm can be considered "small".
outputfreqMinutes : int, optional (default = 10)
The default frequency (minutes) to which all data will be
resampled. Precipitation data will be summed up across '
multiple timesteps during resampling, while flow will be
averaged.
intereventHours : int, optional (default = 6)
The dry duration (no flow or rain) required to signal the end of
a storm.
volume_conversion : float, optional (default = 1)
Conversion factor to go from flow to volume for a single
observation.
stormclass : object, optional
Defaults to wqio.hydro.Storm. Can be a subclass of that in cases
where custom functionality is needed.
lowmem : bool (default = False)
If True, all dry observations are removed from the dataframe.
"""
# TODO: rename `outputfreqMinutes` to `outputPeriodMinutes`
def __init__(
self,
hydrodata,
precipcol=None,
inflowcol=None,
outflowcol=None,
baseflowcol=None,
tempcol=None,
stormcol="storm",
minprecip=0.0,
mininflow=0.0,
minoutflow=0.0,
outputfreqMinutes=10,
intereventHours=6,
volume_conversion=1,
stormclass=None,
lowmem=False,
):
# validate input
if precipcol is None and inflowcol is None and outflowcol is None:
msg = "`hydrodata` must have at least a precip or in/outflow column"
raise ValueError(msg)
self.stormclass = stormclass or Storm
# static input
self._raw_data = hydrodata
self.precipcol = precipcol
self.inflowcol = inflowcol
self.outflowcol = outflowcol
self.baseflowcol = baseflowcol
self.stormcol = stormcol
self.tempcol = tempcol
self.outputfreq = pandas.offsets.Minute(outputfreqMinutes)
self.intereventHours = intereventHours
self.intereventPeriods = MIN_PER_HOUR / self.outputfreq.n * self.intereventHours
self.minprecip = minprecip
self.mininflow = mininflow
self.minoutflow = minoutflow
self.volume_conversion = volume_conversion
self.lowmem = lowmem
# properties
self._data = None
self._all_storms = None
self._storms = None
self._storm_stats = None
@property
def data(self):
if self._data is None:
self._data = self._define_storms()
if self.lowmem:
self._data = self._data[self._data[self.stormcol] != 0]
return self._data
@property
def all_storms(self):
if self._all_storms is None:
self._all_storms = {}
for storm_number in self.data[self.stormcol].unique():
if storm_number > 0:
this_storm = self.stormclass(
self.data,
storm_number,
precipcol=self.precipcol,
inflowcol=self.inflowcol,
outflowcol=self.outflowcol,
tempcol=self.tempcol,
stormcol=self.stormcol,
volume_conversion=self.volume_conversion,
freqMinutes=self.outputfreq.n,
)
self._all_storms[storm_number] = this_storm
return self._all_storms
@property
def storms(self):
if self._storms is None:
self._storms = {}
for snum, storm in self.all_storms.items():
is_small = storm.is_small(
minprecip=self.minprecip,
mininflow=self.mininflow,
minoutflow=self.minoutflow,
)
if not is_small:
self._storms[snum] = storm
return self._storms
@property
def storm_stats(self):
col_order = [
"Storm Number",
"Antecedent Days",
"Season",
"Start Date",
"End Date",
"Duration Hours",
"Peak Precip Intensity",
"Total Precip Depth",
"Total Inflow Volume",
"Peak Inflow",
"Total Outflow Volume",
"Peak Outflow",
"Peak Lag Hours",
"Centroid Lag Hours",
]
if self._storm_stats is None:
storm_stats = pandas.DataFrame(
[self.storms[sn].summary_dict for sn in self.storms]
)
self._storm_stats = storm_stats[col_order]
return self._storm_stats.sort_values(by=["Storm Number"]).reset_index(drop=True)
    def _define_storms(self, debug=False):
        """Run storm parsing over the raw hydrologic record.

        Thin wrapper around :func:`parse_storm_events` using the
        record's configured column names, inter-event duration, and
        output frequency.

        Parameters
        ----------
        debug : bool (default = False)
            If True, intermediate diagnostic columns are retained in
            the result.

        Returns
        -------
        pandas.DataFrame
            The resampled record with a ``storm`` column added.
        """
        parsed = parse_storm_events(
            self._raw_data,
            self.intereventHours,
            self.outputfreq.n,
            precipcol=self.precipcol,
            inflowcol=self.inflowcol,
            outflowcol=self.outflowcol,
            baseflowcol=self.baseflowcol,
            stormcol="storm",
            debug=debug,
        )

        return parsed
def getStormFromTimestamp(self, timestamp, lookback_hours=0, smallstorms=False):
""" Get the storm associdated with a give (sample) date
Parameters
----------
timestamp : pandas.Timestamp
The date/time for which to search within the hydrologic
record.
lookback_hours : positive int or float, optional (default = 0)
If no storm is actively occuring at the provided timestamp,
we can optionally look backwards in the hydrologic record a
fixed amount of time (specified in hours). Negative values
are ignored.
smallstorms : bool, optional (default = False)
If True, small storms will be included in the search.
Returns
-------
storm_number : int
storm : wqio.Storm
"""
# santize date input
timestamp = validate.timestamp(timestamp)
# check lookback hours
if lookback_hours < 0:
raise ValueError("`lookback_hours` must be greater than 0")
# initial search for the storm
storm_number = int(self.data.loc[:timestamp, self.stormcol].iloc[-1])
# look backwards if we have too
if (storm_number == 0 or pandas.isnull(storm_number)) and lookback_hours != 0:
lookback_time = timestamp - pandas.offsets.Hour(lookback_hours)
storms = self.data.loc[lookback_time:timestamp, [self.stormcol]]
storms = storms[storms > 0].dropna()
if storms.shape[0] == 0:
# no storm
storm_number = None
else:
# storm w/i the lookback period
storm_number = int(storms.iloc[-1])
# return storm_number and storms
if smallstorms:
return storm_number, self.all_storms.get(storm_number, None)
else:
return storm_number, self.storms.get(storm_number, None)
def histogram(self, valuecol, bins, **factoropts):
""" Plot a faceted, categorical histogram of storms.
Parameters
----------
valuecol : str, optional
The name of the column that should be categorized and plotted.
bins : array-like, optional
The right-edges of the histogram bins.
factoropts : keyword arguments, optional
Options passed directly to seaborn.factorplot
Returns
-------
fig : seaborn.FacetGrid
See also
--------
viz.categorical_histogram
seaborn.factorplot
"""
fg = viz.categorical_histogram(self.storm_stats, valuecol, bins, **factoropts)
fg.fig.tight_layout()
return fg
class DrainageArea(object):
    def __init__(self, total_area=1.0, imp_area=1.0, bmp_area=0.0):
        """ A simple object representing the drainage area of a BMP.

        Units are not enforced, so keep them consistent yourself. The
        calculations available assume that the area of the BMP and the
        "total" area are mutually exclusive. In other words, the
        watershed outlet is at the BMP inlet.

        Parameters
        ----------
        total_area : float, optional (default = 1.0)
            The total geometric area of the BMP's catchment.
        imp_area : float, optional (default = 1.0)
            The impervious area of the BMP's catchment.
        bmp_area : float, optional (default = 0.0)
            The geometric area of the BMP itself.

        """
        self.total_area = float(total_area)
        self.imp_area = float(imp_area)
        self.bmp_area = float(bmp_area)

    def simple_method(self, storm_depth, volume_conversion=1.0, annual_factor=1.0):
        """ Estimate runoff volume via Bob Pitt's Simple Method.

        Parameters
        ----------
        storm_depth : float
            Depth of the storm.
        volume_conversion : float, optional (default = 1.0)
            Conversion factor to go from [area units] * [depth units] to
            the desired [volume units]. If [area] = m^2, [depth] = mm,
            and [volume] = L, then `volume_conversion` = 1.
        annual_factor : float, optional (default = 1.0)
            The Simple Method's annual correction factor to account for
            small storms that do not produce runoff.

        Returns
        -------
        float
            The volume of water entering the BMP immediately downstream
            of the drainage area.

        """
        # volumetric runoff coefficient from the impervious fraction
        runoff_coeff = 0.05 + 0.9 * (self.imp_area / self.total_area)

        # catchment runoff volume produced per unit of storm depth
        catchment_per_depth = runoff_coeff * self.total_area * volume_conversion

        # direct precipitation captured on the BMP surface per unit depth
        # (not scaled by the annual factor)
        direct_per_depth = self.bmp_area * volume_conversion

        # total runoff for the actual storm depth
        return storm_depth * (
            catchment_per_depth * annual_factor + direct_per_depth
        )
| bsd-3-clause |
381426068/MissionPlanner | Lib/site-packages/numpy/core/code_generators/ufunc_docstrings.py | 57 | 85797 | # Docstrings for generated ufuncs
# registry of generated-ufunc docstrings, keyed by fully qualified name
docdict = {}


def get(name):
    """Look up a previously registered docstring (None when absent)."""
    return docdict.get(name)


def add_newdoc(place, name, doc):
    """Register *doc* under the fully qualified key ``place.name``."""
    key = '.'.join((place, name))
    docdict[key] = doc
add_newdoc('numpy.core.umath', 'absolute',
"""
Calculate the absolute value element-wise.
Parameters
----------
x : array_like
Input array.
Returns
-------
absolute : ndarray
An ndarray containing the absolute value of
each element in `x`. For complex input, ``a + ib``, the
absolute value is :math:`\\sqrt{ a^2 + b^2 }`.
Examples
--------
>>> x = np.array([-1.2, 1.2])
>>> np.absolute(x)
array([ 1.2, 1.2])
>>> np.absolute(1.2 + 1j)
1.5620499351813308
Plot the function over ``[-10, 10]``:
>>> import matplotlib.pyplot as plt
>>> x = np.linspace(-10, 10, 101)
>>> plt.plot(x, np.absolute(x))
>>> plt.show()
Plot the function over the complex plane:
>>> xx = x + 1j * x[:, np.newaxis]
>>> plt.imshow(np.abs(xx), extent=[-10, 10, -10, 10])
>>> plt.show()
""")
add_newdoc('numpy.core.umath', 'add',
"""
Add arguments element-wise.
Parameters
----------
x1, x2 : array_like
The arrays to be added. If ``x1.shape != x2.shape``, they must be
broadcastable to a common shape (which may be the shape of one or
the other).
Returns
-------
y : ndarray or scalar
The sum of `x1` and `x2`, element-wise. Returns a scalar if
both `x1` and `x2` are scalars.
Notes
-----
Equivalent to `x1` + `x2` in terms of array broadcasting.
Examples
--------
>>> np.add(1.0, 4.0)
5.0
>>> x1 = np.arange(9.0).reshape((3, 3))
>>> x2 = np.arange(3.0)
>>> np.add(x1, x2)
array([[ 0., 2., 4.],
[ 3., 5., 7.],
[ 6., 8., 10.]])
""")
add_newdoc('numpy.core.umath', 'arccos',
"""
Trigonometric inverse cosine, element-wise.
The inverse of `cos` so that, if ``y = cos(x)``, then ``x = arccos(y)``.
Parameters
----------
x : array_like
`x`-coordinate on the unit circle.
For real arguments, the domain is [-1, 1].
out : ndarray, optional
Array of the same shape as `a`, to store results in. See
`doc.ufuncs` (Section "Output arguments") for more details.
Returns
-------
angle : ndarray
The angle of the ray intersecting the unit circle at the given
`x`-coordinate in radians [0, pi]. If `x` is a scalar then a
scalar is returned, otherwise an array of the same shape as `x`
is returned.
See Also
--------
cos, arctan, arcsin, emath.arccos
Notes
-----
`arccos` is a multivalued function: for each `x` there are infinitely
many numbers `z` such that `cos(z) = x`. The convention is to return
the angle `z` whose real part lies in `[0, pi]`.
For real-valued input data types, `arccos` always returns real output.
For each value that cannot be expressed as a real number or infinity,
it yields ``nan`` and sets the `invalid` floating point error flag.
For complex-valued input, `arccos` is a complex analytic function that
has branch cuts `[-inf, -1]` and `[1, inf]` and is continuous from
above on the former and from below on the latter.
The inverse `cos` is also known as `acos` or cos^-1.
References
----------
M. Abramowitz and I.A. Stegun, "Handbook of Mathematical Functions",
10th printing, 1964, pp. 79. http://www.math.sfu.ca/~cbm/aands/
Examples
--------
We expect the arccos of 1 to be 0, and of -1 to be pi:
>>> np.arccos([1, -1])
array([ 0. , 3.14159265])
Plot arccos:
>>> import matplotlib.pyplot as plt
>>> x = np.linspace(-1, 1, num=100)
>>> plt.plot(x, np.arccos(x))
>>> plt.axis('tight')
>>> plt.show()
""")
add_newdoc('numpy.core.umath', 'arccosh',
"""
Inverse hyperbolic cosine, elementwise.
Parameters
----------
x : array_like
Input array.
out : ndarray, optional
Array of the same shape as `x`, to store results in.
See `doc.ufuncs` (Section "Output arguments") for details.
Returns
-------
y : ndarray
Array of the same shape as `x`.
See Also
--------
cosh, arcsinh, sinh, arctanh, tanh
Notes
-----
`arccosh` is a multivalued function: for each `x` there are infinitely
many numbers `z` such that `cosh(z) = x`. The convention is to return the
`z` whose imaginary part lies in `[-pi, pi]` and the real part in
``[0, inf]``.
For real-valued input data types, `arccosh` always returns real output.
For each value that cannot be expressed as a real number or infinity, it
yields ``nan`` and sets the `invalid` floating point error flag.
For complex-valued input, `arccosh` is a complex analytical function that
has a branch cut `[-inf, 1]` and is continuous from above on it.
References
----------
.. [1] M. Abramowitz and I.A. Stegun, "Handbook of Mathematical Functions",
10th printing, 1964, pp. 86. http://www.math.sfu.ca/~cbm/aands/
.. [2] Wikipedia, "Inverse hyperbolic function",
http://en.wikipedia.org/wiki/Arccosh
Examples
--------
>>> np.arccosh([np.e, 10.0])
array([ 1.65745445, 2.99322285])
>>> np.arccosh(1)
0.0
""")
add_newdoc('numpy.core.umath', 'arcsin',
"""
Inverse sine, element-wise.
Parameters
----------
x : array_like
`y`-coordinate on the unit circle.
out : ndarray, optional
Array of the same shape as `x`, in which to store the results.
See `doc.ufuncs` (Section "Output arguments") for more details.
Returns
-------
angle : ndarray
The inverse sine of each element in `x`, in radians and in the
closed interval ``[-pi/2, pi/2]``. If `x` is a scalar, a scalar
is returned, otherwise an array.
See Also
--------
sin, cos, arccos, tan, arctan, arctan2, emath.arcsin
Notes
-----
`arcsin` is a multivalued function: for each `x` there are infinitely
many numbers `z` such that :math:`sin(z) = x`. The convention is to
return the angle `z` whose real part lies in [-pi/2, pi/2].
For real-valued input data types, *arcsin* always returns real output.
For each value that cannot be expressed as a real number or infinity,
it yields ``nan`` and sets the `invalid` floating point error flag.
For complex-valued input, `arcsin` is a complex analytic function that
has, by convention, the branch cuts [-inf, -1] and [1, inf] and is
continuous from above on the former and from below on the latter.
The inverse sine is also known as `asin` or sin^{-1}.
References
----------
Abramowitz, M. and Stegun, I. A., *Handbook of Mathematical Functions*,
10th printing, New York: Dover, 1964, pp. 79ff.
http://www.math.sfu.ca/~cbm/aands/
Examples
--------
>>> np.arcsin(1) # pi/2
1.5707963267948966
>>> np.arcsin(-1) # -pi/2
-1.5707963267948966
>>> np.arcsin(0)
0.0
""")
add_newdoc('numpy.core.umath', 'arcsinh',
"""
Inverse hyperbolic sine elementwise.
Parameters
----------
x : array_like
Input array.
out : ndarray, optional
Array into which the output is placed. Its type is preserved and it
must be of the right shape to hold the output. See `doc.ufuncs`.
Returns
-------
out : ndarray
Array of the same shape as `x`.
Notes
-----
`arcsinh` is a multivalued function: for each `x` there are infinitely
many numbers `z` such that `sinh(z) = x`. The convention is to return the
`z` whose imaginary part lies in `[-pi/2, pi/2]`.
For real-valued input data types, `arcsinh` always returns real output.
For each value that cannot be expressed as a real number or infinity, it
returns ``nan`` and sets the `invalid` floating point error flag.
For complex-valued input, `arccos` is a complex analytical function that
has branch cuts `[1j, infj]` and `[-1j, -infj]` and is continuous from
the right on the former and from the left on the latter.
The inverse hyperbolic sine is also known as `asinh` or ``sinh^-1``.
References
----------
.. [1] M. Abramowitz and I.A. Stegun, "Handbook of Mathematical Functions",
10th printing, 1964, pp. 86. http://www.math.sfu.ca/~cbm/aands/
.. [2] Wikipedia, "Inverse hyperbolic function",
http://en.wikipedia.org/wiki/Arcsinh
Examples
--------
>>> np.arcsinh(np.array([np.e, 10.0]))
array([ 1.72538256, 2.99822295])
""")
add_newdoc('numpy.core.umath', 'arctan',
"""
Trigonometric inverse tangent, element-wise.
The inverse of tan, so that if ``y = tan(x)`` then ``x = arctan(y)``.
Parameters
----------
x : array_like
Input values. `arctan` is applied to each element of `x`.
Returns
-------
out : ndarray
Out has the same shape as `x`. Its real part is in
``[-pi/2, pi/2]`` (``arctan(+/-inf)`` returns ``+/-pi/2``).
It is a scalar if `x` is a scalar.
See Also
--------
arctan2 : The "four quadrant" arctan of the angle formed by (`x`, `y`)
and the positive `x`-axis.
angle : Argument of complex values.
Notes
-----
`arctan` is a multi-valued function: for each `x` there are infinitely
many numbers `z` such that tan(`z`) = `x`. The convention is to return
the angle `z` whose real part lies in [-pi/2, pi/2].
For real-valued input data types, `arctan` always returns real output.
For each value that cannot be expressed as a real number or infinity,
it yields ``nan`` and sets the `invalid` floating point error flag.
For complex-valued input, `arctan` is a complex analytic function that
has [`1j, infj`] and [`-1j, -infj`] as branch cuts, and is continuous
from the left on the former and from the right on the latter.
The inverse tangent is also known as `atan` or tan^{-1}.
References
----------
Abramowitz, M. and Stegun, I. A., *Handbook of Mathematical Functions*,
10th printing, New York: Dover, 1964, pp. 79.
http://www.math.sfu.ca/~cbm/aands/
Examples
--------
We expect the arctan of 0 to be 0, and of 1 to be pi/4:
>>> np.arctan([0, 1])
array([ 0. , 0.78539816])
>>> np.pi/4
0.78539816339744828
Plot arctan:
>>> import matplotlib.pyplot as plt
>>> x = np.linspace(-10, 10)
>>> plt.plot(x, np.arctan(x))
>>> plt.axis('tight')
>>> plt.show()
""")
add_newdoc('numpy.core.umath', 'arctan2',
"""
Element-wise arc tangent of ``x1/x2`` choosing the quadrant correctly.
The quadrant (i.e., branch) is chosen so that ``arctan2(x1, x2)`` is
the signed angle in radians between the ray ending at the origin and
passing through the point (1,0), and the ray ending at the origin and
passing through the point (`x2`, `x1`). (Note the role reversal: the
"`y`-coordinate" is the first function parameter, the "`x`-coordinate"
is the second.) By IEEE convention, this function is defined for
`x2` = +/-0 and for either or both of `x1` and `x2` = +/-inf (see
Notes for specific values).
This function is not defined for complex-valued arguments; for the
so-called argument of complex values, use `angle`.
Parameters
----------
x1 : array_like, real-valued
`y`-coordinates.
x2 : array_like, real-valued
`x`-coordinates. `x2` must be broadcastable to match the shape of
`x1` or vice versa.
Returns
-------
angle : ndarray
Array of angles in radians, in the range ``[-pi, pi]``.
See Also
--------
arctan, tan, angle
Notes
-----
*arctan2* is identical to the `atan2` function of the underlying
C library. The following special values are defined in the C
standard: [1]_
====== ====== ================
`x1` `x2` `arctan2(x1,x2)`
====== ====== ================
+/- 0 +0 +/- 0
+/- 0 -0 +/- pi
> 0 +/-inf +0 / +pi
< 0 +/-inf -0 / -pi
+/-inf +inf +/- (pi/4)
+/-inf -inf +/- (3*pi/4)
====== ====== ================
Note that +0 and -0 are distinct floating point numbers, as are +inf
and -inf.
References
----------
.. [1] ISO/IEC standard 9899:1999, "Programming language C."
Examples
--------
Consider four points in different quadrants:
>>> x = np.array([-1, +1, +1, -1])
>>> y = np.array([-1, -1, +1, +1])
>>> np.arctan2(y, x) * 180 / np.pi
array([-135., -45., 45., 135.])
Note the order of the parameters. `arctan2` is defined also when `x2` = 0
and at several other special points, obtaining values in
the range ``[-pi, pi]``:
>>> np.arctan2([1., -1.], [0., 0.])
array([ 1.57079633, -1.57079633])
>>> np.arctan2([0., 0., np.inf], [+0., -0., np.inf])
array([ 0. , 3.14159265, 0.78539816])
""")
add_newdoc('numpy.core.umath', '_arg',
"""
DO NOT USE, ONLY FOR TESTING
""")
add_newdoc('numpy.core.umath', 'arctanh',
"""
Inverse hyperbolic tangent elementwise.
Parameters
----------
x : array_like
Input array.
Returns
-------
out : ndarray
Array of the same shape as `x`.
See Also
--------
emath.arctanh
Notes
-----
`arctanh` is a multivalued function: for each `x` there are infinitely
many numbers `z` such that `tanh(z) = x`. The convention is to return the
`z` whose imaginary part lies in `[-pi/2, pi/2]`.
For real-valued input data types, `arctanh` always returns real output.
For each value that cannot be expressed as a real number or infinity, it
yields ``nan`` and sets the `invalid` floating point error flag.
For complex-valued input, `arctanh` is a complex analytical function that
has branch cuts `[-1, -inf]` and `[1, inf]` and is continuous from
above on the former and from below on the latter.
The inverse hyperbolic tangent is also known as `atanh` or ``tanh^-1``.
References
----------
.. [1] M. Abramowitz and I.A. Stegun, "Handbook of Mathematical Functions",
10th printing, 1964, pp. 86. http://www.math.sfu.ca/~cbm/aands/
.. [2] Wikipedia, "Inverse hyperbolic function",
http://en.wikipedia.org/wiki/Arctanh
Examples
--------
>>> np.arctanh([0, -0.5])
array([ 0. , -0.54930614])
""")
add_newdoc('numpy.core.umath', 'bitwise_and',
"""
Compute the bit-wise AND of two arrays element-wise.
Computes the bit-wise AND of the underlying binary representation of
the integers in the input arrays. This ufunc implements the C/Python
operator ``&``.
Parameters
----------
x1, x2 : array_like
Only integer types are handled (including booleans).
Returns
-------
out : array_like
Result.
See Also
--------
logical_and
bitwise_or
bitwise_xor
binary_repr :
Return the binary representation of the input number as a string.
Examples
--------
The number 13 is represented by ``00001101``. Likewise, 17 is
represented by ``00010001``. The bit-wise AND of 13 and 17 is
therefore ``000000001``, or 1:
>>> np.bitwise_and(13, 17)
1
>>> np.bitwise_and(14, 13)
12
>>> np.binary_repr(12)
'1100'
>>> np.bitwise_and([14,3], 13)
array([12, 1])
>>> np.bitwise_and([11,7], [4,25])
array([0, 1])
>>> np.bitwise_and(np.array([2,5,255]), np.array([3,14,16]))
array([ 2, 4, 16])
>>> np.bitwise_and([True, True], [False, True])
array([False, True], dtype=bool)
""")
add_newdoc('numpy.core.umath', 'bitwise_or',
"""
Compute the bit-wise OR of two arrays element-wise.
Computes the bit-wise OR of the underlying binary representation of
the integers in the input arrays. This ufunc implements the C/Python
operator ``|``.
Parameters
----------
x1, x2 : array_like
Only integer types are handled (including booleans).
out : ndarray, optional
Array into which the output is placed. Its type is preserved and it
must be of the right shape to hold the output. See doc.ufuncs.
Returns
-------
out : array_like
Result.
See Also
--------
logical_or
bitwise_and
bitwise_xor
binary_repr :
Return the binary representation of the input number as a string.
Examples
--------
The number 13 has the binary representation ``00001101``. Likewise,
16 is represented by ``00010000``. The bit-wise OR of 13 and 16 is
then ``00011101``, or 29:
>>> np.bitwise_or(13, 16)
29
>>> np.binary_repr(29)
'11101'
>>> np.bitwise_or(32, 2)
34
>>> np.bitwise_or([33, 4], 1)
array([33, 5])
>>> np.bitwise_or([33, 4], [1, 2])
array([33, 6])
>>> np.bitwise_or(np.array([2, 5, 255]), np.array([4, 4, 4]))
array([ 6, 5, 255])
>>> np.array([2, 5, 255]) | np.array([4, 4, 4])
array([ 6, 5, 255])
>>> np.bitwise_or(np.array([2, 5, 255, 2147483647L], dtype=np.int32),
... np.array([4, 4, 4, 2147483647L], dtype=np.int32))
array([ 6, 5, 255, 2147483647])
>>> np.bitwise_or([True, True], [False, True])
array([ True, True], dtype=bool)
""")
add_newdoc('numpy.core.umath', 'bitwise_xor',
"""
Compute the bit-wise XOR of two arrays element-wise.
Computes the bit-wise XOR of the underlying binary representation of
the integers in the input arrays. This ufunc implements the C/Python
operator ``^``.
Parameters
----------
x1, x2 : array_like
Only integer types are handled (including booleans).
Returns
-------
out : array_like
Result.
See Also
--------
logical_xor
bitwise_and
bitwise_or
binary_repr :
Return the binary representation of the input number as a string.
Examples
--------
The number 13 is represented by ``00001101``. Likewise, 17 is
represented by ``00010001``. The bit-wise XOR of 13 and 17 is
therefore ``00011100``, or 28:
>>> np.bitwise_xor(13, 17)
28
>>> np.binary_repr(28)
'11100'
>>> np.bitwise_xor(31, 5)
26
>>> np.bitwise_xor([31,3], 5)
array([26, 6])
>>> np.bitwise_xor([31,3], [5,6])
array([26, 5])
>>> np.bitwise_xor([True, True], [False, True])
array([ True, False], dtype=bool)
""")
add_newdoc('numpy.core.umath', 'ceil',
"""
Return the ceiling of the input, element-wise.
The ceil of the scalar `x` is the smallest integer `i`, such that
`i >= x`. It is often denoted as :math:`\\lceil x \\rceil`.
Parameters
----------
x : array_like
Input data.
Returns
-------
y : {ndarray, scalar}
The ceiling of each element in `x`, with `float` dtype.
See Also
--------
floor, trunc, rint
Examples
--------
>>> a = np.array([-1.7, -1.5, -0.2, 0.2, 1.5, 1.7, 2.0])
>>> np.ceil(a)
array([-1., -1., -0., 1., 2., 2., 2.])
""")
add_newdoc('numpy.core.umath', 'trunc',
"""
Return the truncated value of the input, element-wise.
The truncated value of the scalar `x` is the nearest integer `i` which
is closer to zero than `x` is. In short, the fractional part of the
signed number `x` is discarded.
Parameters
----------
x : array_like
Input data.
Returns
-------
y : {ndarray, scalar}
The truncated value of each element in `x`.
See Also
--------
ceil, floor, rint
Notes
-----
.. versionadded:: 1.3.0
Examples
--------
>>> a = np.array([-1.7, -1.5, -0.2, 0.2, 1.5, 1.7, 2.0])
>>> np.trunc(a)
array([-1., -1., -0., 0., 1., 1., 2.])
""")
add_newdoc('numpy.core.umath', 'conjugate',
"""
Return the complex conjugate, element-wise.
The complex conjugate of a complex number is obtained by changing the
sign of its imaginary part.
Parameters
----------
x : array_like
Input value.
Returns
-------
y : ndarray
The complex conjugate of `x`, with same dtype as `x`.
Examples
--------
>>> np.conjugate(1+2j)
(1-2j)
>>> x = np.eye(2) + 1j * np.eye(2)
>>> np.conjugate(x)
array([[ 1.-1.j, 0.-0.j],
[ 0.-0.j, 1.-1.j]])
""")
add_newdoc('numpy.core.umath', 'cos',
"""
Cosine elementwise.
Parameters
----------
x : array_like
Input array in radians.
out : ndarray, optional
Output array of same shape as `x`.
Returns
-------
y : ndarray
The corresponding cosine values.
Raises
------
ValueError: invalid return array shape
if `out` is provided and `out.shape` != `x.shape` (See Examples)
Notes
-----
If `out` is provided, the function writes the result into it,
and returns a reference to `out`. (See Examples)
References
----------
M. Abramowitz and I. A. Stegun, Handbook of Mathematical Functions.
New York, NY: Dover, 1972.
Examples
--------
>>> np.cos(np.array([0, np.pi/2, np.pi]))
array([ 1.00000000e+00, 6.12303177e-17, -1.00000000e+00])
>>>
>>> # Example of providing the optional output parameter
>>> out2 = np.cos([0.1], out1)
>>> out2 is out1
True
>>>
>>> # Example of ValueError due to provision of shape mis-matched `out`
>>> np.cos(np.zeros((3,3)),np.zeros((2,2)))
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
ValueError: invalid return array shape
""")
add_newdoc('numpy.core.umath', 'cosh',
"""
Hyperbolic cosine, element-wise.
Equivalent to ``1/2 * (np.exp(x) + np.exp(-x))`` and ``np.cos(1j*x)``.
Parameters
----------
x : array_like
Input array.
Returns
-------
out : ndarray
Output array of same shape as `x`.
Examples
--------
>>> np.cosh(0)
1.0
The hyperbolic cosine describes the shape of a hanging cable:
>>> import matplotlib.pyplot as plt
>>> x = np.linspace(-4, 4, 1000)
>>> plt.plot(x, np.cosh(x))
>>> plt.show()
""")
add_newdoc('numpy.core.umath', 'degrees',
"""
Convert angles from radians to degrees.
Parameters
----------
x : array_like
Input array in radians.
out : ndarray, optional
Output array of same shape as x.
Returns
-------
y : ndarray of floats
The corresponding degree values; if `out` was supplied this is a
reference to it.
See Also
--------
rad2deg : equivalent function
Examples
--------
Convert a radian array to degrees
>>> rad = np.arange(12.)*np.pi/6
>>> np.degrees(rad)
array([ 0., 30., 60., 90., 120., 150., 180., 210., 240.,
270., 300., 330.])
>>> out = np.zeros((rad.shape))
>>> r = degrees(rad, out)
>>> np.all(r == out)
True
""")
add_newdoc('numpy.core.umath', 'rad2deg',
"""
Convert angles from radians to degrees.
Parameters
----------
x : array_like
Angle in radians.
out : ndarray, optional
Array into which the output is placed. Its type is preserved and it
must be of the right shape to hold the output. See doc.ufuncs.
Returns
-------
y : ndarray
The corresponding angle in degrees.
See Also
--------
deg2rad : Convert angles from degrees to radians.
unwrap : Remove large jumps in angle by wrapping.
Notes
-----
.. versionadded:: 1.3.0
rad2deg(x) is ``180 * x / pi``.
Examples
--------
>>> np.rad2deg(np.pi/2)
90.0
""")
add_newdoc('numpy.core.umath', 'divide',
"""
Divide arguments element-wise.
Parameters
----------
x1 : array_like
Dividend array.
x2 : array_like
Divisor array.
out : ndarray, optional
Array into which the output is placed. Its type is preserved and it
must be of the right shape to hold the output. See doc.ufuncs.
Returns
-------
y : {ndarray, scalar}
The quotient `x1/x2`, element-wise. Returns a scalar if
both `x1` and `x2` are scalars.
See Also
--------
seterr : Set whether to raise or warn on overflow, underflow and division
by zero.
Notes
-----
Equivalent to `x1` / `x2` in terms of array-broadcasting.
Behavior on division by zero can be changed using `seterr`.
When both `x1` and `x2` are of an integer type, `divide` will return
integers and throw away the fractional part. Moreover, division by zero
always yields zero in integer arithmetic.
Examples
--------
>>> np.divide(2.0, 4.0)
0.5
>>> x1 = np.arange(9.0).reshape((3, 3))
>>> x2 = np.arange(3.0)
>>> np.divide(x1, x2)
array([[ NaN, 1. , 1. ],
[ Inf, 4. , 2.5],
[ Inf, 7. , 4. ]])
Note the behavior with integer types:
>>> np.divide(2, 4)
0
>>> np.divide(2, 4.)
0.5
Division by zero always yields zero in integer arithmetic, and does not
raise an exception or a warning:
>>> np.divide(np.array([0, 1], dtype=int), np.array([0, 0], dtype=int))
array([0, 0])
Division by zero can, however, be caught using `seterr`:
>>> old_err_state = np.seterr(divide='raise')
>>> np.divide(1, 0)
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
FloatingPointError: divide by zero encountered in divide
>>> ignored_states = np.seterr(**old_err_state)
>>> np.divide(1, 0)
0
""")
add_newdoc('numpy.core.umath', 'equal',
"""
Return (x1 == x2) element-wise.
Parameters
----------
x1, x2 : array_like
Input arrays of the same shape.
Returns
-------
out : {ndarray, bool}
Output array of bools, or a single bool if x1 and x2 are scalars.
See Also
--------
not_equal, greater_equal, less_equal, greater, less
Examples
--------
>>> np.equal([0, 1, 3], np.arange(3))
array([ True, True, False], dtype=bool)
What is compared are values, not types. So an int (1) and an array of
length one can evaluate as True:
>>> np.equal(1, np.ones(1))
array([ True], dtype=bool)
""")
add_newdoc('numpy.core.umath', 'exp',
"""
Calculate the exponential of all elements in the input array.
Parameters
----------
x : array_like
Input values.
Returns
-------
out : ndarray
Output array, element-wise exponential of `x`.
See Also
--------
expm1 : Calculate ``exp(x) - 1`` for all elements in the array.
exp2 : Calculate ``2**x`` for all elements in the array.
Notes
-----
The irrational number ``e`` is also known as Euler's number. It is
approximately 2.718281, and is the base of the natural logarithm,
``ln`` (this means that, if :math:`x = \\ln y = \\log_e y`,
then :math:`e^x = y`. For real input, ``exp(x)`` is always positive.
For complex arguments, ``x = a + ib``, we can write
:math:`e^x = e^a e^{ib}`. The first term, :math:`e^a`, is already
known (it is the real argument, described above). The second term,
:math:`e^{ib}`, is :math:`\\cos b + i \\sin b`, a function with magnitude
1 and a periodic phase.
References
----------
.. [1] Wikipedia, "Exponential function",
http://en.wikipedia.org/wiki/Exponential_function
.. [2] M. Abramovitz and I. A. Stegun, "Handbook of Mathematical Functions
with Formulas, Graphs, and Mathematical Tables," Dover, 1964, p. 69,
http://www.math.sfu.ca/~cbm/aands/page_69.htm
Examples
--------
Plot the magnitude and phase of ``exp(x)`` in the complex plane:
>>> import matplotlib.pyplot as plt
>>> x = np.linspace(-2*np.pi, 2*np.pi, 100)
>>> xx = x + 1j * x[:, np.newaxis] # a + ib over complex plane
>>> out = np.exp(xx)
>>> plt.subplot(121)
>>> plt.imshow(np.abs(out),
... extent=[-2*np.pi, 2*np.pi, -2*np.pi, 2*np.pi])
>>> plt.title('Magnitude of exp(x)')
>>> plt.subplot(122)
>>> plt.imshow(np.angle(out),
... extent=[-2*np.pi, 2*np.pi, -2*np.pi, 2*np.pi])
>>> plt.title('Phase (angle) of exp(x)')
>>> plt.show()
""")
add_newdoc('numpy.core.umath', 'exp2',
"""
Calculate `2**p` for all `p` in the input array.
Parameters
----------
x : array_like
Input values.
out : ndarray, optional
Array to insert results into.
Returns
-------
out : ndarray
Element-wise 2 to the power `x`.
See Also
--------
exp : calculate ``e**x``.
Notes
-----
.. versionadded:: 1.3.0
Examples
--------
>>> np.exp2([2, 3])
array([ 4., 8.])
""")
add_newdoc('numpy.core.umath', 'expm1',
"""
Calculate ``exp(x) - 1`` for all elements in the array.
Parameters
----------
x : array_like
Input values.
Returns
-------
out : ndarray
Element-wise exponential minus one: ``out = exp(x) - 1``.
See Also
--------
log1p : ``log(1 + x)``, the inverse of expm1.
Notes
-----
This function provides greater precision than the formula ``exp(x) - 1``
for small values of ``x``.
Examples
--------
The true value of ``exp(1e-10) - 1`` is ``1.00000000005e-10`` to
about 32 significant digits. This example shows the superiority of
expm1 in this case.
>>> np.expm1(1e-10)
1.00000000005e-10
>>> np.exp(1e-10) - 1
1.000000082740371e-10
""")
add_newdoc('numpy.core.umath', 'fabs',
"""
Compute the absolute values elementwise.
This function returns the absolute values (positive magnitude) of the data
in `x`. Complex values are not handled, use `absolute` to find the
absolute values of complex data.
Parameters
----------
x : array_like
The array of numbers for which the absolute values are required. If
`x` is a scalar, the result `y` will also be a scalar.
out : ndarray, optional
Array into which the output is placed. Its type is preserved and it
must be of the right shape to hold the output. See doc.ufuncs.
Returns
-------
y : {ndarray, scalar}
The absolute values of `x`, the returned values are always floats.
See Also
--------
absolute : Absolute values including `complex` types.
Examples
--------
>>> np.fabs(-1)
1.0
>>> np.fabs([-1.2, 1.2])
array([ 1.2, 1.2])
""")
add_newdoc('numpy.core.umath', 'floor',
"""
Return the floor of the input, element-wise.
The floor of the scalar `x` is the largest integer `i`, such that
`i <= x`. It is often denoted as :math:`\\lfloor x \\rfloor`.
Parameters
----------
x : array_like
Input data.
Returns
-------
y : {ndarray, scalar}
The floor of each element in `x`.
See Also
--------
ceil, trunc, rint
Notes
-----
Some spreadsheet programs calculate the "floor-towards-zero", in other
words ``floor(-2.5) == -2``. NumPy, however, uses a definition of
`floor` such that `floor(-2.5) == -3`.
Examples
--------
>>> a = np.array([-1.7, -1.5, -0.2, 0.2, 1.5, 1.7, 2.0])
>>> np.floor(a)
array([-2., -2., -1., 0., 1., 1., 2.])
""")
add_newdoc('numpy.core.umath', 'floor_divide',
"""
Return the largest integer smaller or equal to the division of the inputs.
Parameters
----------
x1 : array_like
Numerator.
x2 : array_like
Denominator.
Returns
-------
y : ndarray
y = floor(`x1`/`x2`)
See Also
--------
divide : Standard division.
floor : Round a number to the nearest integer toward minus infinity.
ceil : Round a number to the nearest integer toward infinity.
Examples
--------
>>> np.floor_divide(7,3)
2
>>> np.floor_divide([1., 2., 3., 4.], 2.5)
array([ 0., 0., 1., 1.])
""")
add_newdoc('numpy.core.umath', 'fmod',
"""
Return the element-wise remainder of division.
This is the NumPy implementation of the C library function fmod; the
remainder takes the sign of the dividend, unlike Python's `%` operator
(for `%`-compatible behavior, see `remainder`).
Parameters
----------
x1 : array_like
Dividend.
x2 : array_like
Divisor.
Returns
-------
y : array_like
The remainder of the division of `x1` by `x2`.
See Also
--------
remainder : Modulo operation where the quotient is `floor(x1/x2)`.
divide
Notes
-----
The result of the modulo operation for negative dividend and divisors is
bound by conventions. In `fmod`, the sign of the remainder is the sign of
the dividend. In `remainder`, the sign of the divisor does not affect the
sign of the result.
Examples
--------
>>> np.fmod([-3, -2, -1, 1, 2, 3], 2)
array([-1, 0, -1, 1, 0, 1])
>>> np.remainder([-3, -2, -1, 1, 2, 3], 2)
array([1, 0, 1, 1, 0, 1])
>>> np.fmod([5, 3], [2, 2.])
array([ 1., 1.])
>>> a = np.arange(-3, 3).reshape(3, 2)
>>> a
array([[-3, -2],
[-1, 0],
[ 1, 2]])
>>> np.fmod(a, [2,2])
array([[-1, 0],
[-1, 0],
[ 1, 0]])
""")
add_newdoc('numpy.core.umath', 'greater',
"""
Return the truth value of (x1 > x2) element-wise.
Parameters
----------
x1, x2 : array_like
Input arrays. If ``x1.shape != x2.shape``, they must be
broadcastable to a common shape (which may be the shape of one or
the other).
Returns
-------
out : bool or ndarray of bool
Array of bools, or a single bool if `x1` and `x2` are scalars.
See Also
--------
greater_equal, less, less_equal, equal, not_equal
Examples
--------
>>> np.greater([4,2],[2,2])
array([ True, False], dtype=bool)
If the inputs are ndarrays, then np.greater is equivalent to '>'.
>>> a = np.array([4,2])
>>> b = np.array([2,2])
>>> a > b
array([ True, False], dtype=bool)
""")
add_newdoc('numpy.core.umath', 'greater_equal',
"""
Return the truth value of (x1 >= x2) element-wise.
Parameters
----------
x1, x2 : array_like
Input arrays. If ``x1.shape != x2.shape``, they must be
broadcastable to a common shape (which may be the shape of one or
the other).
Returns
-------
out : bool or ndarray of bool
Array of bools, or a single bool if `x1` and `x2` are scalars.
See Also
--------
greater, less, less_equal, equal, not_equal
Examples
--------
>>> np.greater_equal([4, 2, 1], [2, 2, 2])
array([ True, True, False], dtype=bool)
""")
add_newdoc('numpy.core.umath', 'hypot',
"""
Given the "legs" of a right triangle, return its hypotenuse.
Equivalent to ``sqrt(x1**2 + x2**2)``, element-wise. If `x1` or
`x2` is scalar_like (i.e., unambiguously cast-able to a scalar type),
it is broadcast for use with each element of the other argument.
(See Examples)
Parameters
----------
x1, x2 : array_like
Leg of the triangle(s).
out : ndarray, optional
Array into which the output is placed. Its type is preserved and it
must be of the right shape to hold the output. See doc.ufuncs.
Returns
-------
z : ndarray
The hypotenuse of the triangle(s).
Examples
--------
>>> np.hypot(3*np.ones((3, 3)), 4*np.ones((3, 3)))
array([[ 5., 5., 5.],
[ 5., 5., 5.],
[ 5., 5., 5.]])
Example showing broadcast of scalar_like argument:
>>> np.hypot(3*np.ones((3, 3)), [4])
array([[ 5., 5., 5.],
[ 5., 5., 5.],
[ 5., 5., 5.]])
""")
add_newdoc('numpy.core.umath', 'invert',
"""
Compute bit-wise inversion, or bit-wise NOT, element-wise.
Computes the bit-wise NOT of the underlying binary representation of
the integers in the input arrays. This ufunc implements the C/Python
operator ``~``.
For signed integer inputs, the two's complement is returned.
In a two's-complement system negative numbers are represented by the two's
complement of the absolute value. This is the most common method of
representing signed integers on computers [1]_. A N-bit two's-complement
system can represent every integer in the range
:math:`-2^{N-1}` to :math:`+2^{N-1}-1`.
Parameters
----------
x1 : array_like
Only integer types are handled (including booleans).
Returns
-------
out : array_like
Result.
See Also
--------
bitwise_and, bitwise_or, bitwise_xor
logical_not
binary_repr :
Return the binary representation of the input number as a string.
Notes
-----
`bitwise_not` is an alias for `invert`:
>>> np.bitwise_not is np.invert
True
References
----------
.. [1] Wikipedia, "Two's complement",
http://en.wikipedia.org/wiki/Two's_complement
Examples
--------
We've seen that 13 is represented by ``00001101``.
The invert or bit-wise NOT of 13 is then:
>>> np.invert(np.array([13], dtype=np.uint8))
array([242], dtype=uint8)
>>> np.binary_repr(13, width=8)
'00001101'
>>> np.binary_repr(242, width=8)
'11110010'
The result depends on the bit-width:
>>> np.invert(np.array([13], dtype=np.uint16))
array([65522], dtype=uint16)
>>> np.binary_repr(13, width=16)
'0000000000001101'
>>> np.binary_repr(65522, width=16)
'1111111111110010'
When using signed integer types the result is the two's complement of
the result for the unsigned type:
>>> np.invert(np.array([13], dtype=int8))
array([-14], dtype=int8)
>>> np.binary_repr(-14, width=8)
'11110010'
Booleans are accepted as well:
>>> np.invert(np.array([True, False]))
array([False, True], dtype=bool)
""")
add_newdoc('numpy.core.umath', 'isfinite',
"""
Test element-wise for finiteness (not infinity and not Not a Number).
The result is returned as a boolean array.
Parameters
----------
x : array_like
Input values.
out : ndarray, optional
Array into which the output is placed. Its type is preserved and it
must be of the right shape to hold the output. See `doc.ufuncs`.
Returns
-------
y : ndarray, bool
For scalar input, the result is a new boolean with value True
if the input is finite; otherwise the value is False (input is
either positive infinity, negative infinity or Not a Number).
For array input, the result is a boolean array with the same
dimensions as the input and the values are True if the corresponding
element of the input is finite; otherwise the values are False (element
is either positive infinity, negative infinity or Not a Number).
See Also
--------
isinf, isneginf, isposinf, isnan
Notes
-----
Not a Number, positive infinity and negative infinity are considered
to be non-finite.
Numpy uses the IEEE Standard for Binary Floating-Point for Arithmetic
(IEEE 754). This means that Not a Number is not equivalent to infinity.
Also that positive infinity is not equivalent to negative infinity. But
infinity is equivalent to positive infinity.
Errors result if the second argument is also supplied when `x` is a scalar
input, or if first and second arguments have different shapes.
Examples
--------
>>> np.isfinite(1)
True
>>> np.isfinite(0)
True
>>> np.isfinite(np.nan)
False
>>> np.isfinite(np.inf)
False
>>> np.isfinite(np.NINF)
False
>>> np.isfinite([np.log(-1.),1.,np.log(0)])
array([False, True, False], dtype=bool)
>>> x = np.array([-np.inf, 0., np.inf])
>>> y = np.array([2, 2, 2])
>>> np.isfinite(x, y)
array([0, 1, 0])
>>> y
array([0, 1, 0])
""")
add_newdoc('numpy.core.umath', 'isinf',
"""
Test element-wise for positive or negative infinity.
Return a bool-type array, the same shape as `x`, True where ``x ==
+/-inf``, False everywhere else.
Parameters
----------
x : array_like
Input values
out : array_like, optional
An array with the same shape as `x` to store the result.
Returns
-------
y : bool (scalar) or bool-type ndarray
For scalar input, the result is a new boolean with value True
if the input is positive or negative infinity; otherwise the value
is False.
For array input, the result is a boolean array with the same
shape as the input and the values are True where the
corresponding element of the input is positive or negative
infinity; elsewhere the values are False. If a second argument
was supplied the result is stored there. If the type of that array
is a numeric type the result is represented as zeros and ones, if
the type is boolean then as False and True, respectively.
The return value `y` is then a reference to that array.
See Also
--------
isneginf, isposinf, isnan, isfinite
Notes
-----
Numpy uses the IEEE Standard for Binary Floating-Point for Arithmetic
(IEEE 754).
Errors result if the second argument is supplied when the first
argument is a scalar, or if the first and second arguments have
different shapes.
Examples
--------
>>> np.isinf(np.inf)
True
>>> np.isinf(np.nan)
False
>>> np.isinf(np.NINF)
True
>>> np.isinf([np.inf, -np.inf, 1.0, np.nan])
array([ True, True, False, False], dtype=bool)
>>> x = np.array([-np.inf, 0., np.inf])
>>> y = np.array([2, 2, 2])
>>> np.isinf(x, y)
array([1, 0, 1])
>>> y
array([1, 0, 1])
""")
add_newdoc('numpy.core.umath', 'isnan',
"""
Test element-wise for Not a Number (NaN), return result as a bool array.
Parameters
----------
x : array_like
Input array.
Returns
-------
y : {ndarray, bool}
For scalar input, the result is a new boolean with value True
if the input is NaN; otherwise the value is False.
For array input, the result is a boolean array with the same
dimensions as the input and the values are True if the corresponding
element of the input is NaN; otherwise the values are False.
See Also
--------
isinf, isneginf, isposinf, isfinite
Notes
-----
Numpy uses the IEEE Standard for Binary Floating-Point for Arithmetic
(IEEE 754). This means that Not a Number is not equivalent to infinity.
Examples
--------
>>> np.isnan(np.nan)
True
>>> np.isnan(np.inf)
False
>>> np.isnan([np.log(-1.),1.,np.log(0)])
array([ True, False, False], dtype=bool)
""")
add_newdoc('numpy.core.umath', 'left_shift',
"""
Shift the bits of an integer to the left.
Bits are shifted to the left by appending `x2` 0s at the right of `x1`.
Since the internal representation of numbers is in binary format, this
operation is equivalent to multiplying `x1` by ``2**x2``.
Parameters
----------
x1 : array_like of integer type
Input values.
x2 : array_like of integer type
Number of zeros to append to `x1`. Has to be non-negative.
Returns
-------
out : array of integer type
Return `x1` with bits shifted `x2` times to the left.
See Also
--------
right_shift : Shift the bits of an integer to the right.
binary_repr : Return the binary representation of the input number
as a string.
Examples
--------
>>> np.binary_repr(5)
'101'
>>> np.left_shift(5, 2)
20
>>> np.binary_repr(20)
'10100'
>>> np.left_shift(5, [1,2,3])
array([10, 20, 40])
""")
add_newdoc('numpy.core.umath', 'less',
"""
Return the truth value of (x1 < x2) element-wise.
Parameters
----------
x1, x2 : array_like
Input arrays. If ``x1.shape != x2.shape``, they must be
broadcastable to a common shape (which may be the shape of one or
the other).
Returns
-------
out : bool or ndarray of bool
Array of bools, or a single bool if `x1` and `x2` are scalars.
See Also
--------
greater, less_equal, greater_equal, equal, not_equal
Examples
--------
>>> np.less([1, 2], [2, 2])
array([ True, False], dtype=bool)
""")
add_newdoc('numpy.core.umath', 'less_equal',
"""
Return the truth value of (x1 <= x2) element-wise.
Parameters
----------
x1, x2 : array_like
Input arrays. If ``x1.shape != x2.shape``, they must be
broadcastable to a common shape (which may be the shape of one or
the other).
Returns
-------
out : bool or ndarray of bool
Array of bools, or a single bool if `x1` and `x2` are scalars.
See Also
--------
greater, less, greater_equal, equal, not_equal
Examples
--------
>>> np.less_equal([4, 2, 1], [2, 2, 2])
array([False, True, True], dtype=bool)
""")
add_newdoc('numpy.core.umath', 'log',
"""
Natural logarithm, element-wise.
The natural logarithm `log` is the inverse of the exponential function,
so that `log(exp(x)) = x`. The natural logarithm is logarithm in base `e`.
Parameters
----------
x : array_like
Input value.
Returns
-------
y : ndarray
The natural logarithm of `x`, element-wise.
See Also
--------
log10, log2, log1p, emath.log
Notes
-----
Logarithm is a multivalued function: for each `x` there is an infinite
number of `z` such that `exp(z) = x`. The convention is to return the `z`
whose imaginary part lies in `[-pi, pi]`.
For real-valued input data types, `log` always returns real output. For
each value that cannot be expressed as a real number or infinity, it
yields ``nan`` and sets the `invalid` floating point error flag.
For complex-valued input, `log` is a complex analytical function that
has a branch cut `[-inf, 0]` and is continuous from above on it. `log`
handles the floating-point negative zero as an infinitesimal negative
number, conforming to the C99 standard.
References
----------
.. [1] M. Abramowitz and I.A. Stegun, "Handbook of Mathematical Functions",
10th printing, 1964, pp. 67. http://www.math.sfu.ca/~cbm/aands/
.. [2] Wikipedia, "Logarithm". http://en.wikipedia.org/wiki/Logarithm
Examples
--------
>>> np.log([1, np.e, np.e**2, 0])
array([ 0., 1., 2., -Inf])
""")
add_newdoc('numpy.core.umath', 'log10',
"""
Return the base 10 logarithm of the input array, element-wise.
Parameters
----------
x : array_like
Input values.
Returns
-------
y : ndarray
The logarithm to the base 10 of `x`, element-wise. NaNs are
returned where x is negative.
See Also
--------
emath.log10
Notes
-----
Logarithm is a multivalued function: for each `x` there is an infinite
number of `z` such that `10**z = x`. The convention is to return the `z`
whose imaginary part lies in `[-pi, pi]`.
For real-valued input data types, `log10` always returns real output. For
each value that cannot be expressed as a real number or infinity, it
yields ``nan`` and sets the `invalid` floating point error flag.
For complex-valued input, `log10` is a complex analytical function that
has a branch cut `[-inf, 0]` and is continuous from above on it. `log10`
handles the floating-point negative zero as an infinitesimal negative
number, conforming to the C99 standard.
References
----------
.. [1] M. Abramowitz and I.A. Stegun, "Handbook of Mathematical Functions",
10th printing, 1964, pp. 67. http://www.math.sfu.ca/~cbm/aands/
.. [2] Wikipedia, "Logarithm". http://en.wikipedia.org/wiki/Logarithm
Examples
--------
>>> np.log10([1e-15, -3.])
array([-15., NaN])
""")
add_newdoc('numpy.core.umath', 'log2',
"""
Base-2 logarithm of `x`.
Parameters
----------
x : array_like
Input values.
Returns
-------
y : ndarray
Base-2 logarithm of `x`.
See Also
--------
log, log10, log1p, emath.log2
Notes
-----
.. versionadded:: 1.3.0
Logarithm is a multivalued function: for each `x` there is an infinite
number of `z` such that `2**z = x`. The convention is to return the `z`
whose imaginary part lies in `[-pi, pi]`.
For real-valued input data types, `log2` always returns real output. For
each value that cannot be expressed as a real number or infinity, it
yields ``nan`` and sets the `invalid` floating point error flag.
For complex-valued input, `log2` is a complex analytical function that
has a branch cut `[-inf, 0]` and is continuous from above on it. `log2`
handles the floating-point negative zero as an infinitesimal negative
number, conforming to the C99 standard.
Examples
--------
>>> x = np.array([0, 1, 2, 2**4])
>>> np.log2(x)
array([-Inf, 0., 1., 4.])
>>> xi = np.array([0+1.j, 1, 2+0.j, 4.j])
>>> np.log2(xi)
array([ 0.+2.26618007j, 0.+0.j , 1.+0.j , 2.+2.26618007j])
""")
add_newdoc('numpy.core.umath', 'logaddexp',
"""
Logarithm of the sum of exponentiations of the inputs.
Calculates ``log(exp(x1) + exp(x2))``. This function is useful in
statistics where the calculated probabilities of events may be so small
as to exceed the range of normal floating point numbers. In such cases
the logarithm of the calculated probability is stored. This function
allows adding probabilities stored in such a fashion.
Parameters
----------
x1, x2 : array_like
Input values.
Returns
-------
result : ndarray
Logarithm of ``exp(x1) + exp(x2)``.
See Also
--------
logaddexp2: Logarithm of the sum of exponentiations of inputs in base-2.
Notes
-----
.. versionadded:: 1.3.0
Examples
--------
>>> prob1 = np.log(1e-50)
>>> prob2 = np.log(2.5e-50)
>>> prob12 = np.logaddexp(prob1, prob2)
>>> prob12
-113.87649168120691
>>> np.exp(prob12)
3.5000000000000057e-50
""")
add_newdoc('numpy.core.umath', 'logaddexp2',
"""
Logarithm of the sum of exponentiations of the inputs in base-2.
Calculates ``log2(2**x1 + 2**x2)``. This function is useful in machine
learning when the calculated probabilities of events may be so small
as to exceed the range of normal floating point numbers. In such cases
the base-2 logarithm of the calculated probability can be used instead.
This function allows adding probabilities stored in such a fashion.
Parameters
----------
x1, x2 : array_like
Input values.
out : ndarray, optional
Array to store results in.
Returns
-------
result : ndarray
Base-2 logarithm of ``2**x1 + 2**x2``.
See Also
--------
logaddexp: Logarithm of the sum of exponentiations of the inputs.
Notes
-----
.. versionadded:: 1.3.0
Examples
--------
>>> prob1 = np.log2(1e-50)
>>> prob2 = np.log2(2.5e-50)
>>> prob12 = np.logaddexp2(prob1, prob2)
>>> prob1, prob2, prob12
(-166.09640474436813, -164.77447664948076, -164.28904982231052)
>>> 2**prob12
3.4999999999999914e-50
""")
add_newdoc('numpy.core.umath', 'log1p',
"""
Return the natural logarithm of one plus the input array, element-wise.
Calculates ``log(1 + x)``.
Parameters
----------
x : array_like
Input values.
Returns
-------
y : ndarray
Natural logarithm of `1 + x`, element-wise.
See Also
--------
expm1 : ``exp(x) - 1``, the inverse of `log1p`.
Notes
-----
For real-valued input, `log1p` is accurate also for `x` so small
that `1 + x == 1` in floating-point accuracy.
Logarithm is a multivalued function: for each `x` there is an infinite
number of `z` such that `exp(z) = 1 + x`. The convention is to return
the `z` whose imaginary part lies in `[-pi, pi]`.
For real-valued input data types, `log1p` always returns real output. For
each value that cannot be expressed as a real number or infinity, it
yields ``nan`` and sets the `invalid` floating point error flag.
For complex-valued input, `log1p` is a complex analytical function that
has a branch cut `[-inf, -1]` and is continuous from above on it. `log1p`
handles the floating-point negative zero as an infinitesimal negative
number, conforming to the C99 standard.
References
----------
.. [1] M. Abramowitz and I.A. Stegun, "Handbook of Mathematical Functions",
10th printing, 1964, pp. 67. http://www.math.sfu.ca/~cbm/aands/
.. [2] Wikipedia, "Logarithm". http://en.wikipedia.org/wiki/Logarithm
Examples
--------
>>> np.log1p(1e-99)
1e-99
>>> np.log(1 + 1e-99)
0.0
""")
add_newdoc('numpy.core.umath', 'logical_and',
"""
Compute the truth value of x1 AND x2 elementwise.
Parameters
----------
x1, x2 : array_like
Input arrays. `x1` and `x2` must be of the same shape.
Returns
-------
y : {ndarray, bool}
Boolean result with the same shape as `x1` and `x2` of the logical
AND operation on corresponding elements of `x1` and `x2`.
See Also
--------
logical_or, logical_not, logical_xor
bitwise_and
Examples
--------
>>> np.logical_and(True, False)
False
>>> np.logical_and([True, False], [False, False])
array([False, False], dtype=bool)
>>> x = np.arange(5)
>>> np.logical_and(x>1, x<4)
array([False, False, True, True, False], dtype=bool)
""")
add_newdoc('numpy.core.umath', 'logical_not',
"""
Compute the truth value of NOT x elementwise.
Parameters
----------
x : array_like
Logical NOT is applied to the elements of `x`.
Returns
-------
y : bool or ndarray of bool
Boolean result with the same shape as `x` of the NOT operation
on elements of `x`.
See Also
--------
logical_and, logical_or, logical_xor
Examples
--------
>>> np.logical_not(3)
False
>>> np.logical_not([True, False, 0, 1])
array([False, True, True, False], dtype=bool)
>>> x = np.arange(5)
>>> np.logical_not(x<3)
array([False, False, False, True, True], dtype=bool)
""")
add_newdoc('numpy.core.umath', 'logical_or',
"""
Compute the truth value of x1 OR x2 elementwise.
Parameters
----------
x1, x2 : array_like
Logical OR is applied to the elements of `x1` and `x2`.
They have to be of the same shape.
Returns
-------
y : {ndarray, bool}
Boolean result with the same shape as `x1` and `x2` of the logical
OR operation on elements of `x1` and `x2`.
See Also
--------
logical_and, logical_not, logical_xor
bitwise_or
Examples
--------
>>> np.logical_or(True, False)
True
>>> np.logical_or([True, False], [False, False])
array([ True, False], dtype=bool)
>>> x = np.arange(5)
>>> np.logical_or(x < 1, x > 3)
array([ True, False, False, False, True], dtype=bool)
""")
add_newdoc('numpy.core.umath', 'logical_xor',
"""
Compute the truth value of x1 XOR x2, element-wise.
Parameters
----------
x1, x2 : array_like
Logical XOR is applied to the elements of `x1` and `x2`. They must
be broadcastable to the same shape.
Returns
-------
y : bool or ndarray of bool
Boolean result of the logical XOR operation applied to the elements
of `x1` and `x2`; the shape is determined by whether or not
broadcasting of one or both arrays was required.
See Also
--------
logical_and, logical_or, logical_not, bitwise_xor
Examples
--------
>>> np.logical_xor(True, False)
True
>>> np.logical_xor([True, True, False, False], [True, False, True, False])
array([False, True, True, False], dtype=bool)
>>> x = np.arange(5)
>>> np.logical_xor(x < 1, x > 3)
array([ True, False, False, False, True], dtype=bool)
Simple example showing support of broadcasting
>>> np.logical_xor(0, np.eye(2))
array([[ True, False],
[False, True]], dtype=bool)
""")
add_newdoc('numpy.core.umath', 'maximum',
"""
Element-wise maximum of array elements.
Compare two arrays and returns a new array containing
the element-wise maxima. If one of the elements being
compared is a nan, then that element is returned. If
both elements are nans then the first is returned. The
latter distinction is important for complex nans,
which are defined as at least one of the real or
imaginary parts being a nan. The net effect is that
nans are propagated.
Parameters
----------
x1, x2 : array_like
The arrays holding the elements to be compared. They must have
the same shape, or shapes that can be broadcast to a single shape.
Returns
-------
y : {ndarray, scalar}
The maximum of `x1` and `x2`, element-wise. Returns scalar if
both `x1` and `x2` are scalars.
See Also
--------
minimum :
element-wise minimum
fmax :
element-wise maximum that ignores nans unless both inputs are nans.
fmin :
element-wise minimum that ignores nans unless both inputs are nans.
Notes
-----
Equivalent to ``np.where(x1 > x2, x1, x2)`` but faster and does proper
broadcasting.
Examples
--------
>>> np.maximum([2, 3, 4], [1, 5, 2])
array([2, 5, 4])
>>> np.maximum(np.eye(2), [0.5, 2])
array([[ 1. , 2. ],
[ 0.5, 2. ]])
>>> np.maximum([np.nan, 0, np.nan], [0, np.nan, np.nan])
array([ NaN, NaN, NaN])
>>> np.maximum(np.Inf, 1)
inf
""")
add_newdoc('numpy.core.umath', 'minimum',
"""
Element-wise minimum of array elements.
Compare two arrays and returns a new array containing the element-wise
minima. If one of the elements being compared is a nan, then that element
is returned. If both elements are nans then the first is returned. The
latter distinction is important for complex nans, which are defined as at
least one of the real or imaginary parts being a nan. The net effect is
that nans are propagated.
Parameters
----------
x1, x2 : array_like
The arrays holding the elements to be compared. They must have
the same shape, or shapes that can be broadcast to a single shape.
Returns
-------
y : {ndarray, scalar}
The minimum of `x1` and `x2`, element-wise. Returns scalar if
both `x1` and `x2` are scalars.
See Also
--------
maximum :
element-wise maximum that propagates nans.
fmax :
element-wise maximum that ignores nans unless both inputs are nans.
fmin :
element-wise minimum that ignores nans unless both inputs are nans.
Notes
-----
The minimum is equivalent to ``np.where(x1 <= x2, x1, x2)`` when neither
x1 nor x2 are nans, but it is faster and does proper broadcasting.
Examples
--------
>>> np.minimum([2, 3, 4], [1, 5, 2])
array([1, 3, 2])
>>> np.minimum(np.eye(2), [0.5, 2]) # broadcasting
array([[ 0.5, 0. ],
[ 0. , 1. ]])
>>> np.minimum([np.nan, 0, np.nan],[0, np.nan, np.nan])
array([ NaN, NaN, NaN])
""")
add_newdoc('numpy.core.umath', 'fmax',
"""
Element-wise maximum of array elements.
Compare two arrays and returns a new array containing the element-wise
maxima. If one of the elements being compared is a nan, then the non-nan
element is returned. If both elements are nans then the first is returned.
The latter distinction is important for complex nans, which are defined as
at least one of the real or imaginary parts being a nan. The net effect is
that nans are ignored when possible.
Parameters
----------
x1, x2 : array_like
The arrays holding the elements to be compared. They must have
the same shape.
Returns
-------
y : {ndarray, scalar}
The maximum of `x1` and `x2`, element-wise. Returns scalar if
both `x1` and `x2` are scalars.
See Also
--------
fmin :
element-wise minimum that ignores nans unless both inputs are nans.
maximum :
element-wise maximum that propagates nans.
minimum :
element-wise minimum that propagates nans.
Notes
-----
.. versionadded:: 1.3.0
The fmax is equivalent to ``np.where(x1 >= x2, x1, x2)`` when neither
x1 nor x2 are nans, but it is faster and does proper broadcasting.
Examples
--------
>>> np.fmax([2, 3, 4], [1, 5, 2])
array([2, 5, 4])
>>> np.fmax(np.eye(2), [0.5, 2])
array([[ 1. , 2. ],
[ 0.5, 2. ]])
>>> np.fmax([np.nan, 0, np.nan],[0, np.nan, np.nan])
array([ 0., 0., NaN])
""")
add_newdoc('numpy.core.umath', 'fmin',
"""
fmin(x1, x2[, out])
Element-wise minimum of array elements.
Compare two arrays and returns a new array containing the element-wise
minima. If one of the elements being compared is a nan, then the non-nan
element is returned. If both elements are nans then the first is returned.
The latter distinction is important for complex nans, which are defined as
at least one of the real or imaginary parts being a nan. The net effect is
that nans are ignored when possible.
Parameters
----------
x1, x2 : array_like
The arrays holding the elements to be compared. They must have
the same shape.
Returns
-------
y : {ndarray, scalar}
The minimum of `x1` and `x2`, element-wise. Returns scalar if
both `x1` and `x2` are scalars.
See Also
--------
fmax :
element-wise maximum that ignores nans unless both inputs are nans.
maximum :
element-wise maximum that propagates nans.
minimum :
element-wise minimum that propagates nans.
Notes
-----
.. versionadded:: 1.3.0
The fmin is equivalent to ``np.where(x1 <= x2, x1, x2)`` when neither
x1 nor x2 are nans, but it is faster and does proper broadcasting.
Examples
--------
>>> np.fmin([2, 3, 4], [1, 5, 2])
array([1, 3, 2])
>>> np.fmin(np.eye(2), [0.5, 2])
array([[ 0.5, 0. ],
[ 0. , 1. ]])
>>> np.fmin([np.nan, 0, np.nan],[0, np.nan, np.nan])
array([ 0., 0., NaN])
""")
add_newdoc('numpy.core.umath', 'modf',
"""
Return the fractional and integral parts of an array, element-wise.
The fractional and integral parts are negative if the given number is
negative.
Parameters
----------
x : array_like
Input array.
Returns
-------
y1 : ndarray
Fractional part of `x`.
y2 : ndarray
Integral part of `x`.
Notes
-----
For integer input the return values are floats.
Examples
--------
>>> np.modf([0, 3.5])
(array([ 0. , 0.5]), array([ 0., 3.]))
>>> np.modf(-0.5)
(-0.5, -0.0)
""")
add_newdoc('numpy.core.umath', 'multiply',
"""
Multiply arguments element-wise.
Parameters
----------
x1, x2 : array_like
Input arrays to be multiplied.
Returns
-------
y : ndarray
The product of `x1` and `x2`, element-wise. Returns a scalar if
both `x1` and `x2` are scalars.
Notes
-----
Equivalent to `x1` * `x2` in terms of array broadcasting.
Examples
--------
>>> np.multiply(2.0, 4.0)
8.0
>>> x1 = np.arange(9.0).reshape((3, 3))
>>> x2 = np.arange(3.0)
>>> np.multiply(x1, x2)
array([[ 0., 1., 4.],
[ 0., 4., 10.],
[ 0., 7., 16.]])
""")
add_newdoc('numpy.core.umath', 'negative',
"""
Returns an array with the negative of each element of the original array.
Parameters
----------
x : array_like or scalar
Input array.
Returns
-------
y : ndarray or scalar
Returned array or scalar: `y = -x`.
Examples
--------
>>> np.negative([1.,-1.])
array([-1., 1.])
""")
add_newdoc('numpy.core.umath', 'not_equal',
"""
Return (x1 != x2) element-wise.
Parameters
----------
x1, x2 : array_like
Input arrays.
out : ndarray, optional
A placeholder the same shape as `x1` to store the result.
See `doc.ufuncs` (Section "Output arguments") for more details.
Returns
-------
not_equal : ndarray bool, scalar bool
For each element in `x1, x2`, return True if `x1` is not equal
to `x2` and False otherwise.
See Also
--------
equal, greater, greater_equal, less, less_equal
Examples
--------
>>> np.not_equal([1.,2.], [1., 3.])
array([False, True], dtype=bool)
>>> np.not_equal([1, 2], [[1, 3],[1, 4]])
array([[False, True],
[False, True]], dtype=bool)
""")
add_newdoc('numpy.core.umath', 'ones_like',
"""
Returns an array of ones with the same shape and type as a given array.
Equivalent to ``np.ones(a.shape, a.dtype)``. (Note that ``a.copy().fill(1)`` fills in place and returns None, not the filled array.)
Please refer to the documentation for `zeros_like` for further details.
See Also
--------
zeros_like, ones
Examples
--------
>>> a = np.array([[1, 2, 3], [4, 5, 6]])
>>> np.ones_like(a)
array([[1, 1, 1],
[1, 1, 1]])
""")
add_newdoc('numpy.core.umath', 'power',
"""
First array elements raised to powers from second array, element-wise.
Raise each base in `x1` to the positionally-corresponding power in
`x2`. `x1` and `x2` must be broadcastable to the same shape.
Parameters
----------
x1 : array_like
The bases.
x2 : array_like
The exponents.
Returns
-------
y : ndarray
The bases in `x1` raised to the exponents in `x2`.
Examples
--------
Cube each element in a list.
>>> x1 = range(6)
>>> x1
[0, 1, 2, 3, 4, 5]
>>> np.power(x1, 3)
array([ 0, 1, 8, 27, 64, 125])
Raise the bases to different exponents.
>>> x2 = [1.0, 2.0, 3.0, 3.0, 2.0, 1.0]
>>> np.power(x1, x2)
array([ 0., 1., 8., 27., 16., 5.])
The effect of broadcasting.
>>> x2 = np.array([[1, 2, 3, 3, 2, 1], [1, 2, 3, 3, 2, 1]])
>>> x2
array([[1, 2, 3, 3, 2, 1],
[1, 2, 3, 3, 2, 1]])
>>> np.power(x1, x2)
array([[ 0, 1, 8, 27, 16, 5],
[ 0, 1, 8, 27, 16, 5]])
""")
add_newdoc('numpy.core.umath', 'radians',
"""
Convert angles from degrees to radians.
Parameters
----------
x : array_like
Input array in degrees.
out : ndarray, optional
Output array of same shape as `x`.
Returns
-------
y : ndarray
The corresponding radian values.
See Also
--------
deg2rad : equivalent function
Examples
--------
Convert a degree array to radians
>>> deg = np.arange(12.) * 30.
>>> np.radians(deg)
array([ 0. , 0.52359878, 1.04719755, 1.57079633, 2.0943951 ,
2.61799388, 3.14159265, 3.66519143, 4.1887902 , 4.71238898,
5.23598776, 5.75958653])
>>> out = np.zeros((deg.shape))
>>> ret = np.radians(deg, out)
>>> ret is out
True
""")
add_newdoc('numpy.core.umath', 'deg2rad',
"""
Convert angles from degrees to radians.
Parameters
----------
x : array_like
Angles in degrees.
Returns
-------
y : ndarray
The corresponding angle in radians.
See Also
--------
rad2deg : Convert angles from radians to degrees.
unwrap : Remove large jumps in angle by wrapping.
Notes
-----
.. versionadded:: 1.3.0
``deg2rad(x)`` is ``x * pi / 180``.
Examples
--------
>>> np.deg2rad(180)
3.1415926535897931
""")
add_newdoc('numpy.core.umath', 'reciprocal',
"""
Return the reciprocal of the argument, element-wise.
Calculates ``1/x``.
Parameters
----------
x : array_like
Input array.
Returns
-------
y : ndarray
Return array.
Notes
-----
.. note::
This function is not designed to work with integers.
For integer arguments with absolute value larger than 1 the result is
always zero because of the way Python handles integer division.
For integer zero the result is an overflow.
Examples
--------
>>> np.reciprocal(2.)
0.5
>>> np.reciprocal([1, 2., 3.33])
array([ 1. , 0.5 , 0.3003003])
""")
add_newdoc('numpy.core.umath', 'remainder',
"""
Return element-wise remainder of division.
Computes ``x1 - floor(x1 / x2) * x2``.
Parameters
----------
x1 : array_like
Dividend array.
x2 : array_like
Divisor array.
out : ndarray, optional
Array into which the output is placed. Its type is preserved and it
must be of the right shape to hold the output. See doc.ufuncs.
Returns
-------
y : ndarray
The remainder of the quotient ``x1/x2``, element-wise. Returns a scalar
if both `x1` and `x2` are scalars.
See Also
--------
divide, floor
Notes
-----
Returns 0 when `x2` is 0 and both `x1` and `x2` are (arrays of) integers.
Examples
--------
>>> np.remainder([4, 7], [2, 3])
array([0, 1])
>>> np.remainder(np.arange(7), 5)
array([0, 1, 2, 3, 4, 0, 1])
""")
add_newdoc('numpy.core.umath', 'right_shift',
"""
Shift the bits of an integer to the right.
Bits are shifted to the right by removing `x2` bits at the right of `x1`.
Since the internal representation of numbers is in binary format, this
operation is equivalent to dividing `x1` by ``2**x2``.
Parameters
----------
x1 : array_like, int
Input values.
x2 : array_like, int
Number of bits to remove at the right of `x1`.
Returns
-------
out : ndarray, int
Return `x1` with bits shifted `x2` times to the right.
See Also
--------
left_shift : Shift the bits of an integer to the left.
binary_repr : Return the binary representation of the input number
as a string.
Examples
--------
>>> np.binary_repr(10)
'1010'
>>> np.right_shift(10, 1)
5
>>> np.binary_repr(5)
'101'
>>> np.right_shift(10, [1,2,3])
array([5, 2, 1])
""")
add_newdoc('numpy.core.umath', 'rint',
"""
Round elements of the array to the nearest integer.
Parameters
----------
x : array_like
Input array.
Returns
-------
out : {ndarray, scalar}
Output array is same shape and type as `x`.
See Also
--------
ceil, floor, trunc
Examples
--------
>>> a = np.array([-1.7, -1.5, -0.2, 0.2, 1.5, 1.7, 2.0])
>>> np.rint(a)
array([-2., -2., -0., 0., 2., 2., 2.])
""")
add_newdoc('numpy.core.umath', 'sign',
"""
Returns an element-wise indication of the sign of a number.
The `sign` function returns ``-1 if x < 0, 0 if x==0, 1 if x > 0``.
Parameters
----------
x : array_like
Input values.
Returns
-------
y : ndarray
The sign of `x`.
Examples
--------
>>> np.sign([-5., 4.5])
array([-1., 1.])
>>> np.sign(0)
0
""")
add_newdoc('numpy.core.umath', 'signbit',
"""
Returns element-wise True where signbit is set (less than zero).
Parameters
----------
x: array_like
The input value(s).
out : ndarray, optional
Array into which the output is placed. Its type is preserved
and it must be of the right shape to hold the output.
See `doc.ufuncs`.
Returns
-------
result : ndarray of bool
Output array, or reference to `out` if that was supplied.
Examples
--------
>>> np.signbit(-1.2)
True
>>> np.signbit(np.array([1, -2.3, 2.1]))
array([False, True, False], dtype=bool)
""")
add_newdoc('numpy.core.umath', 'copysign',
"""
Change the sign of x1 to that of x2, element-wise.
If both arguments are arrays or sequences, they have to be of the same
length. If `x2` is a scalar, its sign will be copied to all elements of
`x1`.
Parameters
----------
x1: array_like
Values to change the sign of.
x2: array_like
The sign of `x2` is copied to `x1`.
out : ndarray, optional
Array into which the output is placed. Its type is preserved and it
must be of the right shape to hold the output. See doc.ufuncs.
Returns
-------
out : array_like
The values of `x1` with the sign of `x2`.
Examples
--------
>>> np.copysign(1.3, -1)
-1.3
>>> 1/np.copysign(0, 1)
inf
>>> 1/np.copysign(0, -1)
-inf
>>> np.copysign([-1, 0, 1], -1.1)
array([-1., -0., -1.])
>>> np.copysign([-1, 0, 1], np.arange(3)-1)
array([-1., 0., 1.])
""")
add_newdoc('numpy.core.umath', 'nextafter',
"""
Return the next representable floating-point value after x1 in the direction
of x2 element-wise.
Parameters
----------
x1 : array_like
Values to find the next representable value of.
x2 : array_like
The direction where to look for the next representable value of `x1`.
out : ndarray, optional
Array into which the output is placed. Its type is preserved and it
must be of the right shape to hold the output. See `doc.ufuncs`.
Returns
-------
out : array_like
The next representable values of `x1` in the direction of `x2`.
Examples
--------
>>> eps = np.finfo(np.float64).eps
>>> np.nextafter(1, 2) == eps + 1
True
>>> np.nextafter([1, 2], [2, 1]) == [eps + 1, 2 - eps]
array([ True, True], dtype=bool)
""")
add_newdoc('numpy.core.umath', 'spacing',
"""
Return the distance between x and the nearest adjacent number.
Parameters
----------
x1: array_like
Values to find the spacing of.
Returns
-------
out : array_like
The spacing of values of `x1`.
Notes
-----
It can be considered as a generalization of EPS:
``spacing(np.float64(1)) == np.finfo(np.float64).eps``, and there
should not be any representable number between ``x + spacing(x)`` and
x for any finite x.
Spacing of +- inf and nan is nan.
Examples
--------
>>> np.spacing(1) == np.finfo(np.float64).eps
True
""")
add_newdoc('numpy.core.umath', 'sin',
"""
Trigonometric sine, element-wise.
Parameters
----------
x : array_like
Angle, in radians (:math:`2 \\pi` rad equals 360 degrees).
Returns
-------
y : array_like
The sine of each element of x.
See Also
--------
arcsin, sinh, cos
Notes
-----
The sine is one of the fundamental functions of trigonometry
(the mathematical study of triangles). Consider a circle of radius
1 centered on the origin. A ray comes in from the :math:`+x` axis,
makes an angle at the origin (measured counter-clockwise from that
axis), and departs from the origin. The :math:`y` coordinate of
the outgoing ray's intersection with the unit circle is the sine
of that angle. It ranges from -1 for :math:`x=3\\pi / 2` to
+1 for :math:`\\pi / 2.` The function has zeroes where the angle is
a multiple of :math:`\\pi`. Sines of angles between :math:`\\pi` and
:math:`2\\pi` are negative. The numerous properties of the sine and
related functions are included in any standard trigonometry text.
Examples
--------
Print sine of one angle:
>>> np.sin(np.pi/2.)
1.0
Print sines of an array of angles given in degrees:
>>> np.sin(np.array((0., 30., 45., 60., 90.)) * np.pi / 180. )
array([ 0. , 0.5 , 0.70710678, 0.8660254 , 1. ])
Plot the sine function:
>>> import matplotlib.pylab as plt
>>> x = np.linspace(-np.pi, np.pi, 201)
>>> plt.plot(x, np.sin(x))
>>> plt.xlabel('Angle [rad]')
>>> plt.ylabel('sin(x)')
>>> plt.axis('tight')
>>> plt.show()
""")
add_newdoc('numpy.core.umath', 'sinh',
"""
Hyperbolic sine, element-wise.
Equivalent to ``1/2 * (np.exp(x) - np.exp(-x))`` or
``-1j * np.sin(1j*x)``.
Parameters
----------
x : array_like
Input array.
out : ndarray, optional
Output array of same shape as `x`.
Returns
-------
y : ndarray
The corresponding hyperbolic sine values.
Raises
------
ValueError: invalid return array shape
if `out` is provided and `out.shape` != `x.shape` (See Examples)
Notes
-----
If `out` is provided, the function writes the result into it,
and returns a reference to `out`. (See Examples)
References
----------
M. Abramowitz and I. A. Stegun, Handbook of Mathematical Functions.
New York, NY: Dover, 1972, pg. 83.
Examples
--------
>>> np.sinh(0)
0.0
>>> np.sinh(np.pi*1j/2)
1j
>>> np.sinh(np.pi*1j) # (exact value is 0)
1.2246063538223773e-016j
>>> # Discrepancy due to vagaries of floating point arithmetic.
>>> # Example of providing the optional output parameter
>>> out2 = np.sinh([0.1], out1)
>>> out2 is out1
True
>>> # Example of ValueError due to provision of shape mis-matched `out`
>>> np.sinh(np.zeros((3,3)),np.zeros((2,2)))
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
ValueError: invalid return array shape
""")
add_newdoc('numpy.core.umath', 'sqrt',
"""
Return the positive square-root of an array, element-wise.
Parameters
----------
x : array_like
The values whose square-roots are required.
out : ndarray, optional
Alternate array object in which to put the result; if provided, it
must have the same shape as `x`
Returns
-------
y : ndarray
An array of the same shape as `x`, containing the positive
square-root of each element in `x`. If any element in `x` is
complex, a complex array is returned (and the square-roots of
negative reals are calculated). If all of the elements in `x`
are real, so is `y`, with negative elements returning ``nan``.
If `out` was provided, `y` is a reference to it.
See Also
--------
lib.scimath.sqrt
A version which returns complex numbers when given negative reals.
Notes
-----
*sqrt* has--consistent with common convention--as its branch cut the
real "interval" [`-inf`, 0), and is continuous from above on it.
(A branch cut is a curve in the complex plane across which a given
complex function fails to be continuous.)
Examples
--------
>>> np.sqrt([1,4,9])
array([ 1., 2., 3.])
>>> np.sqrt([4, -1, -3+4J])
array([ 2.+0.j, 0.+1.j, 1.+2.j])
>>> np.sqrt([4, -1, np.inf])
array([ 2., NaN, Inf])
""")
add_newdoc('numpy.core.umath', 'square',
"""
Return the element-wise square of the input.
Parameters
----------
x : array_like
Input data.
Returns
-------
out : ndarray
Element-wise `x*x`, of the same shape and dtype as `x`.
Returns scalar if `x` is a scalar.
See Also
--------
numpy.linalg.matrix_power
sqrt
power
Examples
--------
>>> np.square([-1j, 1])
array([-1.-0.j, 1.+0.j])
""")
add_newdoc('numpy.core.umath', 'subtract',
"""
Subtract arguments, element-wise.
Parameters
----------
x1, x2 : array_like
The arrays to be subtracted from each other.
Returns
-------
y : ndarray
The difference of `x1` and `x2`, element-wise. Returns a scalar if
both `x1` and `x2` are scalars.
Notes
-----
Equivalent to ``x1 - x2`` in terms of array broadcasting.
Examples
--------
>>> np.subtract(1.0, 4.0)
-3.0
>>> x1 = np.arange(9.0).reshape((3, 3))
>>> x2 = np.arange(3.0)
>>> np.subtract(x1, x2)
array([[ 0., 0., 0.],
[ 3., 3., 3.],
[ 6., 6., 6.]])
""")
add_newdoc('numpy.core.umath', 'tan',
"""
Compute tangent element-wise.
Equivalent to ``np.sin(x)/np.cos(x)`` element-wise.
Parameters
----------
x : array_like
Input array.
out : ndarray, optional
Output array of same shape as `x`.
Returns
-------
y : ndarray
The corresponding tangent values.
Raises
------
ValueError: invalid return array shape
if `out` is provided and `out.shape` != `x.shape` (See Examples)
Notes
-----
If `out` is provided, the function writes the result into it,
and returns a reference to `out`. (See Examples)
References
----------
M. Abramowitz and I. A. Stegun, Handbook of Mathematical Functions.
New York, NY: Dover, 1972.
Examples
--------
>>> from math import pi
>>> np.tan(np.array([-pi,pi/2,pi]))
array([ 1.22460635e-16, 1.63317787e+16, -1.22460635e-16])
>>>
>>> # Example of providing the optional output parameter illustrating
>>> # that what is returned is a reference to said parameter
>>> out2 = np.cos([0.1], out1)
>>> out2 is out1
True
>>>
>>> # Example of ValueError due to provision of shape mis-matched `out`
>>> np.cos(np.zeros((3,3)),np.zeros((2,2)))
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
ValueError: invalid return array shape
""")
add_newdoc('numpy.core.umath', 'tanh',
"""
Compute hyperbolic tangent element-wise.
Equivalent to ``np.sinh(x)/np.cosh(x)`` or
``-1j * np.tan(1j*x)``.
Parameters
----------
x : array_like
Input array.
out : ndarray, optional
Output array of same shape as `x`.
Returns
-------
y : ndarray
The corresponding hyperbolic tangent values.
Raises
------
ValueError: invalid return array shape
if `out` is provided and `out.shape` != `x.shape` (See Examples)
Notes
-----
If `out` is provided, the function writes the result into it,
and returns a reference to `out`. (See Examples)
References
----------
.. [1] M. Abramowitz and I. A. Stegun, Handbook of Mathematical Functions.
New York, NY: Dover, 1972, pg. 83.
http://www.math.sfu.ca/~cbm/aands/
.. [2] Wikipedia, "Hyperbolic function",
http://en.wikipedia.org/wiki/Hyperbolic_function
Examples
--------
>>> np.tanh((0, np.pi*1j, np.pi*1j/2))
array([ 0. +0.00000000e+00j, 0. -1.22460635e-16j, 0. +1.63317787e+16j])
>>> # Example of providing the optional output parameter illustrating
>>> # that what is returned is a reference to said parameter
>>> out2 = np.tanh([0.1], out1)
>>> out2 is out1
True
>>> # Example of ValueError due to provision of shape mis-matched `out`
>>> np.tanh(np.zeros((3,3)),np.zeros((2,2)))
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
ValueError: invalid return array shape
""")
add_newdoc('numpy.core.umath', 'true_divide',
"""
Returns a true division of the inputs, element-wise.
Instead of the Python traditional 'floor division', this returns a true
division. True division adjusts the output type to present the best
answer, regardless of input types.
Parameters
----------
x1 : array_like
Dividend array.
x2 : array_like
Divisor array.
Returns
-------
out : ndarray
Result is scalar if both inputs are scalar, ndarray otherwise.
Notes
-----
The floor division operator ``//`` was added in Python 2.2 making ``//``
and ``/`` equivalent operators. The default floor division operation of
``/`` can be replaced by true division with
``from __future__ import division``.
In Python 3.0, ``//`` is the floor division operator and ``/`` the
true division operator. The ``true_divide(x1, x2)`` function is
equivalent to true division in Python.
Examples
--------
>>> x = np.arange(5)
>>> np.true_divide(x, 4)
array([ 0. , 0.25, 0.5 , 0.75, 1. ])
>>> x/4
array([0, 0, 0, 0, 1])
>>> x//4
array([0, 0, 0, 0, 1])
>>> from __future__ import division
>>> x/4
array([ 0. , 0.25, 0.5 , 0.75, 1. ])
>>> x//4
array([0, 0, 0, 0, 1])
""")
| gpl-3.0 |
tensorflow/models | research/cognitive_planning/viz_active_vision_dataset_main.py | 5 | 13173 | # Copyright 2018 The TensorFlow Authors All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Initializes at random location and visualizes the optimal path.
Different modes of execution:
1) benchmark: It generates benchmark_iter sample trajectory to random goals
and plots the histogram of path lengths. It can be also used to see how fast
it runs.
2) vis: It visualizes the generated paths by image, semantic segmentation, and
so on.
3) human: allows the user to navigate through environment from keyboard input.
python viz_active_vision_dataset_main -- \
--mode=benchmark --benchmark_iter=1000 --gin_config=envs/configs/active_vision_config.gin
python viz_active_vision_dataset_main -- \
--mode=vis \
--gin_config=envs/configs/active_vision_config.gin
python viz_active_vision_dataset_main -- \
--mode=human \
--gin_config=envs/configs/active_vision_config.gin
python viz_active_vision_dataset_main.py --mode=eval --eval_folder=/usr/local/google/home/$USER/checkin_log_det/evals/ --output_folder=/usr/local/google/home/$USER/test_imgs/ --gin_config=envs/configs/active_vision_config.gin
"""
import matplotlib
# pylint: disable=g-import-not-at-top
# Need Tk for interactive plots.
matplotlib.use('TkAgg')
import tensorflow as tf
from matplotlib import pyplot as plt
import numpy as np
import os
from pyglib import app
from pyglib import flags
import gin
import cv2
from envs import active_vision_dataset_env
from envs import task_env
VIS_MODE = 'vis'
HUMAN_MODE = 'human'
BENCHMARK_MODE = 'benchmark'
GRAPH_MODE = 'graph'
EVAL_MODE = 'eval'
flags.DEFINE_enum('mode', VIS_MODE,
[VIS_MODE, HUMAN_MODE, BENCHMARK_MODE, GRAPH_MODE, EVAL_MODE],
'mode of the execution')
flags.DEFINE_integer('benchmark_iter', 1000,
'number of iterations for benchmarking')
flags.DEFINE_string('eval_folder', '', 'the path to the eval folder')
flags.DEFINE_string('output_folder', '',
'the path to which the images and gifs are written')
flags.DEFINE_multi_string('gin_config', [],
'List of paths to a gin config files for the env.')
flags.DEFINE_multi_string('gin_params', [],
'Newline separated list of Gin parameter bindings.')
mt = task_env.ModalityTypes
FLAGS = flags.FLAGS
def benchmark(env, targets):
  """Benchmarks the speed of sequence generation by env.
  Args:
    env: environment.
    targets: list of target classes.
  """
  episode_lengths = {}
  all_init_configs = {}
  # Running average of how often each action is selected across all
  # sampled trajectories.
  all_actions = dict([(a, 0.) for a in env.actions])
  for i in range(FLAGS.benchmark_iter):
    path, actions, _, _ = env.random_step_sequence()
    selected_actions = np.argmax(actions, axis=-1)
    # Per-trajectory action frequencies, then folded into the global average.
    new_actions = dict([(a, 0.) for a in env.actions])
    for a in selected_actions:
      new_actions[env.actions[a]] += 1. / selected_actions.shape[0]
    for a in new_actions:
      all_actions[a] += new_actions[a] / FLAGS.benchmark_iter
    start_image_id, world, goal = env.get_init_config(path)
    print world
    # Record each distinct (start, goal, length) configuration per world.
    if world not in all_init_configs:
      all_init_configs[world] = set()
    all_init_configs[world].add((start_image_id, goal, len(actions)))
    # Collect episode lengths per goal class for the histograms below.
    if env.goal_index not in episode_lengths:
      episode_lengths[env.goal_index] = []
    episode_lengths[env.goal_index].append(len(actions))
  # One histogram subplot per goal class (2x3 grid).
  for i, cls in enumerate(episode_lengths):
    plt.subplot(231 + i)
    plt.hist(episode_lengths[cls])
    plt.title(targets[cls])
  plt.show()
def human(env, targets):
  """Lets user play around the env manually.
  Keyboard controls: a/d = strafe left/right, w/s = forward/backward,
  j/l = rotate ccw/cw, n = stop.
  Args:
    env: environment to step through.
    targets: list of target class names, indexed by env.goal_index.
  """
  # Two equivalent keyboard mappings: one to action names, one to
  # integer action indices. One of the two is picked at random per
  # keypress below, exercising both action input formats of env.step.
  string_key_map = {
      'a': 'left',
      'd': 'right',
      'w': 'forward',
      's': 'backward',
      'j': 'rotate_ccw',
      'l': 'rotate_cw',
      'n': 'stop'
  }
  integer_key_map = {
      'a': env.actions.index('left'),
      'd': env.actions.index('right'),
      'w': env.actions.index('forward'),
      's': env.actions.index('backward'),
      'j': env.actions.index('rotate_ccw'),
      'l': env.actions.index('rotate_cw'),
      'n': env.actions.index('stop')
  }
  for k in integer_key_map:
    integer_key_map[k] = np.int32(integer_key_map[k])
  plt.ion()
  for _ in range(20):
    obs = env.reset()
    steps = -1
    action = None
    while True:
      print 'distance = ', obs[task_env.ModalityTypes.DISTANCE]
      steps += 1
      # Unpack the observation modalities for display.
      depth_value = obs[task_env.ModalityTypes.DEPTH][:, :, 0]
      depth_mask = obs[task_env.ModalityTypes.DEPTH][:, :, 1]
      seg_mask = np.squeeze(obs[task_env.ModalityTypes.SEMANTIC_SEGMENTATION])
      det_mask = np.argmax(
          obs[task_env.ModalityTypes.OBJECT_DETECTION], axis=-1)
      img = obs[task_env.ModalityTypes.IMAGE]
      plt.subplot(231)
      plt.title('steps = {}'.format(steps))
      plt.imshow(img.astype(np.uint8))
      plt.subplot(232)
      plt.imshow(depth_value)
      plt.title('depth value')
      plt.subplot(233)
      plt.imshow(depth_mask)
      plt.title('depth mask')
      plt.subplot(234)
      plt.imshow(seg_mask)
      plt.title('seg')
      plt.subplot(235)
      plt.imshow(det_mask)
      plt.title('det')
      plt.subplot(236)
      plt.title('goal={}'.format(targets[env.goal_index]))
      plt.draw()
      # Read keys until a valid one arrives; randomly use the string or
      # integer action encoding.
      while True:
        s = raw_input('key = ')
        if np.random.rand() > 0.5:
          key_map = string_key_map
        else:
          key_map = integer_key_map
        if s in key_map:
          action = key_map[s]
          break
        else:
          print 'invalid action'
      print 'action = {}'.format(action)
      if action == 'stop':
        print 'dist to goal: {}'.format(len(env.path_to_goal()) - 2)
        break
      obs, reward, done, info = env.step(action)
      print 'reward = {}, done = {}, success = {}'.format(
          reward, done, info['success'])
def visualize_random_step_sequence(env):
  """Visualizes random sequence of steps.
  Draws image, depth, segmentation, and detection modalities for each
  step of up to 20 randomly generated trajectories.
  Args:
    env: environment from which random step sequences are sampled.
  """
  plt.ion()
  for _ in range(20):
    path, actions, _, step_outputs = env.random_step_sequence(max_len=30)
    print 'path = {}'.format(path)
    for action, step_output in zip(actions, step_outputs):
      obs, _, done, _ = step_output
      # Unpack the observation modalities for display.
      depth_value = obs[task_env.ModalityTypes.DEPTH][:, :, 0]
      depth_mask = obs[task_env.ModalityTypes.DEPTH][:, :, 1]
      seg_mask = np.squeeze(obs[task_env.ModalityTypes.SEMANTIC_SEGMENTATION])
      det_mask = np.argmax(
          obs[task_env.ModalityTypes.OBJECT_DETECTION], axis=-1)
      img = obs[task_env.ModalityTypes.IMAGE]
      plt.subplot(231)
      plt.imshow(img.astype(np.uint8))
      plt.subplot(232)
      plt.imshow(depth_value)
      plt.title('depth value')
      plt.subplot(233)
      plt.imshow(depth_mask)
      plt.title('depth mask')
      plt.subplot(234)
      plt.imshow(seg_mask)
      plt.title('seg')
      plt.subplot(235)
      plt.imshow(det_mask)
      plt.title('det')
      plt.subplot(236)
      print 'action = {}'.format(action)
      print 'done = {}'.format(done)
      plt.draw()
      if raw_input('press \'n\' to go to the next random sequence. Otherwise, '
                   'press any key to continue...') == 'n':
        break
def visualize(env, input_folder, output_root_folder):
  """visualizes images for sequence of steps from the evals folder.
  Reads each .npy eval file from input_folder, replays the recorded
  states in env, writes per-step JPEG frames, and converts each
  sequence to a GIF via ImageMagick's `convert`.
  Args:
    env: environment used to render observations from recorded states.
    input_folder: folder containing eval .npy files.
    output_root_folder: root folder for 'images' and 'gifs' outputs.
  """
  def which_env(file_name):
    # Maps a 3-digit image-id prefix to its home environment name.
    # NOTE(review): mapping covers only three homes - presumably the
    # ones used for evaluation.
    img_name = file_name.split('_')[0][2:5]
    env_dict = {'161': 'Home_016_1', '131': 'Home_013_1', '111': 'Home_011_1'}
    if img_name in env_dict:
      return env_dict[img_name]
    else:
      raise ValueError('could not resolve env: {} {}'.format(
          img_name, file_name))

  def which_goal(file_name):
    # Goal name is everything after the first underscore.
    return file_name[file_name.find('_')+1:]

  output_images_folder = os.path.join(output_root_folder, 'images')
  output_gifs_folder = os.path.join(output_root_folder, 'gifs')
  if not tf.gfile.IsDirectory(output_images_folder):
    tf.gfile.MakeDirs(output_images_folder)
  if not tf.gfile.IsDirectory(output_gifs_folder):
    tf.gfile.MakeDirs(output_gifs_folder)
  npy_files = [
      os.path.join(input_folder, name)
      for name in tf.gfile.ListDirectory(input_folder)
      if name.find('npy') >= 0
  ]
  for i, npy_file in enumerate(npy_files):
    print 'saving images {}/{}'.format(i, len(npy_files))
    # Strip directory and '.npy' extension.
    pure_name = npy_file[npy_file.rfind('/') + 1:-4]
    output_folder = os.path.join(output_images_folder, pure_name)
    if not tf.gfile.IsDirectory(output_folder):
      tf.gfile.MakeDirs(output_folder)
    print '*******'
    print pure_name[0:pure_name.find('_')]
    env.reset_for_eval(which_env(pure_name),
                       which_goal(pure_name),
                       pure_name[0:pure_name.find('_')],
                      )
    with tf.gfile.Open(npy_file) as h:
      states = np.load(h).item()['states']
    images = [
        env.observation(state)[mt.IMAGE] for state in states
    ]
    for j, img in enumerate(images):
      # OpenCV expects BGR, observations are RGB, hence the ::-1 flip.
      cv2.imwrite(os.path.join(output_folder, '{0:03d}'.format(j) + '.jpg'),
                  img[:, :, ::-1])
    print 'converting to gif'
    os.system(
        'convert -set delay 20 -colors 256 -dispose 1 {}/*.jpg {}.gif'.format(
            output_folder,
            os.path.join(output_gifs_folder, pure_name + '.gif')
        )
    )
def evaluate_folder(env, folder_path):
  """Evaluates the performance from the evals folder.
  Scans each per-iteration checkpoint subfolder, computes per-class
  success rates (final distance to goal <= 5 steps), reports the best
  iteration, and renders images/gifs for it.
  Args:
    env: environment used for rendering the best iteration.
    folder_path: root folder containing one subfolder per checkpoint.
  """
  targets = ['fridge', 'dining_table', 'microwave', 'tv', 'couch']

  def compute_acc(npy_file):
    # Returns (goal category, final distance-to-goal minus 2).
    with tf.gfile.Open(npy_file) as h:
      data = np.load(h).item()
    # 'dining_table' contains an underscore, so the generic rfind('_')
    # parse below would truncate it; special-case it first.
    if npy_file.find('dining_table') >= 0:
      category = 'dining_table'
    else:
      category = npy_file[npy_file.rfind('_') + 1:-4]
    return category, data['distance'][-1] - 2

  def evaluate_iteration(folder):
    """Evaluates the data from the folder of certain eval iteration."""
    print folder
    npy_files = [
        os.path.join(folder, name)
        for name in tf.gfile.ListDirectory(folder)
        if name.find('npy') >= 0
    ]
    eval_stats = {c: [] for c in targets}
    for npy_file in npy_files:
      try:
        category, dist = compute_acc(npy_file)
      except:  # pylint: disable=bare-except
        # Skip unreadable/partial eval files.
        continue
      # Success = reached within 5 steps of the goal.
      eval_stats[category].append(float(dist <= 5))
    for c in eval_stats:
      if not eval_stats[c]:
        # A missing class means the iteration's eval is incomplete.
        print 'incomplete eval {}: empty class {}'.format(folder_path, c)
        return None
      eval_stats[c] = np.mean(eval_stats[c])
    eval_stats['mean'] = np.mean(eval_stats.values())
    return eval_stats

  checkpoint_folders = [
      folder_path + x
      for x in tf.gfile.ListDirectory(folder_path)
      if tf.gfile.IsDirectory(folder_path + x)
  ]
  print '{} folders found'.format(len(checkpoint_folders))
  print '------------------------'
  all_iters = []
  all_accs = []
  for i, folder in enumerate(checkpoint_folders):
    print 'processing {}/{}'.format(i, len(checkpoint_folders))
    eval_stats = evaluate_iteration(folder)
    if eval_stats is None:
      continue
    else:
      # Folder name is the checkpoint iteration number.
      iter_no = int(folder[folder.rfind('/') + 1:])
      print 'result ', iter_no, eval_stats['mean']
      all_accs.append(eval_stats['mean'])
      all_iters.append(iter_no)
  all_accs = np.asarray(all_accs)
  all_iters = np.asarray(all_iters)
  idx = np.argmax(all_accs)
  print 'best result at iteration {} was {}'.format(all_iters[idx],
                                                    all_accs[idx])
  order = np.argsort(all_iters)
  all_iters = all_iters[order]
  all_accs = all_accs[order]
  #plt.plot(all_iters, all_accs)
  #plt.show()
  #print 'done plotting'
  best_iteration_folder = os.path.join(folder_path, str(all_iters[idx]))
  print 'generating gifs and images for {}'.format(best_iteration_folder)
  visualize(env, best_iteration_folder, FLAGS.output_folder)
def main(_):
  """Entry point: builds the env and dispatches on --mode."""
  gin.parse_config_files_and_bindings(FLAGS.gin_config, FLAGS.gin_params)
  print('********')
  print(FLAGS.mode)
  print(FLAGS.gin_config)
  print(FLAGS.gin_params)
  # Request all observation modalities; the visualization modes expect
  # every one of them in each observation dict.
  env = active_vision_dataset_env.ActiveVisionDatasetEnv(modality_types=[
      task_env.ModalityTypes.IMAGE,
      task_env.ModalityTypes.SEMANTIC_SEGMENTATION,
      task_env.ModalityTypes.OBJECT_DETECTION, task_env.ModalityTypes.DEPTH,
      task_env.ModalityTypes.DISTANCE
  ])
  if FLAGS.mode == BENCHMARK_MODE:
    benchmark(env, env.possible_targets)
  elif FLAGS.mode == GRAPH_MODE:
    # Sanity-check the scene graph of every world.
    for loc in env.worlds:
      env.check_scene_graph(loc, 'fridge')
  elif FLAGS.mode == HUMAN_MODE:
    human(env, env.possible_targets)
  elif FLAGS.mode == VIS_MODE:
    visualize_random_step_sequence(env)
  elif FLAGS.mode == EVAL_MODE:
    evaluate_folder(env, FLAGS.eval_folder)

if __name__ == '__main__':
  app.run(main)
| apache-2.0 |
yanlend/scikit-learn | examples/linear_model/plot_sgd_loss_functions.py | 249 | 1095 | """
==========================
SGD: convex loss functions
==========================
A plot that compares the various convex loss functions supported by
:class:`sklearn.linear_model.SGDClassifier` .
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
def modified_huber_loss(y_true, y_pred):
    """Modified Huber loss as a function of the margin z = y_true * y_pred:
    linear (-4z) for z < -1, quadratic (1 - z)^2 on [-1, 1), and 0 for z >= 1.
    """
    margin = y_pred * y_true
    quadratic = margin >= -1
    flat = margin >= 1.
    loss = -4 * margin
    loss[quadratic] = (1 - margin[quadratic]) ** 2
    loss[flat] = 0
    return loss
# Plot each convex surrogate loss as a function of the margin y * f(x)
# over [-4, 4], alongside the (non-convex) zero-one loss.
xmin, xmax = -4, 4
xx = np.linspace(xmin, xmax, 100)
plt.plot([xmin, 0, 0, xmax], [1, 1, 0, 0], 'k-',
         label="Zero-one loss")
plt.plot(xx, np.where(xx < 1, 1 - xx, 0), 'g-',
         label="Hinge loss")
plt.plot(xx, -np.minimum(xx, 0), 'm-',
         label="Perceptron loss")
plt.plot(xx, np.log2(1 + np.exp(-xx)), 'r-',
         label="Log loss")
plt.plot(xx, np.where(xx < 1, 1 - xx, 0) ** 2, 'b-',
         label="Squared hinge loss")
plt.plot(xx, modified_huber_loss(xx, 1), 'y--',
         label="Modified Huber loss")
plt.ylim((0, 8))
plt.legend(loc="upper right")
plt.xlabel(r"Decision function $f(x)$")
plt.ylabel("$L(y, f(x))$")
plt.show()
| bsd-3-clause |
NixaSoftware/CVis | venv/lib/python2.7/site-packages/pandas/tests/groupby/test_index_as_string.py | 1 | 3760 | import pytest
import pandas as pd
import numpy as np
from pandas.util.testing import assert_frame_equal, assert_series_equal
import pandas.util.testing as tm
@pytest.fixture(params=[['inner'], ['inner', 'outer']])
def frame(request):
    # Fixture parametrized over which columns become index levels:
    # a single 'inner' level or an ('inner', 'outer') MultiIndex.
    levels = request.param
    df = pd.DataFrame({'outer': ['a', 'a', 'a', 'b', 'b', 'b'],
                       'inner': [1, 2, 3, 1, 2, 3],
                       'A': np.arange(6),
                       'B': ['one', 'one', 'two', 'two', 'one', 'one']})
    if levels:
        df = df.set_index(levels)
    return df
@pytest.fixture()
def series():
    # Series 'A' with a three-level MultiIndex ('outer', 'inner', 'B').
    df = pd.DataFrame({'outer': ['a', 'a', 'a', 'b', 'b', 'b'],
                       'inner': [1, 2, 3, 1, 2, 3],
                       'A': np.arange(6),
                       'B': ['one', 'one', 'two', 'two', 'one', 'one']})
    s = df.set_index(['outer', 'inner', 'B'])['A']
    return s
@pytest.mark.parametrize('key_strs,groupers', [
    ('inner',  # Index name
     pd.Grouper(level='inner')
     ),
    (['inner'],  # List of index name
     [pd.Grouper(level='inner')]
     ),
    (['B', 'inner'],  # Column and index
     ['B', pd.Grouper(level='inner')]
     ),
    (['inner', 'B'],  # Index and column
     [pd.Grouper(level='inner'), 'B'])])
def test_grouper_index_level_as_string(frame, key_strs, groupers):
    # Grouping by index-level name strings must match grouping by the
    # equivalent explicit pd.Grouper(level=...) objects.
    result = frame.groupby(key_strs).mean()
    expected = frame.groupby(groupers).mean()
    assert_frame_equal(result, expected)
@pytest.mark.parametrize('levels', [
    'inner', 'outer', 'B',
    ['inner'], ['outer'], ['B'],
    ['inner', 'outer'], ['outer', 'inner'],
    ['inner', 'outer', 'B'], ['B', 'outer', 'inner']
])
def test_grouper_index_level_as_string_series(series, levels):
    # Grouping a Series by level-name strings must match grouping by the
    # equivalent pd.Grouper(level=...) objects.

    # Compute expected result
    if isinstance(levels, list):
        groupers = [pd.Grouper(level=lv) for lv in levels]
    else:
        groupers = pd.Grouper(level=levels)
    expected = series.groupby(groupers).mean()

    # Compute and check result
    result = series.groupby(levels).mean()
    assert_series_equal(result, expected)
@pytest.mark.parametrize('key_strs,key_groupers,level_groupers', [
    ('inner',  # Index name
     pd.Grouper(key='inner'),
     pd.Grouper(level='inner'),
     ),
    (['inner'],  # List of index name
     [pd.Grouper(key='inner')],
     [pd.Grouper(level='inner')]
     ),
    (['B', 'inner'],  # Column and index
     ['B', pd.Grouper(key='inner')],
     ['B', pd.Grouper(level='inner')]
     ),
    (['inner', 'B'],  # Index and column
     [pd.Grouper(key='inner'), 'B'],
     [pd.Grouper(level='inner'), 'B'])])
def test_grouper_column_index_level_precedence(frame,
                                               key_strs,
                                               key_groupers,
                                               level_groupers):
    # GH 5677, when a string passed as the `by` parameter
    # matches a column and an index level the column takes
    # precedence and a FutureWarning is raised

    # Add 'inner' column to frame
    # (frame already has an 'inner' index)
    frame['inner'] = [1, 1, 1, 1, 1, 1]

    # Performing a groupby with strings should produce warning
    with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
        result = frame.groupby(key_strs).mean()

    # Grouping with key Grouper should produce the same result and no warning
    with tm.assert_produces_warning(False):
        expected = frame.groupby(key_groupers).mean()

    assert_frame_equal(result, expected)

    # Grouping with level Grouper should produce a difference result but
    # still no warning
    with tm.assert_produces_warning(False):
        not_expected = frame.groupby(level_groupers).mean()
| apache-2.0 |
araichev/gtfstk | gtfstk/shapes.py | 1 | 8064 | """
Functions about shapes.
"""
from typing import Optional, List, Dict, TYPE_CHECKING
import pandas as pd
from pandas import DataFrame
import numpy as np
import utm
import shapely.geometry as sg
from . import constants as cs
from . import helpers as hp
# Help mypy but avoid circular imports
if TYPE_CHECKING:
from .feed import Feed
def build_geometry_by_shape(
    feed: "Feed",
    shape_ids: Optional[List[str]] = None,
    *,
    use_utm: bool = False,
) -> Dict:
    """
    Return a dictionary with structure shape_id -> Shapely LineString
    of shape.

    Parameters
    ----------
    feed : Feed
    shape_ids : list
        IDs of shapes in ``feed.shapes`` to restrict output to; return
        all shapes if ``None``.
    use_utm : boolean
        If ``True``, then use local UTM coordinates; otherwise, use
        WGS84 coordinates

    Returns
    -------
    dictionary
        Has the structure shape_id -> Shapely LineString of shape.
        Return the empty dictionary if ``feed.shapes is None``.

    """
    if feed.shapes is None:
        return {}

    shapes = feed.shapes.copy()
    if shape_ids is not None:
        shapes = shapes[shapes["shape_id"].isin(shape_ids)]

    # Note the output for conversion to UTM with the utm package:
    # >>> utm.from_latlon(47.9941214, 7.8509671)
    # (414278, 5316285, 32, 'T')
    # so [:2] keeps only the planar (easting, northing) pair.
    d = {}
    for shape, group in shapes.groupby("shape_id"):
        lons = group["shape_pt_lon"].values
        lats = group["shape_pt_lat"].values
        if use_utm:
            coords = [
                utm.from_latlon(lat, lon)[:2]
                for lat, lon in zip(lats, lons)
            ]
        else:
            coords = list(zip(lons, lats))
        d[shape] = sg.LineString(coords)
    return d
def shapes_to_geojson(
    feed: "Feed", shape_ids: Optional[List[str]] = None
) -> Dict:
    """
    Return a (decoded) GeoJSON FeatureCollection of LineString features
    representing ``feed.shapes``, one feature per shape, each carrying a
    ``shape_id`` property.
    Coordinates are in WGS84, the default GeoJSON reference system.
    If ``shape_ids`` is given, restrict the features to those shape IDs.
    Return the empty dictionary if ``feed.shapes is None``.
    """
    geometry_by_shape = feed.build_geometry_by_shape(shape_ids=shape_ids)
    if not geometry_by_shape:
        return {}

    features = []
    for shape, linestring in geometry_by_shape.items():
        features.append(
            {
                "properties": {"shape_id": shape},
                "type": "Feature",
                "geometry": sg.mapping(linestring),
            }
        )
    return {"type": "FeatureCollection", "features": features}
def get_shapes_intersecting_geometry(
    feed: "Feed", geometry, geo_shapes=None, *, geometrized: bool = False
) -> DataFrame:
    """
    Return the slice of ``feed.shapes`` that contains all shapes that
    intersect the given Shapely geometry, e.g. a Polygon or LineString.

    Parameters
    ----------
    feed : Feed
    geometry : Shapley geometry, e.g. a Polygon
        Specified in WGS84 coordinates
    geo_shapes : GeoPandas GeoDataFrame
        The output of :func:`geometrize_shapes`
    geometrized : boolean
        If ``True``, then return the shapes DataFrame as a GeoDataFrame
        of the form output by :func:`geometrize_shapes`

    Returns
    -------
    DataFrame or GeoDataFrame

    Notes
    -----
    - Requires GeoPandas
    - Specifying ``geo_shapes`` will skip the first step of the
      algorithm, namely, geometrizing ``feed.shapes``
    - Assume the following feed attributes are not ``None``:

      * ``feed.shapes``, if ``geo_shapes`` is not given

    """
    if geo_shapes is not None:
        f = geo_shapes.copy()
    else:
        f = geometrize_shapes(feed.shapes)

    cols = f.columns
    # Flag the rows whose LineString intersects the query geometry, then
    # keep only those rows, dropping the temporary 'hit' column.
    f["hit"] = f["geometry"].intersects(geometry)
    f = f[f["hit"]][cols]

    if geometrized:
        return f
    else:
        return ungeometrize_shapes(f)
def append_dist_to_shapes(feed: "Feed") -> "Feed":
    """
    Calculate and append the optional ``shape_dist_traveled`` field in
    ``feed.shapes`` in terms of the distance units ``feed.dist_units``.
    Return the resulting Feed.

    Raises
    ------
    ValueError
        If ``feed.shapes is None``.

    Notes
    -----
    - As a benchmark, using this function on `this Portland feed
      <https://transitfeeds.com/p/trimet/43/1400947517>`_
      produces a ``shape_dist_traveled`` column that differs by at most
      0.016 km in absolute value from of the original values
    - Assume the following feed attributes are not ``None``:

      * ``feed.shapes``

    """
    if feed.shapes is None:
        raise ValueError(
            "This function requires the feed to have a shapes.txt file"
        )

    feed = feed.copy()
    f = feed.shapes
    m_to_dist = hp.get_convert_dist("m", feed.dist_units)

    def compute_dist(group):
        # Compute the distances of the stops along this trip
        group = group.sort_values("shape_pt_sequence")
        shape = group["shape_id"].iat[0]
        if not isinstance(shape, str):
            # Shape ID is missing (e.g. NaN), so no distance can be computed.
            group["shape_dist_traveled"] = np.nan
            return group
        # Work in UTM meters so that Euclidean distances between
        # consecutive shape points are meaningful.
        points = [
            sg.Point(utm.from_latlon(lat, lon)[:2])
            for lon, lat in group[["shape_pt_lon", "shape_pt_lat"]].values
        ]
        # Cumulative distance along the ordered shape points.
        p_prev = points[0]
        d = 0
        distances = [0]
        for p in points[1:]:
            d += p.distance(p_prev)
            distances.append(d)
            p_prev = p
        group["shape_dist_traveled"] = distances
        return group

    g = f.groupby("shape_id", group_keys=False).apply(compute_dist)
    # Convert from meters
    g["shape_dist_traveled"] = g["shape_dist_traveled"].map(m_to_dist)

    feed.shapes = g
    return feed
def geometrize_shapes(
    shapes: DataFrame, *, use_utm: bool = False
) -> DataFrame:
    """
    Given a GTFS shapes DataFrame, convert it to a GeoPandas
    GeoDataFrame and return the result.
    The result has a ``'geometry'`` column of WGS84 LineStrings
    instead of the columns ``'shape_pt_sequence'``, ``'shape_pt_lon'``,
    ``'shape_pt_lat'``, and ``'shape_dist_traveled'``.
    If ``use_utm``, then use local UTM coordinates for the geometries.

    Notes
    ------
    Requires GeoPandas.
    """
    import geopandas as gpd

    # Sort so each shape's points are in traversal order before building
    # its LineString.
    f = shapes.copy().sort_values(["shape_id", "shape_pt_sequence"])

    def my_agg(group):
        # Collapse one shape's points into a single LineString geometry.
        d = {}
        d["geometry"] = sg.LineString(
            group[["shape_pt_lon", "shape_pt_lat"]].values
        )
        return pd.Series(d)

    g = f.groupby("shape_id").apply(my_agg).reset_index()
    g = gpd.GeoDataFrame(g, crs=cs.WGS84)

    if use_utm:
        # Derive the local UTM CRS from the first shape point's lat/lon.
        lat, lon = f.loc[0, ["shape_pt_lat", "shape_pt_lon"]].values
        crs = hp.get_utm_crs(lat, lon)
        g = g.to_crs(crs)

    return g
def ungeometrize_shapes(geo_shapes) -> DataFrame:
    """
    The inverse of :func:`geometrize_shapes`.

    Produces the columns:

    - ``'shape_id'``
    - ``'shape_pt_sequence'``
    - ``'shape_pt_lon'``
    - ``'shape_pt_lat'``

    If ``geo_shapes`` is in UTM coordinates (has a UTM CRS property),
    then convert those UTM coordinates back to WGS84 coordinates,
    which is the standard for a GTFS shapes table.
    """
    geo_shapes = geo_shapes.to_crs(cs.WGS84)
    # Explode each LineString back into one row per coordinate, numbering
    # the points to recover 'shape_pt_sequence'.
    F = []
    for index, row in geo_shapes.iterrows():
        F.extend(
            [
                [row["shape_id"], i, x, y]
                for i, (x, y) in enumerate(row["geometry"].coords)
            ]
        )
    return pd.DataFrame(
        F,
        columns=[
            "shape_id",
            "shape_pt_sequence",
            "shape_pt_lon",
            "shape_pt_lat",
        ],
    )
| mit |
Ziqi-Li/bknqgis | bokeh/bokeh/sampledata/airports.py | 15 | 1027 | """ The data in airports.json is a subset of US airports with field
elevations > 1500 meters. The query result was taken from
.. code-block:: none
http://services.nationalmap.gov/arcgis/rest/services/GlobalMap/GlobalMapWFS/MapServer/10/query
on October 15, 2015.
"""
from __future__ import absolute_import
from bokeh.util.dependencies import import_required
pd = import_required('pandas',
'airports sample data requires Pandas (http://pandas.pydata.org) to be installed')
import json
import os
from . import _data_dir
# Load the raw GeoJSON-style payload, flatten the nested attribute/geometry
# records and rename the flattened columns to friendly names in one pass.
with open(os.path.join(_data_dir(), 'airports.json'), 'r') as data_file:
    content = data_file.read()

airports = json.loads(content)

schema = [['attributes', 'nam'], ['attributes', 'zv3'], ['geometry', 'x'], ['geometry', 'y']]
data = pd.io.json.json_normalize(airports['features'], meta=schema)
data.rename(
    columns={
        'attributes.nam': 'name',
        'attributes.zv3': 'elevation',
        'geometry.x': 'x',
        'geometry.y': 'y',
    },
    inplace=True,
)
| gpl-2.0 |
radk0s/pathfinding | algorithm/graph.py | 1 | 2681 | from datetime import datetime
from cost import cost as costNorm
import dijkstra as d
import numpy as np
import matplotlib.pyplot as plt
import scipy.interpolate
import os
def cost(frm, to):
    # Edge weight between two vertices: delegate to the normalized cost
    # function on the (lon, lat, elevation) triples of both endpoints.
    src = (frm.lon, frm.lat, frm.ele)
    dst = (to.lon, to.lat, to.ele)
    return costNorm(*(src + dst))
def graph_path(frm, to, res):
    # Build a res x res grid graph from the sampled elevation CSV, run
    # Dijkstra from vertex `frm` to vertex `to`, render the result to
    # graph_path.png and return the path as (lon, lat) tuples.
    # NOTE: Python 2 code (print statements, xrange).
    start_time = datetime.now()
    g = d.Graph()
    filename = os.getcwd()+'/data/data'+str(res)+'.csv'
    # One vertex per CSV row; columns are lon \t lat \t elevation.
    # Note val[1]/val[0] are deliberately swapped when feeding add_vertex
    # (the plotting code below reads val[0] as x/lon and val[1] as y/lat).
    with open(filename, 'r') as file:
        count = 0
        for line in file.readlines():
            val = line.split('\t')
            g.add_vertex(count, float(val[1]),float(val[0]),float(val[2]))
            count += 1
    g.add_edge(0, 1, cost(g.get_vertex(0), g.get_vertex(1)))
    # 4-connected grid: nodes in the last row only get a "right" edge;
    # every other node gets a "down" edge plus, unless it is in the last
    # column, a "right" edge.
    for node in xrange(res**2 - 1):
        if node >= res**2 - res:
            g.add_edge(node, node+1, cost(g.get_vertex(node), g.get_vertex(node+1)))
        else:
            g.add_edge(node, node + res, cost(g.get_vertex(node), g.get_vertex(node + res)))
            if node % res != (res-1):
                g.add_edge(node, node + 1, cost(g.get_vertex(node), g.get_vertex(node + 1)))
    start = frm
    stop = to
    # origin = g.get_vertex(start)
    target = g.get_vertex(stop)
    d.dijkstra(g, g.get_vertex(start))
    # `path` is filled back-to-front by d.shortest, hence the [::-1] below.
    path = [target.get_id()]
    d.shortest(target, path)
    print 'The shortest path : %s' % (path[::-1])
    print 'total cost: ' + str(target.distance)
    elapsed = datetime.now() - start_time
    print elapsed
    # Re-read the raw samples to build the interpolated elevation background.
    x = []
    y = []
    z = []
    with open(filename, 'r') as file:
        for line in file.readlines():
            val = line.split('\t')
            x.append(float(val[0]))
            y.append(float(val[1]))
            z.append(float(val[2]))
    x = np.array(x)
    y = np.array(y)
    z = np.array(z)
    # RBF-interpolate elevation onto a regular 100x100 grid for imshow.
    xi, yi = np.linspace(x.min(), x.max(), 100), np.linspace(y.min(), y.max(), 100)
    xi, yi = np.meshgrid(xi, yi)
    rbf = scipy.interpolate.Rbf(x, y, z, function='linear')
    zi = rbf(xi, yi)
    xs = [g.get_vertex(v).lon for v in path]
    ys = [g.get_vertex(v).lat for v in path]
    plt.gcf().canvas.set_window_title('Dijkstra shortest path')
    plt.imshow(zi, vmin=z.min(), vmax=z.max(), origin='lower',
               extent=[x.min(), x.max(), y.min(), y.max()], cmap='terrain')
    plt.plot(xs, ys)
    plt.plot(xs[-1], ys[-1], 'g^')  # start marker (path list is reversed)
    plt.plot(xs[0], ys[0], 'rs')    # goal marker
    plt.colorbar()
    text = 'path cost: ' + str(target.distance) + '\n' \
        + 'time: ' + str(elapsed)
    plt.suptitle(text, fontsize=14, fontweight='bold')
    plt.savefig('graph_path.png')
    plt.close()
    # plt.show()
    return [(g.get_vertex(v).lon, g.get_vertex(v).lat) for v in reversed(path)]
| mit |
SummaLabs/DLS | app/backend-test/core_models/run03_test_train_model_on_dataset.py | 1 | 1751 | #!/usr/bin/python
# -*- coding: utf-8 -*-
__author__ = 'ar'
import app.backend.core.utils as dlsutils
import json
import skimage.io as io
import matplotlib.pyplot as plt
from keras.utils.visualize_util import plot as kplot
from app.backend.core.datasets.dbwatcher import DatasetsWatcher
from app.backend.core.models.flow_parser import DLSDesignerFlowsParser
from app.backend.core.models.batcher_image2d import BatcherImage2DLMDB
from app.backend.core.models.keras_trainer_v4 import KerasTrainer
# Designer-flow JSON used as the model under test.
pathTestModel='../../../data-test/test-models-json/test_cnn1.json'

if __name__ == '__main__':
    # Smoke test: build a Keras trainer from a saved designer flow, adjust
    # its input/output layers to the first available LMDB dataset, then
    # render the resulting network graph.
    dirData = dlsutils.getPathForDatasetDir()
    dirModels = dlsutils.getPathForModelsDir()  # NOTE(review): unused below
    dbWatcher = DatasetsWatcher(dirData)
    dbWatcher.refreshDatasetsInfo()
    assert ( len(dbWatcher.dictDbInfo.keys())>0 )
    # Python 2: dict.keys() returns a list, so [0] picks an arbitrary dataset.
    dbInfoTest = dbWatcher.dictDbInfo[dbWatcher.dictDbInfo.keys()[0]]
    print ('Dataset for tests : [ %s ]' % dbInfoTest.__str__())
    #
    with open(pathTestModel, 'r') as f:
        jsonModelData = json.load(f)
    modelParser = DLSDesignerFlowsParser(jsonModelData)
    modelTrainer, modelConfig = modelParser.buildKerasTrainer()
    batcherDB = BatcherImage2DLMDB(dbInfoTest.pathDB)
    #
    # Re-shape the model's input/output layers to match the dataset.
    modelTrainerAdjusted = modelTrainer.adjustModelInputOutput2DBData(modelTrainer.model, batcherDB)
    for ii,ll in enumerate(modelTrainerAdjusted.layers):
        print ('[%d/%d] : %s, shape: inp=%s, out=%s' % (ii,len(modelTrainerAdjusted.layers), ll, ll.input_shape, ll.output_shape))
    print ('*** Total Model params: %d' % modelTrainerAdjusted.count_params())
    #
    # Draw the network graph to a temp file and display it.
    fimg = '/tmp/keras_draw.png'
    kplot(modelTrainerAdjusted, to_file=fimg, show_shapes=True)
    img = io.imread(fimg)
    plt.imshow(img)
    plt.show()
| mit |
vortex-ape/scikit-learn | examples/linear_model/plot_sgd_weighted_samples.py | 65 | 1479 | """
=====================
SGD: Weighted samples
=====================
Plot decision function of a weighted dataset, where the size of points
is proportional to its weight.
"""
print(__doc__)

import numpy as np
import matplotlib.pyplot as plt
from sklearn import linear_model

# we create 20 points: 10 shifted by (1, 1) labeled +1, then 10 labeled -1
np.random.seed(0)
X = np.r_[np.random.randn(10, 2) + [1, 1], np.random.randn(10, 2)]
y = [1] * 10 + [-1] * 10
sample_weight = 100 * np.abs(np.random.randn(20))
# and assign a bigger weight to the first 10 samples (the +1 class)
sample_weight[:10] *= 10

# plot the weighted data points (marker size encodes the sample weight)
xx, yy = np.meshgrid(np.linspace(-4, 5, 500), np.linspace(-4, 5, 500))
plt.figure()
plt.scatter(X[:, 0], X[:, 1], c=y, s=sample_weight, alpha=0.9,
            cmap=plt.cm.bone, edgecolor='black')

# fit the unweighted model
clf = linear_model.SGDClassifier(alpha=0.01, max_iter=100)
clf.fit(X, y)
Z = clf.decision_function(np.c_[xx.ravel(), yy.ravel()])
Z = Z.reshape(xx.shape)
# the decision boundary is the zero level set of the decision function
no_weights = plt.contour(xx, yy, Z, levels=[0], linestyles=['solid'])

# fit the weighted model
clf = linear_model.SGDClassifier(alpha=0.01, max_iter=100)
clf.fit(X, y, sample_weight=sample_weight)
Z = clf.decision_function(np.c_[xx.ravel(), yy.ravel()])
Z = Z.reshape(xx.shape)
samples_weights = plt.contour(xx, yy, Z, levels=[0], linestyles=['dashed'])

plt.legend([no_weights.collections[0], samples_weights.collections[0]],
           ["no weights", "with weights"], loc="lower left")

plt.xticks(())
plt.yticks(())
plt.show()
| bsd-3-clause |
Openergy/oplus | oplus/err.py | 1 | 7074 | import os
import pandas as pd
from . import CONF
class Err:
    """Parser for an EnergyPlus .err file.

    Parsed messages are stored in a DataFrame whose columns form a
    two-level MultiIndex (simulation step, error category); general
    information lines are collected in :attr:`info`.
    """

    WARNING = "Warning"
    FATAL = "Fatal"
    SEVERE = "Severe"
    CATEGORIES = (WARNING, FATAL, SEVERE)

    def __init__(self, path):
        """
        Parameters
        ----------
        path: str
            path of the .err file to parse

        Raises
        ------
        FileNotFoundError
            if no file exists at the given path
        """
        if not os.path.isfile(path):
            raise FileNotFoundError("No file at given path: '%s'." % path)
        self.path = path
        self._df = None  # multi-index dataframe, filled by _parse
        self.info = {}
        self._parse()
        self._simulation_step_list = list(set(self._df.columns.levels[0]))

    @property
    def content(self):
        """Raw text content of the .err file."""
        with open(self.path, encoding=CONF.encoding) as f:
            return f.read()

    @staticmethod
    def _next_index(column):
        """Return the next free integer row label of a sparse error column."""
        filled = column.dropna()
        if len(filled.index) == 0:
            return 0
        return filled.index[-1] + 1

    def _store_step(self, simulation_step, step_df, max_nb):
        """Append ``step_df`` to ``self._df`` under a (step, category) MultiIndex."""
        columns = pd.MultiIndex.from_product([(simulation_step,), step_df.columns])
        multi_step_df = pd.DataFrame(index=range(0, max_nb), columns=columns)
        multi_step_df[simulation_step] = step_df
        if self._df is None:
            # first step encountered (or file never contains "Beginning")
            self._df = multi_step_df
        else:
            self._df = self._df.join(multi_step_df)

    def _parse(self):
        """Parse the .err file into ``self._df`` and ``self.info``."""
        # todo: manage information with ahead "*************"
        # todo: manage "error flag":
        # todo:   it corresponds to error type for each error_category lines_s.split("=")[0] --> MultiIndex
        # first step: warmup
        simulation_step = "Warmup"
        max_nb = int(1e4)  # pre-allocated number of message rows per step
        step_df = pd.DataFrame(columns=self.CATEGORIES, index=range(0, max_nb))
        category, index_nb = None, None
        with open(self.path, encoding=CONF.encoding) as f:
            for row_nb, content in enumerate(f):
                line_s = content.rstrip("\n")

                # GET GENERIC INFORMATION
                if "Program Version,EnergyPlus" in line_s:
                    self.info["EnergyPlus Simulation Version"] = str(line_s.split(",")[2].rstrip("Version "))
                    if CONF.eplus_version < (9, 0, 0):
                        # todo: manage properly in compatibility
                        self.info["Idd_Version"] = str(line_s.split("IDD_Version ")[1])
                    else:
                        # bug fix: this assignment used to run unconditionally,
                        # clobbering the value parsed just above for < 9.0.0
                        self.info["Idd_Version"] = None
                elif "EnergyPlus Warmup Error Summary" in line_s:
                    self.info["EnergyPlus Warmup Error Summary"] = str(line_s.split(". ")[1])
                elif "EnergyPlus Sizing Error Summary" in line_s:
                    self.info["EnergyPlus Sizing Error Summary"] = str(line_s.split(". ")[1])
                elif "EnergyPlus Completed Successfully" in line_s:
                    self.info["EnergyPlus Completed Successfully"] = str(line_s.split("--")[1])

                # PARSE MESSAGES
                elif "************* Beginning" in line_s:
                    # flush the current step and start a new one
                    self._store_step(simulation_step, step_df, max_nb)
                    simulation_step = line_s.split("Beginning ")[1]
                    step_df = pd.DataFrame(columns=self.CATEGORIES, index=range(0, max_nb))
                elif "** Warning **" in line_s:
                    category = self.WARNING
                    index_nb = self._next_index(step_df[category])
                    step_df[category].loc[index_nb] = str(line_s.split("** Warning **")[1])
                elif "** Fatal **" in line_s:
                    category = self.FATAL
                    index_nb = self._next_index(step_df[category])
                    step_df[category].loc[index_nb] = str(line_s.split("** Fatal **")[1])
                elif "** Severe **" in line_s:
                    category = self.SEVERE
                    index_nb = self._next_index(step_df[category])
                    step_df[category].loc[index_nb] = str(line_s.split("** Severe **")[1])
                elif "** ~~~ **" in line_s:
                    # continuation line: category and index_nb are necessarily
                    # set here, append to the last stored message
                    step_df[category].loc[index_nb] += "\n" + str(line_s.split("** ~~~ **")[1])

        # save the last (possibly only) step
        self._store_step(simulation_step, step_df, max_nb)
        self.info = pd.Series(self.info, index=self.info.keys())

    def get_data(self, simulation_step=None, error_category=None):
        """
        Return the parsed messages, with all-empty rows dropped.

        Parameters
        ----------
        simulation_step: if not given, returns a raw report
        error_category: if only one argument is specified, swaps dataframe report

        Raises
        ------
        RuntimeError
            if simulation_step or error_category is not known
        """
        if simulation_step is None and error_category is None:
            return self._df.dropna(axis="rows", how="all")
        if simulation_step is not None:
            if simulation_step not in self._simulation_step_list:
                raise RuntimeError("The simulation_step '%s' is not referred in the error file." % simulation_step)
            if error_category is not None:
                if error_category not in self.CATEGORIES:
                    raise RuntimeError("The error_cat '%s' is wrong." % error_category)
                # NOTE(review): from_product over two *strings* iterates their
                # characters; kept as-is for backward compatibility — confirm
                # the intended column layout with callers.
                iterables = [simulation_step, error_category]
                columns = pd.MultiIndex.from_product(iterables)
                series = self._df[simulation_step][error_category].dropna(axis="rows", how="all")
                df = pd.DataFrame(index=series.index, columns=columns)
                df[simulation_step] = series
                return df
            return self._df[simulation_step].dropna(axis="rows", how="all")
        if error_category is not None:
            if error_category not in self.CATEGORIES:
                raise RuntimeError("The error_category '%s' is wrong." % error_category)
            # swap levels so the category becomes the outer column level
            df = self._df.copy()
            df.columns = df.columns.swaplevel(0, 1)
            return df[error_category].dropna(axis="rows", how="all")
| mpl-2.0 |
parloma/Prensilia | prensilia/Exp_OUTPUT_WINDOWS.py | 1 | 5782 | #Parameters: RF First Classification Layer - RF Second Classification Layer - Name of the volunteer
#Import required
import sys
from os import mkdir,sep,path
#import numpy as np
#from cv2 import *
#from hand_grabber import PyOpenNIHandGrabber
#from pose_recognizer import PyPoseRecognizer
import thread
import xml.etree.ElementTree as ET
#import Image
from random import *
import time
#from my_fun import *
#from sklearn.externals import joblib
from robot_hand import *
import base64
import datetime
import socket
from Crypto.Cipher import AES # encryption library
# the character used for padding--with a block cipher such as AES, the value
# you encrypt must be a multiple of BLOCK_SIZE in length. This character is
# used to ensure that your value is always a multiple of BLOCK_SIZE
PADDING = '{'
# NOTE(review): 64 is the padded *message* length used by the protocol, not
# the AES block size (16); base64 of 64 bytes is 88 chars, matching MSGLEN.
BLOCK_SIZE = 64

# one-liner to sufficiently pad the text to be encrypted
# (only decryption is used by this server; `pad` is kept for symmetry)
pad = lambda s: s + (BLOCK_SIZE - len(s) % BLOCK_SIZE) * PADDING

# one-liners to encrypt/encode and decrypt/decode a string
# encrypt with AES, encode with base64
DecodeAES = lambda c, e: c.decrypt(base64.b64decode(e)).rstrip(PADDING)

# hand-sign identifiers the remote client is allowed to request
SIGN_LIST = ['A','B','C','D','F','H','I','K','L','O','P2','S1','V','W','X','Y']
SIGN_INDEX = 0
SIGN_SIZE = 16
MAX_POSES = 100

# Communication Parameters
PASSCODE = 'PARLOMA3'*2  # 16-character AES key
SIGN_WINDOW_NUMBER = 5
#IP = 'localhost'
#IP = '10.10.0.1'
IP = '192.168.85.201'
#PORT = 8089
PORT = 9091
MSGLEN = 88  # bytes per encrypted+base64-encoded command message
class ServerSocket:
    # TCP server that receives AES-encrypted sign commands and drives the
    # Prensilia robot hand.  Python 2 code (print statements, `thread`).

    def __init__(self, IP, PORT, PASSCODE, ser, name):
        # NOTE(review): PASSCODE is accepted but never used here; the AES
        # cipher is built by the caller and passed to start() instead.
        self.server_socket = socket.socket(socket.AF_INET,socket.SOCK_STREAM)
        self.server_socket.bind((IP,PORT))
        self.server_socket.listen(1)
        self.hand = Hand(ser)
        #self.hand.perform_hardCalibration()
        self.hand.perform_softCalibration()
        self.hand.perform_rest()
        #self.hand = 0
        self.name = name  # per-volunteer output folder for the log files

    def start(self, crypt):
        # Warm up the hand, then accept clients forever, spawning one
        # handler thread per connection.
        print "Initializing..."
        #Initializing -> strangely the first sign won't be performed
        res = self.hand.perform_sign('A')
        res = self.hand.perform_sign('REST')
        print 'Ready! Waiting on IP '+IP+' and PORT '+str(PORT)
        #return
        while True:
            client_socket, address = self.server_socket.accept();
            print 'Listening to client, address:'
            print address
            thread.start_new_thread(self.handController, (self.hand, crypt, client_socket, address))

    def handController(self, hand, crypt, client_socket, address, *args):
        # Per-connection loop: read exactly MSGLEN bytes, decrypt, validate
        # against SIGN_LIST, perform the sign and append to the log files.
        #actual_sign = 'rest'
        #actual_counter = 0
        while True:
            msg = ''
            while len(msg) < MSGLEN:
                chunk = client_socket.recv(MSGLEN-len(msg))
                if chunk == '':
                    # recv returning '' means the peer closed the socket
                    print "Connection to Client is DOWN!"
                    print address
                    client_socket.close()
                    return
                msg = msg + chunk
            buf = msg
            if len(buf) != MSGLEN: # client closed or network error
                print 'Client Closed or Communication Error'
                print address
                client_socket.close()
                return
            else:
                buf = DecodeAES(crypt, buf)
                print buf + ' RECEIVED'
                if buf == 'quit':
                    print 'Ok, Quitting'
                    return
                else:
                    x = buf in SIGN_LIST
                    if x == False:
                        print 'Invalid sign received'
                        out_file = open(self.name+sep+"resultsPerformedHand.txt","a")
                        out_file.write('Invalid sign ' + buf + ' received! \n')
                        out_file.close()
                    else:
                        res = hand.perform_sign(buf)
                        #time.sleep(4)
                        hand.perform_rest()
                        out_file = open(self.name+sep+"resultsPerformedHand.txt","a")
                        out_file.write(res + '\n')
                        out_file.close()
                        out_file = open(self.name+sep+"resultsReceivedInternet.txt","a")
                        out_file.write(buf + '\t' + buf + '\n')
                        out_file.close()
                        #if actual_sign == buf:
                        #actual_counter += 1
                        # if actual_counter == SIGN_WINDOW_NUMBER:
                        # hand.perform_sign(buf)
                        # print 'Sign Performed'
                        #else:
                        #actual_sign = buf
                        #actual_counter = 1
#main
if __name__=="__main__":
    # Usage: python script_name <serial_port> <volunteer_name>
    if len(sys.argv)!=3:
        print("Usage:Client > python script_name serial volunteer")
    else:
        if not path.exists(sys.argv[2]):
            mkdir(sys.argv[2])
            print "New folder created for this experiment"
        # (re)create the two per-volunteer log files with their headers
        out_file = open(sys.argv[2]+sep+"resultsPerformedHand.txt","w")
        out_file.write('#Reference pose of PRENSILIA Hand wrt specified poses \n')
        out_file.write('#Joints order middle, ring, little, thumb, thumb_o \n')
        out_file.close()
        out_file = open(sys.argv[2]+sep+"resultsReceivedInternet.txt","w")
        out_file.write('Sign received from internet' + '\t' + 'Actual joints positions' + '\n')
        out_file.close()
        # NOTE(review): the 'P'*16 argument is the unused PASSCODE parameter
        # of ServerSocket.__init__; decryption actually uses AES.new(PASSCODE)
        server = ServerSocket(IP, PORT, 'P'*16, sys.argv[1], sys.argv[2])
        crypt = AES.new(PASSCODE)
        server.start(crypt)
        #while True:
        # Accept and dispatch connection from client
        #print 'Waiting on IP '+IP+' and PORT '+str(PORT)
        #(SocketClient, address) = server.server_socket.accept()
        #handController(SocketClient, address, crypt)
jlegendary/scikit-learn | examples/model_selection/grid_search_digits.py | 227 | 2665 | """
============================================================
Parameter estimation using grid search with cross-validation
============================================================
This examples shows how a classifier is optimized by cross-validation,
which is done using the :class:`sklearn.grid_search.GridSearchCV` object
on a development set that comprises only half of the available labeled data.
The performance of the selected hyper-parameters and trained model is
then measured on a dedicated evaluation set that was not used during
the model selection step.
More details on tools available for model selection can be found in the
sections on :ref:`cross_validation` and :ref:`grid_search`.
"""
from __future__ import print_function
from sklearn import datasets
from sklearn.cross_validation import train_test_split
from sklearn.grid_search import GridSearchCV
from sklearn.metrics import classification_report
from sklearn.svm import SVC
print(__doc__)

# Loading the Digits dataset
digits = datasets.load_digits()

# To apply a classifier on this data, we need to flatten the image, to
# turn the data in a (samples, feature) matrix:
n_samples = len(digits.images)
X = digits.images.reshape((n_samples, -1))
y = digits.target

# Split the dataset in two equal parts
X_train, X_test, y_train, y_test = train_test_split(
    X, y, test_size=0.5, random_state=0)

# Set the parameters by cross-validation
tuned_parameters = [{'kernel': ['rbf'], 'gamma': [1e-3, 1e-4],
                     'C': [1, 10, 100, 1000]},
                    {'kernel': ['linear'], 'C': [1, 10, 100, 1000]}]

scores = ['precision', 'recall']

for score in scores:
    print("# Tuning hyper-parameters for %s" % score)
    print()

    clf = GridSearchCV(SVC(C=1), tuned_parameters, cv=5,
                       scoring='%s_weighted' % score)
    clf.fit(X_train, y_train)

    print("Best parameters set found on development set:")
    print()
    print(clf.best_params_)
    print()
    print("Grid scores on development set:")
    print()
    # NOTE: the loop variable `scores` shadows the outer metric list; harmless
    # here because the outer loop re-reads it only between full iterations.
    for params, mean_score, scores in clf.grid_scores_:
        print("%0.3f (+/-%0.03f) for %r"
              % (mean_score, scores.std() * 2, params))
    print()

    print("Detailed classification report:")
    print()
    print("The model is trained on the full development set.")
    print("The scores are computed on the full evaluation set.")
    print()
    # evaluate on the held-out half of the data
    y_true, y_pred = y_test, clf.predict(X_test)
    print(classification_report(y_true, y_pred))
    print()

# Note the problem is too easy: the hyperparameter plateau is too flat and the
# output model is the same for precision and recall with ties in quality.
| bsd-3-clause |
trela/qikify | qikify/controllers/SVM.py | 1 | 1804 | """Support Vector Machine implementation.
"""
from sklearn.grid_search import GridSearchCV
from sklearn.svm import SVC
from qikify.helpers import standardize
class SVM(object):
"""Support Vector Machine implementation.
"""
def __init__(self, grid_search = False):
"""Support Vector Machine implementation.
Parameters
----------
grid_search: boolean
Determine whether the SVM will perform a grid search to tune
hyperparameters.
"""
self.model = None
self.scale_dict = None
self.grid_search = grid_search
def fit(self, chips):
"""Train a support vector machine model.
Parameters
----------
chips: list
Contains a stored array of Chip objects
"""
X = [chip.LCT.values() for chip in chips]
gnd = [chip.gnd for chip in chips]
if grid_search:
grid = { 'C': [1, 5, 10, 50, 100], \
'gamma': [0.0001, 0.0005, 0.001, 0.005, 0.01, 0.1] }
print 'SVM: Grid search using parameter grid: ', grid
self.model = GridSearchCV(SVC(kernel='rbf'), grid, n_jobs=4, \
fit_params={'class_weight': {1 : 1, -1 : 1}})
else:
self.model = SVC()
self.scale_factors, Xstd = standardize(X)
self.model.fit(Xstd, gnd)
def predict(self, chip):
"""Use the trained SVM model to predict.
Parameters:
----------
chip: chip model object
Contains a chip's test data
"""
X = standardize(chip.LCT.values(), self.scale_factors)
return self.model.predict(X)
| mit |
flightgong/scikit-learn | sklearn/ensemble/tests/test_partial_dependence.py | 44 | 7031 | """
Testing for the partial dependence module.
"""
import numpy as np
from numpy.testing import assert_array_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import if_matplotlib
from sklearn.ensemble.partial_dependence import partial_dependence
from sklearn.ensemble.partial_dependence import plot_partial_dependence
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.ensemble import GradientBoostingRegressor
from sklearn import datasets
# toy sample: two 2-D clusters, linearly separable
X = [[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]]
y = [-1, -1, -1, 1, 1, 1]
# held-out points and their expected labels (not used by every test here)
T = [[-1, -1], [2, 2], [3, 2]]
true_result = [-1, 1, 1]

# also load the boston dataset
boston = datasets.load_boston()

# also load the iris dataset
iris = datasets.load_iris()
def test_partial_dependence_classifier():
    """Test partial dependence for classifier """
    est = GradientBoostingClassifier(n_estimators=10, random_state=1)
    est.fit(X, y)

    pdp, axes = partial_dependence(est, [0], X=X, grid_resolution=5)

    # only 4 grid points are produced because X[:, 0] has 4 unique values
    assert pdp.shape == (1, 4)
    assert axes[0].shape[0] == 4

    # passing an explicit grid must give identical values and no axes
    grid = np.unique(np.asarray(X)[:, 0])
    pdp_2, axes = partial_dependence(est, [0], grid=grid)
    assert axes is None
    assert_array_equal(pdp, pdp_2)
def test_partial_dependence_multiclass():
    """Test partial dependence for multi-class classifier """
    est = GradientBoostingClassifier(n_estimators=10, random_state=1)
    est.fit(iris.data, iris.target)

    resolution = 25
    pdp, axes = partial_dependence(
        est, [0], X=iris.data, grid_resolution=resolution)

    # one row of partial-dependence values per class
    assert pdp.shape == (est.n_classes_, resolution)
    assert len(axes) == 1
    assert axes[0].shape[0] == resolution
def test_partial_dependence_regressor():
    """Test partial dependence for regressor """
    est = GradientBoostingRegressor(n_estimators=10, random_state=1)
    est.fit(boston.data, boston.target)

    resolution = 25
    pdp, axes = partial_dependence(est, [0], X=boston.data,
                                   grid_resolution=resolution)

    # regressors yield a single partial-dependence row
    assert pdp.shape == (1, resolution)
    assert axes[0].shape[0] == resolution
def test_partial_dependecy_input():
    """Test input validation of partial dependence. """
    clf = GradientBoostingClassifier(n_estimators=10, random_state=1)
    clf.fit(X, y)

    # exactly one of `grid` and `X` must be provided
    assert_raises(ValueError, partial_dependence,
                  clf, [0], grid=None, X=None)

    assert_raises(ValueError, partial_dependence,
                  clf, [0], grid=[0, 1], X=X)

    # first argument must be an instance of BaseGradientBoosting
    assert_raises(ValueError, partial_dependence,
                  {}, [0], X=X)

    # Gradient boosting estimator must be fit
    assert_raises(ValueError, partial_dependence,
                  GradientBoostingClassifier(), [0], X=X)

    # target feature index out of range (negative or too large)
    assert_raises(ValueError, partial_dependence, clf, [-1], X=X)

    assert_raises(ValueError, partial_dependence, clf, [100], X=X)

    # wrong ndim for grid
    grid = np.random.rand(10, 2, 1)
    assert_raises(ValueError, partial_dependence, clf, [0], grid=grid)
@if_matplotlib
def test_plot_partial_dependence():
    """Test partial dependence plot function. """
    clf = GradientBoostingRegressor(n_estimators=10, random_state=1)
    clf.fit(boston.data, boston.target)

    # one axis is expected per requested feature or feature pair
    grid_resolution = 25
    fig, axs = plot_partial_dependence(clf, boston.data, [0, 1, (0, 1)],
                                       grid_resolution=grid_resolution,
                                       feature_names=boston.feature_names)
    assert len(axs) == 3
    assert all(ax.has_data for ax in axs)

    # check with str features and array feature names
    fig, axs = plot_partial_dependence(clf, boston.data, ['CRIM', 'ZN',
                                                          ('CRIM', 'ZN')],
                                       grid_resolution=grid_resolution,
                                       feature_names=boston.feature_names)

    assert len(axs) == 3
    assert all(ax.has_data for ax in axs)

    # check with list feature_names
    feature_names = boston.feature_names.tolist()
    fig, axs = plot_partial_dependence(clf, boston.data, ['CRIM', 'ZN',
                                                          ('CRIM', 'ZN')],
                                       grid_resolution=grid_resolution,
                                       feature_names=feature_names)
    assert len(axs) == 3
    assert all(ax.has_data for ax in axs)
@if_matplotlib
def test_plot_partial_dependence_input():
    """Test partial dependence plot function input checks. """
    clf = GradientBoostingClassifier(n_estimators=10, random_state=1)

    # not fitted yet
    assert_raises(ValueError, plot_partial_dependence,
                  clf, X, [0])
    clf.fit(X, y)

    # X with zero feature columns is rejected
    assert_raises(ValueError, plot_partial_dependence,
                  clf, np.array(X)[:, :0], [0])

    # first argument must be an instance of BaseGradientBoosting
    assert_raises(ValueError, plot_partial_dependence,
                  {}, X, [0])

    # must be larger than -1
    assert_raises(ValueError, plot_partial_dependence,
                  clf, X, [-1])

    # too large feature value
    assert_raises(ValueError, plot_partial_dependence,
                  clf, X, [100])

    # str feature but no feature_names
    assert_raises(ValueError, plot_partial_dependence,
                  clf, X, ['foobar'])

    # not valid features value
    assert_raises(ValueError, plot_partial_dependence,
                  clf, X, [{'foo': 'bar'}])
@if_matplotlib
def test_plot_partial_dependence_multiclass():
    """Test partial dependence plot function on multi-class input. """
    clf = GradientBoostingClassifier(n_estimators=10, random_state=1)
    clf.fit(iris.data, iris.target)

    grid_resolution = 25
    fig, axs = plot_partial_dependence(clf, iris.data, [0, 1],
                                       label=0,
                                       grid_resolution=grid_resolution)
    assert len(axs) == 2
    assert all(ax.has_data for ax in axs)

    # now with symbol labels (refit so classes_ holds strings, not ints)
    target = iris.target_names[iris.target]
    clf = GradientBoostingClassifier(n_estimators=10, random_state=1)
    clf.fit(iris.data, target)

    grid_resolution = 25
    fig, axs = plot_partial_dependence(clf, iris.data, [0, 1],
                                       label='setosa',
                                       grid_resolution=grid_resolution)
    assert len(axs) == 2
    assert all(ax.has_data for ax in axs)

    # label not in gbrt.classes_
    assert_raises(ValueError, plot_partial_dependence,
                  clf, iris.data, [0, 1], label='foobar',
                  grid_resolution=grid_resolution)

    # label not provided (required for multi-class estimators)
    assert_raises(ValueError, plot_partial_dependence,
                  clf, iris.data, [0, 1],
                  grid_resolution=grid_resolution)
| bsd-3-clause |
MadsJensen/agency_connectivity | phase_analysis_no_steps_x-freq.py | 1 | 3644 | # -*- coding: utf-8 -*-
"""
@author: mje
@emai: mads@cnru.dk
"""
import numpy as np
# import mne
import matplotlib.pyplot as plt
import pandas as pd
from itertools import combinations
from my_settings import *
plt.style.use("ggplot")

# behavioural results; rows are filtered below by subject and condition and
# provide the `binding` and `trial_status` columns (presumably one row per
# trial — TODO confirm against the CSV)
b_df = pd.read_csv(
    "/Users/au194693/projects/agency_connectivity/data/behavioural_results.csv")
def calc_ISPC_time_between(data, chan_1=52, chan_2=1):
    """Per-trial inter-site phase clustering between two channels.

    Averages exp(i * phase difference) over the module-level sample window
    ``window_start:window_end`` and returns its magnitude, one value per
    trial (first axis of ``data``).
    """
    n_trials = data.shape[0]
    ispc = np.empty(n_trials)
    for trial in range(n_trials):
        phase_a = np.angle(data[trial, chan_1, window_start:window_end])
        phase_b = np.angle(data[trial, chan_2, window_start:window_end])
        ispc[trial] = np.abs(np.mean(np.exp(1j * (phase_a - phase_b))))
    return ispc
# channel-index pairs between which ISPC is computed, keyed by label name
label_dict = {"ba_1_4_r": [1, 52],
              "ba_1_4_l": [0, 51],
              "ba_4_4": [51, 52],
              "ba_1_1": [0, 1]}
# "ba_4_39_l": [49, 51],
# "ba_4_39_r": [50, 52],
# "ba_39_39": [49, 50]}

bands = ["delta", "theta", "alpha", "beta", "gamma1", "gamma2"]
# bands = ["beta"]
bands_numbers = list(np.arange(0, len(bands), 1))
cross_band_combinations = list(combinations(bands_numbers, 2))

# subjects = ["p9"]
labels = list(np.load(data_path + "label_names.npy"))

# sample times from -2 s to +2 s (built in ms, converted to seconds)
times = np.arange(-2000, 2001, 1.95325)
times = times / 1000.

# sample window used by calc_ISPC_time_between
window_start, window_end = 768, 1024
results_all = pd.DataFrame()
# Per subject: compute ISPC for the involuntary ("testing") trials, then the
# voluntary ("learning") trials, appending one DataFrame row-block per label.
# NOTE(review): loop nesting reconstructed from context — `comb` is never
# used inside its loop body, and `k = 3` overrides enumerate to force the
# beta band on every iteration; both look like leftover debugging — confirm.
for subject in subjects:
    print("Working on: " + subject)
    # ht_vol = np.load(tf_folder + "/%s_vol_HT-comp.npy" % subject)
    for comb in cross_band_combinations:
        ht_invol = np.load(tf_folder + "%s_inv_HT-comp.npy" % subject)
        b_tmp = b_df[(b_df.subject == subject) & (b_df.condition == "invol"
                     )].reset_index()
        for k, band in enumerate(bands):
            k = 3
            # results_invol = {}
            # keep the last 89 trials only, select the (forced) band axis
            ht_invol_band = ht_invol[-89:, :, :, k]
            for lbl in label_dict.keys():
                res = pd.DataFrame(
                    calc_ISPC_time_between(
                        ht_invol_band,
                        chan_1=label_dict[lbl][0], chan_2=label_dict[lbl][1]),
                    columns=["ISPC"])
                res["subject"] = subject
                res["label"] = lbl
                res["binding"] = b_tmp.binding
                res["trial_status"] = b_tmp.trial_status
                res["condition"] = "testing"
                res["band"] = band
                res["trial_nr"] = np.arange(2, 91, 1)
                results_all = results_all.append(res)

    print("Working on: " + subject)
    # ht_vol = np.load(tf_folder + "/%s_vol_HT-comp.npy" %
    #                  subject)
    ht_vol = np.load(tf_folder + "%s_vol_HT-comp.npy" % subject)
    b_tmp = b_df[(b_df.subject == subject) & (b_df.condition == "vol"
                 )].reset_index()
    for k, band in enumerate(bands):
        k = 3
        # Results_vol = {}
        ht_vol_band = ht_vol[-89:, :, :, k]
        for lbl in label_dict.keys():
            res = pd.DataFrame(
                calc_ISPC_time_between(
                    ht_vol_band,
                    chan_1=label_dict[lbl][0], chan_2=label_dict[lbl][1]),
                columns=["ISPC"])
            res["subject"] = subject
            res["label"] = lbl
            res["binding"] = b_tmp.binding
            res["trial_status"] = b_tmp.trial_status
            res["condition"] = "learning"
            res["band"] = band
            res["trial_nr"] = np.arange(2, 91, 1)
            results_all = results_all.append(res)
thjashin/tensorflow | tensorflow/contrib/learn/python/learn/dataframe/transforms/in_memory_source.py | 82 | 6157 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Sources for numpy arrays and pandas DataFrames."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.learn.python.learn.dataframe import transform
from tensorflow.contrib.learn.python.learn.dataframe.queues import feeding_functions
class BaseInMemorySource(transform.TensorFlowTransform):
"""Abstract parent class for NumpySource and PandasSource."""
def __init__(self,
data,
num_threads=None,
enqueue_size=None,
batch_size=None,
queue_capacity=None,
shuffle=False,
min_after_dequeue=None,
seed=None,
data_name="in_memory_data"):
super(BaseInMemorySource, self).__init__()
self._data = data
self._num_threads = 1 if num_threads is None else num_threads
self._batch_size = (32 if batch_size is None else batch_size)
self._enqueue_size = max(1, int(self._batch_size / self._num_threads)
) if enqueue_size is None else enqueue_size
self._queue_capacity = (self._batch_size * 10 if queue_capacity is None else
queue_capacity)
self._shuffle = shuffle
self._min_after_dequeue = (batch_size if min_after_dequeue is None else
min_after_dequeue)
self._seed = seed
self._data_name = data_name
@transform.parameter
def data(self):
return self._data
@transform.parameter
def num_threads(self):
return self._num_threads
@transform.parameter
def enqueue_size(self):
return self._enqueue_size
@transform.parameter
def batch_size(self):
return self._batch_size
@transform.parameter
def queue_capacity(self):
return self._queue_capacity
@transform.parameter
def shuffle(self):
return self._shuffle
@transform.parameter
def min_after_dequeue(self):
return self._min_after_dequeue
@transform.parameter
def seed(self):
return self._seed
@transform.parameter
def data_name(self):
return self._data_name
@property
def input_valency(self):
return 0
def _apply_transform(self, transform_input, **kwargs):
queue = feeding_functions.enqueue_data(self.data,
self.queue_capacity,
self.shuffle,
self.min_after_dequeue,
num_threads=self.num_threads,
seed=self.seed,
name=self.data_name,
enqueue_size=self.enqueue_size,
num_epochs=kwargs.get("num_epochs"))
dequeued = queue.dequeue_many(self.batch_size)
# TODO(jamieas): dequeue and dequeue_many will soon return a list regardless
# of the number of enqueued tensors. Remove the following once that change
# is in place.
if not isinstance(dequeued, (tuple, list)):
dequeued = (dequeued,)
# pylint: disable=not-callable
return self.return_type(*dequeued)
class NumpySource(BaseInMemorySource):
  """A zero-input Transform that produces a single column from a numpy array."""

  @property
  def name(self):
    # Human-readable Transform name.
    return "NumpySource"

  @property
  def _output_names(self):
    # Row index plus the single array value column.
    return ("index", "value")
class OrderedDictNumpySource(BaseInMemorySource):
  """A zero-input Transform that produces Series from a dict of numpy arrays."""

  def __init__(self,
               ordered_dict_of_arrays,
               num_threads=None,
               enqueue_size=None,
               batch_size=None,
               queue_capacity=None,
               shuffle=False,
               min_after_dequeue=None,
               seed=None,
               data_name="pandas_data"):
    # NOTE(review): the default data_name "pandas_data" looks copy-pasted
    # from PandasSource; "numpy_data" would seem more apt -- confirm before
    # changing, since the name is used for the queue's op names.
    if "index" in ordered_dict_of_arrays.keys():
      # "index" is emitted as the first output column, so a data column of
      # that name would collide with it.
      raise ValueError("Column name `index` is reserved.")
    super(OrderedDictNumpySource, self).__init__(ordered_dict_of_arrays,
                                                 num_threads, enqueue_size,
                                                 batch_size, queue_capacity,
                                                 shuffle, min_after_dequeue,
                                                 seed, data_name)

  @property
  def name(self):
    return "OrderedDictNumpySource"

  @property
  def _output_names(self):
    # Row index followed by one column per dict key, in insertion order.
    return tuple(["index"] + list(self._data.keys()))
class PandasSource(BaseInMemorySource):
  """A zero-input Transform that produces Series from a DataFrame."""

  def __init__(self,
               dataframe,
               num_threads=None,
               enqueue_size=None,
               batch_size=None,
               queue_capacity=None,
               shuffle=False,
               min_after_dequeue=None,
               seed=None,
               data_name="pandas_data"):
    if "index" in dataframe.columns:
      # "index" is emitted as the first output column, so a DataFrame column
      # of that name would collide with it.
      raise ValueError("Column name `index` is reserved.")
    super(PandasSource, self).__init__(dataframe, num_threads, enqueue_size,
                                       batch_size, queue_capacity, shuffle,
                                       min_after_dequeue, seed, data_name)

  @property
  def name(self):
    return "PandasSource"

  @property
  def _output_names(self):
    # Row index followed by the DataFrame's columns.
    return tuple(["index"] + self._data.columns.tolist())
| apache-2.0 |
kmike/scikit-learn | sklearn/metrics/tests/test_score_objects.py | 4 | 3898 | import pickle
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_raises
from sklearn.metrics import f1_score, r2_score, auc_score, fbeta_score
from sklearn.metrics.cluster import adjusted_rand_score
from sklearn.metrics import SCORERS, Scorer
from sklearn.svm import LinearSVC
from sklearn.cluster import KMeans
from sklearn.linear_model import Ridge, LogisticRegression
from sklearn.tree import DecisionTreeClassifier
from sklearn.datasets import make_blobs, load_diabetes
from sklearn.cross_validation import train_test_split, cross_val_score
from sklearn.grid_search import GridSearchCV
def test_classification_scores():
    """SCORERS must agree with the metric functions they wrap."""
    X, y = make_blobs(random_state=0)
    X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
    clf = LinearSVC(random_state=0)
    clf.fit(X_train, y_train)
    score1 = SCORERS['f1'](clf, X_test, y_test)
    score2 = f1_score(y_test, clf.predict(X_test))
    assert_almost_equal(score1, score2)
    # test fbeta score that takes an argument
    scorer = Scorer(fbeta_score, beta=2)
    score1 = scorer(clf, X_test, y_test)
    score2 = fbeta_score(y_test, clf.predict(X_test), beta=2)
    assert_almost_equal(score1, score2)
    # test that custom scorer can be pickled
    unpickled_scorer = pickle.loads(pickle.dumps(scorer))
    score3 = unpickled_scorer(clf, X_test, y_test)
    assert_almost_equal(score1, score3)
    # smoke test the repr of the custom scorer.  Fixed: the original called
    # repr(fbeta_score), which exercises the plain metric function rather
    # than Scorer.__repr__.
    repr(scorer)
def test_regression_scores():
    """The 'r2' scorer must agree with a direct r2_score computation."""
    dataset = load_diabetes()
    X_train, X_test, y_train, y_test = train_test_split(
        dataset.data, dataset.target, random_state=0)
    estimator = Ridge().fit(X_train, y_train)
    scorer_value = SCORERS['r2'](estimator, X_test, y_test)
    direct_value = r2_score(y_test, estimator.predict(X_test))
    assert_almost_equal(scorer_value, direct_value)
def test_thresholded_scores():
    """The 'roc_auc' scorer works with decision_function or predict_proba."""
    X, y = make_blobs(random_state=0, centers=2)
    X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
    clf = LogisticRegression(random_state=0)
    clf.fit(X_train, y_train)
    score1 = SCORERS['roc_auc'](clf, X_test, y_test)
    score2 = auc_score(y_test, clf.decision_function(X_test))
    score3 = auc_score(y_test, clf.predict_proba(X_test)[:, 1])
    # AUC is rank based, so decision_function and predict_proba rankings
    # must give the same score.
    assert_almost_equal(score1, score2)
    assert_almost_equal(score1, score3)
    # same for an estimator without decision_function
    clf = DecisionTreeClassifier()
    clf.fit(X_train, y_train)
    score1 = SCORERS['roc_auc'](clf, X_test, y_test)
    score2 = auc_score(y_test, clf.predict_proba(X_test)[:, 1])
    assert_almost_equal(score1, score2)
    # Test that an exception is raised on more than two classes
    X, y = make_blobs(random_state=0, centers=3)
    X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
    clf.fit(X_train, y_train)
    assert_raises(ValueError, SCORERS['roc_auc'], clf, X_test, y_test)
def test_unsupervised_scores():
    """The 'ari' scorer must match adjusted_rand_score on KMeans labels."""
    # Clustering is scored against a known ground truth y; there are no
    # genuinely unsupervised SCORERS yet.
    X, y = make_blobs(random_state=0, centers=2)
    X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
    km = KMeans(n_clusters=3).fit(X_train)
    expected = adjusted_rand_score(y_test, km.predict(X_test))
    assert_almost_equal(SCORERS['ari'](km, X_test, y_test), expected)
def test_raises_on_score_list():
    # test that when a list of scores is returned, we raise proper errors.
    # average=None makes f1_score return one value per class, which a scorer
    # cannot reduce to the single number cross-validation needs.
    X, y = make_blobs(random_state=0)
    f1_scorer_no_average = Scorer(f1_score, average=None)
    clf = DecisionTreeClassifier()
    assert_raises(ValueError, cross_val_score, clf, X, y,
                  scoring=f1_scorer_no_average)
    grid_search = GridSearchCV(clf, scoring=f1_scorer_no_average,
                               param_grid={'max_depth': [1, 2]})
    assert_raises(ValueError, grid_search.fit, X, y)
| bsd-3-clause |
mitocw/content-mit-latex2edx-demo | static/js/jsxgraph/server/fft.py | 2 | 4429 | from JXGServerModule import JXGServerModule
import numpy
import numpy.fft
import wave, struct, uuid
import os, subprocess
import StringIO, gzip, base64
import datetime, math, random
# Should be changed to something more persistent but must be writable by
# the webserver (usually user www-data)
#if not 'MPLCONFIGDIR' in os.environ:
# os.environ['MPLCONFIGDIR'] = '/tmp/'
# os.environ['MPLCONFIGDIR'] = 'C:/xampp/tmp'
#import matplotlib
#import matplotlib.pyplot as plt
class FFT(JXGServerModule):
    """JSXGraph server module exposing FFT/IFFT and audio helpers.

    Handlers are registered on the JSXGraph server response object and
    operate either on plain number lists sent by the client or on wav/ogg
    files stored under a fixed server-side path.
    """

    def __init__(self):
        JXGServerModule.__init__(self)

    def init(self, resp):
        """Register all request handlers on the server response object."""
        resp.addHandler(self.fft, 'function(data) { }')
        resp.addHandler(self.ifft, 'function(data) { }')
        resp.addHandler(self.cutoutrange, 'function(data) { }')
        resp.addHandler(self.makeAudio, 'function(data) { }')
        resp.addHandler(self.loadAudio, 'function(data) { }')
        resp.addHandler(self.sampleifft, 'function(data) { }')
        return

    def fft(self, resp, x):
        """Send back the magnitudes of the real FFT of x."""
        y = numpy.fft.rfft(x)
        y = [abs(v) for v in y]
        resp.addData('y', y)
        return

    def _real(self, val):
        """Return the real part of a complex value."""
        return val.real

    def ifft(self, resp, x):
        """Send back the (real-valued) inverse real FFT of x."""
        y = numpy.fft.irfft(x)
        y = [self._real(v) for v in y]
        resp.addData('y', y)
        return

    def _set0(self, val):
        """Return 0 regardless of input.

        Fixed: this was defined without ``self`` and could never have been
        called as a method.  It is currently unused.
        """
        return 0

    def sampleifft(self, resp, name, s, e, factor):
        """Band-damped resynthesis of a stored wav file.

        Reads <name>.wav, scales all FFT coefficients outside the index
        band [s, e) by ``factor``, inverse-transforms and returns the
        samples plus an ogg-encoded rendering of the result.
        """
        pathtowavefiles = '/share8/home/michael/www-store/audio/'
        fname = pathtowavefiles + os.path.basename(name) + '.wav'
        w = wave.open(fname, 'r')
        (nchannels, sampwidth, framerate, nframes, comptype,
         compname) = w.getparams()
        frames = w.readframes(nframes * nchannels)
        # 16 bit signed samples, scaled down to roughly [-4, 4].  The format
        # count is parenthesised explicitly; the original relied on string
        # repetition ("%dh" % nframes * nchannels), which only accidentally
        # yields the same sample count.
        out = [v / 8192. for v in
               struct.unpack_from("%dh" % (nframes * nchannels), frames)]
        w.close()
        # apply fft
        x = numpy.fft.rfft(out)
        # damp everything outside the requested band [s, e)
        l = len(x)
        for i in range(0, s):
            x[i] = x[i] * factor
        for i in range(e, l):
            x[i] = x[i] * factor
        # inverse transform back to the time domain
        y = numpy.fft.irfft(x)
        y = [self._real(v) for v in y]
        resp.addData('y', y)
        self.makeAudio(resp, 'ogg', framerate, y)
        return

    # s: 0 < Start < len(x)/2
    # e: 0 < End < len(x)/2
    def cutoutrange(self, resp, x, s, e, factor):
        """Scale all entries of x outside the index band [s, e) by factor."""
        l = len(x)
        for i in range(0, s):
            x[i] = x[i] * factor
        for i in range(e, l):
            x[i] = x[i] * factor
        resp.addData('y', x)
        return

    def loadAudio(self, resp, type, name):
        """Return a stored audio file as base64 ogg plus its raw samples."""
        pathtowavefiles = '/share8/home/michael/www-store/audio/'
        fname = pathtowavefiles + os.path.basename(name) + '.wav'
        fogg = pathtowavefiles + os.path.basename(name) + '.ogg'
        # read ogg -- binary mode, and the handle is closed again (the
        # original leaked the file object and opened the ogg in text mode)
        with open(fogg, "rb") as f:
            audio = f.read()
        audio = "data:audio/ogg;base64," + base64.b64encode(audio)
        resp.addData('audioB64', audio)
        # read wav
        w = wave.open(fname, 'r')
        (nchannels, sampwidth, framerate, nframes, comptype,
         compname) = w.getparams()
        frames = w.readframes(nframes * nchannels)
        out = [v / 8192. for v in
               struct.unpack_from("%dh" % (nframes * nchannels), frames)]
        w.close()
        # step was precomputed for the (disabled) downsampled variant below
        step = math.floor(len(out) / 7500)
        #resp.addData('audioData', [out[i] for i in range(len(out)) if i % step == 0]);
        resp.addData('audioData', out)
        resp.addData('seconds', (nframes * 1.0) / framerate)
        resp.addData('samplerate', framerate)
        return

    def makeAudio(self, resp, type, samplerate, data):
        """Write data to a temporary wav, ogg-encode it and return base64."""
        fname = '/tmp/' + str(uuid.uuid4())
        fogg = fname + '.ogg'
        w = wave.open(fname, 'w')
        w.setnchannels(1)
        w.setsampwidth(2)
        w.setframerate(samplerate)
        w.setnframes(len(data))
        for s in data:
            # clamp to the representable range before 16 bit quantisation
            if s < -4:
                s = -4
            if s > 4:
                s = 4
            w.writeframes(struct.pack('h', int(s * 4000)))
        w.close()
        ogg_process = subprocess.Popen(["oggenc", fname, "-Q", "-o", fogg],
                                       stdout=subprocess.PIPE,
                                       stdin=subprocess.PIPE,
                                       stderr=subprocess.PIPE, shell=False)
        output = ogg_process.communicate('')[0]
        # read the encoded file back in binary mode; the handle is closed
        # again (the original leaked the descriptor)
        with open(fogg, "rb") as f:
            audio = f.read()
        audio = "data:audio/ogg;base64," + base64.b64encode(audio)
        resp.addData('audioB64', audio)
        os.remove(fname)
        os.remove(fogg)
        return
| mit |
maschwanden/boxsimu | tests/test_boxmodelsystem1.py | 1 | 13815 | # -*- coding: utf-8 -*-
"""
Created on Thu Jun 23 10:45:10 2016
@author: Mathias Aschwanden (mathias.aschwanden@gmail.com)
"""
import os
import unittest
from unittest import TestCase
import sys
import copy
import numpy as np
import datetime
from matplotlib import pyplot as plt
if not os.path.abspath(__file__ + "/../../../") in sys.path:
sys.path.append(os.path.abspath(__file__ + "/../../../"))
from boxsimu.entities import Fluid, Variable
from boxsimu.box import Box
from boxsimu.transport import Flow, Flux
from boxsimu.condition import Condition
from boxsimu.system import BoxModelSystem
from boxsimu.process import Process, Reaction
from boxsimu.solver import Solver
from boxsimu import utils
from boxsimu.simulations import boxmodelsystem1
from boxsimu import ur
class BoxModelSystem1Test(TestCase):
"""Test boxsimu framework using a simple box model.
The model tested herein is described in the
book "Modelling Methods for Marine Science".
"""
def setUp(self, *args, **kwargs):
    """Build a fresh two-box (upper/deep ocean) system for every test."""
    self.system = boxmodelsystem1.get_system()
    self.solver = Solver(self.system)
    # Convenience handles to the two boxes.
    self.uo = self.system.boxes.upper_ocean
    self.do = self.system.boxes.deep_ocean
def tearDown(self, *args, **kwargs):
    """Drop all per-test fixtures created in setUp."""
    for attr in ('system', 'solver', 'uo', 'do'):
        delattr(self, attr)
def assertPintQuantityAlmostEqual(self, q1, q2, **kwargs):
    """Assert two pint quantities are (almost) equal, compared in base units.

    Extra keyword arguments (e.g. ``places``) go to assertAlmostEqual.
    """
    base1, base2 = q1.to_base_units(), q2.to_base_units()
    self.assertAlmostEqual(base1.magnitude, base2.magnitude, **kwargs)
    self.assertEqual(base1.units, base2.units)
#####################################################
# Box Functions
#####################################################
def test_mass(self):
    # Box mass = volume * fluid density (values fixed by boxmodelsystem1).
    self.assertEqual(self.system.boxes.upper_ocean.mass,
        3e16*1020*ur.kg)
    self.assertEqual(self.system.boxes.deep_ocean.mass,
        1e18*1030*ur.kg)
def test_volume(self):
    """Box volume = mass / reference density (1000 kg/m^3)."""
    upper_ocean_context = self.system.get_box_context(
        self.system.boxes.upper_ocean)
    deep_ocean_context = self.system.get_box_context(
        self.system.boxes.deep_ocean)
    self.assertEqual(self.system.boxes.upper_ocean.get_volume(
        upper_ocean_context), 3e16*1020/1000.0 * ur.meter**3)
    # Fixed: the deep ocean volume was queried with the *upper* ocean
    # context, leaving deep_ocean_context unused.
    self.assertEqual(self.system.boxes.deep_ocean.get_volume(
        deep_ocean_context), 1e18*1030/1000.0 * ur.meter**3)
def test_concentration(self):
pass
#####################################################
# System Base Functions
#####################################################
def test_box_id(self):
    # Ids assigned by the system: deep_ocean gets 0, upper_ocean gets 1
    # (presumably alphabetical ordering -- confirm in BoxModelSystem).
    self.assertEqual(self.system.boxes.upper_ocean.id, 1)
    self.assertEqual(self.system.boxes.deep_ocean.id, 0)

def test_variable_id(self):
    # po4 is the only tracked variable, so it gets id 0.
    self.assertEqual(self.system.variables.po4.id, 0)

def test_N_boxes(self):
    self.assertEqual(self.system.N_boxes, 2)

def test_N_variables(self):
    self.assertEqual(self.system.N_variables, 1)
def test_context_of_box(self):
upper_ocean = self.system.boxes.upper_ocean
deep_ocean = self.system.boxes.deep_ocean
global_context = self.system.get_box_context()
upper_ocean_context = self.system.get_box_context(upper_ocean)
deep_ocean_context = self.system.get_box_context(deep_ocean)
# Test accessability of the condition attributes
self.assertEqual(global_context.T, 111 * ur.kelvin)
self.assertEqual(upper_ocean_context.T, 333 * ur.kelvin)
self.assertEqual(deep_ocean_context.T, 222 * ur.kelvin)
# Test the accessability of the condition attributes of other boxes:
self.assertEqual(global_context.upper_ocean.condition.T,
333 * ur.kelvin)
self.assertEqual(global_context.deep_ocean.condition.T,
222 * ur.kelvin)
self.assertEqual(upper_ocean_context.global_condition.T,
111 * ur.kelvin)
self.assertEqual(upper_ocean_context.upper_ocean.condition.T,
333 * ur.kelvin)
self.assertEqual(upper_ocean_context.deep_ocean.condition.T,
222 * ur.kelvin)
self.assertEqual(deep_ocean_context.global_condition.T,
111 * ur.kelvin)
self.assertEqual(deep_ocean_context.upper_ocean.condition.T,
333 * ur.kelvin)
self.assertEqual(deep_ocean_context.deep_ocean.condition.T,
222 * ur.kelvin)
def test_context_evaluation_lambda_func(self):
upper_ocean = self.system.boxes.upper_ocean
deep_ocean = self.system.boxes.deep_ocean
global_context = self.system.get_box_context()
upper_ocean_context = self.system.get_box_context(upper_ocean)
deep_ocean_context = self.system.get_box_context(deep_ocean)
lambda1 = lambda t, c: c.T / (111*ur.kelvin)
self.assertEqual(lambda1(0*ur.second, global_context), 1)
self.assertEqual(lambda1(0*ur.second, upper_ocean_context), 3)
self.assertEqual(lambda1(0*ur.second, deep_ocean_context), 2)
lambda2 = lambda t, c: (t / ur.second + (c.T / (111*ur.kelvin)) +
c.upper_ocean.condition.T / (111*ur.kelvin))
self.assertEqual(lambda2(0*ur.second, global_context), 4)
self.assertEqual(lambda2(0*ur.second, upper_ocean_context), 6)
self.assertEqual(lambda2(0*ur.second, deep_ocean_context), 5)
self.assertEqual(lambda2(100*ur.second, global_context), 104)
self.assertEqual(lambda2(100*ur.second, upper_ocean_context), 106)
self.assertEqual(lambda2(100*ur.second, deep_ocean_context), 105)
# Set the variable concentration to nonzero values in a copy of system:
self.system.boxes.upper_ocean.variables.po4.mass = 5 * ur.kg
self.system.boxes.deep_ocean.variables.po4.mass = 10 * ur.kg
global_context = self.system.get_box_context()
upper_ocean_context = self.system.get_box_context(upper_ocean)
deep_ocean_context = self.system.get_box_context(deep_ocean)
lambda3 = lambda t, c: (t / ur.second + (c.T / (111*ur.kelvin)) +
c.upper_ocean.condition.T / (111*ur.kelvin) + (c.po4/ur.kg)**2)
self.assertEqual(lambda3(100*ur.second, upper_ocean_context), 131)
self.assertEqual(lambda3(100*ur.second, deep_ocean_context), 205)
lambda4 = lambda t, c: ((c.po4/ur.kg) /
(c.upper_ocean.variables.po4/ur.kg))
self.assertEqual(lambda4(0*ur.second, upper_ocean_context), 1)
self.assertEqual(lambda4(0*ur.second, deep_ocean_context), 2)
#####################################################
# Fluid and Variable Mass/Concentration Vectors/Matrices
#####################################################
def test_fluid_mass_1Darray(self):
m = self.system.get_fluid_mass_1Darray()
self.assertEqual(m[self.uo.id], 3e16*1020*ur.kg)
self.assertEqual(m[self.do.id], 1e18*1030*ur.kg)
def test_variable_mass_1Darray(self):
po4 = self.system.variables['po4']
m = self.system.get_variable_mass_1Darray(po4)
self.assertEqual(m[self.uo.id], 0*ur.kg)
self.assertEqual(m[self.do.id], 0*ur.kg)
def test_variable_concentration_1Darray(self):
po4 = self.system.variables['po4']
c = self.system.get_variable_concentration_1Darray(po4)
self.assertEqual(c[self.uo.id], 0 * ur.dimensionless)
self.assertEqual(c[self.do.id], 0 * ur.dimensionless)
#####################################################
# Mass Flow Vectors/Matrices
#####################################################
def test_fluid_mass_internal_flow_2Darray(self):
A = self.system.get_fluid_mass_internal_flow_2Darray(0*ur.second,
self.system.flows)
# Check that diagonal elements are zero
for i in range(self.system.N_boxes):
self.assertEqual(A[i][i], 0 * ur.kg/ur.second)
# Check that the other values are set correctly
uo_do_exchange_rate = (6e17*ur.kg/ur.year).to_base_units()
self.assertEqual(A[self.uo.id][self.do.id], uo_do_exchange_rate)
self.assertEqual(A[self.do.id][self.uo.id], uo_do_exchange_rate)
def test_fluid_mass_flow_sink_1Darray(self):
s = self.system.get_fluid_mass_flow_sink_1Darray(0*ur.second,
self.system.flows)
# Upper Ocean Sink: Due to evaporation (3e16)
evaporation_rate = (3e16*ur.kg/ur.year).to_base_units()
self.assertEqual(s[self.uo.id], evaporation_rate)
self.assertEqual(s[self.do.id], 0 * ur.kg/ur.second)
def test_fluid_mass_flow_source_1Darray(self):
q = self.system.get_fluid_mass_flow_source_1Darray(0*ur.second,
self.system.flows)
# Upper Ocean Source: Due to river discharge (3e16)
river_discharge_rate = (3e16*ur.kg/ur.year).to_base_units()
self.assertEqual(q[self.uo.id], river_discharge_rate)
self.assertEqual(q[self.do.id], 0 * ur.kg/ur.second)
#####################################################
# Variable Sink/Source Vectors
#####################################################
def test_variable_internal_flow_2Darray(self):
var = self.system.variables['po4']
f_flow = np.ones(self.system.N_boxes)
A = self.system.get_variable_internal_flow_2Darray(var, 0*ur.second,
f_flow)
# Check that diagonal elements are zero
for i in range(self.system.N_boxes):
self.assertEqual(A[i][i], 0 * ur.kg/ur.year)
self.assertEqual(A[self.uo.id][self.do.id], 0 * ur.kg/ur.year)
self.assertEqual(A[self.do.id][self.uo.id], 0 * ur.kg/ur.year)
#########
# Alternative Test with non-zero concentrations in the boxes:
uo = self.system.boxes.upper_ocean
do = self.system.boxes.deep_ocean
uo.variables.po4.mass = 3.06e11*ur.kg
do.variables.po4.mass = 1.03e14*ur.kg
var = self.system.variables['po4']
f_flow = np.ones(self.system.N_boxes)
A = self.system.get_variable_internal_flow_2Darray(var, 0*ur.second,
f_flow)
# Check that diagonal elements are zero
for i in range(self.system.N_boxes):
self.assertEqual(A[i][i], 0 * ur.kg/ur.second)
# Mass Flow from upper_to_deep ocean:
uo_do_exchange_rate = (6e17*ur.kg/ur.year).to_base_units()
# Mass upper_ocean: 3e16*1020kg = 3.06e19
# PO4 mass upper_ocean: 3.06e11kg
# PO4 concentration upper_ocean: 3.06e11 / 3.06e19 = 1e-8
# Transported PO4 from upper to deep ocean: uo_do_exchange_rate * 1e-8
self.assertPintQuantityAlmostEqual(A[uo.id][do.id],
1e-8*uo_do_exchange_rate, places=2)
# Mass deep_ocean: 1e18*1030kg = 1.03e21
# PO4 mass deep_ocean: 1.03e14kg
# PO4 concentration deep_ocean: 1.03e14 / 1.03e21 = 1e-7
# Transported PO4 from upper to deep ocean: uo_do_exchange_rate * 1e-7
self.assertPintQuantityAlmostEqual(A[do.id][uo.id],
1e-7*uo_do_exchange_rate, places=2)
def test_variable_flow_sink_1Darray(self):
var = self.system.variables['po4']
f_flow = np.ones(self.system.N_boxes)
s = self.system.get_variable_flow_sink_1Darray(var, 0*ur.second, f_flow)
self.assertEqual(s[self.uo.id], 0*ur.kg/ur.second)
self.assertEqual(s[self.do.id], 0*ur.kg/ur.second)
def test_variable_flow_source_1Darray(self):
var = self.system.variables['po4']
q = self.system.get_variable_flow_source_1Darray(var, 0*ur.second)
river_discharge_rate = (3e16*ur.kg/ur.year).to_base_units()
# Upper Ocean Source: 3e16 * 4.6455e-8 = 1393650000.0
self.assertPintQuantityAlmostEqual(q[self.uo.id],
river_discharge_rate*4.6455e-8)
self.assertEqual(q[self.do.id], 0 * ur.kg/ur.second)
def test_variable_process_sink_1Darray(self):
var = self.system.variables['po4']
s = self.system.get_variable_process_sink_1Darray(var, 1*ur.second)
self.assertEqual(s[self.uo.id], 0 * ur.kg/ur.second)
self.assertEqual(s[self.do.id], 0 * ur.kg/ur.second)
def test_variable_process_source_1Darray(self):
var = self.system.variables['po4']
q = self.system.get_variable_process_source_1Darray(var, 0*ur.second)
self.assertEqual(q[self.uo.id], 0 * ur.kg/ur.second)
self.assertEqual(q[self.do.id], 0 * ur.kg/ur.second)
def test_variable_internal_flux_2Darray(self):
var = self.system.variables['po4']
A = self.system.get_variable_internal_flux_2Darray(var, 0*ur.second)
self.assertEqual(A[self.uo.id][self.do.id], 0 * ur.kg/ur.second)
self.assertEqual(A[self.do.id][self.uo.id], 0 * ur.kg/ur.second)
def test_variable_flux_sink_1Darray(self):
var = self.system.variables['po4']
s = self.system.get_variable_flux_sink_1Darray(var, 1*ur.second)
self.assertEqual(s[self.uo.id], 0 * ur.kg/ur.second)
self.assertEqual(s[self.do.id], 0 * ur.kg/ur.second)
def test_variable_flux_source_1Darray(self):
var = self.system.variables['po4']
q = self.system.get_variable_flux_source_1Darray(var, 0*ur.second)
self.assertEqual(q[self.uo.id], 0 * ur.kg/ur.second)
self.assertEqual(q[self.do.id], 0 * ur.kg/ur.second)
def test_reaction_rate_cube(self):
C = self.system.get_reaction_rate_3Darray(0*ur.second)
self.assertEqual(C[self.uo.id, 0, 0], 0 * ur.kg/ur.second)
self.assertEqual(C[self.do.id, 0, 0], 0 * ur.kg/ur.second)
if __name__ == "__main__":
unittest.main()
| mit |
akrherz/iem | scripts/climodat/assign_default_hour.py | 1 | 1500 | """Sample obs to see what our default times are."""
# Third party
import pandas as pd
from pandas.io.sql import read_sql
from pyiem.util import get_dbconn, logger
LOG = logger()
def main():
    """Go Main Go.

    For every CLIMATE-network station missing a default observation hour,
    scan its full observation archive for the most frequently used hour and
    persist that hour back to the mesosite stations table.
    """
    # Stations missing either the temperature or the precip 24 hour value.
    df = read_sql(
        "SELECT iemid, id, temp24_hour, precip24_hour from stations WHERE "
        "network ~* 'CLIMATE' and (temp24_hour is null or "
        "precip24_hour is null) ORDER by id ASC",
        get_dbconn("mesosite"),
        index_col="id",
    )
    mesosite = get_dbconn("mesosite")
    mcursor = mesosite.cursor()
    coop = get_dbconn("coop")
    for col in ["temp24_hour", "precip24_hour"]:
        for sid, row in df[pd.isna(df[col])].iterrows():
            # Frequency of each (non-estimated) observation hour for this
            # station, most common first.  The state-partitioned alldata
            # table is picked from the first two chars of the station id.
            df2 = read_sql(
                f"SELECT {col.replace('24', '')} as datum, count(*), "
                f"min(day), max(day) from alldata_{sid[:2]} WHERE "
                f"{col.replace('24', '')} is not null and "
                f"{col.replace('24_hour', '')}_estimated = 'f' GROUP by datum "
                "ORDER by count DESC",
                coop,
                index_col=None,
            )
            if df2.empty:
                continue
            # Most common hour wins.
            newval = int(df2.iloc[0]["datum"])
            LOG.info("Setting %s for %s to %s", col, sid, newval)
            mcursor.execute(
                f"UPDATE stations SET {col} = %s WHERE iemid = %s",
                (newval, row["iemid"]),
            )
    mcursor.close()
    mesosite.commit()


if __name__ == "__main__":
    main()
| mit |
james-nichols/dtrw | compartment_models/PBPK_test.py | 1 | 5547 | #!/usr/local/bin/python3
# Libraries are in parent directory
import sys
sys.path.append('../')
import math
import numpy as np
import scipy as sp
import matplotlib.pyplot as plt
import pdb
from dtrw import *
class DTRW_PBPK(DTRW_compartment):
    """Markovian PBPK compartment model with six compartments.

    Compartment order: [Poorly perfused, Richly perfused, Fatty tissue,
    Kidneys, Liver, Arterial blood].
    """

    def __init__(self, X_inits, T, dT, V, Q, R, mu, Vmax, Km, g, g_T):
        if len(X_inits) != 6:
            # The PBPK system is hard wired to six compartments.
            print("Need six initial points")
            raise SystemExit
        super(DTRW_PBPK, self).__init__(X_inits, T, dT)
        self.Vs = np.array(V)   # compartment volumes
        self.Qs = np.array(Q)   # blood flow rates into the five tissues
        self.Rs = np.array(R)   # tissue/blood partition coefficients
        self.mu = mu            # kidney removal rate
        self.Vmax = Vmax        # Michaelis-Menten max liver removal rate
        self.Km = Km            # Michaelis-Menten constant
        self.g = g              # infusion rate into arterial blood
        self.g_T = g_T          # infusion duration

    def creation_flux(self, n):
        """Flux entering each compartment at time step n."""
        g_N = 0.
        if n * self.dT < self.g_T:
            g_N = self.g * self.dT
        # NOTE(review): removal_flux_markovian is assumed side-effect free,
        # so it is evaluated once and reused (the original recomputed it).
        removal = self.removal_flux_markovian(n)
        creation = np.zeros(self.n_species)
        # each tissue receives its share of the arterial blood outflow
        creation[:-1] = removal[5, :]
        # arterial blood receives venous return from all tissues + infusion
        creation[-1] = removal[:5, 0].sum() + g_N
        return creation

    def removal_rates(self, n):
        """Per-compartment removal rates at time step n."""
        rates = np.zeros([self.n_species, 5])
        # outflow from each tissue back into arterial blood
        rates[:-1, 0] = self.Qs / (self.Vs[:-1] * self.Rs)
        # kidney elimination
        rates[3, 1] = self.mu / self.Vs[3]
        # saturable (Michaelis-Menten) liver metabolism
        rates[4, 1] = self.Vmax / (self.Vs[4] * self.Km + self.Xs[4, n])
        # arterial blood feeds all five tissues
        rates[5, :] = self.Qs / self.Vs[-1]
        return rates
class DTRW_PBPK_anom(DTRW_compartment):
    """PBPK model where the fatty tissue exchange is anomalous.

    Both directions of the fat/blood exchange (species indices 2 and 5)
    use a Sibuya memory kernel with exponent ``alpha`` instead of the
    Markovian pathway.
    """

    def __init__(self, X_inits, T, dT, V, Q, R, mu, Vmax, Km, g, g_T, alpha):
        if len(X_inits) != 6:
            # The PBPK system is hard wired to six compartments.
            print("Need six initial points")
            raise SystemExit
        super(DTRW_PBPK_anom, self).__init__(X_inits, T, dT)
        self.Vs = np.array(V)   # compartment volumes
        self.Qs = np.array(Q)   # blood flow rates into the five tissues
        self.Rs = np.array(R)   # tissue/blood partition coefficients
        self.mu = mu            # kidney removal rate
        self.Vmax = Vmax        # Michaelis-Menten max liver removal rate
        self.Km = Km            # Michaelis-Menten constant
        self.g = g              # infusion rate into arterial blood
        self.g_T = g_T          # infusion duration
        self.alpha = alpha      # anomalous exponent
        # Sibuya memory kernels for fat -> blood (2) and blood -> fat (5).
        self.Ks[2] = calc_sibuya_kernel(self.N + 1, self.alpha)
        self.Ks[5] = calc_sibuya_kernel(self.N + 1, self.alpha)
        self.anom_rates = [None] * self.n_species
        self.anom_rates[2] = self.Qs[2] / (self.Vs[2] * self.Rs[2])
        self.anom_rates[5] = self.Qs[2] / (self.Vs[-1])

    def creation_flux(self, n):
        """Flux entering each compartment at time step n."""
        g_N = 0.
        if n * self.dT < self.g_T:
            g_N = self.g * self.dT
        # NOTE(review): both flux helpers are assumed side-effect free; the
        # original evaluated each of them twice per step.
        markov = self.removal_flux_markovian(n)
        anom = self.removal_flux_anomalous(n)
        creation = np.zeros(self.n_species)
        creation[:-1] = markov[5, :]
        # fat compartment inflow comes from the anomalous blood pathway
        creation[2] = anom[5]
        creation[-1] = markov[:5, 0].sum() + anom[2] + g_N
        return creation

    def removal_rates(self, n):
        """Per-compartment Markovian removal rates at time step n."""
        rates = np.zeros([self.n_species, 5])
        rates[:-1, 0] = self.Qs / (self.Vs[:-1] * self.Rs)
        # fat exchange is handled by the anomalous pathway, not here
        rates[2, 0] = 0.
        rates[3, 1] = self.mu / self.Vs[3]
        rates[4, 1] = self.Vmax / (self.Vs[4] * self.Km + self.Xs[4, n])
        rates[5, :] = self.Qs / self.Vs[-1]
        rates[5, 2] = 0.
        return rates
T = 100.0
dT = 0.01
ts = np.arange(0., T, dT)
initial = [0., 0., 0., 0., 0., 0.]
mu = 0.5 # Kidney removal rate
V_max = 2.69
K_m = 0.59
# [P, R, F, K, L, A]
Vs = [28.6, 6.90, 15.10, 0.267, 1.508, 1.570]
Qs = [1.46, 1.43, 0.29, 1.14, 1.52]
Rs = [0.69, 0.79, 0.39, 0.80, 0.78]
alpha = 0.8
g = 1.0
g_T = 1.0
dtrw = DTRW_PBPK(initial, T, dT, Vs, Qs, Rs, mu, V_max, K_m, g, g_T)
dtrw_anom = DTRW_PBPK_anom(initial, T, dT, Vs, Qs, Rs, mu, V_max, K_m, g, g_T, alpha)
dtrw.solve_all_steps()
dtrw_anom.solve_all_steps()
max_level = max([dtrw.Xs[0,:].max(), dtrw.Xs[1,:].max(), dtrw.Xs[2,:].max(), dtrw.Xs[3,:].max(), dtrw.Xs[4,:].max(), dtrw.Xs[5,:].max()])
fig = plt.figure(figsize=(8,8))
plt.xlim(0,T)
plt.ylim(0,1.1 * max_level)
plt.xlabel('Time')
P, = plt.plot(ts, dtrw.Xs[0,:])
R, = plt.plot(ts, dtrw.Xs[1,:])
F, = plt.plot(ts, dtrw.Xs[2,:])
K, = plt.plot(ts, dtrw.Xs[3,:])
L, = plt.plot(ts, dtrw.Xs[4,:])
A, = plt.plot(ts, dtrw.Xs[5,:])
plt.legend([P, R, F, K, L, A], ["Poorly perfused", "Richly perfused", "Fatty tissue", "Kidneys", "Liver", "Arterial blood"])
Pa, = plt.plot(ts, dtrw_anom.Xs[0,:],'b:')
Ra, = plt.plot(ts, dtrw_anom.Xs[1,:],'g:')
Fa, = plt.plot(ts, dtrw_anom.Xs[2,:],'r:')
Ka, = plt.plot(ts, dtrw_anom.Xs[3,:],'c:')
La, = plt.plot(ts, dtrw_anom.Xs[4,:],'m:')
Aa, = plt.plot(ts, dtrw_anom.Xs[5,:],'y:')
plt.show()
T, = plt.plot(ts, dtrw.Xs.sum(0), 'k')
Ta, = plt.plot(ts, dtrw_anom.Xs.sum(0), 'k:')
plt.show()
| gpl-2.0 |
DSLituiev/scikit-learn | sklearn/metrics/tests/test_classification.py | 15 | 54365 | from __future__ import division, print_function
import numpy as np
from scipy import linalg
from functools import partial
from itertools import product
import warnings
from sklearn import datasets
from sklearn import svm
from sklearn.datasets import make_multilabel_classification
from sklearn.preprocessing import label_binarize
from sklearn.utils.fixes import np_version
from sklearn.utils.validation import check_random_state
from sklearn.utils.testing import assert_raises, clean_warning_registry
from sklearn.utils.testing import assert_raise_message
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_warns
from sklearn.utils.testing import assert_no_warnings
from sklearn.utils.testing import assert_warns_message
from sklearn.utils.testing import assert_not_equal
from sklearn.utils.testing import ignore_warnings
from sklearn.utils.mocking import MockDataFrame
from sklearn.metrics import accuracy_score
from sklearn.metrics import average_precision_score
from sklearn.metrics import classification_report
from sklearn.metrics import cohen_kappa_score
from sklearn.metrics import confusion_matrix
from sklearn.metrics import f1_score
from sklearn.metrics import fbeta_score
from sklearn.metrics import hamming_loss
from sklearn.metrics import hinge_loss
from sklearn.metrics import jaccard_similarity_score
from sklearn.metrics import log_loss
from sklearn.metrics import matthews_corrcoef
from sklearn.metrics import precision_recall_fscore_support
from sklearn.metrics import precision_score
from sklearn.metrics import recall_score
from sklearn.metrics import zero_one_loss
from sklearn.metrics import brier_score_loss
from sklearn.metrics.classification import _check_targets
from sklearn.exceptions import UndefinedMetricWarning
from scipy.spatial.distance import hamming as sp_hamming
###############################################################################
# Utilities for testing
def make_prediction(dataset=None, binary=False):
    """Make some classification predictions on a toy dataset using a SVC

    If binary is True restrict to a binary classification problem instead of a
    multiclass classification problem

    Returns (y_true, y_pred, probas_pred) evaluated on the second half of
    the shuffled data after training on the first half.
    """
    if dataset is None:
        # import some data to play with
        dataset = datasets.load_iris()
    X = dataset.data
    y = dataset.target
    if binary:
        # restrict to a binary classification task
        X, y = X[y < 2], y[y < 2]
    n_samples, n_features = X.shape
    p = np.arange(n_samples)
    # Deterministic shuffle so the train/test split is reproducible.
    rng = check_random_state(37)
    rng.shuffle(p)
    X, y = X[p], y[p]
    half = int(n_samples / 2)
    # add noisy features to make the problem harder and avoid perfect results
    rng = np.random.RandomState(0)
    X = np.c_[X, rng.randn(n_samples, 200 * n_features)]
    # run classifier, get class probabilities and label predictions
    clf = svm.SVC(kernel='linear', probability=True, random_state=0)
    probas_pred = clf.fit(X[:half], y[:half]).predict_proba(X[half:])
    if binary:
        # only interested in probabilities of the positive case
        # XXX: do we really want a special API for the binary case?
        probas_pred = probas_pred[:, 1]
    y_pred = clf.predict(X[half:])
    y_true = y[half:]
    return y_true, y_pred, probas_pred
###############################################################################
# Tests
def test_multilabel_accuracy_score_subset_accuracy():
    """Subset accuracy counts only rows that match exactly."""
    # Dense label indicator matrix format
    y_a = np.array([[0, 1, 1], [1, 0, 1]])
    y_b = np.array([[0, 0, 1], [1, 0, 1]])
    # Exactly one of the two rows agrees -> 0.5.
    assert_equal(accuracy_score(y_a, y_b), 0.5)
    # Perfect agreement with itself.
    assert_equal(accuracy_score(y_a, y_a), 1)
    assert_equal(accuracy_score(y_b, y_b), 1)
    # Complemented or all-zero predictions never match a row.
    assert_equal(accuracy_score(y_b, np.logical_not(y_b)), 0)
    assert_equal(accuracy_score(y_a, np.logical_not(y_a)), 0)
    assert_equal(accuracy_score(y_a, np.zeros(y_a.shape)), 0)
    assert_equal(accuracy_score(y_b, np.zeros(y_a.shape)), 0)
def test_precision_recall_f1_score_binary():
    # Test Precision Recall and F1 Score for binary classification task
    y_true, y_pred, _ = make_prediction(binary=True)
    # detailed measures for each class
    p, r, f, s = precision_recall_fscore_support(y_true, y_pred, average=None)
    assert_array_almost_equal(p, [0.73, 0.85], 2)
    assert_array_almost_equal(r, [0.88, 0.68], 2)
    assert_array_almost_equal(f, [0.80, 0.76], 2)
    assert_array_equal(s, [25, 25])
    # individual scoring function that can be used for grid search: in the
    # binary class case the score is the value of the measure for the positive
    # class (e.g. label == 1). This is deprecated for average != 'binary'.
    assert_dep_warning = partial(assert_warns, DeprecationWarning)
    # Each (kwargs, assertion) pair checks both the score value and whether
    # the deprecation warning is emitted for the given `average` setting.
    for kwargs, my_assert in [({}, assert_no_warnings),
                              ({'average': 'binary'}, assert_no_warnings),
                              ({'average': 'micro'}, assert_dep_warning)]:
        ps = my_assert(precision_score, y_true, y_pred, **kwargs)
        assert_array_almost_equal(ps, 0.85, 2)
        rs = my_assert(recall_score, y_true, y_pred, **kwargs)
        assert_array_almost_equal(rs, 0.68, 2)
        fs = my_assert(f1_score, y_true, y_pred, **kwargs)
        assert_array_almost_equal(fs, 0.76, 2)
        # F-beta from its definition in terms of precision and recall.
        assert_almost_equal(my_assert(fbeta_score, y_true, y_pred, beta=2,
                                      **kwargs),
                            (1 + 2 ** 2) * ps * rs / (2 ** 2 * ps + rs), 2)
def test_precision_recall_f_binary_single_class():
    """P/R/F1 behave sensibly when only one class is present.

    Such degenerate folds can occur with non-stratified cross-validation.
    """
    # All-positive labels: every metric is trivially perfect.
    for metric in (precision_score, recall_score, f1_score):
        assert_equal(1., metric([1, 1], [1, 1]))
    # All-negative labels: the positive class never occurs, so each
    # metric evaluates to zero.
    for metric in (precision_score, recall_score, f1_score):
        assert_equal(0., metric([-1, -1], [-1, -1]))
@ignore_warnings
def test_precision_recall_f_extra_labels():
    """``labels`` may list classes absent from the input.

    Extra labels contribute zeros per-label (changing the macro average)
    but leave micro/weighted/samples averages untouched; out-of-range
    labels are rejected for multilabel-indicator input.
    """
    # Test handling of explicit additional (not in input) labels to PRF
    y_true = [1, 3, 3, 2]
    y_pred = [1, 1, 3, 2]
    y_true_bin = label_binarize(y_true, classes=np.arange(5))
    y_pred_bin = label_binarize(y_pred, classes=np.arange(5))
    # Exercise both the multiclass and the multilabel-indicator paths.
    data = [(y_true, y_pred),
            (y_true_bin, y_pred_bin)]
    for i, (y_true, y_pred) in enumerate(data):
        # No average: zeros in array
        actual = recall_score(y_true, y_pred, labels=[0, 1, 2, 3, 4],
                              average=None)
        assert_array_almost_equal([0., 1., 1., .5, 0.], actual)
        # Macro average is changed
        actual = recall_score(y_true, y_pred, labels=[0, 1, 2, 3, 4],
                              average='macro')
        assert_array_almost_equal(np.mean([0., 1., 1., .5, 0.]), actual)
        # No effect otherwise
        for average in ['micro', 'weighted', 'samples']:
            # 'samples' averaging is only defined for multilabel input.
            if average == 'samples' and i == 0:
                continue
            assert_almost_equal(recall_score(y_true, y_pred,
                                             labels=[0, 1, 2, 3, 4],
                                             average=average),
                                recall_score(y_true, y_pred, labels=None,
                                             average=average))
    # Error when introducing invalid label in multilabel case
    # (although it would only affect performance if average='macro'/None)
    for average in [None, 'macro', 'micro', 'samples']:
        assert_raises(ValueError, recall_score, y_true_bin, y_pred_bin,
                      labels=np.arange(6), average=average)
        assert_raises(ValueError, recall_score, y_true_bin, y_pred_bin,
                      labels=np.arange(-1, 4), average=average)
@ignore_warnings
def test_precision_recall_f_ignored_labels():
    """Restricting PRF to a subset of labels changes the averaged scores.

    Recall over labels [1, 3] is compared against the all-labels scores
    to make sure the restriction is actually honored.
    """
    # Test a subset of labels may be requested for PRF
    y_true = [1, 1, 2, 3]
    y_pred = [1, 3, 3, 3]
    y_true_bin = label_binarize(y_true, classes=np.arange(5))
    y_pred_bin = label_binarize(y_pred, classes=np.arange(5))
    # Exercise both the multiclass and the multilabel-indicator paths.
    data = [(y_true, y_pred),
            (y_true_bin, y_pred_bin)]
    for i, (y_true, y_pred) in enumerate(data):
        recall_13 = partial(recall_score, y_true, y_pred, labels=[1, 3])
        recall_all = partial(recall_score, y_true, y_pred, labels=None)
        # Per-label recall: class 1 -> 1/2, class 3 -> 1/1.
        assert_array_almost_equal([.5, 1.], recall_13(average=None))
        assert_almost_equal((.5 + 1.) / 2, recall_13(average='macro'))
        assert_almost_equal((.5 * 2 + 1. * 1) / 3,
                            recall_13(average='weighted'))
        assert_almost_equal(2. / 3, recall_13(average='micro'))
        # ensure the above were meaningful tests:
        for average in ['macro', 'weighted', 'micro']:
            assert_not_equal(recall_13(average=average),
                             recall_all(average=average))
def test_average_precision_score_score_non_binary_class():
    """average_precision_score must reject multiclass targets."""
    rng = check_random_state(404)
    # Draw scores first so the RNG stream matches the original test.
    scores = rng.rand(10)
    # Three distinct class values make the problem multiclass.
    labels = rng.randint(0, 3, size=10)
    assert_raise_message(ValueError, "multiclass format is not supported",
                         average_precision_score, labels, scores)
def test_average_precision_score_duplicate_values():
    """A perfect ranking with duplicate scores still yields AP of 1.

    Duplicate scores need different handling than in ROC/AUC because the
    precision-recall curve is a decreasing curve; here every positive is
    ranked at or above every negative, a perfect test statistic.
    """
    truth = [0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1]
    scores = [0, .1, .1, .4, .5, .6, .6, .9, .9, 1, 1]
    assert_equal(average_precision_score(truth, scores), 1)
def test_average_precision_score_tied_values():
    """A tie straddling the class boundary must cost some precision.

    Read left to right the 0 precedes the 1s, which looks perfectly
    sorted -- but the first two samples share the score 0.5, so they
    could legitimately be swapped into an imperfect ordering.  That
    ambiguity must show up as a score strictly below 1.
    """
    truth = [0, 1, 1]
    scores = [.5, .5, .6]
    assert_not_equal(average_precision_score(truth, scores), 1.)
@ignore_warnings
def test_precision_recall_fscore_support_errors():
    """Invalid arguments to precision_recall_fscore_support raise ValueError."""
    y_true, y_pred, _ = make_prediction(binary=True)
    bad_calls = [
        # beta must be strictly positive
        ((y_true, y_pred), {'beta': 0.0}),
        # pos_label must be a label that can occur in the binary data
        ((y_true, y_pred), {'pos_label': 2, 'average': 'macro'}),
        # unknown averaging mode
        (([0, 1, 2], [1, 2, 0]), {'average': 'mega'}),
    ]
    for args, kwargs in bad_calls:
        assert_raises(ValueError, precision_recall_fscore_support,
                      *args, **kwargs)
def test_confusion_matrix_binary():
    """Binary confusion matrix, with MCC cross-checked against its formula."""
    # Test confusion matrix - binary classification case
    y_true, y_pred, _ = make_prediction(binary=True)
    def test(y_true, y_pred):
        cm = confusion_matrix(y_true, y_pred)
        assert_array_equal(cm, [[22, 3], [8, 17]])
        # Recompute the Matthews correlation from the flattened matrix
        # and check matthews_corrcoef agrees with the closed form.
        tp, fp, fn, tn = cm.flatten()
        num = (tp * tn - fp * fn)
        den = np.sqrt((tp + fp) * (tp + fn) * (tn + fp) * (tn + fn))
        true_mcc = 0 if den == 0 else num / den
        mcc = matthews_corrcoef(y_true, y_pred)
        assert_array_almost_equal(mcc, true_mcc, decimal=2)
        assert_array_almost_equal(mcc, 0.57, decimal=2)
    test(y_true, y_pred)
    # Repeat with string labels to exercise label-encoding code paths.
    test([str(y) for y in y_true],
         [str(y) for y in y_pred])
def test_cohen_kappa():
    """Cohen's kappa against the Artstein & Poesio (2008) reference tables."""
    # These label vectors reproduce the contingency matrix from Artstein and
    # Poesio (2008), Table 1: np.array([[20, 20], [10, 50]]).
    y1 = np.array([0] * 40 + [1] * 60)
    y2 = np.array([0] * 20 + [1] * 20 + [0] * 10 + [1] * 50)
    kappa = cohen_kappa_score(y1, y2)
    assert_almost_equal(kappa, .348, decimal=3)
    # Kappa is symmetric in its two arguments.
    assert_equal(kappa, cohen_kappa_score(y2, y1))
    # Add spurious labels and ignore them.
    y1 = np.append(y1, [2] * 4)
    y2 = np.append(y2, [2] * 4)
    assert_equal(cohen_kappa_score(y1, y2, labels=[0, 1]), kappa)
    # Perfect agreement gives a kappa of 1.
    assert_almost_equal(cohen_kappa_score(y1, y1), 1.)
    # Multiclass example: Artstein and Poesio, Table 4.
    y1 = np.array([0] * 46 + [1] * 44 + [2] * 10)
    y2 = np.array([0] * 52 + [1] * 32 + [2] * 16)
    assert_almost_equal(cohen_kappa_score(y1, y2), .8013, decimal=4)
@ignore_warnings
def test_matthews_corrcoef_nan():
    """Degenerate inputs must yield 0 rather than NaN."""
    for truth, pred in (([0], [1]), ([0, 0], [0, 1])):
        assert_equal(matthews_corrcoef(truth, pred), 0.0)
def test_matthews_corrcoef_against_numpy_corrcoef():
    """For binary labels MCC equals the Pearson correlation coefficient."""
    rng = np.random.RandomState(0)
    truth = rng.randint(0, 2, size=20)
    pred = rng.randint(0, 2, size=20)
    expected = np.corrcoef(truth, pred)[0, 1]
    assert_almost_equal(matthews_corrcoef(truth, pred), expected, 10)
def test_matthews_corrcoef():
    """MCC invariants: self-correlation, inversion, degenerate input, weights."""
    rng = np.random.RandomState(0)
    y_true = ["a" if i == 0 else "b" for i in rng.randint(0, 2, size=20)]
    # corrcoef of same vectors must be 1
    assert_almost_equal(matthews_corrcoef(y_true, y_true), 1.0)
    # corrcoef, when the two vectors are opposites of each other, should be -1
    y_true_inv = ["b" if i == "a" else "a" for i in y_true]
    assert_almost_equal(matthews_corrcoef(y_true, y_true_inv), -1)
    # Same inversion expressed as a +/-1 indicator vector.
    y_true_inv2 = label_binarize(y_true, ["a", "b"]) * -1
    assert_almost_equal(matthews_corrcoef(y_true, y_true_inv2), -1)
    # For the zero vector case, the corrcoef cannot be calculated and should
    # result in a RuntimeWarning
    mcc = assert_warns_message(RuntimeWarning, 'invalid value encountered',
                               matthews_corrcoef, [0, 0, 0, 0], [0, 0, 0, 0])
    # But will output 0
    assert_almost_equal(mcc, 0.)
    # And also for any other vector with 0 variance
    mcc = assert_warns_message(RuntimeWarning, 'invalid value encountered',
                               matthews_corrcoef, y_true,
                               rng.randint(-100, 100) * np.ones(20, dtype=int))
    # But will output 0
    assert_almost_equal(mcc, 0.)
    # These two vectors have 0 correlation and hence mcc should be 0
    y_1 = [1, 0, 1, 1, 0, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1]
    y_2 = [1, 1, 1, 0, 0, 1, 1, 1, 1, 0, 1, 1, 1, 0, 1, 1, 1, 0, 1, 1]
    assert_almost_equal(matthews_corrcoef(y_1, y_2), 0.)
    # Check that sample weight is able to selectively exclude
    mask = [1] * 10 + [0] * 10
    # Now the first half of the vector elements are alone given a weight of 1
    # and hence the mcc will not be a perfect 0 as in the previous case
    assert_raises(AssertionError, assert_almost_equal,
                  matthews_corrcoef(y_1, y_2, sample_weight=mask), 0.)
def test_precision_recall_f1_score_multiclass():
    """P/R/F on a multiclass task: per-class values and every averaging mode."""
    # Test Precision Recall and F1 Score for multiclass classification task
    y_true, y_pred, _ = make_prediction(binary=False)
    # compute scores with default labels introspection
    p, r, f, s = precision_recall_fscore_support(y_true, y_pred, average=None)
    assert_array_almost_equal(p, [0.83, 0.33, 0.42], 2)
    assert_array_almost_equal(r, [0.79, 0.09, 0.90], 2)
    assert_array_almost_equal(f, [0.81, 0.15, 0.57], 2)
    assert_array_equal(s, [24, 31, 20])
    # averaging tests
    ps = precision_score(y_true, y_pred, pos_label=1, average='micro')
    assert_array_almost_equal(ps, 0.53, 2)
    rs = recall_score(y_true, y_pred, average='micro')
    assert_array_almost_equal(rs, 0.53, 2)
    fs = f1_score(y_true, y_pred, average='micro')
    assert_array_almost_equal(fs, 0.53, 2)
    ps = precision_score(y_true, y_pred, average='macro')
    assert_array_almost_equal(ps, 0.53, 2)
    rs = recall_score(y_true, y_pred, average='macro')
    assert_array_almost_equal(rs, 0.60, 2)
    fs = f1_score(y_true, y_pred, average='macro')
    assert_array_almost_equal(fs, 0.51, 2)
    ps = precision_score(y_true, y_pred, average='weighted')
    assert_array_almost_equal(ps, 0.51, 2)
    rs = recall_score(y_true, y_pred, average='weighted')
    assert_array_almost_equal(rs, 0.53, 2)
    fs = f1_score(y_true, y_pred, average='weighted')
    assert_array_almost_equal(fs, 0.47, 2)
    # 'samples' averaging is only defined for multilabel input.
    assert_raises(ValueError, precision_score, y_true, y_pred,
                  average="samples")
    assert_raises(ValueError, recall_score, y_true, y_pred, average="samples")
    assert_raises(ValueError, f1_score, y_true, y_pred, average="samples")
    assert_raises(ValueError, fbeta_score, y_true, y_pred, average="samples",
                  beta=0.5)
    # same prediction but with and explicit label ordering
    p, r, f, s = precision_recall_fscore_support(
        y_true, y_pred, labels=[0, 2, 1], average=None)
    assert_array_almost_equal(p, [0.83, 0.41, 0.33], 2)
    assert_array_almost_equal(r, [0.79, 0.90, 0.10], 2)
    assert_array_almost_equal(f, [0.81, 0.57, 0.15], 2)
    assert_array_equal(s, [24, 20, 31])
def test_precision_refcall_f1_score_multilabel_unordered_labels():
    """Multilabel PRF accepts a ``labels`` list in arbitrary order."""
    y_true = np.array([[1, 1, 0, 0]])
    y_pred = np.array([[0, 0, 1, 1]])
    # Prediction and truth are disjoint, so every score is zero no
    # matter how the labels are permuted or how they are averaged.
    for average in ['samples', 'micro', 'macro', 'weighted', None]:
        p, r, f, s = precision_recall_fscore_support(
            y_true, y_pred, labels=[3, 0, 1, 2], warn_for=[],
            average=average)
        for score in (p, r, f):
            assert_array_equal(score, 0)
        if average is None:
            # Support is reported in the requested label order.
            assert_array_equal(s, [0, 1, 1, 0])
def test_precision_recall_f1_score_multiclass_pos_label_none():
    """Smoke test: PRF with ``pos_label=None`` on multiclass data.

    Regression test for GH issue #1296.  There is deliberately no
    assertion -- the test passes as long as the call raises no exception.
    """
    # Test Precision Recall and F1 Score for multiclass classification task
    # GH Issue #1296
    # initialize data
    y_true = np.array([0, 1, 0, 0, 1, 1, 0, 1, 0, 0, 1, 0, 1, 0, 1])
    y_pred = np.array([1, 1, 0, 1, 0, 1, 1, 1, 1, 0, 1, 0, 1, 0, 1])
    # compute scores with default labels introspection
    p, r, f, s = precision_recall_fscore_support(y_true, y_pred,
                                                 pos_label=None,
                                                 average='weighted')
def test_zero_precision_recall():
    """Pathological, fully-wrong predictions must not produce NaNs."""
    # Turn floating point errors into exceptions so that any
    # NaN-producing division fails the test loudly.
    old_error_settings = np.seterr(all='raise')
    try:
        truth = np.array([0, 1, 2, 0, 1, 2])
        guess = np.array([2, 0, 1, 1, 2, 0])
        # Every prediction is wrong, so all weighted scores are zero.
        for metric in (precision_score, recall_score, f1_score):
            assert_almost_equal(metric(truth, guess, average='weighted'),
                                0.0, 2)
    finally:
        np.seterr(**old_error_settings)
def test_confusion_matrix_multiclass():
    """Multiclass confusion matrix, with and without explicit label order."""
    # Test confusion matrix - multi-class case
    y_true, y_pred, _ = make_prediction(binary=False)
    def test(y_true, y_pred, string_type=False):
        # compute confusion matrix with default labels introspection
        cm = confusion_matrix(y_true, y_pred)
        assert_array_equal(cm, [[19, 4, 1],
                                [4, 3, 24],
                                [0, 2, 18]])
        # compute confusion matrix with explicit label ordering
        labels = ['0', '2', '1'] if string_type else [0, 2, 1]
        cm = confusion_matrix(y_true,
                              y_pred,
                              labels=labels)
        assert_array_equal(cm, [[19, 1, 4],
                                [0, 18, 2],
                                [4, 24, 3]])
    test(y_true, y_pred)
    # Repeat with string labels to exercise label-encoding code paths.
    test(list(str(y) for y in y_true),
         list(str(y) for y in y_pred),
         string_type=True)
def test_confusion_matrix_sample_weight():
    """Confusion matrix with per-sample weights."""
    y_true, y_pred, _ = make_prediction(binary=False)
    weights = [.1] * 25 + [.2] * 25 + [.3] * 25
    weighted_cm = confusion_matrix(y_true, y_pred, sample_weight=weights)
    # The weighted matrix equals the weight-scaled sum of unweighted
    # matrices computed over each constant-weight chunk.
    expected = sum(w * confusion_matrix(y_true[lo:hi], y_pred[lo:hi])
                   for w, lo, hi in [(.1, 0, 25), (.2, 25, 50),
                                     (.3, 50, len(y_true))])
    assert_array_almost_equal(weighted_cm, expected)
    # A weight vector of mismatched length is rejected.
    assert_raises(ValueError, confusion_matrix, y_true, y_pred,
                  sample_weight=weights[:-1])
def test_confusion_matrix_multiclass_subset_labels():
    """Confusion matrix restricted to a subset of the labels."""
    y_true, y_pred, _ = make_prediction(binary=False)
    cases = [
        # only the first two labels considered
        ([0, 1], [[19, 4], [4, 3]]),
        # explicit ordering of a different subset of labels
        ([2, 1], [[18, 2], [24, 3]]),
    ]
    for labels, expected in cases:
        cm = confusion_matrix(y_true, y_pred, labels=labels)
        assert_array_equal(cm, expected)
def test_classification_report_multiclass():
    """classification_report text output for the iris multiclass problem.

    NOTE: the expected strings are compared verbatim, so every space in
    them is significant.
    """
    # Test performance report
    iris = datasets.load_iris()
    y_true, y_pred, _ = make_prediction(dataset=iris, binary=False)
    # print classification report with class names
    expected_report = """\
             precision    recall  f1-score   support
     setosa       0.83      0.79      0.81        24
 versicolor       0.33      0.10      0.15        31
  virginica       0.42      0.90      0.57        20
avg / total       0.51      0.53      0.47        75
"""
    report = classification_report(
        y_true, y_pred, labels=np.arange(len(iris.target_names)),
        target_names=iris.target_names)
    assert_equal(report, expected_report)
    # print classification report with label detection
    expected_report = """\
             precision    recall  f1-score   support
          0       0.83      0.79      0.81        24
          1       0.33      0.10      0.15        31
          2       0.42      0.90      0.57        20
avg / total       0.51      0.53      0.47        75
"""
    report = classification_report(y_true, y_pred)
    assert_equal(report, expected_report)
def test_classification_report_multiclass_with_digits():
    """classification_report honors the ``digits`` precision parameter.

    NOTE: the expected strings are compared verbatim, so every space in
    them is significant.
    """
    # Test performance report with added digits in floating point values
    iris = datasets.load_iris()
    y_true, y_pred, _ = make_prediction(dataset=iris, binary=False)
    # print classification report with class names
    expected_report = """\
             precision    recall  f1-score   support
     setosa    0.82609   0.79167   0.80851        24
 versicolor    0.33333   0.09677   0.15000        31
  virginica    0.41860   0.90000   0.57143        20
avg / total    0.51375   0.53333   0.47310        75
"""
    report = classification_report(
        y_true, y_pred, labels=np.arange(len(iris.target_names)),
        target_names=iris.target_names, digits=5)
    assert_equal(report, expected_report)
    # print classification report with label detection
    # (default digits=2 applies here)
    expected_report = """\
             precision    recall  f1-score   support
          0       0.83      0.79      0.81        24
          1       0.33      0.10      0.15        31
          2       0.42      0.90      0.57        20
avg / total       0.51      0.53      0.47        75
"""
    report = classification_report(y_true, y_pred)
    assert_equal(report, expected_report)
def test_classification_report_multiclass_with_string_label():
    """classification_report with string class labels and target_names.

    NOTE: the expected strings are compared verbatim, so every space in
    them is significant.
    """
    y_true, y_pred, _ = make_prediction(binary=False)
    # Map integer classes to string labels.
    y_true = np.array(["blue", "green", "red"])[y_true]
    y_pred = np.array(["blue", "green", "red"])[y_pred]
    expected_report = """\
             precision    recall  f1-score   support
       blue       0.83      0.79      0.81        24
      green       0.33      0.10      0.15        31
        red       0.42      0.90      0.57        20
avg / total       0.51      0.53      0.47        75
"""
    report = classification_report(y_true, y_pred)
    assert_equal(report, expected_report)
    # target_names overrides the detected string labels in the output.
    expected_report = """\
             precision    recall  f1-score   support
          a       0.83      0.79      0.81        24
          b       0.33      0.10      0.15        31
          c       0.42      0.90      0.57        20
avg / total       0.51      0.53      0.47        75
"""
    report = classification_report(y_true, y_pred,
                                   target_names=["a", "b", "c"])
    assert_equal(report, expected_report)
def test_classification_report_multiclass_with_unicode_label():
    """classification_report with non-ASCII (unicode) class labels.

    On NumPy < 1.7.0 unicode labels are expected to raise instead,
    because searchsorted mishandles unicode data there.
    """
    y_true, y_pred, _ = make_prediction(binary=False)
    # \xa2 is the cent sign -- a non-ASCII character in each label.
    labels = np.array([u"blue\xa2", u"green\xa2", u"red\xa2"])
    y_true = labels[y_true]
    y_pred = labels[y_pred]
    expected_report = u"""\
             precision    recall  f1-score   support
      blue\xa2       0.83      0.79      0.81        24
     green\xa2       0.33      0.10      0.15        31
       red\xa2       0.42      0.90      0.57        20
avg / total       0.51      0.53      0.47        75
"""
    if np_version[:3] < (1, 7, 0):
        expected_message = ("NumPy < 1.7.0 does not implement"
                            " searchsorted on unicode data correctly.")
        assert_raise_message(RuntimeError, expected_message,
                             classification_report, y_true, y_pred)
    else:
        report = classification_report(y_true, y_pred)
        assert_equal(report, expected_report)
def test_classification_report_multiclass_with_long_string_label():
    """classification_report widens its columns for long class names.

    NOTE: the expected string is compared verbatim, so every space in it
    is significant.
    """
    y_true, y_pred, _ = make_prediction(binary=False)
    # "green" * 5 forces a 25-character label that stretches the layout.
    labels = np.array(["blue", "green"*5, "red"])
    y_true = labels[y_true]
    y_pred = labels[y_pred]
    expected_report = """\
                           precision    recall  f1-score   support
                     blue       0.83      0.79      0.81        24
greengreengreengreengreen       0.33      0.10      0.15        31
                      red       0.42      0.90      0.57        20
              avg / total       0.51      0.53      0.47        75
"""
    report = classification_report(y_true, y_pred)
    assert_equal(report, expected_report)
def test_multilabel_classification_report():
    """classification_report on multilabel-indicator input.

    NOTE: the expected string is compared verbatim, so every space in it
    is significant.
    """
    n_classes = 4
    n_samples = 50
    # Two independently generated label matrices (different seeds) so
    # the report contains non-trivial scores.
    _, y_true = make_multilabel_classification(n_features=1,
                                               n_samples=n_samples,
                                               n_classes=n_classes,
                                               random_state=0)
    _, y_pred = make_multilabel_classification(n_features=1,
                                               n_samples=n_samples,
                                               n_classes=n_classes,
                                               random_state=1)
    expected_report = """\
             precision    recall  f1-score   support
          0       0.50      0.67      0.57        24
          1       0.51      0.74      0.61        27
          2       0.29      0.08      0.12        26
          3       0.52      0.56      0.54        27
avg / total       0.45      0.51      0.46       104
"""
    report = classification_report(y_true, y_pred)
    assert_equal(report, expected_report)
def test_multilabel_zero_one_loss_subset():
    """Subset zero-one loss on dense multilabel indicator matrices."""
    first = np.array([[0, 1, 1], [1, 0, 1]])
    second = np.array([[0, 0, 1], [1, 0, 1]])
    # Exactly one of the two rows differs, so half the samples are wrong.
    assert_equal(zero_one_loss(first, second), 0.5)
    for same in (first, second):
        # Identity incurs no loss ...
        assert_equal(zero_one_loss(same, same), 0)
        # ... while the complement and an all-zero prediction miss
        # every single sample.
        assert_equal(zero_one_loss(same, np.logical_not(same)), 1)
        assert_equal(zero_one_loss(same, np.zeros(first.shape)), 1)
def test_multilabel_hamming_loss():
    """Hamming loss on dense indicator matrices, with sample weights.

    NOTE(review): the fractional expectations (e.g. ``1 / 6``) assume
    true division; presumably the file enables it via
    ``from __future__ import division`` -- confirm against the header.
    """
    # Dense label indicator matrix format
    y1 = np.array([[0, 1, 1], [1, 0, 1]])
    y2 = np.array([[0, 0, 1], [1, 0, 1]])
    w = np.array([1, 3])
    # y1 and y2 differ in exactly one of the six label entries.
    assert_equal(hamming_loss(y1, y2), 1 / 6)
    assert_equal(hamming_loss(y1, y1), 0)
    assert_equal(hamming_loss(y2, y2), 0)
    assert_equal(hamming_loss(y2, 1 - y2), 1)
    assert_equal(hamming_loss(y1, 1 - y1), 1)
    assert_equal(hamming_loss(y1, np.zeros(y1.shape)), 4 / 6)
    assert_equal(hamming_loss(y2, np.zeros(y1.shape)), 0.5)
    # Per-sample weights reweight each row's contribution.
    assert_equal(hamming_loss(y1, y2, sample_weight=w), 1. / 12)
    assert_equal(hamming_loss(y1, 1-y2, sample_weight=w), 11. / 12)
    assert_equal(hamming_loss(y1, np.zeros_like(y1), sample_weight=w), 2. / 3)
    # sp_hamming only works with 1-D arrays
    assert_equal(hamming_loss(y1[0], y2[0]), sp_hamming(y1[0], y2[0]))
def test_multilabel_jaccard_similarity_score():
    """Jaccard similarity on dense multilabel indicator matrices."""
    first = np.array([[0, 1, 1], [1, 0, 1]])
    second = np.array([[0, 0, 1], [1, 0, 1]])
    # Per sample: |intersection| = [1, 2] and |union| = [2, 2], so the
    # mean Jaccard index is (1/2 + 2/2) / 2 = 0.75.
    assert_equal(jaccard_similarity_score(first, second), 0.75)
    for same in (first, second):
        assert_equal(jaccard_similarity_score(same, same), 1)
        assert_equal(jaccard_similarity_score(same, np.logical_not(same)), 0)
        assert_equal(jaccard_similarity_score(same, np.zeros(first.shape)), 0)
@ignore_warnings
def test_precision_recall_f1_score_multilabel_1():
    """P/R/F/F-beta on a crafted multilabel example, all averaging modes."""
    # Test precision_recall_f1_score on a crafted multilabel example
    # First crafted example
    y_true = np.array([[1, 0, 0, 0], [0, 1, 0, 0], [0, 0, 1, 1]])
    y_pred = np.array([[0, 1, 0, 0], [0, 1, 0, 0], [1, 0, 1, 0]])
    p, r, f, s = precision_recall_fscore_support(y_true, y_pred, average=None)
    # tp = [0, 1, 1, 0]
    # fn = [1, 0, 0, 1]
    # fp = [1, 1, 0, 0]
    # Check per class
    assert_array_almost_equal(p, [0.0, 0.5, 1.0, 0.0], 2)
    assert_array_almost_equal(r, [0.0, 1.0, 1.0, 0.0], 2)
    assert_array_almost_equal(f, [0.0, 1 / 1.5, 1, 0.0], 2)
    assert_array_almost_equal(s, [1, 1, 1, 1], 2)
    # Per-class F2 is reused below as the reference for each average.
    f2 = fbeta_score(y_true, y_pred, beta=2, average=None)
    support = s
    assert_array_almost_equal(f2, [0, 0.83, 1, 0], 2)
    # Check macro
    p, r, f, s = precision_recall_fscore_support(y_true, y_pred,
                                                 average="macro")
    assert_almost_equal(p, 1.5 / 4)
    assert_almost_equal(r, 0.5)
    assert_almost_equal(f, 2.5 / 1.5 * 0.25)
    # Averaged results carry no support.
    assert_equal(s, None)
    assert_almost_equal(fbeta_score(y_true, y_pred, beta=2, average="macro"),
                        np.mean(f2))
    # Check micro
    p, r, f, s = precision_recall_fscore_support(y_true, y_pred,
                                                 average="micro")
    assert_almost_equal(p, 0.5)
    assert_almost_equal(r, 0.5)
    assert_almost_equal(f, 0.5)
    assert_equal(s, None)
    assert_almost_equal(fbeta_score(y_true, y_pred, beta=2,
                                    average="micro"),
                        (1 + 4) * p * r / (4 * p + r))
    # Check weighted
    p, r, f, s = precision_recall_fscore_support(y_true, y_pred,
                                                 average="weighted")
    assert_almost_equal(p, 1.5 / 4)
    assert_almost_equal(r, 0.5)
    assert_almost_equal(f, 2.5 / 1.5 * 0.25)
    assert_equal(s, None)
    assert_almost_equal(fbeta_score(y_true, y_pred, beta=2,
                                    average="weighted"),
                        np.average(f2, weights=support))
    # Check samples
    # |h(x_i) inter y_i | = [0, 1, 1]
    # |y_i| = [1, 1, 2]
    # |h(x_i)| = [1, 1, 2]
    p, r, f, s = precision_recall_fscore_support(y_true, y_pred,
                                                 average="samples")
    assert_almost_equal(p, 0.5)
    assert_almost_equal(r, 0.5)
    assert_almost_equal(f, 0.5)
    assert_equal(s, None)
    assert_almost_equal(fbeta_score(y_true, y_pred, beta=2, average="samples"),
                        0.5)
@ignore_warnings
def test_precision_recall_f1_score_multilabel_2():
    """Second crafted multilabel example for P/R/F/F-beta averaging."""
    # Test precision_recall_f1_score on a crafted multilabel example 2
    # Second crafted example
    y_true = np.array([[1, 0, 0, 0], [0, 1, 0, 0], [0, 1, 1, 0]])
    y_pred = np.array([[0, 0, 0, 1], [0, 0, 0, 1], [1, 1, 0, 0]])
    # tp = [ 0.  1.  0.  0.]
    # fp = [ 1.  0.  0.  2.]
    # fn = [ 1.  1.  1.  0.]
    p, r, f, s = precision_recall_fscore_support(y_true, y_pred,
                                                 average=None)
    assert_array_almost_equal(p, [0.0, 1.0, 0.0, 0.0], 2)
    assert_array_almost_equal(r, [0.0, 0.5, 0.0, 0.0], 2)
    assert_array_almost_equal(f, [0.0, 0.66, 0.0, 0.0], 2)
    assert_array_almost_equal(s, [1, 2, 1, 0], 2)
    # Per-class F2 is reused below as the reference for each average.
    f2 = fbeta_score(y_true, y_pred, beta=2, average=None)
    support = s
    assert_array_almost_equal(f2, [0, 0.55, 0, 0], 2)
    p, r, f, s = precision_recall_fscore_support(y_true, y_pred,
                                                 average="micro")
    assert_almost_equal(p, 0.25)
    assert_almost_equal(r, 0.25)
    assert_almost_equal(f, 2 * 0.25 * 0.25 / 0.5)
    # Averaged results carry no support.
    assert_equal(s, None)
    assert_almost_equal(fbeta_score(y_true, y_pred, beta=2,
                                    average="micro"),
                        (1 + 4) * p * r / (4 * p + r))
    p, r, f, s = precision_recall_fscore_support(y_true, y_pred,
                                                 average="macro")
    assert_almost_equal(p, 0.25)
    assert_almost_equal(r, 0.125)
    assert_almost_equal(f, 2 / 12)
    assert_equal(s, None)
    assert_almost_equal(fbeta_score(y_true, y_pred, beta=2,
                                    average="macro"),
                        np.mean(f2))
    p, r, f, s = precision_recall_fscore_support(y_true, y_pred,
                                                 average="weighted")
    assert_almost_equal(p, 2 / 4)
    assert_almost_equal(r, 1 / 4)
    assert_almost_equal(f, 2 / 3 * 2 / 4)
    assert_equal(s, None)
    assert_almost_equal(fbeta_score(y_true, y_pred, beta=2,
                                    average="weighted"),
                        np.average(f2, weights=support))
    p, r, f, s = precision_recall_fscore_support(y_true, y_pred,
                                                 average="samples")
    # Check samples
    # |h(x_i) inter y_i | = [0, 0, 1]
    # |y_i| = [1, 1, 2]
    # |h(x_i)| = [1, 1, 2]
    assert_almost_equal(p, 1 / 6)
    assert_almost_equal(r, 1 / 6)
    assert_almost_equal(f, 2 / 4 * 1 / 3)
    assert_equal(s, None)
    assert_almost_equal(fbeta_score(y_true, y_pred, beta=2,
                                    average="samples"),
                        0.1666, 2)
@ignore_warnings
def test_precision_recall_f1_score_with_an_empty_prediction():
    """P/R/F behavior when one sample receives no predicted labels at all."""
    # The first prediction row is all zeros (an empty label set).
    y_true = np.array([[0, 1, 0, 0], [1, 0, 0, 0], [0, 1, 1, 0]])
    y_pred = np.array([[0, 0, 0, 0], [0, 0, 0, 1], [0, 1, 1, 0]])
    # true_pos = [ 0.  1.  1.  0.]
    # false_pos = [ 0.  0.  0.  1.]
    # false_neg = [ 1.  1.  0.  0.]
    p, r, f, s = precision_recall_fscore_support(y_true, y_pred,
                                                 average=None)
    assert_array_almost_equal(p, [0.0, 1.0, 1.0, 0.0], 2)
    assert_array_almost_equal(r, [0.0, 0.5, 1.0, 0.0], 2)
    assert_array_almost_equal(f, [0.0, 1 / 1.5, 1, 0.0], 2)
    assert_array_almost_equal(s, [1, 2, 1, 0], 2)
    # Per-class F2 is reused below as the reference for each average.
    f2 = fbeta_score(y_true, y_pred, beta=2, average=None)
    support = s
    assert_array_almost_equal(f2, [0, 0.55, 1, 0], 2)
    p, r, f, s = precision_recall_fscore_support(y_true, y_pred,
                                                 average="macro")
    assert_almost_equal(p, 0.5)
    assert_almost_equal(r, 1.5 / 4)
    assert_almost_equal(f, 2.5 / (4 * 1.5))
    # Averaged results carry no support.
    assert_equal(s, None)
    assert_almost_equal(fbeta_score(y_true, y_pred, beta=2,
                                    average="macro"),
                        np.mean(f2))
    p, r, f, s = precision_recall_fscore_support(y_true, y_pred,
                                                 average="micro")
    assert_almost_equal(p, 2 / 3)
    assert_almost_equal(r, 0.5)
    assert_almost_equal(f, 2 / 3 / (2 / 3 + 0.5))
    assert_equal(s, None)
    assert_almost_equal(fbeta_score(y_true, y_pred, beta=2,
                                    average="micro"),
                        (1 + 4) * p * r / (4 * p + r))
    p, r, f, s = precision_recall_fscore_support(y_true, y_pred,
                                                 average="weighted")
    assert_almost_equal(p, 3 / 4)
    assert_almost_equal(r, 0.5)
    assert_almost_equal(f, (2 / 1.5 + 1) / 4)
    assert_equal(s, None)
    assert_almost_equal(fbeta_score(y_true, y_pred, beta=2,
                                    average="weighted"),
                        np.average(f2, weights=support))
    p, r, f, s = precision_recall_fscore_support(y_true, y_pred,
                                                 average="samples")
    # |h(x_i) inter y_i | = [0, 0, 2]
    # |y_i| = [1, 1, 2]
    # |h(x_i)| = [0, 1, 2]
    assert_almost_equal(p, 1 / 3)
    assert_almost_equal(r, 1 / 3)
    assert_almost_equal(f, 1 / 3)
    assert_equal(s, None)
    assert_almost_equal(fbeta_score(y_true, y_pred, beta=2,
                                    average="samples"),
                        0.333, 2)
def test_precision_recall_f1_no_labels():
    """All-zero truth and prediction: scores are 0 and a warning is raised."""
    y_true = np.zeros((20, 3))
    y_pred = np.zeros_like(y_true)
    # tp = [0, 0, 0]
    # fn = [0, 0, 0]
    # fp = [0, 0, 0]
    # support = [0, 0, 0]
    # |y_hat_i inter y_i | = [0, 0, 0]
    # |y_i| = [0, 0, 0]
    # |y_hat_i| = [0, 0, 0]
    for beta in [1]:
        # Per-class results: every score is 0 with an ill-defined warning.
        p, r, f, s = assert_warns(UndefinedMetricWarning,
                                  precision_recall_fscore_support,
                                  y_true, y_pred, average=None, beta=beta)
        assert_array_almost_equal(p, [0, 0, 0], 2)
        assert_array_almost_equal(r, [0, 0, 0], 2)
        assert_array_almost_equal(f, [0, 0, 0], 2)
        assert_array_almost_equal(s, [0, 0, 0], 2)
        fbeta = assert_warns(UndefinedMetricWarning, fbeta_score,
                             y_true, y_pred, beta=beta, average=None)
        assert_array_almost_equal(fbeta, [0, 0, 0], 2)
        # Every averaging mode collapses to scalar 0 with no support.
        for average in ["macro", "micro", "weighted", "samples"]:
            p, r, f, s = assert_warns(UndefinedMetricWarning,
                                      precision_recall_fscore_support,
                                      y_true, y_pred, average=average,
                                      beta=beta)
            assert_almost_equal(p, 0)
            assert_almost_equal(r, 0)
            assert_almost_equal(f, 0)
            assert_equal(s, None)
            fbeta = assert_warns(UndefinedMetricWarning, fbeta_score,
                                 y_true, y_pred,
                                 beta=beta, average=average)
            assert_almost_equal(fbeta, 0)
def test_prf_warnings():
    """PRF emits the exact expected ill-defined-metric warning messages."""
    # average of per-label scores
    f, w = precision_recall_fscore_support, UndefinedMetricWarning
    my_assert = assert_warns_message
    for average in [None, 'weighted', 'macro']:
        msg = ('Precision and F-score are ill-defined and '
               'being set to 0.0 in labels with no predicted samples.')
        my_assert(w, msg, f, [0, 1, 2], [1, 1, 2], average=average)
        msg = ('Recall and F-score are ill-defined and '
               'being set to 0.0 in labels with no true samples.')
        my_assert(w, msg, f, [1, 1, 2], [0, 1, 2], average=average)
    # average of per-sample scores
    msg = ('Precision and F-score are ill-defined and '
           'being set to 0.0 in samples with no predicted labels.')
    my_assert(w, msg, f, np.array([[1, 0], [1, 0]]),
              np.array([[1, 0], [0, 0]]), average='samples')
    msg = ('Recall and F-score are ill-defined and '
           'being set to 0.0 in samples with no true labels.')
    my_assert(w, msg, f, np.array([[1, 0], [0, 0]]),
              np.array([[1, 0], [1, 0]]),
              average='samples')
    # single score: micro-average
    msg = ('Precision and F-score are ill-defined and '
           'being set to 0.0 due to no predicted samples.')
    my_assert(w, msg, f, np.array([[1, 1], [1, 1]]),
              np.array([[0, 0], [0, 0]]), average='micro')
    msg = ('Recall and F-score are ill-defined and '
           'being set to 0.0 due to no true samples.')
    my_assert(w, msg, f, np.array([[0, 0], [0, 0]]),
              np.array([[1, 1], [1, 1]]), average='micro')
    # single positive label
    msg = ('Precision and F-score are ill-defined and '
           'being set to 0.0 due to no predicted samples.')
    my_assert(w, msg, f, [1, 1], [-1, -1], average='macro')
    msg = ('Recall and F-score are ill-defined and '
           'being set to 0.0 due to no true samples.')
    my_assert(w, msg, f, [-1, -1], [1, 1], average='macro')
def test_recall_warnings():
    """Recall warns exactly when there are no true samples."""
    all_ones = np.array([[1, 1], [1, 1]])
    all_zeros = np.array([[0, 0], [0, 0]])
    # Predicting nothing positive is harmless for recall.
    assert_no_warnings(recall_score, all_ones, all_zeros, average='micro')
    # With no true samples, recall is ill-defined and must warn.
    clean_warning_registry()
    with warnings.catch_warnings(record=True) as record:
        warnings.simplefilter('always')
        recall_score(all_zeros, all_ones, average='micro')
        assert_equal(str(record.pop().message),
                     'Recall is ill-defined and '
                     'being set to 0.0 due to no true samples.')
def test_precision_warnings():
    """Precision warns exactly when there are no predicted samples."""
    all_ones = np.array([[1, 1], [1, 1]])
    all_zeros = np.array([[0, 0], [0, 0]])
    # With nothing predicted positive, precision is ill-defined.
    clean_warning_registry()
    with warnings.catch_warnings(record=True) as record:
        warnings.simplefilter('always')
        precision_score(all_ones, all_zeros, average='micro')
        assert_equal(str(record.pop().message),
                     'Precision is ill-defined and '
                     'being set to 0.0 due to no predicted samples.')
    # Having no true samples is harmless for precision.
    assert_no_warnings(precision_score, all_zeros, all_ones,
                       average='micro')
def test_fscore_warnings():
    """F1 and F-beta warn for both no-predicted and no-true samples."""
    clean_warning_registry()
    with warnings.catch_warnings(record=True) as record:
        warnings.simplefilter('always')
        for score in [f1_score, partial(fbeta_score, beta=2)]:
            # No predicted samples at all.
            score(np.array([[1, 1], [1, 1]]),
                  np.array([[0, 0], [0, 0]]),
                  average='micro')
            # record.pop() checks the most recently raised warning.
            assert_equal(str(record.pop().message),
                         'F-score is ill-defined and '
                         'being set to 0.0 due to no predicted samples.')
            # No true samples at all.
            score(np.array([[0, 0], [0, 0]]),
                  np.array([[1, 1], [1, 1]]),
                  average='micro')
            assert_equal(str(record.pop().message),
                         'F-score is ill-defined and '
                         'being set to 0.0 due to no true samples.')
def test_prf_average_compat():
    """Implicit ``average`` on multiclass input warns and means 'weighted'."""
    # Ensure warning if f1_score et al.'s average is implicit for multiclass
    y_true = [1, 2, 3, 3]
    y_pred = [1, 2, 3, 1]
    y_true_bin = [0, 1, 1]
    y_pred_bin = [0, 1, 0]
    for metric in [precision_score, recall_score, f1_score,
                   partial(fbeta_score, beta=2)]:
        # Implicit average on multiclass data must warn and must equal
        # the explicit 'weighted' result.
        score = assert_warns(DeprecationWarning, metric, y_true, y_pred)
        score_weighted = assert_no_warnings(metric, y_true, y_pred,
                                            average='weighted')
        assert_equal(score, score_weighted,
                     'average does not act like "weighted" by default')
        # check binary passes without warning
        assert_no_warnings(metric, y_true_bin, y_pred_bin)
        # but binary with pos_label=None should behave like multiclass
        score = assert_warns(DeprecationWarning, metric,
                             y_true_bin, y_pred_bin, pos_label=None)
        score_weighted = assert_no_warnings(metric, y_true_bin, y_pred_bin,
                                            pos_label=None, average='weighted')
        assert_equal(score, score_weighted,
                     'average does not act like "weighted" by default with '
                     'binary data and pos_label=None')
def test__check_targets():
    """Exhaustive pairwise check of _check_targets type merging.

    _check_targets must correctly merge target types, squeeze its
    output, and fail on mixed or unsupported types and on inputs of
    differing length.
    """
    IND = 'multilabel-indicator'
    MC = 'multiclass'
    BIN = 'binary'
    CNT = 'continuous'
    MMC = 'multiclass-multioutput'
    MCN = 'continuous-multioutput'
    # all of length 3
    EXAMPLES = [
        (IND, np.array([[0, 1, 1], [1, 0, 0], [0, 0, 1]])),
        # must not be considered binary
        (IND, np.array([[0, 1], [1, 0], [1, 1]])),
        (MC, [2, 3, 1]),
        (BIN, [0, 1, 1]),
        (CNT, [0., 1.5, 1.]),
        (MC, np.array([[2], [3], [1]])),
        (BIN, np.array([[0], [1], [1]])),
        (CNT, np.array([[0.], [1.5], [1.]])),
        (MMC, np.array([[0, 2], [1, 3], [2, 3]])),
        (MCN, np.array([[0.5, 2.], [1.1, 3.], [2., 3.]])),
    ]
    # expected type given input types, or None for error
    # (types will be tried in either order)
    EXPECTED = {
        (IND, IND): IND,
        (MC, MC): MC,
        (BIN, BIN): BIN,
        (MC, IND): None,
        (BIN, IND): None,
        (BIN, MC): MC,
        # Disallowed types
        (CNT, CNT): None,
        (MMC, MMC): None,
        (MCN, MCN): None,
        (IND, CNT): None,
        (MC, CNT): None,
        (BIN, CNT): None,
        (MMC, CNT): None,
        (MCN, CNT): None,
        (IND, MMC): None,
        (MC, MMC): None,
        (BIN, MMC): None,
        (MCN, MMC): None,
        (IND, MCN): None,
        (MC, MCN): None,
        (BIN, MCN): None,
    }
    # Every ordered pair of examples, so both argument orders are hit.
    for (type1, y1), (type2, y2) in product(EXAMPLES, repeat=2):
        try:
            expected = EXPECTED[type1, type2]
        except KeyError:
            # The table stores each unordered pair once; look up reversed.
            expected = EXPECTED[type2, type1]
        if expected is None:
            assert_raises(ValueError, _check_targets, y1, y2)
            if type1 != type2:
                assert_raise_message(
                    ValueError,
                    "Can't handle mix of {0} and {1}".format(type1, type2),
                    _check_targets, y1, y2)
            else:
                if type1 not in (BIN, MC, IND):
                    assert_raise_message(ValueError,
                                         "{0} is not supported".format(type1),
                                         _check_targets, y1, y2)
        else:
            merged_type, y1out, y2out = _check_targets(y1, y2)
            assert_equal(merged_type, expected)
            if merged_type.startswith('multilabel'):
                # Multilabel outputs are converted to CSR sparse matrices.
                assert_equal(y1out.format, 'csr')
                assert_equal(y2out.format, 'csr')
            else:
                assert_array_equal(y1out, np.squeeze(y1))
                assert_array_equal(y2out, np.squeeze(y2))
            # Differing lengths must be rejected.
            assert_raises(ValueError, _check_targets, y1[:-1], y2)
    # Make sure seq of seq is not supported
    y1 = [(1, 2,), (0, 2, 3)]
    y2 = [(2,), (0, 2,)]
    msg = ('You appear to be using a legacy multi-label data representation. '
           'Sequence of sequences are no longer supported; use a binary array'
           ' or sparse matrix instead.')
    assert_raise_message(ValueError, msg, _check_targets, y1, y2)
def test_hinge_loss_binary():
    """Binary hinge loss is invariant to the label encoding."""
    # Same decision values, labels encoded first as {-1, 1} then as {0, 2};
    # the loss must come out identical either way.
    margins = np.array([-8.5, 0.5, 1.5, -0.3])
    expected = 1.2 / 4
    for labels in ([-1, 1, 1, -1], [0, 2, 2, 0]):
        assert_equal(hinge_loss(np.array(labels), margins), expected)
def test_hinge_loss_multiclass():
    """Multiclass hinge loss matches a hand-computed reference."""
    pred_decision = np.array([
        [+0.36, -0.17, -0.58, -0.99],
        [-0.54, -0.37, -0.48, -0.58],
        [-1.45, -0.58, -0.38, -0.17],
        [-0.54, -0.38, -0.48, -0.58],
        [-2.36, -0.79, -0.27, +0.24],
        [-1.45, -0.58, -0.38, -0.17]
    ])
    y_true = np.array([0, 1, 2, 1, 3, 2])
    # Per-sample reference loss: 1 - (true class score) + (strongest rival
    # class score), clipped at zero below.  Rival indices read off the
    # decision matrix above.
    rivals = [1, 2, 3, 2, 2, 3]
    losses = np.array([1 - pred_decision[i][t] + pred_decision[i][r]
                       for i, (t, r) in enumerate(zip(y_true, rivals))])
    losses[losses <= 0] = 0
    assert_equal(hinge_loss(y_true, pred_decision), np.mean(losses))
def test_hinge_loss_multiclass_missing_labels_with_labels_none():
    """Missing classes in y_true without an explicit labels arg must raise."""
    # Only 3 of the 4 decision columns appear in y_true, so the loss is
    # ambiguous unless the caller passes the full label set.
    y_true = np.array([0, 1, 2, 2])
    pred_decision = np.array([
        [+1.27, 0.034, -0.68, -1.40],
        [-1.45, -0.58, -0.38, -0.17],
        [-2.36, -0.79, -0.27, +0.24],
        [-2.36, -0.79, -0.27, +0.24]
    ])
    expected_message = ("Please include all labels in y_true "
                        "or pass labels as third argument")
    assert_raise_message(ValueError, expected_message,
                         hinge_loss, y_true, pred_decision)
def test_hinge_loss_multiclass_with_missing_labels():
    """Passing the full label set lets hinge_loss handle absent classes."""
    pred_decision = np.array([
        [+0.36, -0.17, -0.58, -0.99],
        [-0.55, -0.38, -0.48, -0.58],
        [-1.45, -0.58, -0.38, -0.17],
        [-0.55, -0.38, -0.48, -0.58],
        [-1.45, -0.58, -0.38, -0.17]
    ])
    y_true = np.array([0, 1, 2, 1, 2])
    # Class 3 never occurs in y_true but is declared via `labels`.
    labels = np.array([0, 1, 2, 3])
    # Strongest rival class per sample, read off the decision values.
    rivals = [1, 2, 3, 2, 3]
    losses = np.array([1 - pred_decision[i][t] + pred_decision[i][r]
                       for i, (t, r) in enumerate(zip(y_true, rivals))])
    losses[losses <= 0] = 0
    assert_equal(hinge_loss(y_true, pred_decision, labels=labels),
                 np.mean(losses))
def test_hinge_loss_multiclass_invariance_lists():
    # Currently, invariance of string and integer labels cannot be tested
    # in common invariance tests because invariance tests for multiclass
    # decision functions is not implemented yet.
    y_true = ['blue', 'green', 'red',
              'green', 'white', 'red']
    pred_decision = [
        [+0.36, -0.17, -0.58, -0.99],
        [-0.55, -0.38, -0.48, -0.58],
        [-1.45, -0.58, -0.38, -0.17],
        [-0.55, -0.38, -0.48, -0.58],
        [-2.36, -0.79, -0.27, +0.24],
        [-1.45, -0.58, -0.38, -0.17]]
    # Column index of the true class (labels sort alphabetically) and of
    # its strongest rival, for each of the six samples.
    true_idx = [0, 1, 2, 1, 3, 2]
    rival_idx = [1, 2, 3, 2, 2, 3]
    losses = np.array([1 - pred_decision[i][t] + pred_decision[i][r]
                       for i, (t, r) in enumerate(zip(true_idx, rival_idx))])
    losses[losses <= 0] = 0
    assert_equal(hinge_loss(y_true, pred_decision), np.mean(losses))
def test_log_loss():
    """Exercise log_loss on binary, multiclass and degenerate inputs."""
    # Binary case with symbolic labels ("no" < "yes").
    labels = ["no", "no", "no", "yes", "yes", "yes"]
    probs = np.array([[0.5, 0.5], [0.1, 0.9], [0.01, 0.99],
                      [0.9, 0.1], [0.75, 0.25], [0.001, 0.999]])
    assert_almost_equal(log_loss(labels, probs), 1.8817971)
    # Multiclass case; adapted from http://bit.ly/RJJHWA
    labels = [1, 0, 2]
    probs = [[0.2, 0.7, 0.1], [0.6, 0.2, 0.2], [0.6, 0.1, 0.3]]
    assert_almost_equal(log_loss(labels, probs, normalize=True), 0.6904911)
    # Check that we got all the shapes and axes right by doubling the
    # length of both inputs: the unnormalized loss doubles accordingly.
    labels = labels * 2
    probs = probs * 2
    assert_almost_equal(log_loss(labels, probs, normalize=False),
                        0.6904911 * 6, decimal=6)
    # Check eps and handling of absolute zero and one probabilities.
    probs = np.asarray(probs) > .5
    assert_almost_equal(log_loss(labels, probs, normalize=True, eps=.1),
                        log_loss(labels, np.clip(probs, .1, .9)))
    # Raise error if number of classes are not equal.
    labels = [1, 0, 2]
    probs = [[0.2, 0.7], [0.6, 0.5], [0.4, 0.1]]
    assert_raises(ValueError, log_loss, labels, probs)
    # Case when y_true is a string array object.
    labels = ["ham", "spam", "spam", "ham"]
    probs = [[0.2, 0.7], [0.6, 0.5], [0.4, 0.1], [0.7, 0.2]]
    assert_almost_equal(log_loss(labels, probs), 1.0383217, decimal=6)
def test_log_loss_pandas_input():
    # case when input is a pandas series and dataframe gh-5715
    y_tr = np.array(["ham", "spam", "spam", "ham"])
    y_pr = np.array([[0.2, 0.7], [0.6, 0.5], [0.4, 0.1], [0.7, 0.2]])
    input_types = [(MockDataFrame, MockDataFrame)]
    try:
        from pandas import Series, DataFrame
    except ImportError:
        pass
    else:
        input_types.append((Series, DataFrame))
    for true_type, pred_type in input_types:
        # y_pred dataframe, y_true series
        loss = log_loss(true_type(y_tr), pred_type(y_pr))
        assert_almost_equal(loss, 1.0383217, decimal=6)
def test_brier_score_loss():
    """Check brier_score_loss against the mean squared error definition."""
    y_true = np.array([0, 1, 1, 0, 1, 1])
    y_pred = np.array([0.1, 0.8, 0.9, 0.3, 1., 0.95])
    true_score = linalg.norm(y_true - y_pred) ** 2 / len(y_true)
    # Perfect predictions give zero loss.
    assert_almost_equal(brier_score_loss(y_true, y_true), 0.0)
    assert_almost_equal(brier_score_loss(y_true, y_pred), true_score)
    # The label encoding must not matter: shifted / rescaled encodings of
    # the same two classes give the same loss.
    assert_almost_equal(brier_score_loss(1. + y_true, y_pred),
                        true_score)
    assert_almost_equal(brier_score_loss(2 * y_true - 1, y_pred),
                        true_score)
    # Invalid inputs: length mismatch and probabilities outside [0, 1].
    assert_raises(ValueError, brier_score_loss, y_true, y_pred[1:])
    assert_raises(ValueError, brier_score_loss, y_true, y_pred + 1.)
    assert_raises(ValueError, brier_score_loss, y_true, y_pred - 1.)
| bsd-3-clause |
fyffyt/scikit-learn | examples/linear_model/plot_sgd_weighted_samples.py | 344 | 1458 | """
=====================
SGD: Weighted samples
=====================
Plot decision function of a weighted dataset, where the size of points
is proportional to its weight.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn import linear_model
# we create 20 points: 10 drawn around (1, 1) labelled +1, 10 around the
# origin labelled -1
np.random.seed(0)
X = np.r_[np.random.randn(10, 2) + [1, 1], np.random.randn(10, 2)]
y = [1] * 10 + [-1] * 10
sample_weight = 100 * np.abs(np.random.randn(20))
# and assign a bigger weight to the first 10 samples (the positive class)
sample_weight[:10] *= 10
# plot the weighted data points; marker size encodes each sample's weight
xx, yy = np.meshgrid(np.linspace(-4, 5, 500), np.linspace(-4, 5, 500))
plt.figure()
plt.scatter(X[:, 0], X[:, 1], c=y, s=sample_weight, alpha=0.9,
            cmap=plt.cm.bone)
## fit the unweighted model
clf = linear_model.SGDClassifier(alpha=0.01, n_iter=100)
clf.fit(X, y)
# evaluate the decision function on the grid and draw its zero level set
# (the decision boundary) as a solid line
Z = clf.decision_function(np.c_[xx.ravel(), yy.ravel()])
Z = Z.reshape(xx.shape)
no_weights = plt.contour(xx, yy, Z, levels=[0], linestyles=['solid'])
## fit the weighted model
clf = linear_model.SGDClassifier(alpha=0.01, n_iter=100)
clf.fit(X, y, sample_weight=sample_weight)
# same boundary for the weighted fit, drawn dashed for comparison
Z = clf.decision_function(np.c_[xx.ravel(), yy.ravel()])
Z = Z.reshape(xx.shape)
samples_weights = plt.contour(xx, yy, Z, levels=[0], linestyles=['dashed'])
plt.legend([no_weights.collections[0], samples_weights.collections[0]],
           ["no weights", "with weights"], loc="lower left")
plt.xticks(())
plt.yticks(())
plt.show()
| bsd-3-clause |
ucsd-progsys/ml2 | learning/randomforest2.py | 2 | 3950 | import math
import os.path
import random
random.seed()
from sklearn import tree
from sklearn.ensemble import RandomForestClassifier
import numpy as np
import pandas as pd
import input_old
# Script: train a random forest on the sp14 course data and evaluate
# top-1/2/3 precision and recall on the fa15 course data.
csvs2 = [f for f in os.listdir('ml2/data/fa15/op+context-count+type+size') if f.endswith('.csv')]
csvs = [f for f in os.listdir('ml2/data/sp14/op+context-count+type+size') if f.endswith('.csv')]
random.shuffle(csvs)
dfs = []
test = []
train = []
# Load every training CSV, skipping files that are empty or unloadable.
for csv in csvs:
    df, fs, ls = input_old.load_csv(os.path.join('ml2/data/sp14/op+context-count+type+size', csv), filter_no_labels=True, only_slice=False)
    if df is None:
        continue
    if df.shape[0] == 0:
        continue
    train.append(df)
# Same loading/filtering for the held-out test CSVs.
for csv in csvs2:
    df2, fs2, ls2 = input_old.load_csv(os.path.join('ml2/data/fa15/op+context-count+type+size', csv), filter_no_labels=True, only_slice=False)
    if df2 is None:
        continue
    if df2.shape[0] == 0:
        continue
    test.append(df2)
train = pd.concat(train)
test = pd.concat(test)
# print (len(test))
# print (len(train))
# print (df.shape)
# print df
# Oversample (with replacement) each label group up to the size of the
# largest one so the forest is not dominated by the majority class.
classes = list(train.groupby(ls2))
#print(ls)
max_samples = max(len(c) for _, c in classes)
train = pd.concat(c.sample(max_samples, replace=True) for _, c in classes)
# print (len(train))
#print df.shape
#print type(df)
#list_keys = [ k for k in df ]
#print list_keys
# print samps
#print sum(df['L-DidChange'].values)
# print df['L-DidChange'].index
# Feature columns start at 'F-InSlice'; the binary label is 'L-DidChange'.
train_samps = train.loc[:,'F-InSlice':]
train_labels = train.loc[:,'L-DidChange']
# print test
test_samps = test.loc[:,'F-InSlice':]
test_labels = test.loc[:,'L-DidChange']
test_span = test.loc[:,'SourceSpan']
# print test.iloc[1]
# print test.values[1]
# dflist = []
# keylist = []
# for key, value in df.iteritems():
# temp = value
# tempk = key
# dflist.append(temp)
# keylist.append(tempk)
# Y = dflist[0]
# X = dflist[2:]
#clf = tree.DecisionTreeClassifier()
clf = RandomForestClassifier(n_estimators=30)
clf = clf.fit(train_samps.values, train_labels.values)
# print test_samps
# print test_samps.values
anses = clf.predict(test_samps.values)
# print anses
# print test_labels.values
# print sum(anses)/len(anses)
# print sum(test_labels.values)/len(test_labels.values)
#testanses =test_labels.values
resacc = anses + 2*test_labels.values
acc = 1-((sum(abs(anses - test_labels.values)))/3600)
lol = test_labels.add((-1)*anses)
#print lol
#print map(lambda x : clf.predict_proba(x), test_samps.values)
# Predicted probability of the positive ("did change") class per sample.
prob_score = clf.predict_proba(test_samps.values)
prob_error = [item[1] for item in prob_score]
# print prob_error
ll = zip(prob_error, anses, test_labels.values, test_span)
score = pd.DataFrame(data=ll, index=test_labels.index, columns=['Error Probability','predictions', 'actual' ,'SourceSpan'])
# print score
# print 'recall is ' + str(sum(anses * test_labels.values)/sum(test_labels.values))
# print 'precision is ' + str(sum(anses * test_labels.values)/sum(anses))
yay1 = 0
yay2 = 0
yay3 = 0
tots = 0
tp = 0
# For each distinct index value (presumably one source program per index —
# TODO confirm), rank its rows by predicted error probability, dump the
# predicted-change spans to a file, and record whether any of the top 1/2/3
# ranked predictions is an actual change (top-k precision bookkeeping).
for labelind in list(set(test_labels.index)):
    #print labelind
    temp = score.loc[labelind]
    temp = temp.values
    # print labelind
    if len(temp) < 3:
        continue
    tots = tots+1
    topn = temp[np.argsort(temp[:,0])]
    filenm = str(labelind).split('.')
    f = open('randomforest_results/' + filenm[0] +'.out', "w+")
    for preds in topn:
        if preds[1] == 1:
            f.write(str(preds[3]) + '\n')
    f.close()
    # print topn
    # print 'lol'
    # print topn[-3:]
    a1/a2/a3 flag whether the 1st/2nd/3rd highest-probability prediction hit
    a3 = 0
    a2 = 0
    a1 = 0
    if (topn[-3][1] == 1 and topn[-3][2] == 1) :
        a3 = 1
        tp = tp+1
    if (topn[-2][1] == 1 and topn[-2][2] == 1) :
        a3 = 1
        a2 = 1
        tp = tp+1
    if (topn[-1][1] == 1 and topn[-1][2] == 1) :
        a3 = 1
        a2 = 1
        a1 = 1
        tp = tp+1
    yay1 = yay1+a1
    yay2 = yay2+a2
    yay3 = yay3+a3
print "precision for top 3"
print 'top 1'
print float(yay1)/tots
print 'top 2'
print float(yay2)/tots
print 'top 3'
print float(yay3)/tots
# print tots
# print tp
# print sum(test_labels.values)
print "recall for top 3"
print tp/sum(test_labels.values)
| bsd-3-clause |
HeraclesHX/scikit-learn | sklearn/lda.py | 72 | 17751 | """
Linear Discriminant Analysis (LDA)
"""
# Authors: Clemens Brunner
# Martin Billinger
# Matthieu Perrot
# Mathieu Blondel
# License: BSD 3-Clause
from __future__ import print_function
import warnings
import numpy as np
from scipy import linalg
from .externals.six import string_types
from .base import BaseEstimator, TransformerMixin
from .linear_model.base import LinearClassifierMixin
from .covariance import ledoit_wolf, empirical_covariance, shrunk_covariance
from .utils.multiclass import unique_labels
from .utils import check_array, check_X_y
from .utils.validation import check_is_fitted
from .utils.fixes import bincount
from .preprocessing import StandardScaler
__all__ = ['LDA']
def _cov(X, shrinkage=None):
    """Estimate covariance matrix (using optional shrinkage).

    Parameters
    ----------
    X : array-like, shape (n_samples, n_features)
        Input data.

    shrinkage : string or float, optional
        Shrinkage parameter, possible values:
          - None or 'empirical': no shrinkage (default).
          - 'auto': automatic shrinkage using the Ledoit-Wolf lemma.
          - float between 0 and 1: fixed shrinkage parameter.

    Returns
    -------
    s : array, shape (n_features, n_features)
        Estimated covariance matrix.
    """
    shrinkage = "empirical" if shrinkage is None else shrinkage
    if isinstance(shrinkage, string_types):
        if shrinkage == 'auto':
            sc = StandardScaler()  # standardize features
            X = sc.fit_transform(X)
            s = ledoit_wolf(X)[0]
            # rescale back to the original (unstandardized) feature scale
            s = sc.std_[:, np.newaxis] * s * sc.std_[np.newaxis, :]
        elif shrinkage == 'empirical':
            s = empirical_covariance(X)
        else:
            raise ValueError('unknown shrinkage parameter')
    elif isinstance(shrinkage, (int, float)):
        if shrinkage < 0 or shrinkage > 1:
            raise ValueError('shrinkage parameter must be between 0 and 1')
        s = shrunk_covariance(empirical_covariance(X), shrinkage)
    else:
        # Fixed message: the accepted numeric type is float (int is also
        # tolerated above); the old text wrongly said "string or int type".
        raise TypeError('shrinkage must be of string or float type')
    return s
def _class_means(X, y):
"""Compute class means.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Input data.
y : array-like, shape (n_samples,) or (n_samples, n_targets)
Target values.
Returns
-------
means : array-like, shape (n_features,)
Class means.
"""
means = []
classes = np.unique(y)
for group in classes:
Xg = X[y == group, :]
means.append(Xg.mean(0))
return np.asarray(means)
def _class_cov(X, y, priors=None, shrinkage=None):
    """Compute class covariance matrix.

    Parameters
    ----------
    X : array-like, shape (n_samples, n_features)
        Input data.

    y : array-like, shape (n_samples,) or (n_samples, n_targets)
        Target values.

    priors : array-like, shape (n_classes,)
        Class priors.

    shrinkage : string or float, optional
        Shrinkage parameter, possible values:
          - None: no shrinkage (default).
          - 'auto': automatic shrinkage using the Ledoit-Wolf lemma.
          - float between 0 and 1: fixed shrinkage parameter.

    Returns
    -------
    cov : array-like, shape (n_features, n_features)
        Class covariance matrix.
    """
    # Prior-weighted average of the per-class covariance estimates,
    # classes taken in np.unique's sorted label order.
    per_class = [np.atleast_2d(_cov(X[y == label, :], shrinkage))
                 for label in np.unique(y)]
    return np.average(per_class, axis=0, weights=priors)
class LDA(BaseEstimator, LinearClassifierMixin, TransformerMixin):
    """Linear Discriminant Analysis (LDA).

    A classifier with a linear decision boundary, generated by fitting class
    conditional densities to the data and using Bayes' rule.

    The model fits a Gaussian density to each class, assuming that all classes
    share the same covariance matrix.

    The fitted model can also be used to reduce the dimensionality of the
    input by projecting it to the most discriminative directions.

    Read more in the :ref:`User Guide <lda_qda>`.

    Parameters
    ----------
    solver : string, optional
        Solver to use, possible values:
          - 'svd': Singular value decomposition (default). Does not compute
                the covariance matrix, therefore this solver is recommended
                for data with a large number of features.
          - 'lsqr': Least squares solution, can be combined with shrinkage.
          - 'eigen': Eigenvalue decomposition, can be combined with shrinkage.

    shrinkage : string or float, optional
        Shrinkage parameter, possible values:
          - None: no shrinkage (default).
          - 'auto': automatic shrinkage using the Ledoit-Wolf lemma.
          - float between 0 and 1: fixed shrinkage parameter.
        Note that shrinkage works only with 'lsqr' and 'eigen' solvers.

    priors : array, optional, shape (n_classes,)
        Class priors.

    n_components : int, optional
        Number of components (< n_classes - 1) for dimensionality reduction.

    store_covariance : bool, optional
        Additionally compute class covariance matrix (default False).

    tol : float, optional
        Threshold used for rank estimation in SVD solver.

    Attributes
    ----------
    coef_ : array, shape (n_features,) or (n_classes, n_features)
        Weight vector(s).

    intercept_ : array, shape (n_features,)
        Intercept term.

    covariance_ : array-like, shape (n_features, n_features)
        Covariance matrix (shared by all classes).

    means_ : array-like, shape (n_classes, n_features)
        Class means.

    priors_ : array-like, shape (n_classes,)
        Class priors (sum to 1).

    scalings_ : array-like, shape (rank, n_classes - 1)
        Scaling of the features in the space spanned by the class centroids.

    xbar_ : array-like, shape (n_features,)
        Overall mean.

    classes_ : array-like, shape (n_classes,)
        Unique class labels.

    See also
    --------
    sklearn.qda.QDA: Quadratic discriminant analysis

    Notes
    -----
    The default solver is 'svd'. It can perform both classification and
    transform, and it does not rely on the calculation of the covariance
    matrix. This can be an advantage in situations where the number of
    features is large. However, the 'svd' solver cannot be used with
    shrinkage.

    The 'lsqr' solver is an efficient algorithm that only works for
    classification. It supports shrinkage.

    The 'eigen' solver is based on the optimization of the between class
    scatter to within class scatter ratio. It can be used for both
    classification and transform, and it supports shrinkage. However, the
    'eigen' solver needs to compute the covariance matrix, so it might not be
    suitable for situations with a high number of features.

    Examples
    --------
    >>> import numpy as np
    >>> from sklearn.lda import LDA
    >>> X = np.array([[-1, -1], [-2, -1], [-3, -2], [1, 1], [2, 1], [3, 2]])
    >>> y = np.array([1, 1, 1, 2, 2, 2])
    >>> clf = LDA()
    >>> clf.fit(X, y)
    LDA(n_components=None, priors=None, shrinkage=None, solver='svd',
      store_covariance=False, tol=0.0001)
    >>> print(clf.predict([[-0.8, -1]]))
    [1]
    """

    def __init__(self, solver='svd', shrinkage=None, priors=None,
                 n_components=None, store_covariance=False, tol=1e-4):
        self.solver = solver
        self.shrinkage = shrinkage
        self.priors = priors
        self.n_components = n_components
        self.store_covariance = store_covariance  # used only in svd solver
        self.tol = tol  # used only in svd solver

    def _solve_lsqr(self, X, y, shrinkage):
        """Least squares solver.

        The least squares solver computes a straightforward solution of the
        optimal decision rule based directly on the discriminant functions. It
        can only be used for classification (with optional shrinkage), because
        estimation of eigenvectors is not performed. Therefore, dimensionality
        reduction with the transform is not supported.

        Parameters
        ----------
        X : array-like, shape (n_samples, n_features)
            Training data.

        y : array-like, shape (n_samples,) or (n_samples, n_classes)
            Target values.

        shrinkage : string or float, optional
            Shrinkage parameter, possible values:
              - None: no shrinkage (default).
              - 'auto': automatic shrinkage using the Ledoit-Wolf lemma.
              - float between 0 and 1: fixed shrinkage parameter.

        Notes
        -----
        This solver is based on [1]_, section 2.6.2, pp. 39-41.

        References
        ----------
        .. [1] R. O. Duda, P. E. Hart, D. G. Stork. Pattern Classification
           (Second Edition). John Wiley & Sons, Inc., New York, 2001. ISBN
           0-471-05669-3.
        """
        self.means_ = _class_means(X, y)
        self.covariance_ = _class_cov(X, y, self.priors_, shrinkage)
        self.coef_ = linalg.lstsq(self.covariance_, self.means_.T)[0].T
        # Gaussian discriminant intercept: -0.5 mu_k' Sigma^-1 mu_k + log(p_k)
        self.intercept_ = (-0.5 * np.diag(np.dot(self.means_, self.coef_.T))
                           + np.log(self.priors_))

    def _solve_eigen(self, X, y, shrinkage):
        """Eigenvalue solver.

        The eigenvalue solver computes the optimal solution of the Rayleigh
        coefficient (basically the ratio of between class scatter to within
        class scatter). This solver supports both classification and
        dimensionality reduction (with optional shrinkage).

        Parameters
        ----------
        X : array-like, shape (n_samples, n_features)
            Training data.

        y : array-like, shape (n_samples,) or (n_samples, n_targets)
            Target values.

        shrinkage : string or float, optional
            Shrinkage parameter, possible values:
              - None: no shrinkage (default).
              - 'auto': automatic shrinkage using the Ledoit-Wolf lemma.
              - float between 0 and 1: fixed shrinkage constant.

        Notes
        -----
        This solver is based on [1]_, section 3.8.3, pp. 121-124.

        References
        ----------
        .. [1] R. O. Duda, P. E. Hart, D. G. Stork. Pattern Classification
           (Second Edition). John Wiley & Sons, Inc., New York, 2001. ISBN
           0-471-05669-3.
        """
        self.means_ = _class_means(X, y)
        self.covariance_ = _class_cov(X, y, self.priors_, shrinkage)

        Sw = self.covariance_  # within scatter
        St = _cov(X, shrinkage)  # total scatter
        Sb = St - Sw  # between scatter

        # Generalized eigenproblem Sb v = lambda Sw v; keep eigenvectors
        # sorted by decreasing discriminative power.
        evals, evecs = linalg.eigh(Sb, Sw)
        evecs = evecs[:, np.argsort(evals)[::-1]]  # sort eigenvectors
        # evecs /= np.linalg.norm(evecs, axis=0)  # doesn't work with numpy 1.6
        evecs /= np.apply_along_axis(np.linalg.norm, 0, evecs)

        self.scalings_ = evecs
        self.coef_ = np.dot(self.means_, evecs).dot(evecs.T)
        self.intercept_ = (-0.5 * np.diag(np.dot(self.means_, self.coef_.T))
                           + np.log(self.priors_))

    def _solve_svd(self, X, y, store_covariance=False, tol=1.0e-4):
        """SVD solver.

        Parameters
        ----------
        X : array-like, shape (n_samples, n_features)
            Training data.

        y : array-like, shape (n_samples,) or (n_samples, n_targets)
            Target values.

        store_covariance : bool, optional
            Additionally compute class covariance matrix (default False).

        tol : float, optional
            Threshold used for rank estimation.
        """
        n_samples, n_features = X.shape
        n_classes = len(self.classes_)

        self.means_ = _class_means(X, y)
        if store_covariance:
            self.covariance_ = _class_cov(X, y, self.priors_)

        Xc = []
        for idx, group in enumerate(self.classes_):
            Xg = X[y == group, :]
            Xc.append(Xg - self.means_[idx])

        self.xbar_ = np.dot(self.priors_, self.means_)

        Xc = np.concatenate(Xc, axis=0)

        # 1) within (univariate) scaling by with classes std-dev
        std = Xc.std(axis=0)
        # avoid division by zero in normalization
        std[std == 0] = 1.
        fac = 1. / (n_samples - n_classes)

        # 2) Within variance scaling
        X = np.sqrt(fac) * (Xc / std)
        # SVD of centered (within)scaled data
        U, S, V = linalg.svd(X, full_matrices=False)

        rank = np.sum(S > tol)
        if rank < n_features:
            warnings.warn("Variables are collinear.")
        # Scaling of within covariance is: V' 1/S
        scalings = (V[:rank] / std).T / S[:rank]

        # 3) Between variance scaling
        # Scale weighted centers
        X = np.dot(((np.sqrt((n_samples * self.priors_) * fac)) *
                    (self.means_ - self.xbar_).T).T, scalings)
        # Centers are living in a space with n_classes-1 dim (maximum)
        # Use SVD to find projection in the space spanned by the
        # (n_classes) centers
        _, S, V = linalg.svd(X, full_matrices=0)

        rank = np.sum(S > tol * S[0])
        self.scalings_ = np.dot(scalings, V.T[:, :rank])
        coef = np.dot(self.means_ - self.xbar_, self.scalings_)
        self.intercept_ = (-0.5 * np.sum(coef ** 2, axis=1)
                           + np.log(self.priors_))
        self.coef_ = np.dot(coef, self.scalings_.T)
        self.intercept_ -= np.dot(self.xbar_, self.coef_.T)

    def fit(self, X, y, store_covariance=False, tol=1.0e-4):
        """Fit LDA model according to the given training data and parameters.

        Parameters
        ----------
        X : array-like, shape (n_samples, n_features)
            Training data.

        y : array, shape (n_samples,)
            Target values.
        """
        if store_covariance:
            warnings.warn("'store_covariance' was moved to the __init__()"
                          "method in version 0.16 and will be removed from"
                          "fit() in version 0.18.", DeprecationWarning)
        else:
            store_covariance = self.store_covariance
        if tol != 1.0e-4:
            warnings.warn("'tol' was moved to __init__() method in version"
                          " 0.16 and will be removed from fit() in 0.18",
                          DeprecationWarning)
            self.tol = tol
        X, y = check_X_y(X, y)
        self.classes_ = unique_labels(y)

        if self.priors is None:  # estimate priors from sample
            _, y_t = np.unique(y, return_inverse=True)  # non-negative ints
            self.priors_ = bincount(y_t) / float(len(y))
        else:
            self.priors_ = self.priors

        if self.solver == 'svd':
            if self.shrinkage is not None:
                raise NotImplementedError('shrinkage not supported')
            # Bug fix: use self.tol so a tolerance configured in __init__ is
            # honored.  (self.tol was already overridden above when the
            # deprecated fit(tol=...) argument was supplied, so that path is
            # unchanged; previously a constructor tol was silently ignored.)
            self._solve_svd(X, y, store_covariance=store_covariance,
                            tol=self.tol)
        elif self.solver == 'lsqr':
            self._solve_lsqr(X, y, shrinkage=self.shrinkage)
        elif self.solver == 'eigen':
            self._solve_eigen(X, y, shrinkage=self.shrinkage)
        else:
            raise ValueError("unknown solver {} (valid solvers are 'svd', "
                             "'lsqr', and 'eigen').".format(self.solver))
        if self.classes_.size == 2:  # treat binary case as a special case
            self.coef_ = np.array(self.coef_[1, :] - self.coef_[0, :], ndmin=2)
            self.intercept_ = np.array(self.intercept_[1] - self.intercept_[0],
                                       ndmin=1)
        return self

    def transform(self, X):
        """Project data to maximize class separation.

        Parameters
        ----------
        X : array-like, shape (n_samples, n_features)
            Input data.

        Returns
        -------
        X_new : array, shape (n_samples, n_components)
            Transformed data.
        """
        if self.solver == 'lsqr':
            raise NotImplementedError("transform not implemented for 'lsqr' "
                                      "solver (use 'svd' or 'eigen').")
        check_is_fitted(self, ['xbar_', 'scalings_'], all_or_any=any)

        X = check_array(X)
        if self.solver == 'svd':
            X_new = np.dot(X - self.xbar_, self.scalings_)
        elif self.solver == 'eigen':
            X_new = np.dot(X, self.scalings_)
        n_components = X.shape[1] if self.n_components is None \
            else self.n_components
        return X_new[:, :n_components]

    def predict_proba(self, X):
        """Estimate probability.

        Parameters
        ----------
        X : array-like, shape (n_samples, n_features)
            Input data.

        Returns
        -------
        C : array, shape (n_samples, n_classes)
            Estimated probabilities.
        """
        prob = self.decision_function(X)
        # In-place logistic transform: prob = 1 / (1 + exp(-decision)).
        prob *= -1
        np.exp(prob, prob)
        prob += 1
        np.reciprocal(prob, prob)
        if len(self.classes_) == 2:  # binary case
            return np.column_stack([1 - prob, prob])
        else:
            # OvR normalization, like LibLinear's predict_probability
            prob /= prob.sum(axis=1).reshape((prob.shape[0], -1))
            return prob

    def predict_log_proba(self, X):
        """Estimate log probability.

        Parameters
        ----------
        X : array-like, shape (n_samples, n_features)
            Input data.

        Returns
        -------
        C : array, shape (n_samples, n_classes)
            Estimated log probabilities.
        """
        return np.log(self.predict_proba(X))
| bsd-3-clause |
sebastien-forestier/NIPS2016 | ros/nips2016/src/nips2016/learning/learning.py | 1 | 4188 |
import os
import pickle
import matplotlib.pyplot as plt
import numpy as np
import time
import datetime
from core.supervisor import Supervisor
class Learning(object):
    """Facade around the Supervisor learning agent.

    Stores the exploration hyper-parameters, creates the agent lazily in
    ``start()`` (or one of the ``restart_from_*`` helpers), and forwards
    production (action selection) and perception (outcome integration)
    calls to it, plus save/restore and plotting utilities.
    """
    def __init__(self, config, n_motor_babbling=0, explo_noise=0.1, choice_eps=0.2, enable_hand=True, normalize_interests=True):
        # config: configuration object handed to the Supervisor.
        # n_motor_babbling: number of initial random motor trials.
        # explo_noise: amplitude of exploration noise.
        # choice_eps: epsilon for random choice (of goal space/module).
        # enable_hand / normalize_interests: Supervisor behavior switches.
        self.config = config
        self.n_motor_babbling = n_motor_babbling
        self.explo_noise = explo_noise
        self.choice_eps = choice_eps
        self.enable_hand = enable_hand
        self.normalize_interests = normalize_interests
        # The agent is only created by start() / restart_from_*().
        self.agent = None
    def produce(self, context, space=None, goal=None):
        """Return the next motor command chosen by the agent.

        With no ``space``/``goal`` the agent acts autonomously; a named
        ``goal`` or a forced sensory ``space`` restricts the choice.
        """
        # context is the rotation of the ergo and the ball: "context = environment.get_current_context()"
        if space is None:
            if goal is None:
                # Autonomous step
                return self.agent.produce(context)
            else:
                # Goal-directed step: goal must be one of the known targets.
                assert goal in ["hand_up", "hand_forward", "hand_right", "hand_left",
                                "joystick_1_forward", "joystick_1_right", "joystick_1_left",
                                "joystick_2_forward", "joystick_2_right", "joystick_2_left",
                                "ergo_right", "ergo_left",
                                "ball_right", "ball_left",
                                "light", "sound"]
                return self.agent.produce_goal(context, goal=goal)
        else:
            # Force space
            assert space in ["s_hand", "s_joystick_1", "s_joystick_2", 's_ergo', "s_ball", "s_light", "s_sound"]
            return self.agent.produce(context, space=space)
    def perceive(self, s, m_demo=None, j_demo=False):
        """Feed the 132-dim sensory outcome ``s`` back to the agent.

        ``m_demo`` is an optional 32-dim demonstrated motor trajectory
        (as weights); ``j_demo`` marks ``s`` as a joystick demonstration.
        """
        if m_demo is not None:
            assert len(m_demo) == 32, len(m_demo)
            assert len(s) == 132, len(s)
            # Demonstration of a torso arm trajectory converted to weights with "m_demo = environment.torsodemo2m(m_traj)"
            return self.agent.perceive(s, m_demo=m_demo)
        elif j_demo:
            assert len(s) == 132, len(s)
            # Drops s[2:32] before forwarding — presumably the arm-related
            # part is irrelevant for a joystick demo; TODO confirm layout.
            return self.agent.perceive(list(s[:2]) + list(s[32:]), j_demo=True)
        else:
            # Perception of environment when m was produced
            assert len(s) == 132, len(s)
            return self.agent.perceive(s)
    # Thin delegation helpers to the underlying agent.
    def get_iterations(self): return self.agent.t
    def get_normalized_interests(self): return self.agent.get_normalized_interests()
    def get_normalized_interests_evolution(self): return self.agent.get_normalized_interests_evolution()
    def get_last_focus(self): return self.agent.get_last_focus()
    def get_space_names(self): return self.agent.get_space_names()
    def motor_babbling(self): return self.agent.motor_babbling()
    def get_data_from_file(self, file_path):
        """Unpickle and return a previously saved agent state."""
        with open(file_path, 'r') as f:
            data = pickle.load(f)
        return data
    def save(self, file_path):
        """Pickle the agent state to ``file_path`` so a run can be resumed."""
        data = self.agent.save()
        with open(file_path, 'w') as f:
            pickle.dump(data, f)
    def start(self):
        """Instantiate the Supervisor with the stored hyper-parameters."""
        self.agent = Supervisor(self.config,
                                n_motor_babbling=self.n_motor_babbling,
                                explo_noise=self.explo_noise,
                                choice_eps=self.choice_eps,
                                enable_hand=self.enable_hand,
                                normalize_interests=self.normalize_interests)
    def restart_from_end_of_file(self, file_path):
        """Resume a fresh agent from the last iteration saved in the file."""
        data = self.get_data_from_file(file_path)
        self.start()
        self.agent.forward(data, len(data["chosen_modules"]))
    def restart_from_file(self, file_path, iteration):
        """Resume a fresh agent from a specific saved iteration."""
        data = self.get_data_from_file(file_path)
        self.start()
        self.agent.forward(data, iteration)
    def plot(self):
        """Plot the evolution of normalized learning progress per space."""
        fig, ax = plt.subplots()
        ax.plot(self.get_normalized_interests_evolution(), lw=2)
        ax.legend(["Hand", "Joystick_1", "Joystick_2", "Ergo", "Ball", "Light", "Sound"], ncol=3)
        ax.set_xlabel('Time steps', fontsize=20)
        ax.set_ylabel('Learning progress', fontsize=20)
        plt.show(block=True)
| gpl-3.0 |
jaak-s/macau | python/macau/test_macau.py | 1 | 10384 | import unittest
import numpy as np
import pandas as pd
import scipy.sparse
import macau
import itertools
class TestMacau(unittest.TestCase):
def test_bpmf(self):
Y = scipy.sparse.rand(10, 20, 0.2)
Y, Ytest = macau.make_train_test(Y, 0.5)
results = macau.bpmf(Y, Ytest = Ytest, num_latent = 4,
verbose = False, burnin = 50, nsamples = 50,
univariate = False)
self.assertEqual(Ytest.nnz, results.prediction.shape[0])
self.assertTrue( (results.prediction.columns[0:2] == ["row", "col"]).all() )
def test_bpmf_numerictest(self):
X = scipy.sparse.rand(15, 10, 0.2)
Xt = 0.3
macau.bpmf(X, Xt, num_latent = 10, burnin=10, nsamples=15, verbose = False)
def test_macau(self):
Y = scipy.sparse.rand(10, 20, 0.2)
Y, Ytest = macau.make_train_test(Y, 0.5)
side1 = scipy.sparse.coo_matrix( np.random.rand(10, 2) )
side2 = scipy.sparse.coo_matrix( np.random.rand(20, 3) )
results = macau.macau(Y, Ytest = Ytest, side = [side1, side2], num_latent = 4,
verbose = False, burnin = 50, nsamples = 50,
univariate = False)
self.assertEqual(Ytest.nnz, results.prediction.shape[0])
def test_macau_side_bin(self):
X = scipy.sparse.rand(15, 10, 0.2)
Xt = scipy.sparse.rand(15, 10, 0.1)
F = scipy.sparse.rand(15, 2, 0.5)
F.data[:] = 1
macau.macau(X, Xt, side=[F, None], num_latent = 5, burnin=10, nsamples=5, verbose = False)
def test_macau_dense(self):
Y = scipy.sparse.rand(15, 10, 0.2)
Yt = scipy.sparse.rand(15, 10, 0.1)
F = np.random.randn(15, 2)
macau.macau(Y, Yt, side=[F, None], num_latent = 5, burnin=10, nsamples=5, verbose = False)
def test_macau_dense_probit(self):
A = np.random.randn(25, 2)
B = np.random.randn(3, 2)
idx = list( itertools.product(np.arange(A.shape[0]), np.arange(B.shape[0])) )
df = pd.DataFrame( np.asarray(idx), columns=["A", "B"])
df["value"] = (np.array([ np.sum(A[i[0], :] * B[i[1], :]) for i in idx ]) > 0.0).astype(np.float64)
Ytrain, Ytest = macau.make_train_test_df(df, 0.2)
results = macau.macau(Y = Ytrain, Ytest = Ytest, side=[A, None], num_latent = 4,
verbose = False, burnin = 20, nsamples = 20,
univariate = False, precision = "probit")
self.assertTrue( (results.prediction.columns[0:2] == ["A", "B"]).all() )
self.assertTrue(results.auc_test > 0.55,
msg="Probit factorization (with dense side) gave AUC below 0.55 (%f)." % results.rmse_test)
def test_macau_univariate(self):
Y = scipy.sparse.rand(10, 20, 0.2)
Y, Ytest = macau.make_train_test(Y, 0.5)
side1 = scipy.sparse.coo_matrix( np.random.rand(10, 2) )
side2 = scipy.sparse.coo_matrix( np.random.rand(20, 3) )
results = macau.bpmf(Y, Ytest = Ytest, side = [side1, side2], num_latent = 4,
verbose = False, burnin = 50, nsamples = 50,
univariate = True)
self.assertEqual(Ytest.nnz, results.prediction.shape[0])
def test_too_many_sides(self):
Y = scipy.sparse.rand(10, 20, 0.2)
with self.assertRaises(ValueError):
macau.macau(Y, verbose = False, side = [None, None, None])
def test_bpmf_emptytest(self):
X = scipy.sparse.rand(15, 10, 0.2)
macau.bpmf(X, Ytest = 0, num_latent = 10, burnin=10, nsamples=15, verbose=False)
def test_bpmf_emptytest_probit(self):
X = scipy.sparse.rand(15, 10, 0.2)
X.data = X.data > 0.5
macau.bpmf(X, Ytest = 0, num_latent = 10, burnin=10, nsamples=15, precision="probit", verbose=False)
macau.bpmf(X, Ytest = None, num_latent = 10, burnin=10, nsamples=15, precision="probit", verbose=False)
def test_make_train_test(self):
X = scipy.sparse.rand(15, 10, 0.2)
Xtr, Xte = macau.make_train_test(X, 0.5)
self.assertEqual(X.nnz, Xtr.nnz + Xte.nnz)
diff = np.linalg.norm( (X - Xtr - Xte).todense() )
self.assertEqual(diff, 0.0)
def test_make_train_test_df(self):
idx = list( itertools.product(np.arange(10), np.arange(8), np.arange(3) ))
df = pd.DataFrame( np.asarray(idx), columns=["A", "B", "C"])
df["value"] = np.arange(10.0 * 8.0 * 3.0)
Ytr, Yte = macau.make_train_test_df(df, 0.4)
self.assertEqual(Ytr.shape[0], df.shape[0] * 0.6)
self.assertEqual(Yte.shape[0], df.shape[0] * 0.4)
A1 = np.zeros( (10, 8, 3) )
A2 = np.zeros( (10, 8, 3) )
A1[df.A, df.B, df.C] = df.value
A2[Ytr.A, Ytr.B, Ytr.C] = Ytr.value
A2[Yte.A, Yte.B, Yte.C] = Yte.value
self.assertTrue(np.allclose(A1, A2))
def test_bpmf_tensor(self):
np.random.seed(1234)
Y = pd.DataFrame({
"A": np.random.randint(0, 5, 7),
"B": np.random.randint(0, 4, 7),
"C": np.random.randint(0, 3, 7),
"value": np.random.randn(7)
})
Ytest = pd.DataFrame({
"A": np.random.randint(0, 5, 5),
"B": np.random.randint(0, 4, 5),
"C": np.random.randint(0, 3, 5),
"value": np.random.randn(5)
})
results = macau.bpmf(Y, Ytest = Ytest, num_latent = 4,
verbose = False, burnin = 50, nsamples = 50,
univariate = False)
def test_bpmf_tensor2(self):
A = np.random.randn(15, 2)
B = np.random.randn(20, 2)
C = np.random.randn(3, 2)
idx = list( itertools.product(np.arange(A.shape[0]), np.arange(B.shape[0]), np.arange(C.shape[0])) )
df = pd.DataFrame( np.asarray(idx), columns=["A", "B", "C"])
df["value"] = np.array([ np.sum(A[i[0], :] * B[i[1], :] * C[i[2], :]) for i in idx ])
Ytrain, Ytest = macau.make_train_test_df(df, 0.2)
results = macau.bpmf(Y = Ytrain, Ytest = Ytest, num_latent = 4,
verbose = False, burnin = 20, nsamples = 20,
univariate = False, precision = 50)
self.assertTrue(results.rmse_test < 0.5,
msg="Tensor factorization gave RMSE above 0.5 (%f)." % results.rmse_test)
def test_bpmf_tensor3(self):
A = np.random.randn(15, 2)
B = np.random.randn(20, 2)
C = np.random.randn(1, 2)
idx = list( itertools.product(np.arange(A.shape[0]), np.arange(B.shape[0]), np.arange(C.shape[0])) )
df = pd.DataFrame( np.asarray(idx), columns=["A", "B", "C"])
df["value"] = np.array([ np.sum(A[i[0], :] * B[i[1], :] * C[i[2], :]) for i in idx ])
Ytrain, Ytest = macau.make_train_test_df(df, 0.2)
results = macau.bpmf(Y = Ytrain, Ytest = Ytest, num_latent = 4,
verbose = False, burnin = 20, nsamples = 20,
univariate = False, precision = 50)
self.assertTrue(results.rmse_test < 0.5,
msg="Tensor factorization gave RMSE above 0.5 (%f)." % results.rmse_test)
Ytrain_sp = scipy.sparse.coo_matrix( (Ytrain.value, (Ytrain.A, Ytrain.B) ) )
Ytest_sp = scipy.sparse.coo_matrix( (Ytest.value, (Ytest.A, Ytest.B) ) )
results_mat = macau.bpmf(Y = Ytrain_sp, Ytest = Ytest_sp, num_latent = 4,
verbose = False, burnin = 20, nsamples = 20,
univariate = False, precision = 50)
def test_macau_tensor(self):
A = np.random.randn(30, 2)
B = np.random.randn(4, 2)
C = np.random.randn(2, 2)
idx = list( itertools.product(np.arange(A.shape[0]), np.arange(B.shape[0]), np.arange(C.shape[0])) )
df = pd.DataFrame( np.asarray(idx), columns=["A", "B", "C"])
df["value"] = np.array([ np.sum(A[i[0], :] * B[i[1], :] * C[i[2], :]) for i in idx ])
Ytrain, Ytest = macau.make_train_test_df(df, 0.2)
Acoo = scipy.sparse.coo_matrix(A)
results = macau.macau(Y = Ytrain, Ytest = Ytest, side=[Acoo, None, None], num_latent = 4,
verbose = False, burnin = 20, nsamples = 20,
univariate = False, precision = 50)
self.assertTrue( (results.prediction.columns[0:3] == ["A", "B", "C"]).all() )
self.assertTrue(results.rmse_test < 0.5,
msg="Tensor factorization gave RMSE above 0.5 (%f)." % results.rmse_test)
def test_macau_tensor_univariate(self):
A = np.random.randn(30, 2)
B = np.random.randn(4, 2)
C = np.random.randn(2, 2)
idx = list( itertools.product(np.arange(A.shape[0]), np.arange(B.shape[0]), np.arange(C.shape[0])) )
df = pd.DataFrame( np.asarray(idx), columns=["A", "B", "C"])
df["value"] = np.array([ np.sum(A[i[0], :] * B[i[1], :] * C[i[2], :]) for i in idx ])
Ytrain, Ytest = macau.make_train_test_df(df, 0.2)
Acoo = scipy.sparse.coo_matrix(A)
results = macau.macau(Y = Ytrain, Ytest = Ytest, side=[Acoo, None, None], num_latent = 4,
verbose = False, burnin = 20, nsamples = 20,
univariate = True, precision = 50)
self.assertTrue(results.rmse_test < 0.5,
msg="Tensor factorization gave RMSE above 0.5 (%f)." % results.rmse_test)
def test_macau_tensor_empty(self):
A = np.random.randn(30, 2)
B = np.random.randn(4, 2)
C = np.random.randn(2, 2)
idx = list( itertools.product(np.arange(A.shape[0]), np.arange(B.shape[0]), np.arange(C.shape[0])) )
df = pd.DataFrame( np.asarray(idx), columns=["A", "B", "C"])
df["value"] = np.array([ np.sum(A[i[0], :] * B[i[1], :] * C[i[2], :]) for i in idx ])
Acoo = scipy.sparse.coo_matrix(A)
r0 = macau.macau(df, Ytest = 0, num_latent = 2, burnin=5, nsamples=5, precision=1.0, verbose=False)
r1 = macau.macau(df, Ytest = None, num_latent = 2, burnin=5, nsamples=5, precision=1.0, verbose=False)
self.assertTrue( np.isnan(r0.rmse_test) )
self.assertTrue( np.isnan(r1.rmse_test) )
if __name__ == '__main__':
unittest.main()
| mit |
jesserobertson/pynoddy | pynoddy/experiment/SensitivityAnalysis.py | 2 | 12280 | from pynoddy.experiment import Experiment
import numpy as np
class SensitivityAnalysis(Experiment):
'''Sensitivity analysis experiments for kinematic models
Sensitivity analysis with methods from the SALib package:
https://github.com/jdherman/SALib
'''
#from SALib.sample import saltelli
def __init__(self, history=None, **kwds):
'''Combination of input and output methods for complete kinematic experiments with NOddy
**Optional Keywords**:
- *his_file* = string : filename of Noddy history input file
'''
super(Experiment, self).__init__(history)
def create_params_file(self, **kwds):
"""Create params file from defined parameter statistics for SALib analysis
Note: parameter statistics have to be defined in self.param_stats dictionary
(use self.set_parameter_statistics)
**Optional keywords**:
- *filename* = string : name of parameter file (default: params_file_tmp.txt)
"""
filename = kwds.get("filename", "params_file_tmp.txt")
if not hasattr(self, "param_stats"):
raise AttributeError("Please define parameter statistics dictionary first (define with self.set_parameter_statistics) ")
f = open(filename, 'w')
for param in self.param_stats:
# create a meaningful name for the parameter
par_name = "ev_%d_%s" % (param['event'], param['parameter'].replace (" ", "_"))
f.write("%s %f %f\n" % (par_name, param['min'], param['max']))
f.close()
def add_sampling_line(self, x, y, **kwds):
"""Define a vertical sampling line, for example as a drillhole at position (x,y)
As default, the entire length for the model extent is exported. Ohter depth ranges
can be defined with optional keywords.
**Arguments**:
- *x* = float: x-position of drillhole
- *y* = float: y-position of drillhole
**Optional keywords**:
- *z_min* = float : minimum z-value (default: model origin)
- *z_max* = float : maximum z-value (default: surface)
- *label* = string : add a label to line (e.g. drillhole name, location, etc.)
"""
if not hasattr(self, "sampling_lines"):
self.sampling_lines= {}
self.get_extent()
self.get_origin()
z_min = kwds.get("z_min", self.origin_z)
z_max = kwds.get("z_max", self.extent_z)
label = kwds.get('label', 'line %d' % len(self.sampling_lines))
self.sampling_lines[label] = {'x' : x, 'y' : y, 'z_min' : z_min, 'z_max' : z_max}
def distance(self, **kwds):
"""Calculate distance between current state and base model
The standard distance is calculated as L1 norm of relative stratigraphic difference
along sampling lines.
**Optional keywords**:
- *norm* = 'L1', 'L2' : norm to calculate distance
- *resolution* = float : model resolution to calculate distance at sampling lines
"""
# First step: get data along sampling lines and append to one long array
resolution = kwds.get("resolution", 1.0)
# test if sampling lines are defined
if not hasattr(self, "sampling_lines"):
raise AttributeError("Sampling lines are required to calculate distance!")
# get current line values:
current_lines = self.get_model_lines(resolution = resolution)
# check if model values along base line have previously been calculated
# and if they have the same resolution - if not, do that
if not hasattr(self, "base_model_lines") or (len(current_lines) != len(self.base_model_lines)):
self.get_model_lines(resolution = resolution, model_type = 'base')
# calculate distance:
distance = np.sum(np.abs(self.base_model_lines - current_lines)) / float(len(self.base_model_lines))
return distance
def determine_distances(self, **kwds):
"""Determine distances for a given parameter sets, based on defined sampling lines
**Optional keywords**:
- *param_values* = list of parameter values (as, for example, created by SALib methods)
- *resolution* = float : model resolution to calculate distance at sampling lines
"""
if kwds.has_key("param_values"):
param_values = kwds['param_values']
elif hasattr(self, 'param_values'):
param_values = self.param_values
else:
raise AttributeError("Please define paramter values as object variable or pass as keyword argument!")
# test if sampling lines are defined
if not hasattr(self, "sampling_lines"):
raise AttributeError("Sampling lines are required to calculate distance!")
# First step: get data along sampling lines and append to one long array
resolution = kwds.get("resolution", 1.0)
distances = []
# only for test - remove later!!
# import copy
# create model for each parameter set and calculate distance
for param_set in param_values:
param_values = {}
for i,param_val in enumerate(param_set):
# order of parameters in list corresponds to entires in self.param_stats:
param = self.param_stats[i]
# initialise parameter changes dictionary if it doesn't exist:
if not param_values.has_key(param['event']):
param_values[param['event']] = {}
param_values[param['event']][param['parameter']] = param_val
# self.events = copy.deepcopy(self.base_events)
# apply change to model:
self.set_event_params(param_values)
# calculated distance to base model for given resolution
distances.append(self.distance(resolution = resolution))
return distances
def get_model_lines(self, **kwds):
"""Get base model along the defined sampling lines
**Optional keywords**:
- *model_type* = 'base', 'current' : model type (select base to get freezed model)
- *resolution* = float : model resolution to calculate distance at sampling lines
"""
resolution = kwds.get("resolution", 1)
model_type = kwds.get("model_type", 'current')
import copy
tmp_his = copy.deepcopy(self)
current_lines = np.array([])
# get model for all sampling lines
for sl in self.sampling_lines.values():
# 2. set values
tmp_his.set_origin(sl['x'], sl['y'], sl['z_min'])
tmp_his.set_extent(resolution, resolution, sl['z_max'])
tmp_his.change_cube_size(resolution)
# test if base model:
if model_type == 'base':
# set base events:
tmp_his.events = self.base_events.copy()
elif model_type == 'current':
# use current model, do nothing for now
pass
else:
raise AttributeError("Model type %s not known, please check!" % model_type)
# 3. save temporary file
tmp_his_file = "tmp_1D_drillhole.his"
tmp_his.write_history(tmp_his_file)
tmp_out_file = "tmp_1d_out"
# 4. run noddy
import pynoddy
import pynoddy.output
pynoddy.compute_model(tmp_his_file, tmp_out_file)
# 5. open output
tmp_out = pynoddy.output.NoddyOutput(tmp_out_file)
# 6.
current_lines = np.append(current_lines, tmp_out.block[0,0,:])
# if base model: store as class variable:
# test if base model:
if model_type == 'base':
self.base_model_lines = current_lines
return current_lines
def perform_analsis(self, n=10, **kwds):
"""Perform Sobol sensitivity analysis with SALib methods
**Arguments**:
- *n* = int : number of sobol iterations (default: 10)
**Optional keywords**:
- *calc_second_order* = bool : second order stats (default: True)
"""
calc_second_order = kwds.get("calc_second_order", True)
# freeze base stats
self.freeze()
# import SALib method
from SALib.sample import saltelli
from SALib.analyze import sobol
# create temporary parameter file
param_file = "params_file_tmp.txt"
self.create_params_file(filename = param_file)
# perform sampling
self.param_values = saltelli.sample(10, param_file, calc_second_order = calc_second_order)
# calculate distances - compute intensive step!
self.distances = self.determine_distances()
# save results
results_file = 'dist_tmp.txt'
np.savetxt(results_file, self.distances, delimiter=' ')
# perform sobol analysis
Si = sobol.analyze(param_file, results_file,
column = 0,
conf_level = 0.95,
calc_second_order = calc_second_order,
print_to_console=False)
# create composite matrix for sensitivities
n_params = len(self.param_stats)
self.comp_matrix = np.ndarray(shape = (n_params,n_params))
for j in range(n_params):
for i in range(n_params):
if i == j:
self.comp_matrix[i,j] = Si['S1'][i]
else:
self.comp_matrix[i,j] = Si['S2'][i,j]
self.comp_matrix[j,i] = Si['S2'][i,j]
# remove temporary files
import os
os.remove(results_file)
os.remove(param_file)
def plot_sensitivity_matrix(self, **kwds):
"""Create a plot of the sensitivity matrix
**Optional keywords**:
- *savefig* = bool : save figure to file (default: show)
- *fig_filename* = string : figure filename (default: distances.png)
"""
import matplotlib.pyplot as plt
savefig = kwds.get("savefig", False)
fig_filename = kwds.get("fig_filename", "distances.png")
plt.rcParams['font.size'] = 15
fig = plt.figure()
ax = fig.add_subplot(111)
im = ax.imshow(self.comp_matrix, interpolation='nearest', cmap='RdBu_r',
vmax = np.max(np.abs(self.comp_matrix)),
vmin = -np.max(np.abs(self.comp_matrix)))
ax.yaxis.set_ticks_position("both")
ax.xaxis.set_ticks_position("top")
ax.set_xlabel("Parameter Sensitivities")
fig.colorbar(im)
plt.tight_layout()
if savefig:
plt.savefig(fig_filename)
else:
plt.show()
def plot_distances(self, **kwds):
"""Create diagnostic plot of calculated distances
**Optional keywords**:
- *savefig* = bool : save figure to file (default: show)
- *fig_filename* = string : figure filename (default: distances.png)
"""
import matplotlib.pyplot as plt
savefig = kwds.get("savefig", False)
fig_filename = kwds.get("fig_filename", "distances.png")
plt.rcParams['font.size'] = 15
fig = plt.figure()
ax = fig.add_subplot(111)
ax.plot(self.distances, '.-k')
ax.set_title("Calculated distances")
ax.set_xlabel("Sensitivity step")
ax.set_ylabel("Distance")
plt.tight_layout()
if savefig:
plt.savefig(fig_filename)
else:
plt.show() | gpl-2.0 |
blackecho/Deep-Learning-TensorFlow | yadlt/models/linear/logistic_regression.py | 2 | 3960 | """Softmax classifier implementation using Tensorflow."""
from __future__ import division
from __future__ import print_function
import numpy as np
import tensorflow as tf
from tqdm import tqdm
from yadlt.core import Evaluation, Loss
from yadlt.core import SupervisedModel
from yadlt.utils import tf_utils, utilities
class LogisticRegression(SupervisedModel):
"""Simple Logistic Regression using TensorFlow.
The interface of the class is sklearn-like.
"""
def __init__(self, name='lr', loss_func='cross_entropy',
learning_rate=0.01, num_epochs=10, batch_size=10):
"""Constructor."""
SupervisedModel.__init__(self, name)
self.loss_func = loss_func
self.learning_rate = learning_rate
self.num_epochs = num_epochs
self.batch_size = batch_size
self.loss = Loss(self.loss_func)
# Computational graph nodes
self.input_data = None
self.input_labels = None
self.W_ = None
self.b_ = None
self.accuracy = None
def build_model(self, n_features, n_classes):
"""Create the computational graph.
:param n_features: number of features
:param n_classes: number of classes
:return: self
"""
self._create_placeholders(n_features, n_classes)
self._create_variables(n_features, n_classes)
self.mod_y = tf.nn.softmax(
tf.add(tf.matmul(self.input_data, self.W_), self.b_))
self.cost = self.loss.compile(self.mod_y, self.input_labels)
self.train_step = tf.train.GradientDescentOptimizer(
self.learning_rate).minimize(self.cost)
self.accuracy = Evaluation.accuracy(self.mod_y, self.input_labels)
def _create_placeholders(self, n_features, n_classes):
"""Create the TensorFlow placeholders for the model.
:param n_features: number of features
:param n_classes: number of classes
:return: self
"""
self.input_data = tf.placeholder(
tf.float32, [None, n_features], name='x-input')
self.input_labels = tf.placeholder(
tf.float32, [None, n_classes], name='y-input')
self.keep_prob = tf.placeholder(
tf.float32, name='keep-probs')
def _create_variables(self, n_features, n_classes):
"""Create the TensorFlow variables for the model.
:param n_features: number of features
:param n_classes: number of classes
:return: self
"""
self.W_ = tf.Variable(
tf.zeros([n_features, n_classes]), name='weights')
self.b_ = tf.Variable(
tf.zeros([n_classes]), name='biases')
def _train_model(self, train_set, train_labels,
validation_set, validation_labels):
"""Train the model.
:param train_set: training set
:param train_labels: training labels
:param validation_set: validation set
:param validation_labels: validation labels
:return: self
"""
pbar = tqdm(range(self.num_epochs))
for i in pbar:
shuff = list(zip(train_set, train_labels))
np.random.shuffle(shuff)
batches = [_ for _ in utilities.gen_batches(shuff, self.batch_size)]
for batch in batches:
x_batch, y_batch = zip(*batch)
self.tf_session.run(
self.train_step,
feed_dict={self.input_data: x_batch,
self.input_labels: y_batch})
if validation_set is not None:
feed = {self.input_data: validation_set,
self.input_labels: validation_labels}
acc = tf_utils.run_summaries(
self.tf_session, self.tf_merged_summaries,
self.tf_summary_writer, i, feed, self.accuracy)
pbar.set_description("Accuracy: %s" % (acc))
| mit |
moutai/scikit-learn | sklearn/tree/export.py | 14 | 16020 | """
This module defines export functions for decision trees.
"""
# Authors: Gilles Louppe <g.louppe@gmail.com>
# Peter Prettenhofer <peter.prettenhofer@gmail.com>
# Brian Holt <bdholt1@gmail.com>
# Noel Dawe <noel@dawe.me>
# Satrajit Gosh <satrajit.ghosh@gmail.com>
# Trevor Stephens <trev.stephens@gmail.com>
# Licence: BSD 3 clause
import numpy as np
from ..externals import six
from . import _criterion
from . import _tree
def _color_brew(n):
"""Generate n colors with equally spaced hues.
Parameters
----------
n : int
The number of colors required.
Returns
-------
color_list : list, length n
List of n tuples of form (R, G, B) being the components of each color.
"""
color_list = []
# Initialize saturation & value; calculate chroma & value shift
s, v = 0.75, 0.9
c = s * v
m = v - c
for h in np.arange(25, 385, 360. / n).astype(int):
# Calculate some intermediate values
h_bar = h / 60.
x = c * (1 - abs((h_bar % 2) - 1))
# Initialize RGB with same hue & chroma as our color
rgb = [(c, x, 0),
(x, c, 0),
(0, c, x),
(0, x, c),
(x, 0, c),
(c, 0, x),
(c, x, 0)]
r, g, b = rgb[int(h_bar)]
# Shift the initial RGB values to match value and store
rgb = [(int(255 * (r + m))),
(int(255 * (g + m))),
(int(255 * (b + m)))]
color_list.append(rgb)
return color_list
def export_graphviz(decision_tree, out_file="tree.dot", max_depth=None,
feature_names=None, class_names=None, label='all',
filled=False, leaves_parallel=False, impurity=True,
node_ids=False, proportion=False, rotate=False,
rounded=False, special_characters=False):
"""Export a decision tree in DOT format.
This function generates a GraphViz representation of the decision tree,
which is then written into `out_file`. Once exported, graphical renderings
can be generated using, for example::
$ dot -Tps tree.dot -o tree.ps (PostScript format)
$ dot -Tpng tree.dot -o tree.png (PNG format)
The sample counts that are shown are weighted with any sample_weights that
might be present.
Read more in the :ref:`User Guide <tree>`.
Parameters
----------
decision_tree : decision tree classifier
The decision tree to be exported to GraphViz.
out_file : file object or string, optional (default="tree.dot")
Handle or name of the output file.
max_depth : int, optional (default=None)
The maximum depth of the representation. If None, the tree is fully
generated.
feature_names : list of strings, optional (default=None)
Names of each of the features.
class_names : list of strings, bool or None, optional (default=None)
Names of each of the target classes in ascending numerical order.
Only relevant for classification and not supported for multi-output.
If ``True``, shows a symbolic representation of the class name.
label : {'all', 'root', 'none'}, optional (default='all')
Whether to show informative labels for impurity, etc.
Options include 'all' to show at every node, 'root' to show only at
the top root node, or 'none' to not show at any node.
filled : bool, optional (default=False)
When set to ``True``, paint nodes to indicate majority class for
classification, extremity of values for regression, or purity of node
for multi-output.
leaves_parallel : bool, optional (default=False)
When set to ``True``, draw all leaf nodes at the bottom of the tree.
impurity : bool, optional (default=True)
When set to ``True``, show the impurity at each node.
node_ids : bool, optional (default=False)
When set to ``True``, show the ID number on each node.
proportion : bool, optional (default=False)
When set to ``True``, change the display of 'values' and/or 'samples'
to be proportions and percentages respectively.
rotate : bool, optional (default=False)
When set to ``True``, orient tree left to right rather than top-down.
rounded : bool, optional (default=False)
When set to ``True``, draw node boxes with rounded corners and use
Helvetica fonts instead of Times-Roman.
special_characters : bool, optional (default=False)
When set to ``False``, ignore special characters for PostScript
compatibility.
Examples
--------
>>> from sklearn.datasets import load_iris
>>> from sklearn import tree
>>> clf = tree.DecisionTreeClassifier()
>>> iris = load_iris()
>>> clf = clf.fit(iris.data, iris.target)
>>> tree.export_graphviz(clf,
... out_file='tree.dot') # doctest: +SKIP
"""
def get_color(value):
# Find the appropriate color & intensity for a node
if colors['bounds'] is None:
# Classification tree
color = list(colors['rgb'][np.argmax(value)])
sorted_values = sorted(value, reverse=True)
if len(sorted_values) == 1:
alpha = 0
else:
alpha = int(np.round(255 * (sorted_values[0] - sorted_values[1]) /
(1 - sorted_values[1]), 0))
else:
# Regression tree or multi-output
color = list(colors['rgb'][0])
alpha = int(np.round(255 * ((value - colors['bounds'][0]) /
(colors['bounds'][1] -
colors['bounds'][0])), 0))
# Return html color code in #RRGGBBAA format
color.append(alpha)
hex_codes = [str(i) for i in range(10)]
hex_codes.extend(['a', 'b', 'c', 'd', 'e', 'f'])
color = [hex_codes[c // 16] + hex_codes[c % 16] for c in color]
return '#' + ''.join(color)
def node_to_str(tree, node_id, criterion):
# Generate the node content string
if tree.n_outputs == 1:
value = tree.value[node_id][0, :]
else:
value = tree.value[node_id]
# Should labels be shown?
labels = (label == 'root' and node_id == 0) or label == 'all'
# PostScript compatibility for special characters
if special_characters:
characters = ['#', '<SUB>', '</SUB>', '≤', '<br/>', '>']
node_string = '<'
else:
characters = ['#', '[', ']', '<=', '\\n', '"']
node_string = '"'
# Write node ID
if node_ids:
if labels:
node_string += 'node '
node_string += characters[0] + str(node_id) + characters[4]
# Write decision criteria
if tree.children_left[node_id] != _tree.TREE_LEAF:
# Always write node decision criteria, except for leaves
if feature_names is not None:
feature = feature_names[tree.feature[node_id]]
else:
feature = "X%s%s%s" % (characters[1],
tree.feature[node_id],
characters[2])
node_string += '%s %s %s%s' % (feature,
characters[3],
round(tree.threshold[node_id], 4),
characters[4])
# Write impurity
if impurity:
if isinstance(criterion, _criterion.FriedmanMSE):
criterion = "friedman_mse"
elif not isinstance(criterion, six.string_types):
criterion = "impurity"
if labels:
node_string += '%s = ' % criterion
node_string += (str(round(tree.impurity[node_id], 4)) +
characters[4])
# Write node sample count
if labels:
node_string += 'samples = '
if proportion:
percent = (100. * tree.n_node_samples[node_id] /
float(tree.n_node_samples[0]))
node_string += (str(round(percent, 1)) + '%' +
characters[4])
else:
node_string += (str(tree.n_node_samples[node_id]) +
characters[4])
# Write node class distribution / regression value
if proportion and tree.n_classes[0] != 1:
# For classification this will show the proportion of samples
value = value / tree.weighted_n_node_samples[node_id]
if labels:
node_string += 'value = '
if tree.n_classes[0] == 1:
# Regression
value_text = np.around(value, 4)
elif proportion:
# Classification
value_text = np.around(value, 2)
elif np.all(np.equal(np.mod(value, 1), 0)):
# Classification without floating-point weights
value_text = value.astype(int)
else:
# Classification with floating-point weights
value_text = np.around(value, 4)
# Strip whitespace
value_text = str(value_text.astype('S32')).replace("b'", "'")
value_text = value_text.replace("' '", ", ").replace("'", "")
if tree.n_classes[0] == 1 and tree.n_outputs == 1:
value_text = value_text.replace("[", "").replace("]", "")
value_text = value_text.replace("\n ", characters[4])
node_string += value_text + characters[4]
# Write node majority class
if (class_names is not None and
tree.n_classes[0] != 1 and
tree.n_outputs == 1):
# Only done for single-output classification trees
if labels:
node_string += 'class = '
if class_names is not True:
class_name = class_names[np.argmax(value)]
else:
class_name = "y%s%s%s" % (characters[1],
np.argmax(value),
characters[2])
node_string += class_name
# Clean up any trailing newlines
if node_string[-2:] == '\\n':
node_string = node_string[:-2]
if node_string[-5:] == '<br/>':
node_string = node_string[:-5]
return node_string + characters[5]
def recurse(tree, node_id, criterion, parent=None, depth=0):
if node_id == _tree.TREE_LEAF:
raise ValueError("Invalid node_id %s" % _tree.TREE_LEAF)
left_child = tree.children_left[node_id]
right_child = tree.children_right[node_id]
# Add node with description
if max_depth is None or depth <= max_depth:
# Collect ranks for 'leaf' option in plot_options
if left_child == _tree.TREE_LEAF:
ranks['leaves'].append(str(node_id))
elif str(depth) not in ranks:
ranks[str(depth)] = [str(node_id)]
else:
ranks[str(depth)].append(str(node_id))
out_file.write('%d [label=%s'
% (node_id,
node_to_str(tree, node_id, criterion)))
if filled:
# Fetch appropriate color for node
if 'rgb' not in colors:
# Initialize colors and bounds if required
colors['rgb'] = _color_brew(tree.n_classes[0])
if tree.n_outputs != 1:
# Find max and min impurities for multi-output
colors['bounds'] = (np.min(-tree.impurity),
np.max(-tree.impurity))
elif tree.n_classes[0] == 1 and len(np.unique(tree.value)) != 1:
# Find max and min values in leaf nodes for regression
colors['bounds'] = (np.min(tree.value),
np.max(tree.value))
if tree.n_outputs == 1:
node_val = (tree.value[node_id][0, :] /
tree.weighted_n_node_samples[node_id])
if tree.n_classes[0] == 1:
# Regression
node_val = tree.value[node_id][0, :]
else:
# If multi-output color node by impurity
node_val = -tree.impurity[node_id]
out_file.write(', fillcolor="%s"' % get_color(node_val))
out_file.write('] ;\n')
if parent is not None:
# Add edge to parent
out_file.write('%d -> %d' % (parent, node_id))
if parent == 0:
# Draw True/False labels if parent is root node
angles = np.array([45, -45]) * ((rotate - .5) * -2)
out_file.write(' [labeldistance=2.5, labelangle=')
if node_id == 1:
out_file.write('%d, headlabel="True"]' % angles[0])
else:
out_file.write('%d, headlabel="False"]' % angles[1])
out_file.write(' ;\n')
if left_child != _tree.TREE_LEAF:
recurse(tree, left_child, criterion=criterion, parent=node_id,
depth=depth + 1)
recurse(tree, right_child, criterion=criterion, parent=node_id,
depth=depth + 1)
else:
ranks['leaves'].append(str(node_id))
out_file.write('%d [label="(...)"' % node_id)
if filled:
# color cropped nodes grey
out_file.write(', fillcolor="#C0C0C0"')
out_file.write('] ;\n' % node_id)
if parent is not None:
# Add edge to parent
out_file.write('%d -> %d ;\n' % (parent, node_id))
own_file = False
try:
if isinstance(out_file, six.string_types):
if six.PY3:
out_file = open(out_file, "w", encoding="utf-8")
else:
out_file = open(out_file, "wb")
own_file = True
# The depth of each node for plotting with 'leaf' option
ranks = {'leaves': []}
# The colors to render each node with
colors = {'bounds': None}
out_file.write('digraph Tree {\n')
# Specify node aesthetics
out_file.write('node [shape=box')
rounded_filled = []
if filled:
rounded_filled.append('filled')
if rounded:
rounded_filled.append('rounded')
if len(rounded_filled) > 0:
out_file.write(', style="%s", color="black"'
% ", ".join(rounded_filled))
if rounded:
out_file.write(', fontname=helvetica')
out_file.write('] ;\n')
# Specify graph & edge aesthetics
if leaves_parallel:
out_file.write('graph [ranksep=equally, splines=polyline] ;\n')
if rounded:
out_file.write('edge [fontname=helvetica] ;\n')
if rotate:
out_file.write('rankdir=LR ;\n')
# Now recurse the tree and add node & edge attributes
if isinstance(decision_tree, _tree.Tree):
recurse(decision_tree, 0, criterion="impurity")
else:
recurse(decision_tree.tree_, 0, criterion=decision_tree.criterion)
# If required, draw leaf nodes at same depth as each other
if leaves_parallel:
for rank in sorted(ranks):
out_file.write("{rank=same ; " +
"; ".join(r for r in ranks[rank]) + "} ;\n")
out_file.write("}")
finally:
if own_file:
out_file.close()
| bsd-3-clause |
benjaminpope/whisky | kergain_sim.py | 2 | 11632 | import numpy as np
import matplotlib.pyplot as plt
import pysco
from pysco.core import *
import fitsio
from k2_epd_george import print_time
from time import time as clock
from old_diffract_tools import *
import pymultinest
from pysco.diffract_tools import shift_image_ft
from pysco.common_tasks import shift_image
from swiftmask import swiftpupil
import matplotlib as mpl
from astropy.table import Table
mpl.rcParams['figure.figsize']=(8.0,6.0) #(6.0,4.0)
mpl.rcParams['font.size']= 18 #10
mpl.rcParams['savefig.dpi']=100 #72
mpl.rcParams['axes.labelsize'] = 16
mpl.rcParams['xtick.labelsize'] = 12
mpl.rcParams['ytick.labelsize'] = 12
shift = np.fft.fftshift
fft = np.fft.fft2
ifft = np.fft.ifft2
fftfreq = np.fft.fftfreq
fftn = np.fft.fftn
rfftn = np.fft.rfftn
dtor = np.pi/180.0
'''------------------------------------------------------------
kergain_sim.py
Automate a simulation of the effectiveness of raw visibility
fitting versus kernel amplitudes
------------------------------------------------------------'''
pupil = 'plain'
try:
a = pysco.kpi('./geometry/'+pupil+'model.pick')
print 'Loaded kernel phase object'
except:
a = pysco.kpi('./geometry/'+pupil+'.txt')
a.name = 'Test'
a.save_to_file('./geometry/'+pupil+'model.pick')
nbuv, nbh = a.nbuv, a.nbh
try:
KerGain = np.loadtxt('KerGain_plain.csv')
print 'Loaded kernel amplitude matrix'
except:
gtfm = np.abs(a.TFM)
U, S, Vh = np.linalg.svd(gtfm.T, full_matrices=1)
S1 = np.zeros(nbuv)
S1[0:nbh-1] = S
nkg = np.size(np.where(abs(S1) < 1e-3))
print nkg
KGCol = np.where(abs(S1) < 1e-3)[0]
KerGain = np.zeros((nkg, nbuv)) # allocate the array
for i in range(nkg):
KerGain[i,:] = (Vh)[KGCol[i],:]
np.savetxt('KerGain_plain.csv',KerGain)
print 'saved'
###-----------------------------------------
### now initialize a simulation
###-----------------------------------------
'''------------------------------
First, set all your parameters.
------------------------------'''
print '\nSimulating a basic PSF'
wavel = 2.5e-6
rprim = 5.093/2.#36903.e-3/2.
rsec= 1.829/2.
pos = [0,0] #m, deg
spaxel = 36.
piston = 0
nimages = 200
reso = rad2mas(wavel/(2*rprim))
print 'Minimum Lambda/D = %.3g mas' % reso
image, imagex = diffract(wavel,rprim,rsec,pos,piston=piston,spaxel=spaxel,seeing=None,verbose=False,\
show_pupil=False,mode=None)
# image = recenter(image,sg_rad=25)
imsz = image.shape[0]
psfs = np.zeros((nimages,imsz,imsz))
show=True
'''----------------------------------------
Loop over a range of contrasts
----------------------------------------'''
contrast_list = [350,400,450,500]#[10,50,100,150,200,250,300]
contrast_list = [10,50,100,150,200,250,300,350,400,450,500,750,1000,1250,1500,1750,2000,2250,2500,2750,3000,3250,3500,3750,4000,4250,4500,4750,5000]
contrast_list = np.linspace(10,6000,30)
# contrast_list = [10,50]
ncalcs = len(contrast_list)
kseps, kthetas, kcons = np.zeros(ncalcs), np.zeros(ncalcs), np.zeros(ncalcs)
dkseps, dkthetas, dkcons = np.zeros(ncalcs), np.zeros(ncalcs), np.zeros(ncalcs)
vseps, vthetas, vcons = np.zeros(ncalcs), np.zeros(ncalcs), np.zeros(ncalcs)
dvseps, dvthetas, dvcons = np.zeros(ncalcs), np.zeros(ncalcs), np.zeros(ncalcs)
t0 = clock()
sep, theta = 150, 45
xb,yb = np.cos(theta*np.pi/180)*sep/spaxel, np.sin(theta*np.pi/180)*sep/spaxel
print 'x',xb,',y',yb
amp = 0.1
try:
dummy = fitsio.FITS('psf_cube_scint_%.2f_wavel_%.2f.fits' % (amp,wavel*1e6))
psfs = dummy[0][:,:,:]
print 'Loaded PSFs'
except:
print 'Creating PSFs'
for j in range(nimages):
if j == 0:
verbose = True
else:
verbose = False
psfs[j,:,:], imagex = diffract(wavel,rprim,rsec,pos,piston=piston,spaxel=spaxel,
verbose=verbose,show_pupil=show,mode='amp',
perturbation=None,amp=amp)
fitsio.write('psf_cube_scint_%.2f_wavel_%.2f.fits' % (amp,wavel),psfs)
print 'Saved to psf_cube_scint_%.2f_wavel_%.2f.fits' % (amp,wavel*1e6)
print_time(clock()-t0)
'''----------------------------------------
Initialise pysco with a pupil model
----------------------------------------'''
# meter to pixel conversion factor
scale = 1.0
m2pix = mas2rad(spaxel) * imsz/ wavel * scale
uv_samp = a.uv * m2pix + imsz/2 # uv sample coordinates in pixels
x = a.mask[:,0]
y = a.mask[:,1]
rev = 1
ac = shift(fft(shift(image)))
ac /= (np.abs(ac)).max() / a.nbh
uv_samp_rev=np.cast['int'](np.round(uv_samp))
uv_samp_rev[:,0]*=rev
data_cplx=ac[uv_samp_rev[:,1], uv_samp_rev[:,0]]
vis2 = np.abs(data_cplx)
vis2 /= vis2.max() #normalise to the origin
'''----------------------------------------
Now loop over simulated binaries
----------------------------------------'''
for trial, contrast in enumerate(contrast_list):
print '\nSimulating for contrast %f' % contrast
thistime = clock()
images = np.zeros((nimages,imsz,imsz))
for j in range(nimages):
images[j,:,:] = np.copy(psfs[j,:,:]) + shift_image_ft(np.copy(psfs[j,:,:]),[-yb,-xb])/contrast#shift_image(psf,x=x,y=y,doRoll=True)/contrast
imsz = images.shape[1]
'''----------------------------------------
Extract Visibilities
----------------------------------------'''
mvis = a.RED/a.RED.max().astype('float')
# kpd_phase = np.angle(data_cplx)/dtor
# kpd_signal = np.dot(a.KerPhi, kpd_phase)
kervises=np.zeros((nimages,KerGain.shape[0]))
vis2s = np.zeros((nimages,vis2.shape[0]))
kpd_signals = np.zeros((nimages,a.KerPhi.shape[0]))
phases = np.zeros((nimages,vis2.shape[0]))
randomGain = np.random.randn(np.shape(KerGain)[0],np.shape(KerGain)[1])
for j in range(nimages):
image2 = images[j,:,:]
ac2 = shift(fft(shift(image2)))
ac2 /= (np.abs(ac2)).max() / a.nbh
data_cplx2=ac2[uv_samp_rev[:,1], uv_samp_rev[:,0]]
vis2b = np.abs(data_cplx2)
vis2b /= vis2b.max() #normalise to the origin
vis2s[j,:]=vis2b
# log_data_complex_b = np.log(np.abs(data_cplx2))+1.j*np.angle(data_cplx2)
phases[j,:] = np.angle(data_cplx2)/dtor
kervises[j,:] = np.dot(KerGain,vis2b/vis2-1.)
# kervises[j,:] = np.dot(randomGain, np.sqrt(vis2b)-mvis)
# kpd_signals[j,:] = np.dot(a.KerPhi,np.angle(data_cplx2))/dtor
# kercomplexb = np.dot(KerBispect,log_data_complex_b)
# kervises_cplx[j,:] = np.abs(kercomplexb)
'''----------------------------------------
Now Model
----------------------------------------'''
paramlimits = [50.,300.,30.,60.,contrast/3.,contrast*3.]
hdr = {'tel':'HST',
'filter':wavel,
'orient':0}
def myprior(cube, ndim, n_params, paramlimits=paramlimits):
    """Map MultiNest unit-cube samples onto the physical prior ranges in place.

    ``cube[0]`` is scaled to the separation range, ``cube[1]`` to the
    position-angle range, and ``cube[2:ndim]`` to the contrast range, all
    taken from ``paramlimits`` (bound at definition time as a default).
    """
    sep_lo, sep_hi, th_lo, th_hi, con_lo, con_hi = paramlimits[:6]
    cube[0] = sep_lo + (sep_hi - sep_lo) * cube[0]
    cube[1] = th_lo + (th_hi - th_lo) * cube[1]
    for idx in range(2, ndim):
        cube[idx] = con_lo + (con_hi - con_lo) * cube[idx]
def kg_loglikelihood(cube, kgd, kge, kpi):
    '''Calculate chi2 for single band kernel amplitude data.
    Used both in the MultiNest and MCMC Hammer implementations.'''
    # Model visibilities for the binary described by cube[0:3], projected
    # onto the kernel-amplitude space before comparing with the data.
    model_vis = np.sqrt(pysco.binary_model(cube[0:3], kpi, hdr, vis2=True))
    model_kg = KerGain.dot(model_vis - 1.)
    residuals = (kgd - model_kg) / kge
    return -np.sum(residuals ** 2) / 2.
def vis_loglikelihood(cube, vdata, ve, kpi):
    '''Calculate chi2 for single band vis2 data.
    Used both in the MultiNest and MCMC Hammer implementations.'''
    model_v2 = pysco.binary_model(cube[0:3], kpi, hdr, vis2=True)
    residuals = (vdata - model_v2) / ve
    return -np.sum(residuals ** 2) / 2.
'''-----------------------------------------------
First do kernel amplitudes
-----------------------------------------------'''
my_observable = np.mean(kervises,axis=0)
# else:
# my_observable = kervises[frame+1,:]
addederror = 0.00001 # in case there are bad frames
my_error = np.sqrt(np.std(kervises,axis=0)**2+addederror**2)
print 'Error:', my_error
def myloglike_kg(cube, ndim, n_params):
    """MultiNest-callable log-likelihood wrapper for kernel amplitudes.

    Delegates to ``kg_loglikelihood`` with the module-level ``my_observable``,
    ``my_error`` and kernel-phase object ``a``.
    """
    try:
        loglike = kg_loglikelihood(cube, my_observable, my_error, a)
        # loglike = vis_loglikelihood(cube,my_observable,my_error,a)
        return loglike
    except:
        # Deliberate catch-all: any failure while evaluating the binary model
        # is treated as zero likelihood so MultiNest can keep sampling.
        return -np.inf
parameters = ['Separation','Position Angle','Contrast']
n_params = len(parameters)
resume=False
eff=0.3
multi=True,
max_iter= 0
ndim = n_params
pymultinest.run(myloglike_kg, myprior, n_params, wrapped_params=[1],
verbose=True,resume=False)
thing = pymultinest.Analyzer(n_params = n_params)
s = thing.get_stats()
this_j = trial
kseps[this_j], dkseps[this_j] = s['marginals'][0]['median'], s['marginals'][0]['sigma']
kthetas[this_j], dkthetas[this_j] = s['marginals'][1]['median'], s['marginals'][1]['sigma']
kcons[this_j], dkcons[this_j] = s['marginals'][2]['median'], s['marginals'][2]['sigma']
stuff = thing.get_best_fit()
best_params = stuff['parameters']
print 'Best parameters:',best_params
model_vises = np.sqrt(pysco.binary_model(best_params,a,hdr,vis2=True))
model_kervises = np.dot(KerGain,model_vises-1.)
plt.clf()
plt.errorbar(my_observable,model_kervises,xerr=my_error,color='k',
ls='',markersize=10,linewidth=2.5)
plt.xlabel('Measured Kernel Amplitudes')
plt.ylabel('Model Kernel Amplitudes')
plt.title('Model Fit: Kernel Amplitudes, Contrast %.1f' % contrast)
plt.savefig('kpfit_bin_%.1f_con.png' % contrast)
print 'Kernel amplitudes done'
print_time(clock()-thistime)
print ''
'''-----------------------------------------------
Now do visibilities
-----------------------------------------------'''
my_observable = np.mean((vis2s/vis2)**2,axis=0)
# else:
# my_observable = (vis2s[frame+1,:]/vis2)**2
print '\nDoing raw visibilities'
addederror = 0.0001
my_error = np.sqrt(np.std((vis2s/vis2)**2,axis=0)**2+addederror**2)
print 'Error:', my_error
def myloglike_vis(cube, ndim, n_params):
    """MultiNest-callable log-likelihood wrapper for squared visibilities.

    Delegates to ``vis_loglikelihood`` with the module-level ``my_observable``,
    ``my_error`` and kernel-phase object ``a``.
    """
    # loglike = kg_loglikelihood(cube,my_observable,my_error,a)
    try:
        loglike = vis_loglikelihood(cube, my_observable, my_error, a)
        return loglike
    except:
        # Deliberate catch-all: model-evaluation failures become -inf so the
        # sampler continues instead of crashing.
        return -np.inf
thistime = clock()
pymultinest.run(myloglike_vis, myprior, n_params, wrapped_params=[1],
verbose=True,resume=False)
thing = pymultinest.Analyzer(n_params = n_params)
s = thing.get_stats()
this_j = trial
vseps[this_j], dvseps[this_j] = s['marginals'][0]['median'], s['marginals'][0]['sigma']
vthetas[this_j], dvthetas[this_j] = s['marginals'][1]['median'], s['marginals'][1]['sigma']
vcons[this_j], dvcons[this_j] = s['marginals'][2]['median'], s['marginals'][2]['sigma']
stuff = thing.get_best_fit()
best_params = stuff['parameters']
print 'Best parameters:',best_params
model_vises = pysco.binary_model(best_params,a,hdr,vis2=True)
plt.clf()
plt.errorbar(my_observable,model_vises,xerr=my_error,color='k',
ls='',markersize=10,linewidth=2.5)
plt.xlabel('Measured Visibilities')
plt.ylabel('Model Visibilities')
plt.title('Model Fit: Visibilities, Contrast %.1f' % contrast)
plt.savefig('vis2_bin_new_%.1f_con.png' % contrast)
print 'Visibilities done'
print_time(clock()-thistime)
'''------------------------------------
Now save!
------------------------------------'''
cmin, cmax = np.min(contrast_list), np.max(contrast_list)
vdata = Table({'Seps':vseps,
'Thetas':vthetas,
'Cons':vcons,
'Dseps':dvseps,
'Dthetas':dvthetas,
'Dcons':dvcons})
vdata.write('raw_vis_sims_new_%.0f_%.0f.csv' % (cmin,cmax))
print 'Visibility fits saved to raw_vis_sims_new_%.0f_%.0f.csv' % (cmin,cmax)
kdata = Table({'Seps':kseps,
'Thetas':kthetas,
'Cons':kcons,
'Dseps':dkseps,
'Dthetas':dkthetas,
'Dcons':dkcons})
kdata.write('kernel_amplitude_sims_new_%.0f_%.0f.csv' % (cmin,cmax))
print 'Kernel amplitude fits saved to kernel_amplitude_sims_new_%.0f_%.0f.csv' \
% (cmin,cmax)
print 'Finished contrast loop'
print_time(clock()-t0) | gpl-3.0 |
EclipseXuLu/DataHouse | DataHouse/tokendata/tokendata_fetcher.py | 1 | 4621 | import json
import time
from pprint import pprint
import pandas as pd
import requests
def precess_token_sales(json_str_path):
    """Parse a TokenData JSON dump and export the token sales to Excel.

    Reads the top-level ``data`` list from ``json_str_path`` and writes one
    row per token sale to ``./TokenData.xlsx`` (sheet ``TOKEN SALES``),
    echoing each record to stdout as it is processed.

    Args:
        json_str_path (str): path of the JSON file to parse.
    """
    def fmt_date(ts):
        # The feed uses '' / 'N/A' / '#N/A' interchangeably for missing
        # timestamps; accept all of them (the original try/except branches
        # handled the two sentinels inconsistently).
        if ts in ('', 'N/A', '#N/A'):
            return ''
        # NOTE(review): stripping '08:00:00' assumes a UTC+8 host clock -- confirm.
        return time.ctime(ts).replace('08:00:00', '')

    with open(json_str_path, mode='rt') as f:
        json_obj = json.load(f)

    result = []
    for index, rec in enumerate(json_obj['data']):
        print('*' * 100)
        print(index)
        pprint(rec)
        print('*' * 100)
        # .get replaces the original bare `except:` that existed only to
        # cope with records lacking a ticker symbol.
        result.append([rec['_id'], rec['name'], rec['description'],
                       rec.get('symbol', ''), rec['status'], rec['usd_raised'],
                       rec['month'], fmt_date(rec['start_date']),
                       fmt_date(rec['end_date']), rec['token_sale_price'],
                       rec['current_token_price'], rec['token_return'],
                       rec['whitepaper']])

    cols = ['id', 'name', 'description', 'symbol', 'status', 'usd_raised', 'month',
            'start_date', 'end_date', 'token_sale_price', 'current_token_price',
            'token_return', 'whitepaper']
    df = pd.DataFrame(result, columns=cols)
    df.to_excel("./TokenData.xlsx", sheet_name='TOKEN SALES', index=False)
    print('processing done!')
def crawl_and_process_token_sales():
    """Fetch the TokenData ICO feed over HTTP and export token sales to Excel.

    Downloads the JSON listing from tokendata.io (20 s timeout) and, on an
    HTTP 200 response, writes one row per token sale to ``./TokenData.xlsx``
    (sheet ``TOKEN SALES``). Any other status code is silently ignored,
    matching the original behavior.
    """
    def fmt_date(ts):
        # The feed uses '' / 'N/A' / '#N/A' interchangeably for missing
        # timestamps; accept all of them (the original try/except branches
        # handled the two sentinels inconsistently).
        if ts in ('', 'N/A', '#N/A'):
            return ''
        # NOTE(review): stripping '08:00:00' assumes a UTC+8 host clock -- confirm.
        return time.ctime(ts).replace('08:00:00', '')

    response = requests.get('https://www.tokendata.io/icos?_=1531038915858', timeout=20)
    if response.status_code != 200:
        return

    result = []
    for index, rec in enumerate(response.json()['data']):
        print('*' * 100)
        print(index)
        pprint(rec)
        print('*' * 100)
        # .get replaces the original bare `except:` that existed only to
        # cope with records lacking a ticker symbol.
        result.append([rec['_id'], rec['name'], rec['description'],
                       rec.get('symbol', ''), rec['status'], rec['usd_raised'],
                       rec['month'], fmt_date(rec['start_date']),
                       fmt_date(rec['end_date']), rec['token_sale_price'],
                       rec['current_token_price'], rec['token_return'],
                       rec['whitepaper']])

    cols = ['id', 'name', 'description', 'symbol', 'status', 'usd_raised', 'month',
            'start_date', 'end_date', 'token_sale_price', 'current_token_price',
            'token_return', 'whitepaper']
    df = pd.DataFrame(result, columns=cols)
    df.to_excel("./TokenData.xlsx", sheet_name='TOKEN SALES', index=False)
    print('processing done!')
if __name__ == '__main__':
    # Entry point: scrape the live feed. The offline variant that parses a
    # previously saved dump is kept below for convenience.
    # precess_token_sales("./data.json")
    crawl_and_process_token_sales()
| mit |
hdmetor/scikit-learn | examples/tree/plot_iris.py | 271 | 2186 | """
================================================================
Plot the decision surface of a decision tree on the iris dataset
================================================================
Plot the decision surface of a decision tree trained on pairs
of features of the iris dataset.
See :ref:`decision tree <tree>` for more information on the estimator.
For each pair of iris features, the decision tree learns decision
boundaries made of combinations of simple thresholding rules inferred from
the training samples.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn.datasets import load_iris
from sklearn.tree import DecisionTreeClassifier
# Parameters
n_classes = 3
plot_colors = "bry"
plot_step = 0.02
# Load data
iris = load_iris()
for pairidx, pair in enumerate([[0, 1], [0, 2], [0, 3],
[1, 2], [1, 3], [2, 3]]):
# We only take the two corresponding features
X = iris.data[:, pair]
y = iris.target
# Shuffle
idx = np.arange(X.shape[0])
np.random.seed(13)
np.random.shuffle(idx)
X = X[idx]
y = y[idx]
# Standardize
mean = X.mean(axis=0)
std = X.std(axis=0)
X = (X - mean) / std
# Train
clf = DecisionTreeClassifier().fit(X, y)
# Plot the decision boundary
plt.subplot(2, 3, pairidx + 1)
x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1
y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1
xx, yy = np.meshgrid(np.arange(x_min, x_max, plot_step),
np.arange(y_min, y_max, plot_step))
Z = clf.predict(np.c_[xx.ravel(), yy.ravel()])
Z = Z.reshape(xx.shape)
cs = plt.contourf(xx, yy, Z, cmap=plt.cm.Paired)
plt.xlabel(iris.feature_names[pair[0]])
plt.ylabel(iris.feature_names[pair[1]])
plt.axis("tight")
# Plot the training points
for i, color in zip(range(n_classes), plot_colors):
idx = np.where(y == i)
plt.scatter(X[idx, 0], X[idx, 1], c=color, label=iris.target_names[i],
cmap=plt.cm.Paired)
plt.axis("tight")
plt.suptitle("Decision surface of a decision tree using paired features")
plt.legend()
plt.show()
| bsd-3-clause |
peastman/msmbuilder | msmbuilder/project_templates/tica/tica-sample-coordinate-plot.py | 9 | 1174 | """Plot the result of sampling a tICA coordinate
{{header}}
"""
# ? include "plot_header.template"
# ? from "plot_macros.template" import xdg_open with context
import numpy as np
import seaborn as sns
from matplotlib import pyplot as plt
from msmbuilder.io import load_trajs, load_generic
sns.set_style('ticks')
colors = sns.color_palette()
## Load
meta, ttrajs = load_trajs('ttrajs')
txx = np.concatenate(list(ttrajs.values()))
inds = load_generic("tica-dimension-0-inds.pickl")
straj = []
for traj_i, frame_i in inds:
straj += [ttrajs[traj_i][frame_i, :]]
straj = np.asarray(straj)
## Overlay sampled trajectory on histogram
def plot_sampled_traj(ax):
    """Overlay the sampled tICA path on a log-density hexbin of all frames.

    Uses the module-level ``txx`` (all projected frames) for the background
    density and ``straj`` (sampled frames) for the overlaid trajectory.
    """
    density_kwargs = dict(cmap='magma_r', mincnt=1, bins='log', alpha=0.8)
    ax.hexbin(txx[:, 0], txx[:, 1], **density_kwargs)
    ax.plot(straj[:, 0], straj[:, 1], 'o-', label='Sampled')
    for set_label, text in ((ax.set_xlabel, "tIC 1"), (ax.set_ylabel, "tIC 2")):
        set_label(text, fontsize=16)
    ax.legend(loc='best')
## Plot
fig, ax = plt.subplots(figsize=(7, 5))
plot_sampled_traj(ax)
fig.tight_layout()
fig.savefig('tica-dimension-0-heatmap.pdf')
# {{xdg_open('tica-dimension-0-heatmap.pdf')}}
| lgpl-2.1 |
KarrLab/obj_model | obj_model/io.py | 1 | 96862 | """ Reading/writing schema objects to/from files
* Comma separated values (.csv)
* Excel (.xlsx)
* JavaScript Object Notation (.json)
* Tab separated values (.tsv)
* Yet Another Markup Language (.yaml, .yml)
:Author: Jonathan Karr <karr@mssm.edu>
:Author: Arthur Goldberg <Arthur.Goldberg@mssm.edu>
:Date: 2016-11-23
:Copyright: 2016, Karr Lab
:License: MIT
"""
import abc
import collections
import copy
import importlib
import inspect
import json
import obj_model
import os
import pandas
import re
import six
import stringcase
import wc_utils.workbook.io
import yaml
from datetime import datetime
from itertools import chain, compress
from natsort import natsorted, ns
from os.path import basename, dirname, splitext
from warnings import warn
from obj_model import utils
from obj_model.core import (Model, Attribute, RelatedAttribute, Validator, TabularOrientation,
InvalidObject, excel_col_name,
InvalidAttribute, ObjModelWarning,
TOC_NAME, SBTAB_TOC_NAME,
SCHEMA_NAME, SBTAB_SCHEMA_NAME)
from wc_utils.util.list import transpose, det_dedupe, is_sorted, dict_by_class
from wc_utils.util.misc import quote
from wc_utils.util.string import indent_forest
from wc_utils.util import git
from wc_utils.workbook.core import get_column_letter, Formula
from wc_utils.workbook.io import WorkbookStyle, WorksheetStyle, Hyperlink, WorksheetValidation, WorksheetValidationOrientation
# Lenient defaults applied when reading SBtab-formatted files: tolerate
# missing, extra, or reordered worksheets/attributes instead of raising.
SBTAB_DEFAULT_READER_OPTS = {
    'ignore_missing_sheets': True,
    'ignore_extra_sheets': True,
    'ignore_sheet_order': True,
    'ignore_missing_attributes': True,
    'ignore_extra_attributes': True,
    'ignore_attribute_order': True,
}
class WriterBase(six.with_metaclass(abc.ABCMeta, object)):
    """ Interface for classes which write model objects to file(s)

    Attributes:
        MODELS (:obj:`tuple` of :obj:`type`): default types of models to export and the order in which
            to export them
    """

    # default model types to export, in export order; concrete writers override this
    MODELS = ()

    @abc.abstractmethod
    def run(self, path, objects, model_metadata=None, models=None,
            get_related=True, include_all_attributes=True, validate=True,
            title=None, description=None, keywords=None, version=None, language=None, creator=None,
            toc=True, extra_entries=0, data_repo_metadata=False, schema_package=None,
            sbtab=False):
        """ Write a list of model classes to an Excel file, with one worksheet for each model, or to
        a set of .csv or .tsv files, with one file for each model.

        Args:
            path (:obj:`str`): path to write file(s)
            objects (:obj:`Model` or :obj:`list` of :obj:`Model`): object or list of objects
            model_metadata (:obj:`dict`): dictionary that maps models to dictionary with their metadata to
                be saved to header row (e.g., `!!ObjModel ...`)
            models (:obj:`list` of :obj:`Model`, optional): models
            get_related (:obj:`bool`, optional): if :obj:`True`, write object and all related objects
            include_all_attributes (:obj:`bool`, optional): if :obj:`True`, export all attributes including those
                not explictly included in `Model.Meta.attribute_order`
            validate (:obj:`bool`, optional): if :obj:`True`, validate the data
            title (:obj:`str`, optional): title
            description (:obj:`str`, optional): description
            keywords (:obj:`str`, optional): keywords
            version (:obj:`str`, optional): version
            language (:obj:`str`, optional): language
            creator (:obj:`str`, optional): creator
            toc (:obj:`bool`, optional): if :obj:`True`, include additional worksheet with table of contents
            extra_entries (:obj:`int`, optional): additional entries to display
            data_repo_metadata (:obj:`bool`, optional): if :obj:`True`, try to write metadata information
                about the file's Git repo; a warning will be generated if the repo repo is not
                current with origin, except for the file
            schema_package (:obj:`str`, optional): the package which defines the `obj_model` schema
                used by the file; if not :obj:`None`, try to write metadata information about the
                the schema's Git repository: the repo must be current with origin
            sbtab (:obj:`bool`, optional): if :obj:`True`, use SBtab format
        """
        pass  # pragma: no cover

    def make_metadata_objects(self, data_repo_metadata, path, schema_package):
        """ Make models that store Git repository metadata

        Metadata models can only be created from suitable Git repos.
        Failures to obtain metadata are reported as warnings that do not interfeer with writing
        data files.

        Args:
            data_repo_metadata (:obj:`bool`): if :obj:`True`, try to obtain metadata information
                about the Git repo containing `path`; the repo must be current with origin, except
                for the file at `path`
            path (:obj:`str`): path of the file(s) that will be written
            schema_package (:obj:`str`, optional): the package which defines the `obj_model` schema
                used by the file; if not :obj:`None`, try to obtain metadata information about the
                the schema's Git repository from a package on `sys.path`: the repo must be current
                with its origin

        Returns:
            :obj:`list` of :obj:`Model`: metadata objects(s) created
        """
        metadata_objects = []
        if data_repo_metadata:
            # create DataRepoMetadata instance
            try:
                data_repo_metadata_obj = utils.DataRepoMetadata()
                unsuitable_changes = utils.set_git_repo_metadata_from_path(
                    data_repo_metadata_obj,
                    git.RepoMetadataCollectionType.DATA_REPO, path=path)
                # the object is appended even when there are unsuitable changes;
                # those only produce a warning below
                metadata_objects.append(data_repo_metadata_obj)
                if unsuitable_changes:
                    warn("Git repo metadata for data repo was obtained; "
                         "Ensure that the data file '{}' doesn't depend on these changes in the git "
                         "repo containing it:\n{}".format(path, '\n'.join(unsuitable_changes)), IoWarning)
            except ValueError as e:
                # failure to gather data-repo metadata is non-fatal
                warn("Cannot obtain git repo metadata for data repo containing: '{}':\n{}".format(
                    path, str(e)), IoWarning)

        if schema_package:
            # create SchemaRepoMetadata instance
            try:
                schema_repo_metadata = utils.SchemaRepoMetadata()
                spec = importlib.util.find_spec(schema_package)
                if not spec:
                    raise ValueError("package '{}' not found".format(schema_package))
                unsuitable_changes = utils.set_git_repo_metadata_from_path(
                    schema_repo_metadata,
                    git.RepoMetadataCollectionType.SCHEMA_REPO,
                    path=spec.origin)
                # unlike the data repo above, any unsuitable change in the schema
                # repo aborts metadata collection (raised, then caught below)
                if unsuitable_changes:
                    raise ValueError("Cannot gather metadata for schema repo from Git repo "
                                     "containing '{}':\n{}".format(path, '\n'.join(unsuitable_changes)))
                metadata_objects.append(schema_repo_metadata)
            except ValueError as e:
                warn("Cannot obtain git repo metadata for schema repo '{}' used by data file: '{}':\n{}".format(
                    schema_package, path, str(e)), IoWarning)
        return metadata_objects
class JsonWriter(WriterBase):
    """ Write model objects to a JSON or YAML file """

    def run(self, path, objects, model_metadata=None, models=None, get_related=True,
            include_all_attributes=True, validate=True,
            title=None, description=None, keywords=None, version=None, language=None, creator=None,
            toc=False, extra_entries=0, data_repo_metadata=False, schema_package=None,
            sbtab=False):
        """ Write a list of model classes to a JSON or YAML file

        Args:
            path (:obj:`str`): path to write file(s)
            objects (:obj:`Model` or :obj:`list` of :obj:`Model`): object or list of objects
            model_metadata (:obj:`dict`): dictionary that maps models to dictionary with their metadata to
                be saved to header row (e.g., `!!ObjModel ...`)
            models (:obj:`list` of :obj:`Model`, optional): models
            get_related (:obj:`bool`, optional): if :obj:`True`, write object and all related objects
            include_all_attributes (:obj:`bool`, optional): if :obj:`True`, export all attributes including those
                not explictly included in `Model.Meta.attribute_order`
            validate (:obj:`bool`, optional): if :obj:`True`, validate the data
            title (:obj:`str`, optional): title
            description (:obj:`str`, optional): description
            keywords (:obj:`str`, optional): keywords
            version (:obj:`str`, optional): version
            language (:obj:`str`, optional): language
            creator (:obj:`str`, optional): creator
            toc (:obj:`bool`, optional): if :obj:`True`, include additional worksheet with table of contents
            extra_entries (:obj:`int`, optional): additional entries to display
            data_repo_metadata (:obj:`bool`, optional): if :obj:`True`, try to write metadata information
                about the file's Git repo; the repo must be current with origin, except for the file
            schema_package (:obj:`str`, optional): the package which defines the `obj_model` schema
                used by the file; if not :obj:`None`, try to write metadata information about the
                the schema's Git repository: the repo must be current with origin
            sbtab (:obj:`bool`, optional): if :obj:`True`, use SBtab format

        Raises:
            :obj:`ValueError`: if model names are not unique or output format is not supported
        """
        if models is None:
            models = self.MODELS
        if isinstance(models, (list, tuple)):
            models = list(models)
        else:
            models = [models]

        if not include_all_attributes:
            warn('`include_all_attributes=False` has no effect', IoWarning)

        # validate
        if objects and validate:
            error = Validator().run(objects, get_related=get_related)
            if error:
                warn('Some data will not be written because objects are not valid:\n {}'.format(
                    str(error).replace('\n', '\n ').rstrip()), IoWarning)

        # create metadata objects
        metadata_objects = self.make_metadata_objects(data_repo_metadata, path, schema_package)
        if metadata_objects:
            # put metadata instances at start of objects
            # NOTE(review): this concatenation assumes `objects` is a list; a single
            # Model or None with data_repo_metadata=True would raise -- confirm callers.
            objects = metadata_objects + objects

        # convert object(s) (and their relatives) to Python dicts and lists
        if objects is None:
            json_objects = None
        elif isinstance(objects, (list, tuple)):
            json_objects = []
            encoded = {}  # shared encoding cache so related objects are serialized once
            for obj in objects:
                json_objects.append(obj.to_dict(encoded=encoded))
                models.append(obj.__class__)
        else:
            json_objects = objects.to_dict()
            models.append(objects.__class__)

        # check that model names are unique so that objects will be decodable
        models = set(models)
        models_by_name = {model.__name__: model for model in models}
        if len(list(models_by_name.keys())) < len(models):
            raise ValueError('Model names must be unique to decode objects')

        # save plain Python object to JSON or YAML, chosen by file extension
        _, ext = splitext(path)
        ext = ext.lower()
        with open(path, 'w') as file:
            if ext == '.json':
                json.dump(json_objects, file)
            elif ext in ['.yaml', '.yml']:
                yaml.dump(json_objects, file, default_flow_style=False)
            else:
                raise ValueError('Unsupported format {}'.format(ext))
class WorkbookWriter(WriterBase):
""" Write model objects to an Excel file or CSV or TSV file(s)
"""
    def run(self, path, objects, model_metadata=None, models=None, get_related=True,
            include_all_attributes=True, validate=True,
            title=None, description=None, keywords=None, version=None, language=None, creator=None,
            toc=True, extra_entries=0, data_repo_metadata=False, schema_package=None,
            sbtab=False):
        """ Write a list of model instances to an Excel file, with one worksheet for each model class,
        or to a set of .csv or .tsv files, with one file for each model class

        Args:
            path (:obj:`str`): path to write file(s)
            objects (:obj:`Model` or :obj:`list` of :obj:`Model`): `model` instance or list of `model` instances
            model_metadata (:obj:`dict`): dictionary that maps models to dictionary with their metadata to
                be saved to header row (e.g., `!!ObjModel ...`)
            models (:obj:`list` of :obj:`Model`, optional): models in the order that they should
                appear as worksheets; all models which are not in `models` will
                follow in alphabetical order
            get_related (:obj:`bool`, optional): if :obj:`True`, write `objects` and all their related objects
            include_all_attributes (:obj:`bool`, optional): if :obj:`True`, export all attributes including those
                not explictly included in `Model.Meta.attribute_order`
            validate (:obj:`bool`, optional): if :obj:`True`, validate the data
            title (:obj:`str`, optional): title
            description (:obj:`str`, optional): description
            keywords (:obj:`str`, optional): keywords
            version (:obj:`str`, optional): version
            language (:obj:`str`, optional): language
            creator (:obj:`str`, optional): creator
            toc (:obj:`bool`, optional): if :obj:`True`, include additional worksheet with table of contents
            extra_entries (:obj:`int`, optional): additional entries to display
            data_repo_metadata (:obj:`bool`, optional): if :obj:`True`, try to write metadata information
                about the file's Git repo; the repo must be current with origin, except for the file
            schema_package (:obj:`str`, optional): the package which defines the `obj_model` schema
                used by the file; if not :obj:`None`, try to write metadata information about the
                the schema's Git repository: the repo must be current with origin
            sbtab (:obj:`bool`, optional): if :obj:`True`, use SBtab format

        Raises:
            :obj:`ValueError`: if no model is provided or a class cannot be serialized
        """
        if objects is None:
            objects = []
        elif not isinstance(objects, (list, tuple)):
            objects = [objects]

        model_metadata = model_metadata or {}

        # get related objects
        all_objects = objects
        if get_related:
            all_objects = Model.get_all_related(objects)

        if validate:
            error = Validator().run(all_objects)
            if error:
                warn('Some data will not be written because objects are not valid:\n {}'.format(
                    str(error).replace('\n', '\n ').rstrip()), IoWarning)

        # create metadata objects
        metadata_objects = self.make_metadata_objects(data_repo_metadata, path, schema_package)
        if metadata_objects:
            all_objects.extend(metadata_objects)
            # put metadata models at start of model list
            # NOTE(review): `list(models)` assumes `models` is not None when
            # metadata is requested -- confirm callers always pass models here.
            models = [obj.__class__ for obj in metadata_objects] + list(models)

        # group objects by class
        grouped_objects = dict_by_class(all_objects)

        # check that at least one model was provided
        if models is None:
            models = self.MODELS
        if isinstance(models, (list, tuple)):
            models = list(models)
        else:
            models = [models]
        # append any classes present in the data but absent from `models`
        for model in grouped_objects.keys():
            if model not in models:
                models.append(model)
        # cell- and multiple-cells-oriented models are embedded in other sheets,
        # so they never get worksheets of their own
        models = list(filter(lambda model: model.Meta.table_format not in [
            TabularOrientation.cell, TabularOrientation.multiple_cells], models))
        if not models:
            raise ValueError('At least one `Model` must be provided')

        # check that models can be unambiguously mapped to worksheets
        sheet_names = []
        for model in models:
            if model.Meta.table_format == TabularOrientation.row:
                sheet_names.append(model.Meta.verbose_name_plural)
            else:
                sheet_names.append(model.Meta.verbose_name)
        ambiguous_sheet_names = WorkbookReader.get_ambiguous_sheet_names(sheet_names, models)
        if ambiguous_sheet_names:
            msg = 'The following sheets cannot be unambiguously mapped to models:'
            for sheet_name, models in ambiguous_sheet_names.items():
                msg += '\n {}: {}'.format(sheet_name, ', '.join(model.__name__ for model in models))
            raise ValueError(msg)

        # check that models are serializable
        for cls in grouped_objects.keys():
            if not cls.is_serializable():
                raise ValueError('Class {}.{} cannot be serialized'.format(cls.__module__, cls.__name__))

        # get neglected models (present in data but not in the requested order)
        unordered_models = natsorted(set(grouped_objects.keys()).difference(set(models)),
                                     lambda model: model.Meta.verbose_name, alg=ns.IGNORECASE)

        # initialize workbook; the writer class is chosen from the file extension
        _, ext = splitext(path)
        writer_cls = wc_utils.workbook.io.get_writer(ext)
        writer = writer_cls(path,
                            title=title, description=description, keywords=keywords,
                            version=version, language=language, creator=creator)
        writer.initialize_workbook()

        # add table of contents to workbook
        all_models = models + unordered_models
        if toc:
            self.write_toc(writer, all_models, grouped_objects, sbtab=sbtab)

        # add sheets to workbook
        sheet_models = list(filter(lambda model: model.Meta.table_format not in [
            TabularOrientation.cell, TabularOrientation.multiple_cells], all_models))
        encoded = {}  # shared encoding cache across all sheets
        for model in sheet_models:
            if model in grouped_objects:
                objects = grouped_objects[model]
            else:
                objects = []
            self.write_model(writer, model, objects, model_metadata.get(model, {}),
                             sheet_models, include_all_attributes=include_all_attributes, encoded=encoded,
                             extra_entries=extra_entries, sbtab=sbtab)

        # finalize workbook
        writer.finalize_workbook()
def write_toc(self, writer, models, grouped_objects, sbtab=False):
    """ Write a worksheet with a table of contents

    Args:
        writer (:obj:`wc_utils.workbook.io.Writer`): io writer
        models (:obj:`list` of :obj:`Model`, optional): models in the order that they should
            appear in the table of contents
        grouped_objects (:obj:`dict`): dictionary that maps models (:obj:`Model`) to their
            instances; used only to report the current number of objects per sheet
        sbtab (:obj:`bool`, optional): if :obj:`True`, use SBtab format
    """
    # choose sheet name, header dialect, and column headings for the selected format
    if sbtab:
        sheet_name = '!' + SBTAB_TOC_NAME
        format = 'SBtab'  # NOTE: shadows the `format` builtin within this method
        table_id = SBTAB_TOC_NAME
        version = '2.0'
        headings = ['!Table', '!Description', '!NumberOfObjects']
    else:
        sheet_name = TOC_NAME
        format = 'ObjModel'
        table_id = TOC_NAME
        version = obj_model.__version__
        headings = ['Table', 'Description', 'Number of objects']

    # first row: one cell holding the '!!'-prefixed document metadata string
    now = datetime.now()
    metadata = ["!!{}".format(format),
                "TableID='{}'".format(table_id),
                "TableName='{}'".format(table_id[0].upper() + table_id[1:].lower()),
                "Description='Table/model and column/attribute definitions'",
                "Date='{:04d}-{:02d}-{:02d} {:02d}:{:02d}:{:02d}'".format(
                    now.year, now.month, now.day, now.hour, now.minute, now.second),
                "{}Version='{}'".format(format, version),
                ]
    content = [
        [' '.join(metadata)],
        headings,
    ]

    hyperlinks = []
    for i_model, model in enumerate(models):
        # models rendered inside other sheets get no worksheet, hence no TOC entry
        if model.Meta.table_format in [TabularOrientation.cell, TabularOrientation.multiple_cells]:
            continue

        if model.Meta.table_format == TabularOrientation.row:
            ws_name = model.Meta.verbose_name_plural
        else:
            ws_name = model.Meta.verbose_name

        # NOTE(review): `i_model + 1` counts skipped (cell/multiple_cells) models too, so the
        # hyperlink row can drift from the row actually appended to `content` when such a model
        # precedes this one -- confirm against wc_utils Hyperlink row semantics
        hyperlinks.append(Hyperlink(i_model + 1, 0,
                                    "internal:'{}'!A1".format(ws_name),
                                    tip='Click to view {}'.format(ws_name.lower())))

        if sbtab:
            # SBtab worksheet names carry a leading '!' that is hidden in the TOC
            ws_display_name = ws_name[1:]
        else:
            ws_display_name = ws_name

        # a related class shown as multiple cells adds one extra heading row/column to the sheet
        has_multiple_cells = False
        for attr in model.Meta.attributes.values():
            if isinstance(attr, RelatedAttribute) and \
                    attr.related_class.Meta.table_format == TabularOrientation.multiple_cells:
                has_multiple_cells = True
                break

        # cell range whose non-empty count equals the number of objects in the sheet
        if model.Meta.table_format == TabularOrientation.row:
            range = 'A{}:A{}'.format(3 + has_multiple_cells, 2 ** 20)  # NOTE: shadows the `range` builtin
        else:
            range = '{}2:{}2'.format(get_column_letter(2 + has_multiple_cells),
                                     get_column_letter(2 ** 14))
        content.append([
            ws_display_name,
            model.Meta.description,
            # live COUNTA formula; current object count serves as the cached fallback value
            Formula("=COUNTA('{}'!{})".format(ws_name, range),
                    len(grouped_objects.get(model, []))),
        ])

    style = WorksheetStyle(
        title_rows=1,
        head_rows=1,
        extra_rows=0,
        extra_columns=0,
        hyperlinks=hyperlinks,
    )
    writer.write_worksheet(sheet_name, content, style=style)
def write_model(self, writer, model, objects, model_metadata, sheet_models, include_all_attributes=True, encoded=None, extra_entries=0,
                sbtab=False):
    """ Write a list of model objects to a file

    Args:
        writer (:obj:`wc_utils.workbook.io.Writer`): io writer
        model (:obj:`type`): model
        objects (:obj:`list` of :obj:`Model`): list of instances of `model`
        model_metadata (:obj:`dict`): dictionary of model metadata
        sheet_models (:obj:`list` of :obj:`Model`): models encoded as separate sheets
        include_all_attributes (:obj:`bool`, optional): if :obj:`True`, export all attributes
            including those not explictly included in `Model.Meta.attribute_order`
        encoded (:obj:`dict`, optional): objects that have already been encoded and their assigned JSON identifiers
        extra_entries (:obj:`int`, optional): additional entries to display
        sbtab (:obj:`bool`, optional): if :obj:`True`, use SBtab format
    """
    attrs, _, headings, merge_ranges, field_validations, metadata_headings = get_fields(
        model, model_metadata,
        include_all_attributes=include_all_attributes,
        sheet_models=sheet_models,
        sbtab=sbtab)

    # objects: sort into canonical order, then serialize each one to a row of cells
    model.sort(objects)
    data = []
    for obj in objects:
        # comments attached to the object are emitted as '%'-prefixed rows before the object
        for comment in obj._comments:
            data.append(['% ' + comment])

        # properties
        obj_data = []
        for attr in attrs:
            val = getattr(obj, attr.name)
            if isinstance(attr, RelatedAttribute):
                if attr.related_class.Meta.table_format == TabularOrientation.multiple_cells:
                    # related object is embedded as a group of cells, one per sub-attribute
                    sub_attrs = get_ordered_attributes(attr.related_class, include_all_attributes=include_all_attributes)
                    for sub_attr in sub_attrs:
                        if val:
                            sub_val = getattr(val, sub_attr.name)
                            if isinstance(sub_attr, RelatedAttribute):
                                obj_data.append(sub_attr.serialize(sub_val, encoded=encoded))
                            else:
                                obj_data.append(sub_attr.serialize(sub_val))
                        else:
                            obj_data.append(None)
                else:
                    # fixed: reuse `val` instead of redundantly re-reading the attribute
                    obj_data.append(attr.serialize(val, encoded=encoded))
            else:
                obj_data.append(attr.serialize(val))
        data.append(obj_data)

    # validations
    if model.Meta.table_format == TabularOrientation.column:
        # in column orientation the metadata rows shift the fields down; pad validations to match
        field_validations = [None] * len(metadata_headings) + field_validations
    validation = WorksheetValidation(orientation=WorksheetValidationOrientation[model.Meta.table_format.name],
                                     fields=field_validations)

    self.write_sheet(writer, model, data, headings, metadata_headings, validation,
                     extra_entries=extra_entries, merge_ranges=merge_ranges, sbtab=sbtab)
def write_sheet(self, writer, model, data, headings, metadata_headings, validation,
                extra_entries=0, merge_ranges=None, sbtab=False):
    """ Write data to sheet

    Args:
        writer (:obj:`wc_utils.workbook.io.Writer`): io writer
        model (:obj:`type`): model
        data (:obj:`list` of :obj:`list` of :obj:`object`): list of list of cell values
        headings (:obj:`list` of :obj:`list` of :obj:`str`): list of lists of headings
        metadata_headings (:obj:`list` of :obj:`list` of :obj:`str`): model metadata (name, description)
            to print at the top of the worksheet
        validation (:obj:`WorksheetValidation`): validation
        extra_entries (:obj:`int`, optional): additional entries to display
        merge_ranges (:obj:`list` of :obj:`tuple`): list of ranges of cells to merge
        sbtab (:obj:`bool`, optional): if :obj:`True`, use SBtab format
    """
    style = self.create_worksheet_style(model, extra_entries=extra_entries)
    if model.Meta.table_format == TabularOrientation.row:
        # row orientation: headings become column headers above the data rows
        sheet_name = model.Meta.verbose_name_plural
        row_headings = []
        column_headings = headings
        style.auto_filter = True
        style.title_rows = len(metadata_headings)
        style.head_rows = len(column_headings)
        if merge_ranges:
            style.merge_ranges = merge_ranges
        else:
            style.merge_ranges = []
    else:
        # column orientation: transpose the data and place headings in the left-most columns
        sheet_name = model.Meta.verbose_name
        data = transpose(data)
        style.auto_filter = False
        row_headings = headings
        column_headings = []
        style.title_rows = len(metadata_headings)
        style.head_rows = 0
        style.head_columns = len(row_headings)
        if merge_ranges:
            # merge ranges were computed in row orientation: swap row/column coordinates
            # and shift by the number of metadata rows
            n = len(metadata_headings)
            style.merge_ranges = [(start_col + n, start_row - n, end_col + n, end_row - n)
                                  for start_row, start_col, end_row, end_col in merge_ranges]
        else:
            style.merge_ranges = []

    # merge data, headings: prepend each row's headings onto the (possibly transposed) data rows
    for i_row, row_heading in enumerate(transpose(row_headings)):
        if i_row < len(data):
            row = data[i_row]
        else:
            row = []
            data.append(row)

        for val in reversed(row_heading):
            row.insert(0, val)

    for _ in row_headings:
        for column_heading in column_headings:
            column_heading.insert(
                0, None)  # pragma: no cover # unreachable because row_headings and column_headings cannot both be non-empty

    content = metadata_headings + column_headings + data

    # write content to worksheet
    writer.write_worksheet(sheet_name, content, style=style, validation=validation)
@staticmethod
def create_worksheet_style(model, extra_entries=0):
    """ Build the worksheet style used to render `model`

    Extra (blank) entries are allocated along the axis on which the model's
    objects grow: rows for row-oriented models, columns otherwise.

    Args:
        model (:obj:`type`): model class
        extra_entries (:obj:`int`, optional): additional entries to display

    Returns:
        :obj:`WorksheetStyle`: worksheet style
    """
    grows_by_row = model.Meta.table_format == TabularOrientation.row
    return WorksheetStyle(
        extra_rows=extra_entries if grows_by_row else 0,
        extra_columns=0 if grows_by_row else extra_entries,
    )
class PandasWriter(WorkbookWriter):
    """ Write model instances to a dictionary of :obj:`pandas.DataFrame`

    Attributes:
        _data_frames (:obj:`dict`): dictionary that maps models (:obj:`Model`)
            to their instances (:obj:`pandas.DataFrame`)
    """

    def __init__(self):
        # populated by :obj:`run`; maps each model class to its DataFrame
        self._data_frames = None

    def run(self, objects, models=None, get_related=True,
            include_all_attributes=True, validate=True,
            sbtab=False):
        """ Write model instances to a dictionary of :obj:`pandas.DataFrame`

        Args:
            objects (:obj:`Model` or :obj:`list` of :obj:`Model`): object or list of objects
            models (:obj:`list` of :obj:`Model`, optional): models in the order that they should
                appear as worksheets; all models which are not in `models` will
                follow in alphabetical order
            get_related (:obj:`bool`, optional): if :obj:`True`, write `objects` and all their related objects
            include_all_attributes (:obj:`bool`, optional): if :obj:`True`, export all attributes including those
                not explictly included in `Model.Meta.attribute_order`
            validate (:obj:`bool`, optional): if :obj:`True`, validate the data
            sbtab (:obj:`bool`, optional): if :obj:`True`, use SBtab format

        Returns:
            :obj:`dict`: dictionary that maps models (:obj:`Model`) to their
                instances (:obj:`pandas.DataFrame`)
        """
        self._data_frames = {}
        # delegate to the workbook writer; '*.csv' selects the delimiter-separated
        # code path, and the overridden write_sheet captures the data instead
        super(PandasWriter, self).run(
            '*.csv', objects,
            models=models,
            get_related=get_related,
            include_all_attributes=include_all_attributes,
            validate=validate,
            toc=False,
            sbtab=sbtab)
        return self._data_frames

    def write_sheet(self, writer, model, data, headings, metadata_headings, validation,
                    extra_entries=0, merge_ranges=None, sbtab=False):
        """ Capture a model's data as a :obj:`pandas.DataFrame`

        Args:
            writer (:obj:`wc_utils.workbook.io.Writer`): io writer
            model (:obj:`type`): model
            data (:obj:`list` of :obj:`list` of :obj:`object`): list of list of cell values
            headings (:obj:`list` of :obj:`list` of :obj:`str`): list of lists of headings
            metadata_headings (:obj:`list` of :obj:`list` of :obj:`str`): model metadata (name, description)
                to print at the top of the worksheet
            validation (:obj:`WorksheetValidation`): validation
            extra_entries (:obj:`int`, optional): additional entries to display
            merge_ranges (:obj:`list` of :obj:`tuple`): list of ranges of cells to merge
            sbtab (:obj:`bool`, optional): if :obj:`True`, use SBtab format
        """
        if len(headings) == 1:
            # one heading row -> flat column index; strip the SBtab '!' marker
            columns = [h[1:] if sbtab and h.startswith('!') else h
                       for h in headings[0]]
        else:
            # several heading rows -> hierarchical column index;
            # headings are cleaned in place (list identity preserved), as before
            for heading_row in headings:
                heading_row[:] = [c[1:] if sbtab and isinstance(c, str) and c.startswith('!') else c
                                  for c in heading_row]
            columns = pandas.MultiIndex.from_tuples(transpose(headings))

        self._data_frames[model] = pandas.DataFrame(data, columns=columns)
class Writer(WriterBase):
    """ Write a list of model objects to file(s) """

    @staticmethod
    def get_writer(path):
        """ Get writer

        Args:
            path (:obj:`str`): path to write file(s)

        Returns:
            :obj:`type`: writer class

        Raises:
            :obj:`ValueError`: if extension is not supported
        """
        _, ext = splitext(path)
        ext = ext.lower()
        if ext in ['.csv', '.tsv', '.xlsx']:
            return WorkbookWriter
        elif ext in ['.json', '.yaml', '.yml']:
            return JsonWriter
        else:
            raise ValueError('Invalid export format: {}'.format(ext))

    def run(self, path, objects, model_metadata=None, models=None, get_related=True, include_all_attributes=True, validate=True,
            title=None, description=None, keywords=None, version=None, language=None, creator=None,
            toc=True, extra_entries=0, data_repo_metadata=False, schema_package=None,
            sbtab=False):
        """ Write a list of model classes to an Excel file, with one worksheet for each model, or to
        a set of .csv or .tsv files, with one file for each model.

        Args:
            path (:obj:`str`): path to write file(s)
            objects (:obj:`Model` or :obj:`list` of :obj:`Model`): object or list of objects
            model_metadata (:obj:`dict`): dictionary that maps models to dictionary with their metadata to
                be saved to header row (e.g., `!!ObjModel ...`)
            models (:obj:`list` of :obj:`Model`, optional): models in the order that they should
                appear as worksheets; all models which are not in `models` will
                follow in alphabetical order
            get_related (:obj:`bool`, optional): if :obj:`True`, write `objects` and all related objects
            include_all_attributes (:obj:`bool`, optional): if :obj:`True`, export all attributes including those
                not explictly included in `Model.Meta.attribute_order`
            validate (:obj:`bool`, optional): if :obj:`True`, validate the data
            title (:obj:`str`, optional): title
            description (:obj:`str`, optional): description
            keywords (:obj:`str`, optional): keywords
            version (:obj:`str`, optional): version
            language (:obj:`str`, optional): language
            creator (:obj:`str`, optional): creator
            toc (:obj:`bool`, optional): if :obj:`True`, include additional worksheet with table of contents
            extra_entries (:obj:`int`, optional): additional entries to display
            data_repo_metadata (:obj:`bool`, optional): if :obj:`True`, try to write metadata information
                about the file's Git repo; the repo must be current with origin, except for the file
            schema_package (:obj:`str`, optional): the package which defines the `obj_model` schema
                used by the file; if not :obj:`None`, try to write metadata information about the
                the schema's Git repository: the repo must be current with origin
            sbtab (:obj:`bool`, optional): if :obj:`True`, use SBtab format
        """
        # pick the concrete writer class for the file extension
        # (renamed from `Writer`, which shadowed this class's own name)
        writer_cls = self.get_writer(path)
        # fixed: `version` was accepted but silently dropped; forward it to the writer
        writer_cls().run(path, objects, model_metadata=model_metadata, models=models, get_related=get_related,
                         include_all_attributes=include_all_attributes, validate=validate,
                         title=title, description=description, keywords=keywords, version=version,
                         language=language, creator=creator, toc=toc, extra_entries=extra_entries,
                         data_repo_metadata=data_repo_metadata, schema_package=schema_package,
                         sbtab=sbtab)
class ReaderBase(six.with_metaclass(abc.ABCMeta, object)):
    """ Interface for classes which read model objects from file(s)

    (fixed: the docstring previously said "write ... to file(s)", copied from the writer interface)

    Attributes:
        _model_metadata (:obj:`dict`): dictionary which maps models (:obj:`Model`) to dictionaries of
            metadata read from a document (e.g., `!!ObjModel Date='...' ...`)
        MODELS (:obj:`tuple` of :obj:`type`): default types of models to read and the order in which
            to read them
    """
    MODELS = ()

    def __init__(self):
        # populated by concrete readers while parsing a document
        self._model_metadata = None

    @abc.abstractmethod
    def run(self, path, models=None,
            ignore_missing_sheets=False, ignore_extra_sheets=False, ignore_sheet_order=False,
            include_all_attributes=True, ignore_missing_attributes=False, ignore_extra_attributes=False,
            ignore_attribute_order=False, ignore_empty_rows=True,
            group_objects_by_model=False, validate=True,
            sbtab=False):
        """ Read a list of model objects from file(s) and, optionally, validate them

        Args:
            path (:obj:`str`): path to file(s)
            models (:obj:`types.TypeType` or :obj:`list` of :obj:`types.TypeType`, optional): type
                of object to read or list of types of objects to read
            ignore_missing_sheets (:obj:`bool`, optional): if :obj:`False`, report an error if a worksheet/
                file is missing for one or more models
            ignore_extra_sheets (:obj:`bool`, optional): if :obj:`True` and all `models` are found, ignore
                other worksheets or files
            ignore_sheet_order (:obj:`bool`, optional): if :obj:`True`, do not require the sheets to be provided
                in the canonical order
            include_all_attributes (:obj:`bool`, optional): if :obj:`True`, export all attributes including those
                not explictly included in `Model.Meta.attribute_order`
            ignore_missing_attributes (:obj:`bool`, optional): if :obj:`False`, report an error if a
                worksheet/file doesn't contain all of attributes in a model in `models`
            ignore_extra_attributes (:obj:`bool`, optional): if :obj:`True`, do not report errors if
                attributes in the data are not in the model
            ignore_attribute_order (:obj:`bool`, optional): if :obj:`True`, do not require the attributes to be provided
                in the canonical order
            ignore_empty_rows (:obj:`bool`, optional): if :obj:`True`, ignore empty rows
            group_objects_by_model (:obj:`bool`, optional): if :obj:`True`, group decoded objects by their
                types
            validate (:obj:`bool`, optional): if :obj:`True`, validate the data
            sbtab (:obj:`bool`, optional): if :obj:`True`, use SBtab format

        Returns:
            :obj:`dict`: model objects grouped by `Model` class
        """
        pass  # pragma: no cover
class JsonReader(ReaderBase):
    """ Read model objects from a JSON or YAML file """

    def run(self, path, models=None,
            ignore_missing_sheets=False, ignore_extra_sheets=False, ignore_sheet_order=False,
            include_all_attributes=True, ignore_missing_attributes=False, ignore_extra_attributes=False,
            ignore_attribute_order=False, ignore_empty_rows=True,
            group_objects_by_model=False, validate=True,
            sbtab=False):
        """ Read model objects from file(s) and, optionally, validate them

        Args:
            path (:obj:`str`): path to file(s)
            models (:obj:`types.TypeType` or :obj:`list` of :obj:`types.TypeType`, optional): type or list
                of type of objects to read
            ignore_missing_sheets (:obj:`bool`, optional): if :obj:`False`, report an error if a worksheet/
                file is missing for one or more models
            ignore_extra_sheets (:obj:`bool`, optional): if :obj:`True` and all `models` are found, ignore
                other worksheets or files
            ignore_sheet_order (:obj:`bool`, optional): if :obj:`True`, do not require the sheets to be provided
                in the canonical order
            include_all_attributes (:obj:`bool`, optional): if :obj:`True`, export all attributes including those
                not explictly included in `Model.Meta.attribute_order`
            ignore_missing_attributes (:obj:`bool`, optional): if :obj:`False`, report an error if a
                worksheet/file doesn't contain all of attributes in a model in `models`
            ignore_extra_attributes (:obj:`bool`, optional): if :obj:`True`, do not report errors if
                attributes in the data are not in the model
            ignore_attribute_order (:obj:`bool`, optional): if :obj:`True`, do not require the attributes to be provided
                in the canonical order
            ignore_empty_rows (:obj:`bool`, optional): if :obj:`True`, ignore empty rows
            group_objects_by_model (:obj:`bool`, optional): if :obj:`True`, group decoded objects by their
                types
            validate (:obj:`bool`, optional): if :obj:`True`, validate the data
            sbtab (:obj:`bool`, optional): if :obj:`True`, use SBtab format

        Returns:
            :obj:`dict`: model objects grouped by `Model` class

        Raises:
            :obj:`ValueError`: if the input format is not supported, model names are not unique, or the
                data is invalid
        """
        # cast models to list
        if models is None:
            models = self.MODELS
        if not isinstance(models, (list, tuple)):
            models = [models]

        # read the object into standard Python objects (lists, dicts)
        _, ext = splitext(path)
        ext = ext.lower()
        with open(path, 'r') as file:
            if ext == '.json':
                json_objs = json.load(file)
            elif ext in ['.yaml', '.yml']:
                # SECURITY: FullLoader can still instantiate some Python objects;
                # prefer yaml.safe_load if these files may come from untrusted sources
                json_objs = yaml.load(file, Loader=yaml.FullLoader)
            else:
                raise ValueError('Unsupported format {}'.format(ext))

        # check that model names are unique so that objects can be decoded
        models = set(models)
        models_by_name = {model.__name__: model for model in models}
        # fixed idiom: len(models_by_name) instead of len(list(models_by_name.keys()))
        if len(models_by_name) < len(models):
            raise ValueError('Model names must be unique to decode objects')

        # cast the object(s) to their type
        if json_objs is None:
            objs = None

        elif isinstance(json_objs, list):
            objs = []
            decoded = {}
            for json_obj in json_objs:
                obj_type = json_obj.get('__type', None)
                model = models_by_name.get(obj_type, None)
                if not model:
                    if ignore_extra_sheets:
                        continue
                    else:
                        raise ValueError('Unsupported type {}'.format(obj_type))
                objs.append(model.from_dict(json_obj, decoded=decoded))
            # deterministically deduplicate objects that decode to the same instance
            objs = det_dedupe(objs)

        else:
            obj_type = json_objs.get('__type', None)
            model = models_by_name.get(obj_type, None)
            if model:
                objs = model.from_dict(json_objs)
            elif ignore_extra_sheets:
                objs = None
            else:
                raise ValueError('Unsupported type {}'.format(obj_type))

        # validate
        if objs and validate:
            if isinstance(objs, list):
                to_validate = objs
            else:
                to_validate = [objs]
            errors = Validator().validate(to_validate)
            if errors:
                raise ValueError(
                    indent_forest(['The model cannot be loaded because it fails to validate:', [errors]]))

        # group objects by model
        if group_objects_by_model:
            if objs is None:
                objs = []
            elif not isinstance(objs, list):
                objs = [objs]
            return dict_by_class(objs)
        else:
            return objs
class WorkbookReader(ReaderBase):
""" Read model objects from an Excel file or CSV and TSV files """
def run(self, path, models=None,
        ignore_missing_sheets=False, ignore_extra_sheets=False, ignore_sheet_order=False,
        include_all_attributes=True, ignore_missing_attributes=False, ignore_extra_attributes=False,
        ignore_attribute_order=False, ignore_empty_rows=True,
        group_objects_by_model=True, validate=True,
        sbtab=False):
    """ Read a list of model objects from file(s) and, optionally, validate them

    File(s) may be a single Excel workbook with multiple worksheets or a set of delimeter
    separated files encoded by a single path with a glob pattern.

    Args:
        path (:obj:`str`): path to file(s)
        models (:obj:`types.TypeType` or :obj:`list` of :obj:`types.TypeType`, optional): type or list
            of type of objects to read
        ignore_missing_sheets (:obj:`bool`, optional): if :obj:`False`, report an error if a worksheet/
            file is missing for one or more models
        ignore_extra_sheets (:obj:`bool`, optional): if :obj:`True` and all `models` are found, ignore
            other worksheets or files
        ignore_sheet_order (:obj:`bool`, optional): if :obj:`True`, do not require the sheets to be provided
            in the canonical order
        include_all_attributes (:obj:`bool`, optional): if :obj:`True`, export all attributes including those
            not explictly included in `Model.Meta.attribute_order`
        ignore_missing_attributes (:obj:`bool`, optional): if :obj:`False`, report an error if a
            worksheet/file doesn't contain all of attributes in a model in `models`
        ignore_extra_attributes (:obj:`bool`, optional): if :obj:`True`, do not report errors if
            attributes in the data are not in the model
        ignore_attribute_order (:obj:`bool`, optional): if :obj:`True`, do not require the attributes to be provided
            in the canonical order
        ignore_empty_rows (:obj:`bool`, optional): if :obj:`True`, ignore empty rows
        group_objects_by_model (:obj:`bool`, optional): if :obj:`True`, group decoded objects by their
            types
        validate (:obj:`bool`, optional): if :obj:`True`, validate the data
        sbtab (:obj:`bool`, optional): if :obj:`True`, use SBtab format

    Returns:
        :obj:`obj`: if `group_objects_by_model` set returns :obj:`dict`: of model objects grouped by `Model` class;
            else returns :obj:`list`: of all model objects

    Raises:
        :obj:`ValueError`: if

            * Sheets cannot be unambiguously mapped to models
            * The file(s) indicated by :obj:`path` is missing a sheet for a model and
              :obj:`ignore_missing_sheets` is :obj:`False`
            * The file(s) indicated by :obj:`path` contains extra sheets that don't correspond to one
              of `models` and :obj:`ignore_extra_sheets` is :obj:`False`
            * The worksheets are file(s) indicated by :obj:`path` are not in the canonical order and
              :obj:`ignore_sheet_order` is :obj:`False`
            * Some models are not serializable
            * The data contains parsing errors found by `read_model`
    """
    # initialize reader
    _, ext = splitext(path)
    ext = ext.lower()
    reader_cls = wc_utils.workbook.io.get_reader(ext)
    reader = reader_cls(path)

    # initialize reading
    reader.initialize_workbook()
    self._model_metadata = {}

    # check that at least one model is defined
    if models is None:
        models = self.MODELS
    if not isinstance(models, (list, tuple)):
        models = [models]

    # check that sheets can be unambiguously mapped to models
    sheet_names = reader.get_sheet_names()
    if sbtab:
        toc_sheet_name = '!' + SBTAB_TOC_NAME
        schema_sheet_name = '!' + SBTAB_SCHEMA_NAME
    else:
        toc_sheet_name = TOC_NAME
        schema_sheet_name = SCHEMA_NAME
    # the TOC and schema sheets are bookkeeping, not model data
    if toc_sheet_name in sheet_names:
        sheet_names.remove(toc_sheet_name)
    if schema_sheet_name in sheet_names:
        sheet_names.remove(schema_sheet_name)

    # drop metadata models unless they're requested
    for metadata_model in (utils.DataRepoMetadata, utils.SchemaRepoMetadata):
        if metadata_model not in models:
            if metadata_model.Meta.verbose_name in sheet_names:
                sheet_names.remove(metadata_model.Meta.verbose_name)

    ambiguous_sheet_names = self.get_ambiguous_sheet_names(sheet_names, models)
    if ambiguous_sheet_names:
        msg = 'The following sheets cannot be unambiguously mapped to models:'
        # NOTE(review): this loop rebinds `models`; harmless only because the
        # branch always ends in `raise`
        for sheet_name, models in ambiguous_sheet_names.items():
            msg += '\n {}: {}'.format(sheet_name, ', '.join(model.__name__ for model in models))
        raise ValueError(msg)

    # optionally,
    # * check every sheet is defined
    # * check no extra sheets are defined
    # * check the models are defined in the canonical order
    expected_sheet_names = []
    used_sheet_names = []
    sheet_order = []
    expected_sheet_order = []
    for model in models:
        model_sheet_name = self.get_model_sheet_name(sheet_names, model)
        if model_sheet_name:
            expected_sheet_names.append(model_sheet_name)
            used_sheet_names.append(model_sheet_name)
            sheet_order.append(sheet_names.index(model_sheet_name))
            expected_sheet_order.append(model_sheet_name)
        elif not inspect.isabstract(model):
            # abstract models never get sheets; concrete ones without a sheet are "missing"
            if model.Meta.table_format == TabularOrientation.row:
                expected_sheet_names.append(model.Meta.verbose_name_plural)
            else:
                expected_sheet_names.append(model.Meta.verbose_name)

    if not ignore_missing_sheets:
        missing_sheet_names = set(expected_sheet_names).difference(set(used_sheet_names))
        if missing_sheet_names:
            raise ValueError("Files/worksheets {} / '{}' must be defined".format(
                basename(path), "', '".join(sorted(missing_sheet_names))))

    if not ignore_extra_sheets:
        extra_sheet_names = set(sheet_names).difference(set(used_sheet_names))
        if extra_sheet_names:
            raise ValueError("No matching models for worksheets/files {} / '{}'".format(
                basename(path), "', '".join(sorted(extra_sheet_names))))
    elif sbtab and ignore_extra_sheets:
        # even when ignoring extras, SBtab-marked ('!'-prefixed) sheets must match a model
        extra_sheet_names = set(sheet_names).difference(set(used_sheet_names))
        invalid_extra_sheet_names = [n for n in extra_sheet_names if n.startswith('!')]
        if invalid_extra_sheet_names:
            raise ValueError("No matching models for worksheets/files {} / '{}'".format(
                basename(path), "', '".join(sorted(invalid_extra_sheet_names))))

    # sheet order is only meaningful inside a single workbook, not a glob of csv/tsv files
    if not ignore_sheet_order and ext == '.xlsx':
        if not is_sorted(sheet_order):
            raise ValueError('The sheets must be provided in this order:\n {}'.format(
                '\n '.join(expected_sheet_order)))

    # check that models are valid
    for model in models:
        model.validate_related_attributes()

    # check that models are serializable
    for model in models:
        if not model.is_serializable():
            raise ValueError('Class {}.{} cannot be serialized'.format(model.__module__, model.__name__))

    # read objects
    attributes = {}
    data = {}
    errors = {}
    objects = {}
    for model in models:
        model_attributes, model_data, model_errors, model_objects = self.read_model(
            reader, model,
            include_all_attributes=include_all_attributes,
            ignore_missing_attributes=ignore_missing_attributes,
            ignore_extra_attributes=ignore_extra_attributes,
            ignore_attribute_order=ignore_attribute_order,
            ignore_empty_rows=ignore_empty_rows,
            validate=validate,
            sbtab=sbtab)
        if model_attributes:
            attributes[model] = model_attributes
        if model_data:
            data[model] = model_data
        if model_errors:
            errors[model] = model_errors
        if model_objects:
            objects[model] = model_objects

    if errors:
        forest = ["The model cannot be loaded because '{}' contains error(s):".format(basename(path))]
        for model, model_errors in errors.items():
            forest.append([quote(model.__name__)])
            forest.append([model_errors])
        raise ValueError(indent_forest(forest))

    # link objects: resolve string references between sheets via each object's primary attribute
    objects_by_primary_attribute = {}
    for model, objects_model in objects.items():
        objects_by_primary_attribute[model] = {obj.get_primary_attribute(): obj for obj in objects_model}

    errors = {}
    decoded = {}
    for model, objects_model in objects.items():
        model_errors = self.link_model(model, attributes[model], data[model], objects_model,
                                       objects_by_primary_attribute, decoded=decoded)
        if model_errors:
            errors[model] = model_errors

    if errors:
        forest = ["The model cannot be loaded because '{}' contains error(s):".format(basename(path))]
        for model, model_errors in errors.items():
            forest.append([quote(model.__name__)])
            forest.append([model_errors])
        raise ValueError(indent_forest(forest))

    # convert to sets
    # NOTE(review): despite the comment inherited from the original, this section builds
    # deduplicated lists, not sets; the `objects[model] = objects[model]` branch below is a no-op
    for model in models:
        if model in objects:
            objects[model] = objects[model]
        else:
            objects[model] = []

    for model, model_objects in objects_by_primary_attribute.items():
        if model not in objects:
            objects[model] = []
        # merge objects created during linking with those read directly, removing duplicates
        objects[model] = det_dedupe(objects[model] + list(model_objects.values()))

    # validate
    all_objects = []
    for model in models:
        all_objects.extend(objects[model])

    if validate:
        errors = Validator().validate(all_objects)
        if errors:
            raise ValueError(
                indent_forest(['The model cannot be loaded because it fails to validate:', [errors]]))

    # return
    if group_objects_by_model:
        return objects
    else:
        if all_objects:
            return all_objects
        else:
            return None
def read_model(self, reader, model, include_all_attributes=True,
               ignore_missing_attributes=False, ignore_extra_attributes=False,
               ignore_attribute_order=False, ignore_empty_rows=True,
               validate=True,
               sbtab=False):
    """ Instantiate a list of objects from data in a table in a file

    Args:
        reader (:obj:`wc_utils.workbook.io.Reader`): reader
        model (:obj:`type`): the model describing the objects' schema
        include_all_attributes (:obj:`bool`, optional): if :obj:`True`, export all attributes including those
            not explictly included in `Model.Meta.attribute_order`
        ignore_missing_attributes (:obj:`bool`, optional): if :obj:`False`, report an error if the worksheet/files
            don't have all of attributes in the model
        ignore_extra_attributes (:obj:`bool`, optional): if :obj:`True`, do not report errors if attributes
            in the data are not in the model
        ignore_attribute_order (:obj:`bool`, optional): if :obj:`True`, do not require the attributes to be provided in the
            canonical order
        ignore_empty_rows (:obj:`bool`, optional): if :obj:`True`, ignore empty rows
        validate (:obj:`bool`, optional): if :obj:`True`, validate the data
        sbtab (:obj:`bool`, optional): if :obj:`True`, use SBtab format

    Returns:
        :obj:`tuple` of
            `list` of `Attribute`,
            `list` of `list` of `object`,
            `list` of `str`,
            `list` of `Model`: tuple of

            * attribute order of `data`
            * a two-dimensional nested list of object data
            * a list of parsing errors (:obj:`None` when the model has no worksheet)
            * constructed model objects
    """
    _, ext = splitext(reader.path)
    ext = ext.lower()
    sheet_name = self.get_model_sheet_name(reader.get_sheet_names(), model)
    if not sheet_name:
        # no worksheet for this model: errors slot is None, not [], so callers
        # can distinguish "missing sheet" from "sheet with no errors"
        return ([], [], None, [])

    # determine the expected attributes and headings from the model schema
    exp_attrs, exp_sub_attrs, exp_headings, _, _, _ = get_fields(
        model, {}, include_all_attributes=include_all_attributes, sbtab=sbtab)

    # read the worksheet; column-oriented sheets are transposed so the rest of
    # this method can treat every sheet as row-oriented
    if model.Meta.table_format == TabularOrientation.row:
        data, _, headings, top_comments = self.read_sheet(model, reader, sheet_name,
                                                          num_column_heading_rows=len(exp_headings),
                                                          ignore_empty_rows=ignore_empty_rows,
                                                          sbtab=sbtab)
    else:
        data, headings, _, top_comments = self.read_sheet(model, reader, sheet_name,
                                                          num_row_heading_columns=len(exp_headings),
                                                          ignore_empty_cols=ignore_empty_rows,
                                                          sbtab=sbtab)
        data = transpose(data)

    # split headings into group headings (merged cells over multi-column
    # attributes) and per-attribute headings
    if len(exp_headings) == 1:
        group_headings = [None] * len(headings[-1])
    else:
        group_headings = headings[0]
    attr_headings = headings[-1]

    # prohibit duplicate (case-insensitive) headers
    header_map = collections.defaultdict(lambda: 0)
    for group_heading, attr_heading in zip(group_headings, attr_headings):
        if group_heading is None:
            g = None
        else:
            g = group_heading.lower()
        if attr_heading is None:
            continue
        l = attr_heading.lower()
        header_map[(g, l)] += 1
    duplicate_headers = [x for x, y in header_map.items() if y > 1]
    if duplicate_headers:
        errors = []
        for dup_group, dup in duplicate_headers:
            errors.append("{}:'{}': Duplicate, case insensitive, headers: {}: {}".format(
                basename(reader.path), sheet_name, dup_group, dup))
        return ([], [], errors, [])

    # map each header to a (group attribute, attribute) pair, first by name and
    # then, as a fallback, by verbose name
    sub_attrs = []
    good_columns = []  # 1/0 mask of recognized columns, used with itertools.compress below
    errors = []
    for idx, (group_heading, attr_heading) in enumerate(zip(group_headings, attr_headings), start=1):
        group_attr, attr = utils.get_attribute_by_name(model, group_heading, attr_heading, case_insensitive=True)
        if not attr:
            group_attr, attr = utils.get_attribute_by_name(
                model, group_heading, attr_heading, case_insensitive=True, verbose_name=True)
        if attr is not None:
            sub_attrs.append((group_attr, attr))
        if attr is None and not ignore_extra_attributes:
            row, col, hdr_entries = self.header_row_col_names(idx, ext, model.Meta.table_format)
            if attr_heading is None or attr_heading == '':
                errors.append("Empty header field in row {}, col {} - delete empty {}(s)".format(
                    row, col, hdr_entries))
            else:
                errors.append("Header '{}' in row {}, col {} does not match any attribute".format(
                    attr_heading, row, col))
        if attr is None and sbtab and ignore_extra_attributes:
            # in SBtab mode, headers prefixed with '!' are declared columns and
            # must match attributes even when extra attributes are ignored
            if isinstance(attr_heading, str) and attr_heading.startswith('!'):
                row, col, hdr_entries = self.header_row_col_names(idx, ext, model.Meta.table_format)
                errors.append("Header '{}' in row {}, col {} does not match any attribute".format(
                    attr_heading[1:], row, col))
        if ignore_extra_attributes:
            if attr is None:
                good_columns.append(0)
            else:
                good_columns.append(1)
    if errors:
        return ([], [], errors, [])

    # optionally, check that all attributes have column headings
    if not ignore_missing_attributes:
        missing_sub_attrs = set(exp_sub_attrs).difference(set(sub_attrs))
        if missing_sub_attrs:
            msgs = []
            for missing_group_attr, missing_attr in missing_sub_attrs:
                if missing_group_attr:
                    msgs.append(missing_group_attr.name + '.' + missing_attr.name)
                else:
                    msgs.append(missing_attr.name)
            error = 'The following attributes must be defined:\n {}'.format('\n '.join(msgs))
            return ([], [], [error], [])

    # optionally, check that the attributes are defined in the canonical order
    if not ignore_attribute_order:
        canonical_sub_attrs = list(filter(lambda sub_attr: sub_attr in sub_attrs, exp_sub_attrs))
        if sub_attrs != canonical_sub_attrs:
            if model.Meta.table_format == TabularOrientation.row:
                orientation = 'columns'
            else:
                orientation = 'rows'
            if len(exp_headings) == 1:
                if model.Meta.table_format == TabularOrientation.row:
                    msgs = ['{}1: {}'.format(get_column_letter(i + 1), a) for i, a in enumerate(exp_headings[0])]
                else:
                    msgs = ['A{}: {}'.format(i + 1, a) for i, a in enumerate(exp_headings[0])]
            else:
                if model.Meta.table_format == TabularOrientation.row:
                    msgs = ['{}1: {}\n {}2: {}'.format(get_column_letter(i + 1), g or '', get_column_letter(i + 1), a)
                            for i, (g, a) in enumerate(zip(exp_headings[0], exp_headings[1]))]
                else:
                    msgs = ['A{}: {}\n B{}: {}'.format(i + 1, g or '', i + 1, a)
                            for i, (g, a) in enumerate(zip(exp_headings[0], exp_headings[1]))]
            error = "The {} of worksheet '{}' must be defined in this order:\n {}".format(
                orientation, sheet_name, '\n '.join(msgs))
            return ([], [], [error], [])

    # record the attribute name for each column so objects can report their source location
    attribute_seq = []
    for group_heading, attr_heading in zip(group_headings, attr_headings):
        group_attr, attr = utils.get_attribute_by_name(model, group_heading, attr_heading, case_insensitive=True)
        if not attr:
            group_attr, attr = utils.get_attribute_by_name(
                model, group_heading, attr_heading, case_insensitive=True, verbose_name=True)
        if attr is None:
            attribute_seq.append('')
        elif group_attr is None:
            attribute_seq.append(attr.name)
        else:
            attribute_seq.append(group_attr.name + '.' + attr.name)

    # group '%'-prefixed comment rows with the next data row (SBtab only)
    if sbtab:
        objs_comments = []
        obj_comments = top_comments
        for row in list(data):
            if row and isinstance(row[0], str) and row[0].startswith('%'):
                obj_comments.append(row[0][1:].strip())
                data.remove(row)
            else:
                objs_comments.append(obj_comments)
                obj_comments = []
        if obj_comments:
            # trailing comments with no following row attach to the last row
            assert objs_comments, 'Each comment must be associated with a row.'
            objs_comments[-1].extend(obj_comments)
    else:
        objs_comments = [[]] * len(data)

    # deserialize each row into a model object, collecting per-object errors
    objects = []
    errors = []
    # NOTE(review): `transposed` is computed but never used below — looks vestigial
    transposed = model.Meta.table_format == TabularOrientation.column
    for row_num, (obj_data, obj_comments) in enumerate(zip(data, objs_comments), start=2):
        obj = model()
        obj._comments = obj_comments

        # save object location in file
        obj.set_source(reader.path, sheet_name, attribute_seq, row_num)

        obj_errors = []
        if ignore_extra_attributes:
            # drop cells of unrecognized columns
            obj_data = list(compress(obj_data, good_columns))

        for (group_attr, sub_attr), attr_value in zip(sub_attrs, obj_data):
            try:
                # related attributes and grouped attributes are linked later by link_model();
                # only plain, ungrouped attributes are deserialized here
                if not group_attr and not isinstance(sub_attr, RelatedAttribute):
                    value, deserialize_error = sub_attr.deserialize(attr_value)
                    validation_error = sub_attr.validate(sub_attr.__class__, value)
                    if deserialize_error or validation_error:
                        if deserialize_error:
                            deserialize_error.set_location_and_value(utils.source_report(obj, sub_attr.name),
                                                                     attr_value)
                            obj_errors.append(deserialize_error)
                        if validation_error:
                            validation_error.set_location_and_value(utils.source_report(obj, sub_attr.name),
                                                                    attr_value)
                            obj_errors.append(validation_error)
                    setattr(obj, sub_attr.name, value)
            except Exception as e:
                error = InvalidAttribute(sub_attr, ["{}".format(e)])
                error.set_location_and_value(utils.source_report(obj, sub_attr.name), attr_value)
                obj_errors.append(error)

        if obj_errors:
            errors.append(InvalidObject(obj, obj_errors))

        objects.append(obj)

    model.get_manager().insert_all_new()

    if not validate:
        errors = []

    return (sub_attrs, data, errors, objects)
def read_sheet(self, model, reader, sheet_name, num_row_heading_columns=0, num_column_heading_rows=0,
               ignore_empty_rows=False, ignore_empty_cols=False, sbtab=False):
    """ Read worksheet or file into a two-dimensional list

    Args:
        model (:obj:`type`): the model describing the objects' schema
        reader (:obj:`wc_utils.workbook.io.Reader`): reader
        sheet_name (:obj:`str`): worksheet name
        num_row_heading_columns (:obj:`int`, optional): number of columns of row headings
        num_column_heading_rows (:obj:`int`, optional): number of rows of column headings
        ignore_empty_rows (:obj:`bool`, optional): if :obj:`True`, ignore empty rows
        ignore_empty_cols (:obj:`bool`, optional): if :obj:`True`, ignore empty columns
        sbtab (:obj:`bool`, optional): if :obj:`True`, use SBtab format

    Returns:
        :obj:`tuple`:

            * :obj:`list` of :obj:`list`: two-dimensional list of table values
            * :obj:`list` of :obj:`list`: row headings
            * :obj:`list` of :obj:`list`: column_headings
            * :obj:`list` of :obj:`str`: comments above column headings

    Raises:
        :obj:`ValueError`: if worksheet doesn't have header rows or columns
    """
    data = reader.read_worksheet(sheet_name)

    # strip out leading metadata/comment rows (table name, description, ...)
    # and remember them per-model for later re-export
    model_metadata, top_comments = self.read_worksheet_metadata(data, sbtab=sbtab)
    self._model_metadata[model] = model_metadata

    if sbtab:
        # SBtab sheet names carry a '!' prefix which the TableID omits
        # NOTE(review): asserts are stripped under `python -O`; consider raising instead
        assert model_metadata['TableID'] == sheet_name[1:], \
            "TableID must be '{}'.".format(sheet_name[1:])

    if len(data) < num_column_heading_rows:
        raise ValueError("Worksheet '{}' must have {} header row(s)".format(
            sheet_name, num_column_heading_rows))
    # NOTE(review): if `data` is empty and both heading counts are 0, `data[0]`
    # raises IndexError here — presumably callers always request >=1 heading; verify
    if (num_row_heading_columns > 0 and len(data) == 0) or len(data[0]) < num_row_heading_columns:
        raise ValueError("Worksheet '{}' must have {} header column(s)".format(
            sheet_name, num_row_heading_columns))

    # separate header rows (mutates `data` in place)
    column_headings = []
    for i_row in range(num_column_heading_rows):
        column_headings.append(data.pop(0))

    # separate header columns (mutates each row in place)
    row_headings = []
    for i_col in range(num_row_heading_columns):
        row_heading = []
        row_headings.append(row_heading)
        for row in data:
            row_heading.append(row.pop(0))

        # keep column headings aligned with the remaining data columns
        for column_heading in column_headings:
            column_heading.pop(0)  # pragma: no cover # unreachable because row_headings and column_headings cannot both be non-empty

    # remove empty rows and columns
    def remove_empty_rows(data):
        # drop rows in which every cell is '' or None
        for row in list(data):
            empty = True
            for cell in row:
                if cell not in ['', None]:
                    empty = False
                    break
            if empty:
                data.remove(row)

    if ignore_empty_rows:
        remove_empty_rows(data)

    if ignore_empty_cols:
        # remove empty columns by transposing, dropping empty rows, and transposing back
        data = transpose(data)
        remove_empty_rows(data)
        data = transpose(data)

    return (data, row_headings, column_headings, top_comments)
@staticmethod
def read_worksheet_metadata(rows, sbtab=False):
    """ Extract and strip the leading metadata/comment rows of a worksheet

    Removes, in place from the front of `rows`: blank rows, SBtab '%' comment
    rows, and '!!'-prefixed metadata heading rows. Stops at the first other row.
    Metadata headings are parsed as a space-separated list of ``key='value'``
    pairs (values may contain backslash-escaped characters).

    Args:
        rows (:obj:`list`): rows; modified in place
        sbtab (:obj:`bool`, optional): if :obj:`True`, use SBtab format

    Returns:
        :obj:`tuple`:

            * :obj:`dict`: metadata key-value pairs
            * :obj:`list` of :obj:`str`: comments found above the headings
    """
    if sbtab:
        fmt, expected_version = 'SBtab', '2.0'
    else:
        fmt, expected_version = 'ObjModel', obj_model.__version__

    heading_lines = []
    comments = []
    for row in list(rows):
        first = row[0] if row else None
        if not row or all(cell in ['', None] for cell in row):
            rows.remove(row)
        elif sbtab and isinstance(first, str) and first.startswith('%'):
            text = first[1:].strip()
            if text:
                comments.append(text)
            rows.remove(row)
        elif isinstance(first, str) and first.startswith('!!'):
            # only headings for the active format are recorded; other '!!' rows
            # are stripped and ignored
            if first.startswith('!!' + fmt):
                heading_lines.append(first)
            rows.remove(row)
        else:
            break

    key_val_re = r" +(.*?)='((?:[^'\\]|\\.)*)'"
    metadata = {}
    for heading in heading_lines:
        assert re.match(r"^!!{}({})* *$".format(fmt, key_val_re), heading), \
            'Metadata must consist of a list of key-value pairs.'
        for key, val in re.findall(key_val_re, heading[len(fmt) + 2:]):
            assert key not in metadata, "'{}' metadata cannot be repeated.".format(key)
            metadata[key] = val

    if sbtab:
        assert len(heading_lines) == 1, 'Metadata must consist of a list of key-value pairs.'
        assert metadata[fmt + 'Version'] == expected_version, 'Version must be ' + expected_version

    return (metadata, comments)
def link_model(self, model, attributes, data, objects, objects_by_primary_attribute, decoded=None):
    """ Construct object graph

    Deserializes the related and grouped attributes that `read_model` skipped,
    resolving references through `objects_by_primary_attribute`.

    Args:
        model (:obj:`Model`): an `obj_model.core.Model`
        attributes (:obj:`list` of :obj:`Attribute`): attribute order of `data`
        data (:obj:`list` of :obj:`list` of :obj:`object`): nested list of object data
        objects (:obj:`list`): list of model objects in order of `data`
        objects_by_primary_attribute (:obj:`dict`): dictionary of model objects grouped by model;
            updated in place with multi-cell related objects
        decoded (:obj:`dict`, optional): dictionary of objects that have already been decoded

    Returns:
        :obj:`list` of :obj:`str`: list of parsing errors
    """
    errors = []
    for obj_data, obj in zip(data, objects):
        for (group_attr, sub_attr), attr_value in zip(attributes, obj_data):
            if group_attr is None and isinstance(sub_attr, RelatedAttribute):
                # ungrouped related attribute: resolve the reference on the object itself
                value, error = sub_attr.deserialize(attr_value, objects_by_primary_attribute, decoded=decoded)
                if error:
                    error.set_location_and_value(utils.source_report(obj, sub_attr.name), attr_value)
                    errors.append(error)
                else:
                    setattr(obj, sub_attr.name, value)
            elif group_attr and attr_value not in [None, '']:
                # grouped (multi-cell) attribute: set the value on the related sub-object,
                # creating it on first use
                if isinstance(sub_attr, RelatedAttribute):
                    value, error = sub_attr.deserialize(attr_value, objects_by_primary_attribute, decoded=decoded)
                else:
                    value, error = sub_attr.deserialize(attr_value)
                if error:
                    error.set_location_and_value(utils.source_report(obj, group_attr.name + '.' + sub_attr.name), attr_value)
                    errors.append(error)
                else:
                    sub_obj = getattr(obj, group_attr.name)
                    if not sub_obj:
                        sub_obj = group_attr.related_class()
                        setattr(obj, group_attr.name, sub_obj)
                    setattr(sub_obj, sub_attr.name, value)

        # deduplicate multi-cell related objects: equal serializations share one instance
        for attr in model.Meta.attributes.values():
            if isinstance(attr, RelatedAttribute) and attr.related_class.Meta.table_format == TabularOrientation.multiple_cells:
                val = getattr(obj, attr.name)
                if val:
                    if attr.related_class not in objects_by_primary_attribute:
                        objects_by_primary_attribute[attr.related_class] = {}
                    serialized_val = val.serialize()
                    same_val = objects_by_primary_attribute[attr.related_class].get(serialized_val, None)
                    if same_val:
                        # clear the duplicate's attributes so it holds no references,
                        # then point the object at the canonical instance
                        for sub_attr in attr.related_class.Meta.attributes.values():
                            sub_val = getattr(val, sub_attr.name)
                            if isinstance(sub_val, list):
                                setattr(val, sub_attr.name, [])
                            else:
                                setattr(val, sub_attr.name, None)
                        setattr(obj, attr.name, same_val)
                    else:
                        objects_by_primary_attribute[attr.related_class][serialized_val] = val
    return errors
@classmethod
def header_row_col_names(cls, index, file_ext, table_format):
    """ Determine row and column names for header entries.

    Args:
        index (:obj:`int`): index in header sequence
        file_ext (:obj:`str`): extension for model file
        table_format (:obj:`TabularOrientation`): orientation of the stored table

    Returns:
        :obj:`tuple` of row, column, header_entries
    """
    horizontal = table_format == TabularOrientation.row
    row = 1 if horizontal else index
    col = index if horizontal else 1
    hdr_entries = 'column' if horizontal else 'row'
    if 'xlsx' in file_ext:
        # spreadsheet files use letter-style column names
        col = excel_col_name(col)
    return (row, col, hdr_entries)
@classmethod
def get_model_sheet_name(cls, sheet_names, model):
    """ Get the name of the worksheet/file which corresponds to a model

    Args:
        sheet_names (:obj:`list` of :obj:`str`): names of the sheets in the workbook/files
        model (:obj:`Model`): model

    Returns:
        :obj:`str`: name of sheet corresponding to the model or `None` if there is no sheet for the model

    Raises:
        :obj:`ValueError`: if the model matches more than one sheet
    """
    candidates = {name.lower() for name in cls.get_possible_model_sheet_names(model)}
    matches = det_dedupe([s for s in sheet_names if s.lower() in candidates])
    if len(matches) > 1:
        raise ValueError('Model {} matches multiple sheets'.format(model.__name__))
    if matches:
        return matches[0]
    return None
@classmethod
def get_possible_model_sheet_names(cls, model):
    """ Return set of possible sheet names for a model

    The candidates are the model's class name and its verbose names.

    Args:
        model (:obj:`Model`): Model

    Returns:
        :obj:`set`: set of possible sheet names for a model
    """
    return {model.__name__, model.Meta.verbose_name, model.Meta.verbose_name_plural}
@classmethod
def get_ambiguous_sheet_names(cls, sheet_names, models):
    """ Get names of sheets that cannot be unambiguously mapped to models (sheet names that map to multiple models).

    Args:
        sheet_names (:obj:`list` of :obj:`str`): names of the sheets in the workbook/files
        models (:obj:`list` of :obj:`Model`): list of models

    Returns:
        :obj:`dict` of :obj:`str`, :obj:`list` of :obj:`Model`: dictionary of ambiguous sheet names and their matching models
    """
    ambiguous = {}
    for sheet_name in sheet_names:
        # exact (case-sensitive) matches against each model's candidate names
        matching = [model for model in models
                    if sheet_name in cls.get_possible_model_sheet_names(model)]
        if len(matching) > 1:
            ambiguous[sheet_name] = matching
    return ambiguous
class Reader(ReaderBase):
    """ Reader that dispatches to a format-specific reader chosen from the path's extension """

    @staticmethod
    def get_reader(path):
        """ Get the IO class whose `run()` method can read the file(s) at `path`

        Args:
            path (:obj:`str`): path to the file(s) to read

        Returns:
            :obj:`type`: reader class

        Raises:
            :obj:`ValueError`: if extension is not supported
        """
        _, ext = splitext(path)
        ext = ext.lower()
        if ext in ['.csv', '.tsv', '.xlsx']:
            return WorkbookReader
        elif ext in ['.json', '.yaml', '.yml']:
            return JsonReader
        else:
            # fix: the original message said 'Invalid export format', copied from
            # the writer; this is a read (import) path
            raise ValueError('Invalid format: {}'.format(ext))

    def run(self, path, models=None,
            ignore_missing_sheets=False, ignore_extra_sheets=False, ignore_sheet_order=False,
            include_all_attributes=True, ignore_missing_attributes=False, ignore_extra_attributes=False,
            ignore_attribute_order=False, ignore_empty_rows=True,
            group_objects_by_model=False, validate=True,
            sbtab=False):
        """ Read a list of model objects from file(s) and, optionally, validate them

        Args:
            path (:obj:`str`): path to file(s)
            models (:obj:`types.TypeType` or :obj:`list` of :obj:`types.TypeType`, optional): type
                of object to read or list of types of objects to read
            ignore_missing_sheets (:obj:`bool`, optional): if :obj:`False`, report an error if a worksheet/
                file is missing for one or more models
            ignore_extra_sheets (:obj:`bool`, optional): if :obj:`True` and all `models` are found, ignore
                other worksheets or files
            ignore_sheet_order (:obj:`bool`, optional): if :obj:`True`, do not require the sheets to be provided
                in the canonical order
            include_all_attributes (:obj:`bool`, optional): if :obj:`True`, export all attributes including those
                not explictly included in `Model.Meta.attribute_order`
            ignore_missing_attributes (:obj:`bool`, optional): if :obj:`False`, report an error if a
                worksheet/file doesn't contain all of attributes in a model in `models`
            ignore_extra_attributes (:obj:`bool`, optional): if :obj:`True`, do not report errors if
                attributes in the data are not in the model
            ignore_attribute_order (:obj:`bool`, optional): if :obj:`True`, do not require the attributes to be provided
                in the canonical order
            ignore_empty_rows (:obj:`bool`, optional): if :obj:`True`, ignore empty rows
            group_objects_by_model (:obj:`bool`, optional): if :obj:`True`, group decoded objects by their
                types
            validate (:obj:`bool`, optional): if :obj:`True`, validate the data
            sbtab (:obj:`bool`, optional): if :obj:`True`, use SBtab format

        Returns:
            :obj:`obj`: if `group_objects_by_model` is set returns :obj:`dict`: model objects grouped
                by `Model` class, otherwise returns :obj:`list`: of model objects
        """
        reader_cls = self.get_reader(path)
        reader = reader_cls()
        result = reader.run(path, models=models,
                            ignore_missing_sheets=ignore_missing_sheets,
                            ignore_extra_sheets=ignore_extra_sheets,
                            ignore_sheet_order=ignore_sheet_order,
                            include_all_attributes=include_all_attributes,
                            ignore_missing_attributes=ignore_missing_attributes,
                            ignore_extra_attributes=ignore_extra_attributes,
                            ignore_attribute_order=ignore_attribute_order,
                            ignore_empty_rows=ignore_empty_rows,
                            group_objects_by_model=group_objects_by_model,
                            validate=validate,
                            sbtab=sbtab)
        # expose the delegate's per-model metadata so callers (e.g. convert()) can re-emit it
        self._model_metadata = reader._model_metadata
        return result
def convert(source, destination, models,
            ignore_missing_sheets=False, ignore_extra_sheets=False, ignore_sheet_order=False,
            include_all_attributes=True, ignore_missing_attributes=False, ignore_extra_attributes=False,
            ignore_attribute_order=False, ignore_empty_rows=True,
            sbtab=False):
    """ Convert among comma-separated (.csv), Excel (.xlsx), JavaScript Object Notation (.json),
    tab-separated (.tsv), and Yet Another Markup Language (.yaml, .yml) formats

    Reads all objects from `source` and writes them unchanged to `destination`,
    preserving the source's per-model metadata.

    Args:
        source (:obj:`str`): path to source file
        destination (:obj:`str`): path to save converted file
        models (:obj:`list` of :obj:`type`): list of models
        ignore_missing_sheets (:obj:`bool`, optional): if :obj:`False`, report an error if a worksheet/
            file is missing for one or more models
        ignore_extra_sheets (:obj:`bool`, optional): if :obj:`True` and all `models` are found, ignore
            other worksheets or files
        ignore_sheet_order (:obj:`bool`, optional): if :obj:`True`, do not require the sheets to be provided
            in the canonical order
        include_all_attributes (:obj:`bool`, optional): if :obj:`True`, export all attributes including those
            not explictly included in `Model.Meta.attribute_order`
        ignore_missing_attributes (:obj:`bool`, optional): if :obj:`False`, report an error if a
            worksheet/file doesn't contain all of attributes in a model in `models`
        ignore_extra_attributes (:obj:`bool`, optional): if :obj:`True`, do not report errors if
            attributes in the data are not in the model
        ignore_attribute_order (:obj:`bool`, optional): if :obj:`True`, do not require the attributes to be provided
            in the canonical order
        ignore_empty_rows (:obj:`bool`, optional): if :obj:`True`, ignore empty rows
        sbtab (:obj:`bool`, optional): if :obj:`True`, use SBtab format
    """
    reader = Reader.get_reader(source)()
    writer = Writer.get_writer(destination)()

    # the sheet/attribute tolerance options only apply to workbook-style sources
    if isinstance(reader, WorkbookReader):
        kwargs = {
            'ignore_missing_sheets': ignore_missing_sheets,
            'ignore_extra_sheets': ignore_extra_sheets,
            'ignore_sheet_order': ignore_sheet_order,
            'include_all_attributes': include_all_attributes,
            'ignore_missing_attributes': ignore_missing_attributes,
            'ignore_extra_attributes': ignore_extra_attributes,
            'ignore_attribute_order': ignore_attribute_order,
            'ignore_empty_rows': ignore_empty_rows,
        }
    else:
        kwargs = {}

    objects = reader.run(source, models=models, group_objects_by_model=False,
                         sbtab=sbtab, **kwargs)
    writer.run(destination, objects, model_metadata=reader._model_metadata,
               models=models, get_related=False, sbtab=sbtab)
def create_template(path, models, title=None, description=None, keywords=None,
                    version=None, language=None, creator=None, toc=True,
                    extra_entries=10, sbtab=False):
    """ Create a template for a model

    Writes an empty workbook/file set containing one sheet per model, ready to
    be filled in by hand.

    Args:
        path (:obj:`str`): path to write file(s)
        models (:obj:`list`): list of model, in the order that they should
            appear as worksheets; all models which are not in `models` will
            follow in alphabetical order
        title (:obj:`str`, optional): title
        description (:obj:`str`, optional): description
        keywords (:obj:`str`, optional): keywords
        version (:obj:`str`, optional): version
        language (:obj:`str`, optional): language
        creator (:obj:`str`, optional): creator
        toc (:obj:`bool`, optional): if :obj:`True`, include additional worksheet with table of contents
        extra_entries (:obj:`int`, optional): additional entries to display
        sbtab (:obj:`bool`, optional): if :obj:`True`, use SBtab format
    """
    writer = Writer.get_writer(path)()
    writer.run(path, [], models=models,
               title=title, description=description, keywords=keywords,
               version=version, language=language, creator=creator,
               toc=toc, extra_entries=extra_entries,
               sbtab=sbtab)
def get_fields(cls, metadata, include_all_attributes=True, sheet_models=None,
               sbtab=False):
    """ Get the attributes, headings, and validation for a worksheet

    Args:
        cls (:obj:`type`): Model type (subclass of :obj:`Model`)
        metadata (:obj:`dict`): dictionary of model metadata
        include_all_attributes (:obj:`bool`, optional): if :obj:`True`, export all attributes including those
            not explictly included in `Model.Meta.attribute_order`
        sheet_models (:obj:`list` of :obj:`Model`, optional): list of models encoded as separate worksheets; used
            to setup Excel validation for related attributes
        sbtab (:obj:`bool`, optional): if :obj:`True`, use SBtab formatting

    Returns:
        :obj:`tuple`:

            * :obj:`list` of :obj:`Attribute`: attributes of :obj:`cls` in the order they should be encoded
              as one or more columns; \\*-to-one relationships to classes encoded as multiple cells
              (:obj:`TabularOrientation.multiple_cells`) span multiple columns
            * :obj:`list` of :obj:`tuple` of :obj:`Attribute`: flattened (group attribute, attribute) pairs,
              one per column; the group attribute is :obj:`None` for ordinary columns and names the merged
              Row-1 heading for multi-cell groups
            * :obj:`list`: field headings (one row per heading level)
            * :obj:`list`: list of field headings to merge
            * :obj:`list`: list of field validations
            * :obj:`list` of :obj:`list` of :obj:`str`: model metadata heading to print at the
              top of the worksheet
    """
    # attribute order
    attrs = get_ordered_attributes(cls, include_all_attributes=include_all_attributes)

    # model metadata heading (e.g. "!!SBtab TableID='...' ...")
    if sbtab:
        fmt = 'SBtab'
        table_name = cls.Meta.verbose_name[1:]
        version = '2.0'
    else:
        fmt = 'ObjModel'
        table_name = cls.Meta.verbose_name_plural
        version = obj_model.__version__

    now = datetime.now()
    metadata = dict(metadata)  # copy so the caller's dict is not mutated
    metadata['TableID'] = cls.__name__
    metadata['TableName'] = table_name
    metadata.pop('Description', None)
    if cls.Meta.description:
        metadata['Description'] = cls.Meta.description
    metadata['Date'] = '{:04d}-{:02d}-{:02d} {:02d}:{:02d}:{:02d}'.format(
        now.year, now.month, now.day, now.hour, now.minute, now.second)
    metadata[fmt + 'Version'] = version

    # fixed keys first, then the rest alphabetically
    keys = ['TableID', 'TableName', 'Date', fmt + 'Version']
    if 'Description' in metadata:
        keys.insert(2, 'Description')
    keys += sorted(set(metadata.keys()) - set(keys))

    # fix: escape single quotes so the reader's key-value regex (which accepts
    # backslash escapes) can parse values containing quotes; the original
    # `.replace("'", "\'")` was a no-op because "\'" == "'"
    metadata_heading_list = ["{}='{}'".format(k, metadata[k].replace("'", "\\'")) for k in keys]
    metadata_heading_list.insert(0, "!!" + fmt)
    metadata_headings = [[' '.join(metadata_heading_list)]]

    # column labels: one column per ordinary attribute, a group of columns per
    # *-to-one relationship to a class encoded as multiple cells
    sub_attrs = []
    has_group_headings = False
    group_headings = []
    attr_headings = []
    merge_ranges = []
    field_validations = []
    i_row = len(metadata_headings)
    i_col = 0
    for attr in attrs:
        if isinstance(attr, RelatedAttribute) and attr.related_class.Meta.table_format == TabularOrientation.multiple_cells:
            this_sub_attrs = get_ordered_attributes(attr.related_class, include_all_attributes=include_all_attributes)
            sub_attrs.extend([(attr, sub_attr) for sub_attr in this_sub_attrs])
            has_group_headings = True
            group_headings.extend([attr.verbose_name] * len(this_sub_attrs))
            attr_headings.extend([sub_attr.verbose_name for sub_attr in this_sub_attrs])
            # merge the group heading across all of the group's columns
            merge_ranges.append((i_row, i_col, i_row, i_col + len(this_sub_attrs) - 1))
            i_col += len(this_sub_attrs)
            field_validations.extend([sub_attr.get_excel_validation(sheet_models=sheet_models) for sub_attr in this_sub_attrs])
        else:
            sub_attrs.append((None, attr))
            group_headings.append(None)
            attr_headings.append(attr.verbose_name)
            i_col += 1
            field_validations.append(attr.get_excel_validation(sheet_models=sheet_models))

    # warn about duplicate, case-insensitive headers (the reader rejects them)
    header_map = collections.defaultdict(list)
    for group_heading, attr_heading in zip(group_headings, attr_headings):
        header_map[((group_heading or '').lower(), attr_heading.lower())].append((group_heading, attr_heading))
    duplicate_headers = list(filter(lambda x: 1 < len(x), header_map.values()))
    for dupes in duplicate_headers:
        names = ', '.join(map(lambda s: "'{}.{}'".format(s[0], s[1]), dupes))
        warn('Duplicate, case insensitive, header fields: {}'.format(names), IoWarning)

    headings = []
    if has_group_headings:
        headings.append(group_headings)
    headings.append(attr_headings)

    return (attrs, sub_attrs, headings, merge_ranges, field_validations, metadata_headings)
def get_ordered_attributes(cls, include_all_attributes=True):
    """ Get the attributes for a class in the order that they should be printed

    Attributes named in `Model.Meta.attribute_order` come first, in that order;
    when `include_all_attributes` is set, the remaining attributes found on the
    class's inheritance chain follow in natural, case-insensitive sort order.

    Args:
        cls (:obj:`type`): Model type (subclass of :obj:`Model`)
        include_all_attributes (:obj:`bool`, optional): if :obj:`True`, export all attributes including those
            not explictly included in `Model.Meta.attribute_order`

    Returns:
        :obj:`list` of :obj:`Attribute`: attributes in the order they should be printed

    Raises:
        :obj:`ValueError`: if a "multiple_cells" class is related to another "multiple_cells" class
    """
    attr_names = cls.Meta.attribute_order
    if include_all_attributes:
        extra_names = set()
        for base in cls.Meta.inheritance:
            for name in base.__dict__.keys():
                if isinstance(getattr(base, name), Attribute) and name not in attr_names:
                    extra_names.add(name)
        attr_names = list(attr_names) + natsorted(extra_names, alg=ns.IGNORECASE)

    attrs = [cls.Meta.attributes[name] for name in attr_names]

    # multi-cell classes cannot nest: their columns could not be laid out
    if cls.Meta.table_format == TabularOrientation.multiple_cells:
        for attr in attrs:
            if isinstance(attr, RelatedAttribute) and attr.related_class.Meta.table_format == TabularOrientation.multiple_cells:
                raise ValueError('Classes with orientation "multiple_cells" cannot have relationships '
                                 'to other classes with the same orientation')

    return attrs
class IoWarning(ObjModelWarning):
    """ Warning raised during model IO (reading/writing) """
| mit |
ilo10/scikit-learn | examples/cluster/plot_kmeans_stability_low_dim_dense.py | 338 | 4324 | """
============================================================
Empirical evaluation of the impact of k-means initialization
============================================================
Evaluate the ability of k-means initialization strategies to make
the algorithm's convergence robust, as measured by the relative standard
deviation of the inertia of the clustering (i.e. the sum of distances
to the nearest cluster center).
The first plot shows the best inertia reached for each combination
of the model (``KMeans`` or ``MiniBatchKMeans``) and the init method
(``init="random"`` or ``init="kmeans++"``) for increasing values of the
``n_init`` parameter that controls the number of initializations.
The second plot demonstrates a single run of the ``MiniBatchKMeans``
estimator using ``init="random"`` and ``n_init=1``. This run leads to
a bad convergence (local optimum) with estimated centers stuck
between ground truth clusters.
The dataset used for evaluation is a 2D grid of isotropic Gaussian
clusters widely spaced.
"""
print(__doc__)
# Author: Olivier Grisel <olivier.grisel@ensta.org>
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.cm as cm
from sklearn.utils import shuffle
from sklearn.utils import check_random_state
from sklearn.cluster import MiniBatchKMeans
from sklearn.cluster import KMeans
# seeded RNG so the qualitative example (Part 2) is reproducible
random_state = np.random.RandomState(0)

# Number of runs (with randomly generated dataset) for each strategy so as
# to be able to compute an estimate of the standard deviation
n_runs = 5

# k-means models can do several random inits so as to be able to trade
# CPU time for convergence robustness
n_init_range = np.array([1, 5, 10, 15, 20])

# Datasets generation parameters
n_samples_per_center = 100
grid_size = 3
scale = 0.1
n_clusters = grid_size ** 2  # one ground-truth cluster per grid cell
def make_data(random_state, n_samples_per_center, grid_size, scale):
    """Build a shuffled 2D dataset of Gaussian blobs centered on a grid.

    Returns ``(X, y)`` where ``X`` has ``n_samples_per_center`` points per
    grid cell and ``y`` holds the integer cluster labels.
    """
    rng = check_random_state(random_state)
    grid_points = np.array([(row, col)
                            for row in range(grid_size)
                            for col in range(grid_size)])
    n_true_clusters = grid_points.shape[0]
    # NOTE: a single noise cloud is drawn once and translated to every grid
    # point, so each cluster is a shifted copy of the same sample
    jitter = rng.normal(
        scale=scale, size=(n_samples_per_center, grid_points.shape[1]))
    X = np.concatenate([center + jitter for center in grid_points])
    y = np.repeat(np.arange(n_true_clusters), n_samples_per_center)
    return shuffle(X, y, random_state=rng)
# Part 1: Quantitative evaluation of various init methods
fig = plt.figure()
plots = []
legends = []

# (estimator class, init strategy, extra constructor kwargs)
cases = [
    (KMeans, 'k-means++', {}),
    (KMeans, 'random', {}),
    (MiniBatchKMeans, 'k-means++', {'max_no_improvement': 3}),
    (MiniBatchKMeans, 'random', {'max_no_improvement': 3, 'init_size': 500}),
]

for factory, init, params in cases:
    print("Evaluation of %s with %s init" % (factory.__name__, init))
    # inertia[i, r] = final inertia for n_init_range[i] on dataset r
    inertia = np.empty((len(n_init_range), n_runs))

    for run_id in range(n_runs):
        # a fresh dataset per run, seeded by run_id for reproducibility
        X, y = make_data(run_id, n_samples_per_center, grid_size, scale)
        for i, n_init in enumerate(n_init_range):
            km = factory(n_clusters=n_clusters, init=init, random_state=run_id,
                         n_init=n_init, **params).fit(X)
            inertia[i, run_id] = km.inertia_
    # plot mean +/- std of the inertia across runs for this strategy
    p = plt.errorbar(n_init_range, inertia.mean(axis=1), inertia.std(axis=1))
    plots.append(p[0])
    legends.append("%s with %s init" % (factory.__name__, init))

plt.xlabel('n_init')
plt.ylabel('inertia')
plt.legend(plots, legends)
plt.title("Mean inertia for various k-means init across %d runs" % n_runs)
# Part 2: Qualitative visual inspection of the convergence
# Single random init with n_init=1 to illustrate a bad local optimum.
X, y = make_data(random_state, n_samples_per_center, grid_size, scale)
km = MiniBatchKMeans(n_clusters=n_clusters, init='random', n_init=1,
                     random_state=random_state).fit(X)

fig = plt.figure()
for k in range(n_clusters):
    my_members = km.labels_ == k
    # FIX: the 'spectral' colormap was removed from matplotlib (2.2);
    # 'nipy_spectral' is its documented replacement with identical colors
    color = cm.nipy_spectral(float(k) / n_clusters, 1)
    plt.plot(X[my_members, 0], X[my_members, 1], 'o', marker='.', c=color)
    cluster_center = km.cluster_centers_[k]
    plt.plot(cluster_center[0], cluster_center[1], 'o',
             markerfacecolor=color, markeredgecolor='k', markersize=6)
plt.title("Example cluster allocation with a single random init\n"
          "with MiniBatchKMeans")
plt.show()
| bsd-3-clause |
minesense/VisTrails | contrib/NumSciPy/ArrayPlot.py | 6 | 24193 | import core.modules
import core.modules.module_registry
from core.modules.vistrails_module import Module, ModuleError
from core.modules.basic_modules import PythonSource
from Array import *
from Matrix import *
import pylab
import matplotlib
import urllib
import random
class ArrayPlot(object):
    """ Mixin with helpers shared by the plotting modules below.

    Subclasses reset ``self.color_dict`` / ``self.marker_dict`` to ``None``
    before plotting; the first lookup lazily builds the cache from the
    user-supplied port values (lists of ``(dataset_index, ...)`` tuples).
    """
    namespace = 'numpy|array|plotting'

    def get_label(self, ar, i):
        """ Return the array's registered name, or 'Array <i>' if unnamed. """
        lab = ar.get_name(i)
        if lab is None:
            return 'Array ' + str(i)
        else:
            return lab

    def is_cacheable(self):
        # plotting has display side effects, so results are never cached
        return False

    def get_color(self, colors, i, randomcolor):
        """ Return the (r, g, b) tuple assigned to dataset ``i``.

        ``colors`` is a list of ``(index, r, g, b)`` tuples. Returns ``None``
        when dataset ``i`` has no assigned color; draws a random color when
        ``randomcolor`` is true.
        """
        if randomcolor:
            return (random.random(), random.random(), random.random())
        if self.color_dict is None:
            self.color_dict = {}
            for (k, r, g, b) in colors:
                self.color_dict[k] = (r, g, b)
        # FIX: dict.has_key() was removed in Python 3; `in` works in both 2 and 3
        if i in self.color_dict:
            return self.color_dict[i]
        else:
            return None

    def get_marker(self, markers, i):
        """ Return the marker string assigned to dataset ``i``, or ``None``. """
        if markers is None:
            return None
        if self.marker_dict is None:
            self.marker_dict = {}
            for (k, m) in markers:
                self.marker_dict[k] = m
        if i in self.marker_dict:
            return self.marker_dict[i]
        else:
            return None

    def get_alpha(self, alphas, i):
        # placeholder: per-dataset alpha is not implemented yet
        return None
class ArrayImage(ArrayPlot, Module):
    '''
    Display the 2D input Data Array as a color-mapped image.
    Independent control of the aspect ratio, colormap, presence of the
    colorbar and presented axis values are provided through the
    appropriate input ports: Aspect Ratio, Colormap, Colorbar,
    Extents. To change the colormap being used, it must be one of the
    pre-made maps provided by matplotlib.cm. Only 1 2D array can be
    viewed at a time.
    '''
    def compute(self):
        # Gather the 2D data and the optional display settings from the ports.
        data = self.get_input("Data Array")
        da_ar = data.get_array().squeeze()
        if da_ar.ndim != 2:
            raise ModuleError("Input Data Array must have dimension = 2")
        aspect_ratio = self.force_get_input("Aspect Ratio")
        colormap = self.force_get_input("Colormap")
        colorbar = self.force_get_input("Colorbar")
        extents = self.force_get_input("Extents")
        # Quickly check the assigned colormap to make sure it's valid
        if colormap == None:
            colormap = "jet"
        if not hasattr(pylab, colormap):
            colormap = "jet"
        bg_color = self.force_get_input("Background")
        array_x_t = self.force_get_input("Use X Title")
        array_y_t = self.force_get_input("Use Y Title")
        p_title = self.force_get_input("Title")
        x_label = self.force_get_input("X Title")
        y_label = self.force_get_input("Y Title")
        s = urllib.unquote(str(self.force_get_input("source", '')))
        # NOTE(review): the user-supplied 'source' fetched above is immediately
        # overwritten by the generated script below — confirm this is intended.
        # The module builds a pylab script as a string and exec's it; the
        # script is also emitted on the 'source' output port.
        s = 'from pylab import *\n' +\
            'from numpy import *\n' +\
            'import numpy\n'
        if bg_color == None:
            bg_color = 'w'
        # strings are passed as quoted color names, tuples as rgb literals
        if type(bg_color) == type(''):
            s += 'figure(facecolor=\'' + bg_color + '\')\n'
        else:
            s += 'figure(facecolor=' + str(bg_color) + ')\n'
        s += 'imshow(da_ar, interpolation=\'bicubic\''
        if aspect_ratio != None:
            s += ', aspect=' + str(aspect_ratio)
        if extents != None:
            s += ', extent=['+str(extents[0])+','+str(extents[1])+','+str(extents[2])+','+str(extents[3])+']'
        s += ')\n'
        # the colormap name doubles as the pylab function that activates it
        s += colormap + '()\n'
        if colorbar:
            s += 'colorbar()\n'
        # axis labels: array metadata takes precedence over explicit titles
        if array_x_t:
            s += 'xlabel(\'' + data.get_domain_name() + '\')\n'
        elif x_label:
            s += 'xlabel(\'' + x_label + '\')\n'
        if array_y_t:
            s += 'ylabel(\'' + data.get_range_name() + '\')\n'
        elif y_label:
            s += 'ylabel(\'' + y_label + '\')\n'
        if p_title:
            s += 'title(\'' + p_title + '\')\n'
        # Python 2 exec statement; runs the generated plotting script
        exec s
        self.set_output('source', s)

    @classmethod
    def register(cls, reg, basic):
        reg.add_module(cls, namespace=cls.namespace)

    @classmethod
    def register_ports(cls, reg, basic):
        # optional ports are flagged with the trailing True argument
        reg.add_input_port(cls, "source", (basic.String, 'source'), True)
        reg.add_input_port(cls, "Data Array", (NDArray, 'Array to Plot'))
        reg.add_input_port(cls, "Aspect Ratio", (basic.Float, 'Aspect Ratio'))
        reg.add_input_port(cls, "Colormap", (basic.String, 'Colormap'))
        reg.add_input_port(cls, "Colorbar", (basic.Boolean, 'Show Colorbar'), True)
        reg.add_input_port(cls, "Extents", [basic.Float, basic.Float, basic.Float, basic.Float], True)
        reg.add_input_port(cls, "Background", [basic.Float, basic.Float, basic.Float], True)
        reg.add_input_port(cls, "Use X Title", (basic.Boolean, 'Apply X-axis Label'))
        reg.add_input_port(cls, "Use Y Title", (basic.Boolean, 'Apply Y-axis Label'))
        reg.add_input_port(cls, "Title", (basic.String, 'Figure Title'))
        reg.add_input_port(cls, "X Title", (basic.String, 'X-axis label'), True)
        reg.add_input_port(cls, "Y Title", (basic.String, 'Y-axis label'), True)
        reg.add_output_port(cls, "source", (basic.String, 'source'))
class Histogram(ArrayPlot, Module):
'''
Plot a histogram of the data values. Multiple datasets can be
presented by providing multiple connections to the Data Array
port. These data are then differentiated by assigned colors and
labels. By default, 10 bins are used to histogram the data.
Additionally, recapturing the PDF of the data is possible by
enabling the Normalize option.
'''
def compute(self):
data = self.get_input_list("Data Array")
self.label_dict = None
self.color_dict = None
use_legend = self.force_get_input("Legend")
randomcolors = self.force_get_input("Random Colors")
colors = self.force_get_input_list("Colors")
bg_color = self.force_get_input("Background")
array_x_t = self.force_get_input("Use X Title")
array_y_t = self.force_get_input("Use Y Title")
p_title = self.force_get_input("Title")
x_label = self.force_get_input("X Title")
nbins = self.force_get_input("Bins")
if nbins == None:
nbins = 10
normed = self.force_get_input("Normalize")
if normed == None:
normed = False
s = urllib.unquote(str(self.force_get_input("source", '')))
self.source = ''
s = 'from pylab import *\n' +\
'from numpy import *\n' +\
'import numpy\n'
if bg_color == None:
bg_color = 'w'
if type(bg_color) == type(''):
s += 'figure(facecolor=\'' + bg_color + '\')\n'
else:
s += 'figure(facecolor=' + str(bg_color) + ')\n'
data_list = []
for i in data:
data_list.append(i.get_array().squeeze())
da_ar = None
try:
da_ar = numpy.array(data_list)
except:
raise ModuleException("Not all Data Array inputs are the same size!")
for i in range(da_ar.shape[0]):
lab = self.get_label(data[i], i)
col = self.get_color(colors, i, randomcolors)
s += 'hist(da_ar['+str(i)+',:], bins=' + str(nbins)
if lab != None:
s += ', label=\'' + lab + '\''
if col != None:
s += ', facecolor=' + str(col)
s += ', normed='+str(normed)
s += ')\n'
if use_legend:
s += 'legend()\n'
if array_x_t:
s += 'xlabel(\'' + data[0].get_domain_name() + '\')\n'
elif x_label:
s += 'xlabel(\'' + x_label + '\')\n'
if array_y_t:
s += 'ylabel(\'Histogram Value\')\n'
if p_title:
s += 'title(\'' + p_title + '\')\n'
exec s
self.set_output("source", s)
@classmethod
def register(cls, reg, basic):
reg.add_module(cls, namespace=cls.namespace)
@classmethod
def register_ports(cls, reg, basic):
reg.add_input_port(cls, "source", (basic.String, 'source'), True)
reg.add_input_port(cls, "Data Array", (NDArray, 'Data Array to Plot'))
reg.add_input_port(cls, "Legend", (basic.Boolean, 'Use Legend'), True)
reg.add_input_port(cls, "Random Colors", (basic.Boolean, 'Assign Random Colors'), True)
reg.add_input_port(cls, "Colors", [basic.Integer, basic.Float, basic.Float, basic.Float], True)
reg.add_input_port(cls, "Background", [basic.Float, basic.Float, basic.Float], True)
reg.add_input_port(cls, "Use X Title", (basic.Boolean, 'Apply X-axis Label'))
reg.add_input_port(cls, "Use Y Title", (basic.Boolean, 'Apply Y-axis Label'))
reg.add_input_port(cls, "Title", (basic.String, 'Figure Title'))
reg.add_input_port(cls, "X Title", (basic.String, 'X-axis label'), True)
reg.add_input_port(cls, "Bins", (basic.Integer, 'Number of Bins'))
reg.add_input_port(cls, "Normalize", (basic.Boolean, 'Normalize to PDF'), True)
reg.add_output_port(cls, "source", (basic.String, 'source'))
class BarChart(ArrayPlot, Module):
    '''
    Create a bar chart of the input data. Different datasets can be
    used simultaneously by connecting to the Data input port multiple
    times. Each successive data connection will be rendered on top of
    the previous dataset. This creates a stacked bar chart. Error
    bars are drawn with the errors for each of the datasets connected
    to the Error Bars input port.
    '''
    def get_ticks(self, num):
        # Build the tick-label list: '' for unlabeled bars, user labels
        # (from self.tick_dict, keyed by bar index) elsewhere.
        a = []
        for i in range(num):
            a.append('')
        for i in self.tick_dict.keys():
            a[i] = self.tick_dict[i]
        return a

    def compute(self):
        # Gather the datasets, optional error bars, and display settings.
        data = self.get_input_list("Data")
        errs = self.force_get_input_list("Error Bars")
        if len(errs) == 0:
            errs = None
        self.label_dict = None
        self.color_dict = None
        use_legend = self.force_get_input("Legend")
        randomcolors = self.force_get_input("Random Colors")
        colors = self.force_get_input_list("Colors")
        bg_color = self.force_get_input("Background")
        array_x_t = self.force_get_input("Use X Title")
        array_y_t = self.force_get_input("Use Y Title")
        ticks = self.force_get_input_list("Bar Labels")
        self.tick_dict = {}
        for (k,v) in ticks:
            self.tick_dict[k] = v
        p_title = self.force_get_input("Title")
        x_label = self.force_get_input("X Title")
        y_label = self.force_get_input("Y Title")
        width = self.force_get_input("Bar Width")
        if width == None:
            width = 0.5
        # each dataset needs a matching error array when errors are supplied
        if errs != None:
            if len(data) != len(errs):
                raise ModuleError("Number of data does not match number of error bar data")
        s = urllib.unquote(str(self.force_get_input("source", '')))
        self.source = ''
        # Build a pylab script as a string; exec'd below and emitted on the
        # 'source' output port.
        s = 'from pylab import *\n' +\
            'from numpy import *\n' +\
            'import numpy\n'
        if bg_color == None:
            bg_color = 'w'
        if type(bg_color) == type(''):
            s += 'figure(facecolor=\'' + bg_color + '\')\n'
        else:
            s += 'figure(facecolor=' + str(bg_color) + ')\n'
        numpts = None
        ind = None
        prev = None
        # one bar position per element of the first dataset
        ind = numpy.arange(data[0].get_array().flatten().shape[0])
        t = self.get_ticks(data[0].get_array().flatten().shape[0])
        # aggregate all datasets into one (n_datasets, n_points) array
        ag_ar = numpy.zeros((len(data), data[0].get_array().flatten().shape[0]))
        for i in range(len(data)):
            da_ar = data[i].get_array().flatten()
            ag_ar[i,:] = da_ar
        er_ar = numpy.zeros((len(data), data[0].get_array().flatten().shape[0]))
        if errs != None:
            for i in range(len(data)):
                er_ar[i,:] = errs[i].get_array().flatten()
        for i in range(ag_ar.shape[0]):
            s += 'bar(ind, ag_ar[' + str(i) + ',:], width'
            lab = self.get_label(data[i], i)
            col = self.get_color(colors, i, randomcolors)
            if lab != None:
                s += ', label=\'' + lab + '\''
            if col != None:
                s += ', color=' + str(col)
            if errs != None:
                s += ', yerr=er_ar[' + str(i) + ',:]'
            # stack each dataset on top of the previous one
            if prev != None:
                s += ', bottom=ag_ar[' + str(i-1) + ',:]'
            s += ')\n'
            prev = ag_ar[i]
        if use_legend:
            s += 'legend()\n'
        if array_x_t:
            s += 'xlabel(\'' + data[0].get_domain_name() + '\')\n'
        elif x_label:
            s += 'xlabel(\'' + x_label + '\')\n'
        if array_y_t:
            s += 'ylabel(\'' + data[0].get_range_name() + '\')\n'
        elif y_label:
            s += 'ylabel(\'' + y_label + '\')\n'
        if p_title:
            s += 'title(\'' + p_title + '\')\n'
        # center the tick labels under the bars
        s += 'xticks(ind + width/2., t)\n'
        # Python 2 exec statement; runs the generated plotting script
        exec s
        self.set_output("source", s)

    @classmethod
    def register(cls, reg, basic):
        reg.add_module(cls, namespace=cls.namespace)

    @classmethod
    def register_ports(cls, reg, basic):
        reg.add_input_port(cls, "source", (basic.String, 'source'), True)
        reg.add_input_port(cls, "Data", (NDArray, 'Data Array to Plot'))
        reg.add_input_port(cls, "Error Bars", (NDArray, 'Error Array to Plot'))
        reg.add_input_port(cls, "Legend", (basic.Boolean, 'Use Legend'), True)
        reg.add_input_port(cls, "Random Colors", (basic.Boolean, 'Assign Random Colors'), True)
        reg.add_input_port(cls, "Colors", [basic.Integer, basic.Float, basic.Float, basic.Float], True)
        reg.add_input_port(cls, "Background", [basic.Float, basic.Float, basic.Float], True)
        reg.add_input_port(cls, "Bar Labels", [basic.Integer, basic.String], True)
        reg.add_input_port(cls, "Use X Title", (basic.Boolean, 'Apply X-axis Label'))
        reg.add_input_port(cls, "Use Y Title", (basic.Boolean, 'Apply Y-axis Label'))
        reg.add_input_port(cls, "X Title", (basic.String, 'X-axis label'), True)
        reg.add_input_port(cls, "Y Title", (basic.String, 'Y-axis label'), True)
        reg.add_input_port(cls, "Title", (basic.String, 'Figure Title'))
        reg.add_input_port(cls, "Bar Width", (basic.Float, 'Bar Width'), True)
        reg.add_output_port(cls, "source", (basic.String, 'source'))
class ScatterPlot(ArrayPlot, Module):
'''
Create a scatter plot from X and Y positions defined by the X
Array and Y Array ports, respectively. Datasets can be added by
connecting multiple arrays to the appropriate input ports.
Symbols representing each dataset can be defined by using the
Markers input assigning a valid pylab symbol to a dataset.
'''
def compute(self):
xdata = self.get_input_list("X Array")
ydata = self.get_input_list("Y Array")
self.label_dict = None
use_legend = self.force_get_input("Legend")
randomcolors = self.force_get_input("Random Colors")
colors = self.force_get_input_list("Colors")
self.color_dict = None
bg_color = self.force_get_input("Background")
markers = self.force_get_input_list("Markers")
self.marker_dict = None
ps = self.force_get_input("Point Size")
array_x_t = self.force_get_input("Use X Title")
array_y_t = self.force_get_input("Use Y Title")
p_title = self.force_get_input("Title")
x_label = self.force_get_input("X Title")
y_label = self.force_get_input("Y Title")
s = urllib.unquote(str(self.force_get_input("source", '')))
self.source = ''
if len(xdata) != len(ydata):
raise ModuleError("Cannot create scatter plot for different number of X and Y datasets.")
s = 'from pylab import *\n' +\
'from numpy import *\n' +\
'import numpy\n'
if bg_color == None:
bg_color = 'w'
if type(bg_color) == type(''):
s += 'figure(facecolor=\'' + bg_color + '\')\n'
else:
s += 'figure(facecolor=' + str(bg_color) + ')\n'
xdata_ar = numpy.zeros((len(xdata), xdata[0].get_array().flatten().shape[0]))
ydata_ar = numpy.zeros((len(xdata), xdata[0].get_array().flatten().shape[0]))
for i in range(len(xdata)):
xd = xdata[i]
yd = ydata[i]
xdata_ar[i,:] = xd.get_array().flatten()
ydata_ar[i,:] = yd.get_array().flatten()
for i in range(len(xdata)):
xar = xdata[i]
yar = ydata[i]
lab = self.get_label(xar, i)
col = self.get_color(colors, i, randomcolors)
mar = self.get_marker(markers, i)
s += 'scatter(xdata_ar[' + str(i) +',:], ydata_ar[' + str(i) + ',:]'
if lab != None:
s += ', label=\'' + lab +'\''
if col != None:
s += ', color=' + str(col)
if mar != None:
s += ', marker=\'' + mar + '\''
if ps != None:
s += ', size=' + str(ps)
s += ')\n'
if use_legend:
s += 'legend()\n'
if array_x_t:
s += 'xlabel(\'' + xar.get_domain_name() + '\')\n'
elif x_label:
s += 'xlabel(\'' + x_label + '\')\n'
if array_y_t:
s += 'ylabel(\'' + yar.get_domain_name() + '\')\n'
elif y_label:
s += 'ylabel(\'' + y_label + '\')\n'
if p_title:
s += 'title(\'' + p_title + '\')\n'
print s
exec s
self.set_output("source", s)
@classmethod
def register(cls, reg, basic):
reg.add_module(cls, namespace=cls.namespace)
@classmethod
def register_ports(cls, reg, basic):
reg.add_input_port(cls, "source", (basic.String, 'source'), True)
reg.add_input_port(cls, "X Array", (NDArray, 'X Array to Plot'))
reg.add_input_port(cls, "Y Array", (NDArray, 'Y Array to Plot'))
reg.add_input_port(cls, "Legend", (basic.Boolean, 'Use Legend'), True)
reg.add_input_port(cls, "Random Colors", (basic.Boolean, 'Assign Random Colors'), True)
reg.add_input_port(cls, "Colors", [basic.Integer, basic.Float, basic.Float, basic.Float], True)
reg.add_input_port(cls, "Background", [basic.Float, basic.Float, basic.Float], True)
reg.add_input_port(cls, "Markers", [basic.Integer, basic.String], True)
reg.add_input_port(cls, "Use X Title", (basic.Boolean, 'Apply X-axis Label'))
reg.add_input_port(cls, "Use Y Title", (basic.Boolean, 'Apply Y-axis Label'))
reg.add_input_port(cls, "X Title", (basic.String, 'X-axis label'), True)
reg.add_input_port(cls, "Y Title", (basic.String, 'Y-axis label'), True)
reg.add_input_port(cls, "Title", (basic.String, 'Figure Title'))
reg.add_input_port(cls, "Point Size", (basic.Float, 'Point Size'), True)
reg.add_output_port(cls, "source", (basic.String, 'source'))
class LinePlot(ArrayPlot, Module):
    '''
    Create a standard line plot from a 1 or 2-dimensional Input Array.
    If the Input Array is 2-dimensional, each row will be plotted as a
    new line.
    '''
    def compute(self):
        # Gather the data array and the optional display settings.
        data = self.get_input("Input Array")
        indexes = self.force_get_input("Indexes")
        self.label_dict = None
        use_legend = self.force_get_input("Legend")
        randomcolors = self.force_get_input("Random Colors")
        colors = self.force_get_input_list("Colors")
        self.color_dict = None
        markers = self.force_get_input_list("Markers")
        self.marker_dict = None
        x_label = self.force_get_input("X Title")
        y_label = self.force_get_input("Y Title")
        p_title = self.force_get_input("Title")
        bg_color = self.force_get_input("Background")
        array_x_t = self.force_get_input("Use X Title")
        array_y_t = self.force_get_input("Use Y Title")
        s = urllib.unquote(str(self.force_get_input("source", '')))
        self.source = ''
        da_ar = data.get_array()
        if da_ar.ndim > 2:
            raise ModuleError("Cannot plot data with dimensions > 2")
        # Build a pylab script as a string; exec'd below and emitted on the
        # 'source' output port.
        s = 'from pylab import *\n' +\
            'from numpy import *\n' +\
            'import numpy\n'
        if bg_color == None:
            bg_color = 'w'
        if type(bg_color) == type(''):
            s += 'figure(facecolor=\'' + bg_color + '\')\n'
        else:
            s += 'figure(facecolor=' + str(bg_color) + ')\n'
        # promote a 1D array to a single-row 2D array so one code path serves both
        if da_ar.ndim == 1:
            da_ar.shape = (1, da_ar.shape[0])
        xar = self.force_get_input("X Values")
        sf = self.force_get_input("Scaling Factor")
        if sf == None:
            sf = 1.
        # when no explicit X values are given, synthesize scaled indexes
        if xar == None:
            start_i = None
            end_i = None
            if indexes == None:
                start_i = 0
                end_i = da_ar.shape[1]
            else:
                start_i = indexes[0]
                end_i = indexes[1]
            xar = numpy.arange(start_i, end_i)
            xar = xar * sf
        else:
            xar = xar.get_array()
        # NOTE(review): leftover debug output (Python 2 print statements)
        print da_ar.shape
        print xar.shape
        for i in range(da_ar.shape[0]):
            lab = self.get_label(data, i)
            col = self.get_color(colors, i, randomcolors)
            mar = self.get_marker(markers, i)
            if indexes == None:
                s += 'plot(xar, da_ar[' + str(i) + ',:]'
            else:
                s += 'plot(xar, da_ar[' + str(i) + ',' + str(indexes[0]) + ':' + str(indexes[1]) + ']'
            if lab != None:
                s += ', label=\'' + lab +'\''
            if col != None:
                s += ', color=' + str(col)
            if mar != None:
                s += ', marker=\'' + mar + '\''
            s += ')\n'
        if use_legend:
            s += 'legend()\n'
        if array_x_t:
            s += 'xlabel(\'' + data.get_domain_name() + '\')\n'
        elif x_label:
            s += 'xlabel(\'' + x_label + '\')\n'
        if array_y_t:
            s += 'ylabel(\'' + data.get_range_name() + '\')\n'
        elif y_label:
            s += 'ylabel(\'' + y_label + '\')\n'
        if p_title:
            s += 'title(\'' + p_title + '\')\n'
        # Python 2 exec statement; runs the generated plotting script
        exec s
        self.set_output("source", s)

    @classmethod
    def register(cls, reg, basic):
        reg.add_module(cls, namespace=cls.namespace)

    @classmethod
    def register_ports(cls, reg, basic):
        reg.add_input_port(cls, "source", (basic.String, 'source'), True)
        reg.add_input_port(cls, "Input Array", (NDArray, 'Array to Plot'))
        reg.add_input_port(cls, "X Values", (NDArray, 'Domain Values'))
        reg.add_input_port(cls, "Legend", (basic.Boolean, 'Use Legend'), True)
        reg.add_input_port(cls, "Random Colors", (basic.Boolean, 'Assign Random Colors'), True)
        reg.add_input_port(cls, "Colors", [basic.Integer, basic.Float, basic.Float, basic.Float], True)
        reg.add_input_port(cls, "Markers", [basic.Integer, basic.String], True)
        reg.add_input_port(cls, "Use X Title", (basic.Boolean, 'Apply X-axis Label'))
        reg.add_input_port(cls, "Use Y Title", (basic.Boolean, 'Apply Y-axis Label'))
        reg.add_input_port(cls, "X Title", (basic.String, 'X-axis label'), True)
        reg.add_input_port(cls, "Y Title", (basic.String, 'Y-axis label'), True)
        reg.add_input_port(cls, "Title", (basic.String, 'Figure Title'))
        reg.add_input_port(cls, "Background", [basic.Float, basic.Float, basic.Float], True)
        reg.add_input_port(cls, "Indexes", [basic.Integer, basic.Integer], True)
        reg.add_input_port(cls, "Scaling Factor", (basic.Float, 'Scaling Factor'), True)
        reg.add_output_port(cls, "source", (basic.String, 'source'))
| bsd-3-clause |
SHDShim/pytheos | examples/6_p_scale_test_Dorogokupets2015_Pt.py | 1 | 1417 |
# coding: utf-8

# NOTE: this file is a Jupyter-notebook export; the get_ipython() calls only
# work inside an IPython kernel, and the "# In[n]:" comments are the original
# cell numbers.

# In[1]:

get_ipython().run_line_magic('cat', '0Source_Citation.txt')

# In[2]:

get_ipython().run_line_magic('matplotlib', 'inline')
# %matplotlib notebook # for interactive

# For high dpi displays.

# In[3]:

get_ipython().run_line_magic('config', "InlineBackend.figure_format = 'retina'")

# # 0. General note

# This example compares pressure calculated from `pytheos` and original publication for the platinum scale by Dorogokupets 2015.

# # 1. Global setup

# In[4]:

import matplotlib.pyplot as plt
import numpy as np
from uncertainties import unumpy as unp
import pytheos as eos

# # 3. Compare

# In[5]:

eta = np.linspace(1., 0.70, 7)
print(eta)

# In[6]:

dorogokupets2015_pt = eos.platinum.Dorogokupets2015()

# In[7]:

help(eos.platinum.Dorogokupets2015)

# In[8]:

dorogokupets2015_pt.print_equations()

# In[9]:

# NOTE(review): duplicate of the previous cell — presumably left over from the
# notebook; harmless, prints the equations a second time
dorogokupets2015_pt.print_equations()

# In[10]:

dorogokupets2015_pt.print_parameters()

# In[11]:

# reference unit-cell volume of platinum used by this scale
v0 = 60.37930856339099

# In[12]:

dorogokupets2015_pt.three_r

# In[13]:

v = v0 * (eta)
temp = 3000.

# In[14]:

p = dorogokupets2015_pt.cal_p(v, temp * np.ones_like(v))

# In[15]:

print('for T = ', temp)
for eta_i, p_i in zip(eta, p):
    print("{0: .3f} {1: .2f}".format(eta_i, p_i))

# The table is not given in this publication.

# In[16]:

# invert the EOS: recover volume from the pressures computed above
v = dorogokupets2015_pt.cal_v(p, temp * np.ones_like(p), min_strain=0.6)
print((v/v0))
| apache-2.0 |
bibsian/database-development | poplerGUI/ui_logic_mainwindow.py | 1 | 11994 | #!/usr/bin/env python
from PyQt4 import QtGui, QtCore
from pandas import read_csv
import subprocess
import psutil
import time
import sys, os
from Views import ui_mainrefactor as mw
from poplerGUI import ui_logic_session as sesslogic
from poplerGUI import ui_logic_site as sitelogic
from poplerGUI import ui_logic_main as mainlogic
from poplerGUI import ui_logic_taxa as taxalogic
from poplerGUI import ui_logic_time as timelogic
from poplerGUI import ui_logic_obs as rawlogic
from poplerGUI import ui_logic_covar as covarlogic
from poplerGUI import ui_logic_climatesite as climsitelogic
from poplerGUI import ui_logic_widetolong as widetolonglogic
from poplerGUI import ui_logic_splitcolumn as splitcolumnlogic
from poplerGUI import ui_logic_replace as replacelogic
from poplerGUI import ui_logic_cbind as cbindlogic
from poplerGUI.logiclayer import class_userfacade as face
from poplerGUI import class_modelviewpandas as view
from poplerGUI import class_inputhandler as ini
from poplerGUI.logiclayer import class_helpers as hlp
from poplerGUI.logiclayer.datalayer.class_filehandles import Memento
from poplerGUI.logiclayer.datalayer import config as orm
# repository root: two directories above this module
rootpath = os.path.dirname(os.path.dirname( __file__ ))
# master metadata catalog shipped at the repository root
metapath = os.path.join(rootpath, 'Cataloged_Data_Current_sorted.csv')
class UiMainWindow(QtGui.QMainWindow, mw.Ui_MainWindow):
'''
The main window class will serve to manage the display
of various dialog boxes, the facade class, model-viewer
tables, and menu actions.
'''
def __init__(self, parent=None):
    """ Build the main window: instantiate the facade and every dialog,
    wire menu actions and custom signals, and load the metadata catalog
    into the left-hand table view.
    """
    super().__init__(parent)
    # attributes
    self.setupUi(self)
    self.facade = face.Facade()
    self._log = None
    # one dialog instance per database table / data operation
    self.dsite = sitelogic.SiteDialog()
    self.dsession = sesslogic.SessionDialog()
    self.dmain = mainlogic.MainDialog()
    self.dtaxa = taxalogic.TaxaDialog()
    self.dtime = timelogic.TimeDialog()
    self.draw = rawlogic.ObsDialog()
    self.dcovar = covarlogic.CovarDialog()
    self.dclimatesite = climsitelogic.ClimateSite()
    self.dclimatesession = sesslogic.SessionDialog()
    self.dwidetolong = widetolonglogic.WidetoLongDialog()
    self.dsplitcolumn = splitcolumnlogic.SplitColumnDialog()
    self.dreplacevalue = replacelogic.ReplaceValueDialog()
    self.dcbind = cbindlogic.CbindDialog()
    self.data_model = view.PandasTableModelEdit(None)
    self.data_model.log_change.connect(self.write_to_log)
    # counts data-model updates; used to name Memento states uniquely
    self.change_count = 0
    # Actions
    self.actionUndo.triggered.connect(self.undo_data_mod)
    self.actionCombine_Columns.triggered.connect(
        self.cbind_display)
    self.actionReplace.triggered.connect(
        self.replace_value_display)
    self.actionConvert_Wide_to_Long.triggered.connect(
        self.wide_to_long_display)
    self.actionSplit_Column_By.triggered.connect(
        self.split_column_display)
    self.actionSiteTable.triggered.connect(self.site_display)
    self.actionStart_Session.triggered.connect(
        self.session_display)
    # NOTE(review): self.end_session and self.climate_session_display are
    # connected below but not defined in this chunk — confirm they exist
    # later in the class
    self.actionEnd_Session.triggered.connect(
        self.end_session)
    self.actionMainTable.triggered.connect(self.main_display)
    self.actionTaxaTable.triggered.connect(self.taxa_display)
    self.actionTimeFormat.triggered.connect(self.time_display)
    self.actionRawTable.triggered.connect(self.obs_display)
    self.actionCovariates.triggered.connect(self.covar_display)
    self.actionCommit.triggered.connect(self.commit_data)
    self.actionClimateSiteTable.triggered.connect(
        self.climate_site_display)
    self.actionNew_Climate.triggered.connect(
        self.climate_session_display)
    self.mdiArea.addSubWindow(self.subwindow_2)
    self.mdiArea.addSubWindow(self.subwindow_1)
    # Custom Signals
    self.dsite.site_unlocks.connect(self.site_complete_enable)
    self.dwidetolong.update_data.connect(self.update_data_model)
    self.dsplitcolumn.update_data.connect(self.update_data_model)
    self.dreplacevalue.update_data.connect(self.update_data_model)
    self.dcbind.update_data.connect(self.update_data_model)
    self.dclimatesite.climatesite_unlocks.connect(
        self.climate_site_complete_enabled)
    self.dsession.raw_data_model.connect(
        self.update_data_model)
    self.dclimatesession.raw_data_model.connect(
        self.update_data_model)
    # Dialog boxes for user feedback
    self.error = QtGui.QErrorMessage()
    self.message = QtGui.QMessageBox
    # load the metadata catalog (iso-8859-11 = Latin/Thai legacy encoding)
    metadf = read_csv(metapath, encoding='iso-8859-11')
    metamodel = view.PandasTableModel(
        metadf[
            [
                'global_id', 'lter', 'title', 'site_metadata',
                'temp_int'
            ]
        ]
    )
    self.tblViewMeta.setModel(metamodel)
    self.tblViewMeta.resizeColumnsToContents()
    # double-clicking a raw-data column header renames the column
    self.tblViewRaw.horizontalHeader().sectionDoubleClicked.connect(
        self.changeHorizontalHeader)
    self.tblViewRaw.resizeColumnsToContents()
@staticmethod
def update_data_view(self):
    """ Rebuild the editable table model from facade._data and install it
    on the raw-data view.

    NOTE(review): declared @staticmethod yet takes `self`; every call site
    passes the window explicitly as `self.update_data_view(self)`. Works,
    but an ordinary instance method would be conventional — changing it
    would require updating all callers.
    """
    self.data_model = view.PandasTableModelEdit(None)
    self.data_model.set_data(self.facade._data)
    self.tblViewRaw.setModel(self.data_model)
def undo_data_mod(self):
    """ Roll the dataframe back one Memento state and refresh the view;
    show an error dialog when no earlier state remains.
    """
    if not self.facade.data_caretaker._statelist:
        self.error.showMessage(
            'No further undo'
        )
        return
    previous_state = self.facade.data_caretaker.restore()
    self.facade.data_originator.restore_from_memento(previous_state)
    self.facade._data = self.facade.data_originator._data.copy()
    self.update_data_view(self)
@QtCore.pyqtSlot(object)
def update_data_model(self, dataframe_state):
    ''' Updating data model and facade instance with
    other dialog boxes

    Saves the current dataframe as a new Memento state (named from the
    emitting dialog's label plus a running counter), then refreshes the
    table view and re-shares the facade with the site dialogs.
    '''
    self.change_count += 1
    # blanket NA fill so the table view has no missing cells
    self.facade._data.fillna('NA', inplace=True)
    new_dataframe_state = Memento(
        self.facade._data.copy(),
        '{}_{}'.format(dataframe_state, self.change_count)
    )
    self.facade.data_caretaker.save(new_dataframe_state)
    self.facade.data_originator.restore_from_memento(
        new_dataframe_state
    )
    self.update_data_view(self)
    # Updating facade instances with dialog boxes
    self.dsite.facade = self.facade
    self.dclimatesite.facade = self.facade
@QtCore.pyqtSlot(object)
def write_to_log(self, dict_obj):
    """ Slot for the data model's log_change signal: record a cell-edit
    entry in the 'changecell' table log.
    """
    self.facade.create_log_record('changecell')
    self._log = self.facade._tablelog['changecell']
    hlp.write_column_to_log(
        dict_obj, self._log, 'changecell')
@QtCore.pyqtSlot(object)
def update_webview(self, url):
    # placeholder slot: currently only echoes the received url to stdout
    print(url)
@QtCore.pyqtSlot(object)
def site_complete_enable(self):
    '''
    Once the site table is complete, enable the menu actions that
    open the dialogs for the remaining database tables, then
    refresh the data model.
    '''
    table_actions = (
        self.actionMainTable,
        self.actionTaxaTable,
        self.actionTimeFormat,
        self.actionRawTable,
        self.actionCovariates,
    )
    for action in table_actions:
        action.setEnabled(True)
    self.update_data_model('Updating data')
def changeHorizontalHeader(self, index):
    ''' method to update data model when column headers
    are changed

    Prompts for a new label for the double-clicked column, renames it
    in place, records the change in the 'changecolumn' log, and
    refreshes the table view.
    '''
    oldHeader = self.facade._data.iloc[:,index].name
    newHeader, ok = QtGui.QInputDialog.getText(
        self, 'Input', 'New Column Label:')
    if ok:
        self.facade._data.rename(
            columns={oldHeader:newHeader}, inplace=True)
        self.facade.create_log_record('changecolumn')
        self._log = self.facade._tablelog['changecolumn']
        hlp.write_column_to_log(
            {
                'column_changes':
                {
                    oldHeader: newHeader}
            },
            self._log, 'changecolumn'
        )
        self.update_data_model('header_changes')
def cbind_display(self):
    ''' Displays the combine-columns dialog box '''
    self.dcbind.show()
    # facade is attached after show(); show() is non-blocking, so the
    # dialog has it before any user interaction
    self.dcbind.facade = self.facade
def replace_value_display(self):
    ''' Displays the replace-value dialog box '''
    self.dreplacevalue.show()
    self.dreplacevalue.facade = self.facade
def split_column_display(self):
''' Displays dialog box to split a column '''
self.dsplitcolumn.show()
self.dsplitcolumn.facade = self.facade
def wide_to_long_display(self):
''' Displays dialog box to melt data '''
self.dwidetolong.show()
self.dwidetolong.facade = self.facade
def site_display(self):
''' Displays the Site Dialog box'''
self.dsite.show()
self.dsite.facade = self.facade
def addsite_display(self):
''' Display dialog box for adding site column'''
self.daddsite.show()
self.daddsite.facade = self.facade
def session_display(self):
''' Displays the Site Dialog box'''
self.dsession.show()
self.dsession.facade = self.facade
def main_display(self):
''' Displays main dialog box'''
self.dmain.facade = self.facade
self.dmain.show()
def taxa_display(self):
''' Display the Taxa Dialog box'''
self.dtaxa.facade = self.facade
self.dtaxa.show()
def time_display(self):
''' Display the Time Dialog box'''
self.dtime.facade = self.facade
self.dtime.show()
def obs_display(self):
''' Display the Raw Obs Dialog box'''
self.draw.facade = self.facade
self.draw.show()
def covar_display(self):
'''Display the Raw Obs Dialog box'''
self.dcovar.facade = self.facade
self.dcovar.show()
def commit_data(self):
    ''' Method to call the upload-to-database command.

    On success the Commit action is disabled and a status dialog is
    shown; on failure the error is printed, logged, shown to the user,
    and re-raised as ValueError.
    '''
    # Register an input handler so the facade knows which table to update.
    commithandle = ini.InputHandler(
        name='updateinfo', tablename='updatetable')
    self.facade.input_register(commithandle)
    try:
        self.facade.push_merged_data()
        # One successful commit per session: disable the action.
        self.actionCommit.setEnabled(False)
        self.message.about(
            self, 'Status', 'Database transaction complete')
    except Exception as e:
        print(str(e))
        self.facade._tablelog['project_table'].debug(str(e))
        # BUG FIX: corrected 'Datbase' typo in the user-facing message.
        self.error.showMessage(
            'Database transaction error: ' + str(e) +
            '. May need to alter site abbreviations.')
        raise ValueError(str(e))
# Below are dialog boxes and logic that relate to Climate data
def climate_site_display(self):
    ''' Displays the Climate Site Dialog box '''
    self.dclimatesite.show()
    self.dclimatesite.facade = self.facade

@QtCore.pyqtSlot(object)
def climate_site_complete_enabled(self, datamod2):
    # Climate-site step completed: unlock the climate raw-data action
    # and refresh the data model.
    # NOTE(review): update_data_model() is called elsewhere with a
    # string argument -- confirm this no-arg call is valid.
    self.actionClimateRawTable.setEnabled(True)
    self.update_data_model()

def climate_session_display(self):
    ''' Displays the Climate session dialog box '''
    self.dclimatesession.show()
    self.dclimatesession.facade = self.facade
    self.actionSiteTable.setEnabled(False)
    self.actionClimateSiteTable.setEnabled(True)
    # NOTE(review): loads a hard-coded manual-test metadata file from the
    # current working directory -- looks like leftover test scaffolding;
    # confirm before shipping.
    metapath = (
        str(os.getcwd()) +
        '/Datasets_manual_test/meta_climate_test.csv')
    metadf = read_csv(metapath, encoding='iso-8859-11')
    metamodel = view.PandasTableModel(metadf)
    self.tblViewMeta.setModel(metamodel)
def end_session(self):
    ''' Close the database connection, relaunch the GUI in a fresh
    process, close this window, and (best effort) kill any stray
    python processes left behind. '''
    orm.conn.close()
    # Relaunch the application entry point before closing this window.
    subprocess.call(
        "python" + " poplerGUI_run_main.py", shell=True)
    self.close()
    # Best-effort cleanup of leftover interpreter processes (Windows
    # process name). BUG FIX: narrowed from a bare ``except:`` so
    # SystemExit/KeyboardInterrupt are no longer swallowed.
    try:
        PROCNAME = "python.exe"
        for proc in psutil.process_iter():
            if proc.name() == PROCNAME:
                proc.kill()
    except Exception:
        pass
| mit |
aurelieladier/openturns | python/doc/pyplots/UserDefinedCovarianceModel.py | 2 | 1077 | import openturns as ot
from math import exp
from matplotlib import pyplot as plt
from openturns.viewer import View
def C(s, t):
    """Covariance kernel: exp(-4*|s - t| / (1 + s^2 + t^2))."""
    gap = abs(s - t)
    scale = 1.0 + (s * s + t * t)
    return exp(-4.0 * gap / scale)
N = 64    # number of grid cells
a = 4.0   # half-width of the domain [-a, a]
#myMesh = ot.IntervalMesher([N]).build(ot.Interval(-a, a))
myMesh = ot.RegularGrid(-a, 2 * a / N, N + 1)
# Build the lower-triangular collection of 1x1 covariance matrices:
# C(s, t) evaluated at every vertex pair with l <= k.
myCovarianceCollection = ot.CovarianceMatrixCollection()
for k in range(myMesh.getVerticesNumber()):
    t = myMesh.getVertices()[k]
    for l in range(k + 1):
        s = myMesh.getVertices()[l]
        matrix = ot.CovarianceMatrix(1)
        matrix[0, 0] = C(s[0], t[0])
        myCovarianceCollection.add(matrix)
# Discrete covariance model defined on the mesh from the sampled values.
covarianceModel = ot.UserDefinedCovarianceModel(myMesh, myCovarianceCollection)
def f(x):
    # Wrap the discrete covariance model as a bivariate function
    # (s, t) -> cov(s, t) so it can be drawn as a surface.
    return [covarianceModel([x[0]], [x[1]])[0, 0]]
func = ot.PythonFunction(2, 1, f)
func.setDescription(['$s$', '$t$', '$cov$'])
# Draw the covariance surface on [-a, a]^2 with a 512x512 grid.
cov_graph = func.draw([-a] * 2, [a] * 2, [512] * 2)
fig = plt.figure(figsize=(10, 4))
plt.suptitle('User defined covariance model')
cov_axis = fig.add_subplot(111)
View(cov_graph, figure=fig, axes=[cov_axis], add_legend=False)
| lgpl-3.0 |
MTgeophysics/mtpy | mtpy/imaging/plotstations.py | 1 | 22452 | # -*- coding: utf-8 -*-
"""
===============
PlotStations
===============
Plots station locations in map view.
Created on Fri Jun 07 18:20:00 2013
@author: jpeacock-pr
"""
#==============================================================================
import matplotlib.pyplot as plt
import numpy as np
import os
import mtpy.imaging.mtplottools as mtpt
import mtpy.utils.exceptions as mtex
#==============================================================================
class PlotStations(object):
"""
plot station locations in map view.
Need to input one of the following lists:
Arguments:
----------
**fn_list** : list of strings
full paths to .edi files to plot. *default* is None
**mt_object** : class mtpy.imaging.mtplot.MTplot
object of mtpy.imaging.mtplot.MTplot
*default* is None
Optional Key Words:
-------------------
*fig_dpi*: float
dots per inch resolution of figure. *default* is 300.
*fig_num*: int
number of figure instance. *default* is 1.
*fig_size*: [x, y]
figure dimensions in inches. *default* is None.
*font_size*: float
size of tick labels, axes labels will be +2.
*default* is 7
*image_extent*: (xmin, xmax, ymin, ymax)
extent of image in map coordinates, must be input if
image_file is not None. *default* is None.
*image_file*: string
full path to base image file, can be .jpg, .png, or .svg
*default* is None.
*map_scale*: [ 'latlon' | 'eastnorth' | 'eastnorthkm' ]
scale of map, either in:
- 'latlon' --> latitude and longitude in decimal
degrees
- 'eastnorth' --> easting and northing in meters
- 'eastnorthkm' --> easting and northing in kilometers
*marker*: string or int
type of marker used to represent station location. For all
marker options:
============================== ================================
marker description
============================== ================================
``7`` caretdown
``4`` caretleft
``5`` caretright
``6`` caretup
``'o'`` circle
``'D'`` diamond
``'h'`` hexagon1
``'H'`` hexagon2
``'_'`` hline
``''`` nothing
``'None'`` nothing
``None`` nothing
``' '`` nothing
``'8'`` octagon
``'p'`` pentagon
``','`` pixel
``'+'`` plus
``'.'`` point
``'s'`` square
``'*'`` star
``'d'`` thin_diamond
``3`` tickdown
``0`` tickleft
``1`` tickright
``2`` tickup
``'1'`` tri_down
``'3'`` tri_left
``'4'`` tri_right
``'2'`` tri_up
``'v'`` triangle_down
``'<'`` triangle_left
``'>'`` triangle_right
``'^'`` triangle_up
``'|'`` vline
``'x'`` x
``'$...$'`` render the string using mathtext
============================== ================================
*marker_color*: string or (red, green, blue) on a scale of 0 to 1.
color of station marker. *default* is black
*marker_size*: float
size of station marker in points. *default* is 10.
*plot_names*: [ True | False ]
plot station names next to marker. *default* is True.
*plot_title*: string
title of plot
*plot_yn*: [ 'y' | 'n' ]
plot on initialization. *default* is 'y'.
*ref_point*: (x, y)
reference point to center map on. *default* is (0, 0).
*text_angle*: float
angle of station label in degrees. *default* is 0.
*text_color*: string or (red, green, blue) on a scale [0,1]
color of station label. *default* is black.
*text_ha*: [ 'center' | 'right' | 'left' ]
horizontal alignment of station label relative to the
station location. *default* is 'center'.
*text_pad*: float
padding from station marker to station label in map
units.
*text_size*: float
font size of station label. *default* is 7.
*text_va*: [ 'center' | 'top' | 'bottom' | 'baseline' ]
vertical alignment of station label. *default* is 'baseline'
*text_weight*: [ 'ultralight' | 'light' | 'normal' | 'regular' |
'book' | 'medium' | 'roman' | 'semibold' |
'demibold' | 'demi' | 'bold' | 'heavy' |
'extra bold' | 'black' ]
weight of station label. *default* is 'normal'.
*xlimits*: (xmin, xmax)
limits of map in east-west direction. *default* is None,
which computes limits from data.
*ylimits*: (ymin, ymax)
limits of map in north-south direction. *default* is None,
which computes limits from data.
:Example: ::
>>> import mtpy.imaging.mtplot as mtplot
>>> import os
>>> edipath = '/home/MT/edifiles'
>>> edilist = [os.path.join(edipath, edi)
>>> ... for edi in os.listdir(edipath)
>>> ... if edi.find('.edi')>0]
>>> ps1 = mtplot.plot_station_locations(fn_list=edilist)
>>> # change station label padding and properties
>>> ps1.text_pad = .001
>>> ps1.text_angle = 60
>>> ps1.text_size = 8
>>> ps1.text_color = (.5, .5, 0) #orangeish
>>> ps1.redraw_plot()
>>> ps1.save_plot('/home/MT/figures', file_format='pdf')
saved figure to '/home/MT/figures/station_map.pdf'
=================== =======================================================
Attributes Description
=================== =======================================================
ax matplotlib.axes instance of station map
fig matplotlib.figure instance of map figure
fig_dpi dots-per-inch resolution of figure
fig_num number of figure instance
fig_size size of figure in inches
font_size font size of tick labels
image_extent (xmin, xmax, ymin, ymax) extent of image if input
image_file full path to base image file, can be .jpg, .png or .svg
map_scale [ 'latlon' | 'eastnorth' | 'eastnorthkm' ] map scale
marker station marker, see above for options
marker_color color of marker
marker_size size of marker in points
mt_list list of mtpy.imaging.mtplottools.MTplot instances
plot_names [ True | False ] plot station names next to markers
plot_title title of plot
plot_yn [ 'y' | 'n' ] plot on initializing PlotStations
ref_point reference point to center map on.
stationid (index0, index1) to get station label from station name
text_angle angle of station label
text_color color of station label
text_ha horizontal alignment of station label, see above
text_pad padding of station label in y direction
text_size font size of station label
text_va vertical alignment of station label
text_weight font weight of station label
xlimits limits of map in east-west direction
ylimits limits of map in north-south direction
=================== =======================================================
Methods:
---------
* *plot*: plots the pseudosection according to keywords
* *redraw_plot*: redraws the plot, use if you change some of the
attributes.
* *update_plot*: updates the plot, use if you change some of the
axes attributes, figure needs to be open to update.
* *save_plot*: saves the plot to given filepath.
"""
def __init__(self, **kwargs):
    # Data sources: either a list of .edi file paths or a list of
    # pre-built MTplot objects.
    fn_list = kwargs.pop('fn_list', None)
    mt_object_list = kwargs.pop('mt_object_list', None)
    #----set attributes for the class-------------------------
    self.mt_list = mtpt.MTplot_list(fn_list=fn_list,
                                    mt_object_list=mt_object_list)
    #--> set plot properties
    self.fig_num = kwargs.pop('fig_num', 1)
    self.plot_title = kwargs.pop('plot_title', None)
    self.fig_dpi = kwargs.pop('fig_dpi', 300)
    self.fig_size = kwargs.pop('fig_size', None)
    self.font_size = kwargs.pop('font_size', 7)
    # (index0, index1) slice applied to station names to form labels.
    self.stationid = kwargs.pop('stationid', [0, 4])
    self.xlimits = kwargs.pop('xlimits', None)
    self.ylimits = kwargs.pop('ylimits', None)
    self.ref_point = kwargs.pop('ref_point', (0, 0))
    self.map_scale = kwargs.pop('map_scale', 'latlon')
    # Station marker properties.
    self.marker = kwargs.pop('marker', 'v')
    self.marker_size = kwargs.pop('marker_size', 10)
    self.marker_color = kwargs.pop('marker_color', 'k')
    # Station-label text properties.
    self.plot_names = kwargs.pop('plot_names', True)
    self.text_size = kwargs.pop('text_size', 7)
    self.text_weight = kwargs.pop('text_weight', 'normal')
    self.text_color = kwargs.pop('text_color', 'k')
    self.text_ha = kwargs.pop('text_ha', 'center')
    self.text_va = kwargs.pop('text_va', 'baseline')
    self.text_angle = kwargs.pop('text_angle', 0)
    self.text_x_pad = kwargs.pop('text_x_pad', None)
    self.text_y_pad = kwargs.pop('text_y_pad', None)
    # Optional background image; its extent must be given alongside it.
    self.image_file = kwargs.pop('image_file', None)
    self.image_extent = kwargs.pop('image_extent', None)
    if self.image_file is not None:
        if self.image_extent is None:
            raise mtex.MTpyError_inputarguments('Need to input extents ' +
                                                'of the image as' +
                                                '(x0, y0, x1, y1)')
    #--> plot if desired
    self.plot_yn = kwargs.pop('plot_yn', 'y')
    if self.plot_yn == 'y':
        self.plot()
def plot(self):
    """
    plots the station locations
    """
    # Global figure layout settings.
    plt.rcParams['font.size'] = self.font_size
    plt.rcParams['figure.subplot.left'] = .09
    plt.rcParams['figure.subplot.right'] = .98
    plt.rcParams['figure.subplot.bottom'] = .09
    plt.rcParams['figure.subplot.top'] = .98
    # get station locations in the requested map coordinates
    self.mt_list.get_station_locations(map_scale=self.map_scale,
                                       ref_point=self.ref_point)
    text_dict = {'size': self.text_size,
                 'weight': self.text_weight,
                 'rotation': self.text_angle,
                 'color': self.text_color}
    font_dict = {'size': self.font_size + 2, 'weight': 'bold'}
    # Auto-compute limits with a 0.2% margin; the sign check keeps the
    # lower bound expanding outward when coordinates are negative.
    if self.xlimits is None:
        if np.sign(self.mt_list.map_xarr.min()) == -1:
            self.xlimits = (self.mt_list.map_xarr.min() * 1.002,
                            self.mt_list.map_xarr.max() * .998)
        else:
            self.xlimits = (self.mt_list.map_xarr.min() * .998,
                            self.mt_list.map_xarr.max() * 1.002)
    if self.ylimits is None:
        if np.sign(self.mt_list.map_yarr.min()) == -1:
            self.ylimits = (self.mt_list.map_yarr.min() * 1.002,
                            self.mt_list.map_yarr.max() * .998)
        else:
            self.ylimits = (self.mt_list.map_yarr.min() * .998,
                            self.mt_list.map_yarr.max() * 1.002)
    # Axis labels follow the chosen map scale.
    if self.map_scale == 'latlon':
        xlabel = 'Longitude (deg)'
        ylabel = 'Latitude (deg)'
    elif self.map_scale == 'eastnorth':
        xlabel = 'Easting (m)'
        ylabel = 'Northing (m)'
    elif self.map_scale == 'eastnorthkm':
        xlabel = 'Easting (km)'
        ylabel = 'Northing (km)'
    # make a figure instance
    self.fig = plt.figure(self.fig_num, self.fig_size, dpi=self.fig_dpi)
    # add an axes
    self.ax = self.fig.add_subplot(1, 1, 1, aspect='equal')
    #--> plot the background image if desired-----------------------
    if self.image_file is not None:
        im = plt.imread(self.image_file)
        self.ax.imshow(im, origin='lower', extent=self.image_extent,
                       aspect='auto')
    # Plot one marker (and optionally a label) per station.
    for key in list(self.mt_list.map_dict.keys()):
        self.ax.scatter(self.mt_list.map_dict[key][0],
                        self.mt_list.map_dict[key][1],
                        marker=self.marker,
                        c=self.marker_color,
                        s=self.marker_size)
        if self.plot_names == True:
            # NOTE(review): the pads are computed from the first station
            # processed and then reused for all the others (once set they
            # are no longer None) -- confirm this is intended.
            if self.text_x_pad is None:
                self.text_x_pad = .0009 * self.mt_list.map_dict[key][0]
            if self.text_y_pad is None:
                self.text_y_pad = .0009 * self.mt_list.map_dict[key][1]
            self.ax.text(self.mt_list.map_dict[key][0] + self.text_x_pad,
                         self.mt_list.map_dict[key][1] + self.text_y_pad *
                         np.sign(self.mt_list.map_dict[key][1]),
                         key[self.stationid[0]:self.stationid[1]],
                         verticalalignment=self.text_va,
                         horizontalalignment=self.text_ha,
                         fontdict=text_dict)
    # set axis properties
    self.ax.set_xlabel(xlabel, fontdict=font_dict)
    self.ax.set_ylabel(ylabel, fontdict=font_dict)
    self.ax.grid(alpha=.35, color=(.25, .25, .25))
    self.ax.set_xlim(self.xlimits)
    self.ax.set_ylim(self.ylimits)
    plt.show()
def write_station_locations(self, save_path=None):
"""
Write text file containing station locations in map coordinates and
relative to ref_point.
Arguments:
----------
**save_path**: string
full path to folder to save file, or full path to
the file to save to. *default* is None, which uses
the directory path of files used to plot.
Returns:
---------
**fn_save_path**: string
full path to text file
"""
if save_path is None:
try:
svpath = os.path.dirname(self.mt_list.mt_list[0].fn)
except TypeError:
raise IOError('Need to input save_path, could not find a path')
else:
svpath = save_path
if self.map_scale == 'latlon':
hdr_list = ['Station', 'Longitude(deg)', 'Latitude(deg)',
'Elevation(m)']
elif self.map_scale == 'eastnorth':
hdr_list = ['Station', 'Easting(m)', 'Northing(m)',
'Elevation(m)']
elif self.map_scale == 'eastnorthkm':
hdr_list = ['Station', 'Easting(km)', 'Northing(km)',
'Elevation(m)']
self.mt_list.get_station_locations(map_scale=self.map_scale,
ref_point=self.ref_point)
fn_svpath = os.path.join(svpath, 'StationLocations_{0}.txt'.format(
self.map_scale))
tfid = file(fn_svpath, 'w')
hdr_str = ['{0:<15}'.format(hdr_list[0])] +\
['{0:^15}'.format(hh) for hh in hdr_list[1:]] + ['\n']
tfid.write(''.join(hdr_str))
for ss in list(self.mt_list.map_dict.keys()):
x = self.mt_list.map_dict[ss][0]
y = self.mt_list.map_dict[ss][1]
z = self.mt_list.map_dict[ss][2]
if self.map_scale == 'latlon':
tline = '{0:<15}{1: ^15.3f}{2: ^15.3f}{3: ^15.1f}\n'.format(ss,
x,
y,
z)
else:
tline = '{0:<15}{1: ^15.1f}{2: ^15.1f}{3: ^15.1f}\n'.format(ss,
x,
y,
z)
tfid.write(tline)
tfid.close()
print('Saved file to: ', fn_svpath)
def save_plot(self, save_fn, file_format='pdf',
              orientation='portrait', fig_dpi=None, close_plot='y'):
    """
    save_plot will save the figure to save_fn.

    Arguments:
    -----------
        **save_fn** : string
                      full path to save figure to, can be input as
                      * directory path -> the file is saved as
                        save_fn/station_map/station_map.file_format
                      * full path -> the file is saved to the given
                        path; the format is assumed from the extension

        **file_format** : [ pdf | eps | jpg | png | svg ]
                          file type of saved figure

        **orientation** : [ landscape | portrait ]
                          orientation in which the file will be saved
                          *default* is portrait

        **fig_dpi** : int
                      resolution in dots-per-inch for the saved file.
                      If None, the dpi the figure was made with is used.

        **close_plot** : [ y | n ]
                         * 'y' will close the plot after saving.
                         * 'n' will leave plot open
    """
    # BUG FIX: this method previously referenced ``self.plot_freq`` and
    # ``self.ellipse_colorby`` -- attributes of PlotPhaseTensorMaps that
    # PlotStations never sets -- so it always raised AttributeError.
    if fig_dpi is None:
        fig_dpi = self.fig_dpi
    if os.path.isdir(save_fn) == False:
        # A full file path was given: infer the format from the extension.
        file_format = save_fn[-3:]
        self.fig.savefig(save_fn, dpi=fig_dpi, format=file_format,
                         orientation=orientation)
        plt.clf()
        plt.close(self.fig)
    else:
        # A directory was given: save into a 'station_map' subfolder.
        if not os.path.exists(save_fn):
            os.mkdir(save_fn)
        if not os.path.exists(os.path.join(save_fn, 'station_map')):
            os.mkdir(os.path.join(save_fn, 'station_map'))
        save_fn = os.path.join(save_fn, 'station_map')
        save_fn = os.path.join(save_fn, 'station_map.' + file_format)
        self.fig.savefig(save_fn, dpi=fig_dpi, format=file_format,
                         orientation=orientation)
    if close_plot == 'y':
        plt.clf()
        plt.close(self.fig)
    # Remember where the figure was written.
    self.fig_fn = save_fn
    print('Saved figure to: ' + self.fig_fn)
def update_plot(self):
    """
    update any parameters that were changed using the built-in draw from
    canvas.

    Use this if you change any of the .fig or axes properties

    :Example: ::

        >>> # to change the grid lines to only be on the major ticks
        >>> import mtpy.imaging.mtplottools as mtplot
        >>> p1 = mtplot.PlotResPhase(r'/home/MT/mt01.edi')
        >>> [ax.grid(True, which='major') for ax in [p1.axr,p1.axp]]
        >>> p1.update_plot()
    """
    # Redraw the existing canvas; the figure must still be open.
    self.fig.canvas.draw()

def redraw_plot(self):
    """
    use this function if you updated some attributes and want to re-plot.

    :Example: ::

        >>> # change the color and marker of the xy components
        >>> import mtpy.imaging.mtplottools as mtplot
        >>> p1 = mtplot.PlotResPhase(r'/home/MT/mt01.edi')
        >>> p1.xy_color = (.5,.5,.9)
        >>> p1.xy_marker = '*'
        >>> p1.redraw_plot()
    """
    # Close and fully re-plot; needed when data-dependent attributes change.
    plt.close(self.fig)
    self.plot()
def __str__(self):
return "Plot station locations"
| gpl-3.0 |
finfou/tushare | tushare/stock/reference.py | 2 | 25190 | # -*- coding:utf-8 -*-
"""
投资参考数据接口
Created on 2015/03/21
@author: Jimmy Liu
@group : waditu
@contact: jimmysoa@sina.cn
"""
from __future__ import division
from tushare.stock import cons as ct
from tushare.stock import ref_vars as rv
from tushare.util import dateu as dt
import pandas as pd
import time
import lxml.html
from lxml import etree
import re
import json
from pandas.compat import StringIO
from tushare.util import dateu as du
from tushare.util.netbase import Client
try:
from urllib.request import urlopen, Request
except ImportError:
from urllib2 import urlopen, Request
def profit_data(year=2014, top=25,
                retry_count=3, pause=0.001):
    """
    Fetch dividend / profit-distribution plan data.

    Parameters
    --------
    year : int
        year of the distribution plans
    top : int or 'all'
        number of most recently announced records to return,
        default is the latest 25; 'all' fetches every page
    retry_count : int, default 3
        number of retries when a network problem occurs
    pause : int, default 0
        seconds to pause between repeated requests, to avoid
        hitting the server too fast

    returns
    -------
    DataFrame
        code: stock code
        name: stock name
        year: distribution year
        report_date: announcement date
        divi: cash dividend (yuan per 10 shares)
        shares: bonus + converted shares (per 10 shares)
    """
    # BUG FIX: the 'all' sentinel must be checked before the numeric
    # comparison; on Python 3, ``'all' <= 25`` raises TypeError, which
    # made the 'all' branch unreachable.
    if top == 'all':
        ct._write_head()
        df, pages = _dist_cotent(year, 0, retry_count, pause)
        for idx in range(1, int(pages)):
            df = df.append(_dist_cotent(year, idx, retry_count,
                                        pause), ignore_index=True)
        return df
    elif isinstance(top, int):
        if top <= 25:
            # A single page holds 25 records, so the first page suffices.
            df, pages = _dist_cotent(year, 0, retry_count, pause)
            return df.head(top)
        ct._write_head()
        # True division is active (__future__ import); cast via int() below.
        allPages = top / 25 + 1 if top % 25 > 0 else top / 25
        df, pages = _dist_cotent(year, 0, retry_count, pause)
        if int(allPages) < int(pages):
            pages = allPages
        for idx in range(1, int(pages)):
            df = df.append(_dist_cotent(year, idx, retry_count,
                                        pause), ignore_index=True)
        return df.head(top)
    else:
        print(ct.TOP_PARAS_MSG)
def _fun_divi(x):
    # Extract the cash dividend amount (yuan per 10 shares) from a plan
    # description string; returns 0 when no dividend is mentioned.
    if ct.PY3:
        reg = re.compile(r'分红(.*?)元', re.UNICODE)
        res = reg.findall(x)
        return 0 if len(res)<1 else float(res[0])
    else:
        # Python 2: build the pattern from unicode literals so the
        # Chinese markers match unicode input; non-unicode input -> 0.
        if isinstance(x, unicode):
            s1 = unicode('分红','utf-8')
            s2 = unicode('元','utf-8')
            reg = re.compile(r'%s(.*?)%s'%(s1, s2), re.UNICODE)
            res = reg.findall(x)
            return 0 if len(res)<1 else float(res[0])
        else:
            return 0
def _fun_into(x):
    # Extract the total bonus + converted shares (per 10 shares) from a
    # plan description string: sums the converted ("转增") and bonus
    # ("送股") figures; returns 0 when neither is mentioned.
    if ct.PY3:
        reg1 = re.compile(r'转增(.*?)股', re.UNICODE)
        reg2 = re.compile(r'送股(.*?)股', re.UNICODE)
        res1 = reg1.findall(x)
        res2 = reg2.findall(x)
        res1 = 0 if len(res1)<1 else float(res1[0])
        res2 = 0 if len(res2)<1 else float(res2[0])
        return res1 + res2
    else:
        # Python 2: build patterns from unicode literals as in _fun_divi.
        if isinstance(x, unicode):
            s1 = unicode('转增','utf-8')
            s2 = unicode('送股','utf-8')
            s3 = unicode('股','utf-8')
            reg1 = re.compile(r'%s(.*?)%s'%(s1, s3), re.UNICODE)
            reg2 = re.compile(r'%s(.*?)%s'%(s2, s3), re.UNICODE)
            res1 = reg1.findall(x)
            res2 = reg2.findall(x)
            res1 = 0 if len(res1)<1 else float(res1[0])
            res2 = 0 if len(res2)<1 else float(res2[0])
            return res1 + res2
        else:
            return 0
def _dist_cotent(year, pageNo, retry_count, pause):
    # Scrape one page of the 163.com distribution-plan table.
    # When pageNo == 0 the total page count (parsed from the pager) is
    # returned alongside the DataFrame.
    for _ in range(retry_count):
        time.sleep(pause)
        try:
            if pageNo > 0:
                ct._write_console()
            html = lxml.html.parse(rv.DP_163_URL%(ct.P_TYPE['http'], ct.DOMAINS['163'],
                                   ct.PAGES['163dp'], year, pageNo))
            res = html.xpath('//div[@class=\"fn_rp_list\"]/table')
            if ct.PY3:
                sarr = [etree.tostring(node).decode('utf-8') for node in res]
            else:
                sarr = [etree.tostring(node) for node in res]
            sarr = ''.join(sarr)
            df = pd.read_html(sarr, skiprows=[0])[0]
            df = df.drop(df.columns[0], axis=1)
            df.columns = rv.DP_163_COLS
            # Derive numeric dividend/share columns from the plan text,
            # then drop the raw text column.
            df['divi'] = df['plan'].map(_fun_divi)
            df['shares'] = df['plan'].map(_fun_into)
            df = df.drop('plan', axis=1)
            # Normalize stock codes to 6-digit strings.
            df['code'] = df['code'].astype(object)
            df['code'] = df['code'].map(lambda x : str(x).zfill(6))
            pages = []
            if pageNo == 0:
                # Total page count sits in the second-to-last pager link.
                page = html.xpath('//div[@class=\"mod_pages\"]/a')
                if len(page)>1:
                    asr = page[len(page)-2]
                    pages = asr.xpath('text()')
        except Exception as e:
            print(e)
        else:
            if pageNo == 0:
                return df, pages[0] if len(pages)>0 else 0
            else:
                return df
    raise IOError(ct.NETWORK_URL_ERROR_MSG)
def forecast_data(year, quarter):
    """
    Fetch earnings forecast data.

    Parameters
    --------
    year : int
        year, e.g. 2014
    quarter : int
        quarter, one of 1, 2, 3, 4 only

    Note: the data are scraped from the website page by page, so the
    speed depends on your current network connection.

    Return
    --------
    DataFrame
        code: stock code
        name: stock name
        type: forecast type (e.g. expected increase, expected loss)
        report_date: announcement date
        pre_eps: EPS for the same period last year
        range: forecast change range
    """
    if ct._check_input(year, quarter) is True:
        ct._write_head()
        data = _get_forecast_data(year, quarter, 1, pd.DataFrame())
        df = pd.DataFrame(data, columns=ct.FORECAST_COLS)
        # Normalize stock codes to 6-digit strings.
        df['code'] = df['code'].map(lambda x: str(x).zfill(6))
        return df
def _get_forecast_data(year, quarter, pageNo, dataArr):
    # Recursively scrape one page of the earnings-forecast table and
    # follow the "next page" link until it disappears.
    ct._write_console()
    try:
        html = lxml.html.parse(ct.FORECAST_URL%(ct.P_TYPE['http'], ct.DOMAINS['vsf'],
                               ct.PAGES['fd'], year, quarter, pageNo,
                               ct.PAGE_NUM[1]))
        res = html.xpath("//table[@class=\"list_table\"]/tr")
        if ct.PY3:
            sarr = [etree.tostring(node).decode('utf-8') for node in res]
        else:
            sarr = [etree.tostring(node) for node in res]
        sarr = ''.join(sarr)
        # Replace placeholder dashes with 0 so numeric parsing succeeds.
        sarr = sarr.replace('--', '0')
        sarr = '<table>%s</table>'%sarr
        df = pd.read_html(sarr)[0]
        df = df.drop([4, 5, 8], axis=1)  # drop columns not in FORECAST_COLS
        df.columns = ct.FORECAST_COLS
        dataArr = dataArr.append(df, ignore_index=True)
        # Next page number is embedded in the last pager link's onclick.
        nextPage = html.xpath('//div[@class=\"pages\"]/a[last()]/@onclick')
        if len(nextPage)>0:
            pageNo = re.findall(r'\d+',nextPage[0])[0]
            return _get_forecast_data(year, quarter, pageNo, dataArr)
        else:
            return dataArr
    except Exception as e:
        print(e)
def xsg_data(year=None, month=None,
             retry_count=3, pause=0.001):
    """
    Fetch data on restricted shares becoming tradable (lock-up expiry).

    Parameters
    --------
    year : int
        year, defaults to the current year
    month : int
        expiry month, defaults to the current month
    retry_count : int, default 3
        number of retries when a network problem occurs
    pause : int, default 0
        seconds to pause between repeated requests, to avoid
        hitting the server too fast

    Return
    ------
    DataFrame
        code: stock code
        name: stock name
        date: release date
        count: number of shares released (10k shares)
        ratio: proportion of total shares (%)
    """
    year = dt.get_year() if year is None else year
    month = dt.get_month() if month is None else month
    for _ in range(retry_count):
        time.sleep(pause)
        try:
            request = Request(rv.XSG_URL%(ct.P_TYPE['http'], ct.DOMAINS['em'],
                              ct.PAGES['emxsg'], year, month))
            lines = urlopen(request, timeout = 10).read()
            lines = lines.decode('utf-8') if ct.PY3 else lines
        except Exception as e:
            print(e)
        else:
            # Strip the wrapping quotes/brackets, then split the quoted
            # CSV-like records. BUG FIX: the accumulator previously
            # shadowed the builtin ``list``; renamed to ``rows``.
            da = lines[3:len(lines)-3]
            rows = [row.split(',') for row in da.split('","')]
            df = pd.DataFrame(rows)
            df = df[[1, 3, 4, 5, 6]]
            for col in [5, 6]:
                df[col] = df[col].astype(float)
            # Convert shares to 10k units and the ratio to percent.
            df[5] = df[5]/10000
            df[6] = df[6]*100
            df[5] = df[5].map(ct.FORMAT)
            df[6] = df[6].map(ct.FORMAT)
            df.columns = rv.XSG_COLS
            return df
    raise IOError(ct.NETWORK_URL_ERROR_MSG)
def fund_holdings(year, quarter,
                  retry_count=3, pause=0.001):
    """
    Fetch fund shareholding data.

    Parameters
    --------
    year : int
        year, e.g. 2014
    quarter : int
        quarter, one of 1, 2, 3, 4 only
    retry_count : int, default 3
        number of retries when a network problem occurs
    pause : int, default 0
        seconds to pause between repeated requests, to avoid
        hitting the server too fast

    Return
    ------
    DataFrame
        code: stock code
        name: stock name
        date: report date
        nums: number of funds holding the stock
        nlast: change versus the previous period
        count: shares held by funds (10k shares)
        clast: change versus the previous period
        amount: market value of fund holdings
        ratio: percentage of float shares
    """
    # QUARTS_DIC maps a quarter to (start, end) date templates.
    start,end = rv.QUARTS_DIC[str(quarter)]
    if quarter == 1:
        # Q1 reports span the year boundary: the start template takes
        # the previous year.
        start = start % str(year-1)
        end = end%year
    else:
        start, end = start%year, end%year
    ct._write_head()
    # Page 0 also yields the total page count; then fetch the rest.
    df, pages = _holding_cotent(start, end, 0, retry_count, pause)
    for idx in range(1, pages):
        df = df.append(_holding_cotent(start, end, idx, retry_count, pause),
                       ignore_index=True)
    return df
def _holding_cotent(start, end, pageNo, retry_count, pause):
    # Fetch one page of fund-holdings JSON from 163.com; page 0 also
    # returns the total page count.
    for _ in range(retry_count):
        time.sleep(pause)
        if pageNo>0:
            ct._write_console()
        try:
            request = Request(rv.FUND_HOLDS_URL%(ct.P_TYPE['http'], ct.DOMAINS['163'],
                              ct.PAGES['163fh'], ct.PAGES['163fh'],
                              pageNo, start, end, _random(5)))
            lines = urlopen(request, timeout = 10).read()
            lines = lines.decode('utf-8') if ct.PY3 else lines
            # Replace placeholder dashes with 0 so numeric casts succeed.
            lines = lines.replace('--', '0')
            lines = json.loads(lines)
            data = lines['list']
            df = pd.DataFrame(data)
            # Drop raw columns that are unused downstream.
            df = df.drop(['CODE', 'ESYMBOL', 'EXCHANGE', 'NAME', 'RN', 'SHANGQIGUSHU',
                          'SHANGQISHIZHI', 'SHANGQISHULIANG'], axis=1)
            for col in ['GUSHU', 'GUSHUBIJIAO', 'SHIZHI', 'SCSTC27']:
                df[col] = df[col].astype(float)
            # Unit conversions: ratio to percent; shares/value to 10k units.
            df['SCSTC27'] = df['SCSTC27']*100
            df['GUSHU'] = df['GUSHU']/10000
            df['GUSHUBIJIAO'] = df['GUSHUBIJIAO']/10000
            df['SHIZHI'] = df['SHIZHI']/10000
            df['GUSHU'] = df['GUSHU'].map(ct.FORMAT)
            df['GUSHUBIJIAO'] = df['GUSHUBIJIAO'].map(ct.FORMAT)
            df['SHIZHI'] = df['SHIZHI'].map(ct.FORMAT)
            df['SCSTC27'] = df['SCSTC27'].map(ct.FORMAT)
            df.columns = rv.FUND_HOLDS_COLS
            # Reorder to the public column layout.
            df = df[['code', 'name', 'date', 'nums', 'nlast', 'count',
                     'clast', 'amount', 'ratio']]
        except Exception as e:
            print(e)
        else:
            if pageNo == 0:
                return df, int(lines['pagecount'])
            else:
                return df
    raise IOError(ct.NETWORK_URL_ERROR_MSG)
def new_stocks(retry_count=3, pause=0.001):
    """
    Fetch IPO / newly listed stock data.

    Parameters
    --------
    retry_count : int, default 3
        number of retries when a network problem occurs
    pause : int, default 0
        seconds to pause between repeated requests, to avoid
        hitting the server too fast

    Return
    ------
    DataFrame
        code: stock code
        name: stock name
        ipo_date: online offering date
        issue_date: listing date
        amount: shares issued (10k shares)
        markets: shares offered online (10k shares)
        price: issue price (CNY)
        pe: issue P/E ratio
        limit: individual subscription cap (10k shares)
        funds: funds raised (100M CNY)
        ballot: online lottery winning rate (%)
    """
    data = pd.DataFrame()
    ct._write_head()
    # Start scraping from page 1; _newstocks recurses through the pages.
    df = _newstocks(data, 1, retry_count,
                    pause)
    return df
def _newstocks(data, pageNo, retry_count, pause):
    # Recursively scrape the Sina new-stock table, page by page, while a
    # "next page" pager link is present.
    for _ in range(retry_count):
        time.sleep(pause)
        ct._write_console()
        try:
            html = lxml.html.parse(rv.NEW_STOCKS_URL%(ct.P_TYPE['http'],ct.DOMAINS['vsf'],
                                   ct.PAGES['newstock'], pageNo))
            res = html.xpath('//table[@id=\"NewStockTable\"]/tr')
            if ct.PY3:
                sarr = [etree.tostring(node).decode('utf-8') for node in res]
            else:
                sarr = [etree.tostring(node) for node in res]
            sarr = ''.join(sarr)
            # Strip the red-asterisk markup before parsing the table.
            sarr = sarr.replace('<font color="red">*</font>', '')
            sarr = '<table>%s</table>'%sarr
            df = pd.read_html(StringIO(sarr), skiprows=[0, 1])[0]
            df = df.drop([df.columns[idx] for idx in [1, 12, 13, 14]], axis=1)
            df.columns = rv.NEW_STOCKS_COLS
            # Normalize stock codes to 6-digit strings.
            df['code'] = df['code'].map(lambda x : str(x).zfill(6))
            res = html.xpath('//table[@class=\"table2\"]/tr[1]/td[1]/a/text()')
            # '下一页' is the "next page" pager-link text.
            tag = '下一页' if ct.PY3 else unicode('下一页', 'utf-8')
            hasNext = True if tag in res else False
            data = data.append(df, ignore_index=True)
            pageNo += 1
            if hasNext:
                data = _newstocks(data, pageNo, retry_count, pause)
        except Exception as ex:
            print(ex)
        else:
            return data
def sh_margins(start=None, end=None, retry_count=3, pause=0.001):
    """
    Fetch the Shanghai exchange margin-trading summary list.

    Parameters
    --------
    start : string
        start date, format YYYY-MM-DD; defaults to this day last year
    end : string
        end date, format YYYY-MM-DD; defaults to today
    retry_count : int, default 3
        number of retries when a network problem occurs
    pause : int, default 0
        seconds to pause between repeated requests, to avoid
        hitting the server too fast

    Return
    ------
    DataFrame
        opDate: margin trading date
        rzye: financing balance for the day (CNY)
        rzmre: financing purchase amount for the day (CNY)
        rqyl: securities-lending volume for the day
        rqylje: securities-lending balance for the day (CNY)
        rqmcl: securities sold short for the day
        rzrqjyzl: total margin balance for the day (CNY)
    """
    start = du.today_last_year() if start is None else start
    end = du.today() if end is None else end
    if du.diff_day(start, end) < 0:
        # start date after end date: nothing to fetch
        return None
    # The SSE query endpoint expects YYYYMMDD dates.
    start, end = start.replace('-', ''), end.replace('-', '')
    data = pd.DataFrame()
    ct._write_head()
    df = _sh_hz(data, start=start, end=end,
                retry_count=retry_count,
                pause=pause)
    return df
def _sh_hz(data, start=None, end=None,
           pageNo='', beginPage='',
           endPage='',
           retry_count=3, pause=0.001):
    # Recursively fetch the SSE margin summary, five pages per request.
    # pageNo == '' marks the first call; afterwards it advances by 5.
    for _ in range(retry_count):
        time.sleep(pause)
        ct._write_console()
        try:
            tail = rv.MAR_SH_HZ_TAIL_URL%(pageNo,
                                          beginPage, endPage)
            if pageNo == '':
                pageNo = 6
                tail = ''
            else:
                pageNo += 5
            beginPage = pageNo
            endPage = pageNo + 4
            url = rv.MAR_SH_HZ_URL%(ct.P_TYPE['http'], ct.DOMAINS['sseq'],
                                    ct.PAGES['qmd'], _random(5),
                                    start, end, tail,
                                    _random())
            ref = rv.MAR_SH_HZ_REF_URL%(ct.P_TYPE['http'], ct.DOMAINS['sse'])
            clt = Client(url, ref=ref, cookie=rv.MAR_SH_COOKIESTR)
            lines = clt.gvalue()
            lines = lines.decode('utf-8') if ct.PY3 else lines
            # Strip the JSONP wrapper before parsing the payload.
            lines = lines[19:-1]
            lines = json.loads(lines)
            pagecount = int(lines['pageHelp'].get('pageCount'))
            # Number of 5-page batches needed to cover all pages.
            datapage = int(pagecount/5+1 if pagecount%5>0 else pagecount/5)
            df = pd.DataFrame(lines['result'], columns=rv.MAR_SH_HZ_COLS)
            # Reformat opDate from YYYYMMDD to YYYY-MM-DD.
            df['opDate'] = df['opDate'].map(lambda x: '%s-%s-%s'%(x[0:4], x[4:6], x[6:8]))
            data = data.append(df, ignore_index=True)
            if beginPage < datapage*5:
                # More batches remain: recurse for the next five pages.
                data = _sh_hz(data, start=start, end=end, pageNo=pageNo,
                              beginPage=beginPage, endPage=endPage,
                              retry_count=retry_count, pause=pause)
        except Exception as e:
            print(e)
        else:
            return data
    raise IOError(ct.NETWORK_URL_ERROR_MSG)
def sh_margin_details(date='', symbol='',
                      start='', end='',
                      retry_count=3, pause=0.001):
    """
    Fetch the Shanghai exchange margin-trading detail list.

    Parameters
    --------
    date : string
        detail-data date, format YYYY-MM-DD, default ''
    symbol : string
        6-digit security code, e.g. 600848, default ''
    start : string
        start date, format YYYY-MM-DD, default ''
    end : string
        end date, format YYYY-MM-DD, default ''
    retry_count : int, default 3
        number of retries when a network problem occurs
    pause : int, default 0
        seconds to pause between repeated requests, to avoid
        hitting the server too fast

    Return
    ------
    DataFrame
        opDate: margin trading date
        stockCode: security code
        securityAbbr: security short name
        rzye: financing balance for the day (CNY)
        rzmre: financing purchase amount for the day (CNY)
        rzche: financing repayment amount for the day (CNY)
        rqyl: securities-lending volume for the day
        rqmcl: securities sold short for the day
        rqchl: securities-lending repayment volume for the day
    """
    # The SSE endpoint expects YYYYMMDD dates; '' means "not filtered".
    date = date if date == '' else date.replace('-', '')
    start = start if start == '' else start.replace('-', '')
    end = end if end == '' else end.replace('-', '')
    # A full date range takes precedence over a single date.
    if (start != '') & (end != ''):
        date = ''
    data = pd.DataFrame()
    ct._write_head()
    df = _sh_mx(data, date=date, start=start,
                end=end, symbol=symbol,
                retry_count=retry_count,
                pause=pause)
    return df
def _sh_mx(data, date='', start='', end='',
           symbol='',
           pageNo='', beginPage='',
           endPage='',
           retry_count=3, pause=0.001):
    """
    Recursively download pages of SSE margin-trading details into *data*.

    The first call is made with ``pageNo=''``; each page of results is
    appended to *data* and the function recurses until the last page is
    consumed.  Raises IOError after *retry_count* failed attempts.
    """
    for _ in range(retry_count):
        time.sleep(pause)
        ct._write_console()
        try:
            tail = '&pageHelp.pageNo=%s&pageHelp.beginPage=%s&pageHelp.endPage=%s'%(pageNo,
                                                                                    beginPage, endPage)
            if pageNo == '':
                # First request: no paging parameters at all.
                pageNo = 6
                tail = ''
            else:
                # Pages are fetched five at a time.
                pageNo += 5
                beginPage = pageNo
                endPage = pageNo + 4
            ref = rv.MAR_SH_HZ_REF_URL%(ct.P_TYPE['http'], ct.DOMAINS['sse'])
            clt = Client(rv.MAR_SH_MX_URL%(ct.P_TYPE['http'], ct.DOMAINS['sseq'],
                                           ct.PAGES['qmd'], _random(5), date,
                                           symbol, start, end, tail,
                                           _random()), ref=ref, cookie=rv.MAR_SH_COOKIESTR)
            lines = clt.gvalue()
            lines = lines.decode('utf-8') if ct.PY3 else lines
            lines = lines[19:-1]  # strip the JSONP wrapper around the payload
            lines = json.loads(lines)
            pagecount = int(lines['pageHelp'].get('pageCount'))
            datapage = int(pagecount/5+1 if pagecount%5>0 else pagecount/5)
            if pagecount == 0:
                return data
            if pageNo == 6:
                ct._write_tips(lines['pageHelp'].get('total'))
            df = pd.DataFrame(lines['result'], columns=rv.MAR_SH_MX_COLS)
            df['opDate'] = df['opDate'].map(lambda x: '%s-%s-%s'%(x[0:4], x[4:6], x[6:8]))
            data = data.append(df, ignore_index=True)
            if beginPage < datapage*5:
                # BUG FIX: forward date/symbol to the recursive call.
                # Previously they were dropped, so pages after the first
                # were fetched without the caller's filters applied.
                data = _sh_mx(data, date=date, start=start, end=end,
                              symbol=symbol, pageNo=pageNo,
                              beginPage=beginPage, endPage=endPage,
                              retry_count=retry_count, pause=pause)
        except Exception as e:
            print(e)
        else:
            return data
    raise IOError(ct.NETWORK_URL_ERROR_MSG)
def sz_margins(start=None, end=None, retry_count=3, pause=0.001):
    """
    Fetch the Shenzhen (SZSE) margin-trading summary list.

    Parameters
    ----------
    start : string
        Start date, format YYYY-MM-DD; defaults to one week before today.
    end : string
        End date, format YYYY-MM-DD; defaults to today.
    retry_count : int, default 3
        Number of retries on network problems.
    pause : int, default 0
        Seconds to sleep between repeated requests, to avoid hitting the
        server too quickly.

    Return
    ------
    DataFrame
        opDate: margin-trading date (index)
        rzmre: financing buy amount (CNY)
        rzye: financing balance (CNY)
        rqmcl: securities-lending sell volume
        rqyl: securities-lending balance (shares)
        rqye: securities-lending balance (CNY)
        rzrqye: total margin balance (CNY)
    """
    data = pd.DataFrame()
    if start is None and end is None:
        end = du.today()
        start = du.day_last_week()
    if start is None or end is None:
        ct._write_msg(rv.MAR_SZ_HZ_MSG2)
        return None
    try:
        # Business days only; the endpoint serves one page per trading day.
        date_range = pd.date_range(start=start, end=end, freq='B')
        if len(date_range) > 261:
            ct._write_msg(rv.MAR_SZ_HZ_MSG)
        else:
            ct._write_head()
            for date in date_range:
                data = data.append(_sz_hz(str(date.date()), retry_count, pause))
    except Exception:
        # FIX: was a bare `except:`, which also swallowed SystemExit and
        # KeyboardInterrupt. Only data/parse errors should be reported here.
        ct._write_msg(ct.DATA_INPUT_ERROR_MSG)
    else:
        return data
def _sz_hz(date='', retry_count=3, pause=0.001):
    """Download one day's SZSE margin summary table, retrying on failure."""
    attempt = 0
    while attempt < retry_count:
        attempt += 1
        time.sleep(pause)
        ct._write_console()
        try:
            url = rv.MAR_SZ_HZ_URL % (ct.P_TYPE['http'], ct.DOMAINS['szse'],
                                      ct.PAGES['szsefc'], date)
            page = urlopen(Request(url), timeout=10).read()
            if len(page) <= 200:
                # A near-empty response means no data exists for this date.
                return pd.DataFrame()
            frame = pd.read_html(page, skiprows=[0])[0]
            frame.columns = rv.MAR_SZ_HZ_COLS
            frame['opDate'] = date
        except Exception as e:
            print(e)
        else:
            return frame
    raise IOError(ct.NETWORK_URL_ERROR_MSG)
def sz_margin_details(date='', retry_count=3, pause=0.001):
    """
    Fetch the Shenzhen (SZSE) margin-trading detail list for one day.

    Parameters
    ----------
    date : string
        Detail-data date, format YYYY-MM-DD; defaults to ''.
    retry_count : int, default 3
        Number of retries on network problems.
    pause : int, default 0
        Seconds to sleep between repeated requests, to avoid hitting the
        server too quickly.

    Return
    ------
    DataFrame
        opDate: margin-trading date
        stockCode: security code
        securityAbbr: security short name
        rzmre: financing buy amount (CNY)
        rzye: financing balance (CNY)
        rqmcl: securities-lending sell volume
        rqyl: securities-lending balance (shares)
        rqye: securities-lending balance (CNY)
        rzrqye: total margin balance (CNY)
    """
    for _attempt in range(retry_count):
        time.sleep(pause)
        try:
            url = rv.MAR_SZ_MX_URL % (ct.P_TYPE['http'], ct.DOMAINS['szse'],
                                      ct.PAGES['szsefc'], date)
            page = urlopen(Request(url), timeout=10).read()
            if len(page) <= 200:
                # A near-empty response means no data exists for this date.
                return pd.DataFrame()
            frame = pd.read_html(page, skiprows=[0])[0]
            frame.columns = rv.MAR_SZ_MX_COLS
            # Restore the leading zeros that the numeric HTML parse dropped.
            frame['stockCode'] = frame['stockCode'].map(lambda x: str(x).zfill(6))
            frame['opDate'] = date
        except Exception as e:
            print(e)
        else:
            return frame
    raise IOError(ct.NETWORK_URL_ERROR_MSG)
def _random(n=13):
from random import randint
start = 10**(n-1)
end = (10**n)-1
return str(randint(start, end))
| bsd-3-clause |
robbwagoner/airflow | airflow/hooks/base_hook.py | 5 | 1379 | import logging
import random
from airflow import settings
from airflow.models import Connection
from airflow.utils import AirflowException
class BaseHook(object):
    """
    Abstract base class for hooks, hooks are meant as an interface to
    interact with external systems. MySqlHook, HiveHook, PigHook return
    object that can handle the connection and interaction to specific
    instances of these systems, and expose consistent methods to interact
    with them.
    """
    def __init__(self, source):
        pass

    def get_connections(self, conn_id):
        """Return all Connection rows whose conn_id matches *conn_id*.

        Raises AirflowException when no matching connection is defined.
        """
        session = settings.Session()
        db = (
            session.query(Connection)
            .filter(Connection.conn_id == conn_id)
            .all()
        )
        if not db:
            raise AirflowException(
                "The conn_id `{0}` isn't defined".format(conn_id))
        # Detach the objects so they remain usable after the session closes.
        session.expunge_all()
        session.close()
        return db

    def get_connection(self, conn_id):
        """Return one connection for *conn_id*, chosen at random when
        several are defined (cheap client-side load balancing)."""
        conn = random.choice(self.get_connections(conn_id))
        if conn.host:
            logging.info("Using connection to: " + conn.host)
        return conn

    def get_conn(self):
        # BUG FIX (applies to the four stubs below): the original raised
        # `NotImplemented()`. `NotImplemented` is the rich-comparison
        # sentinel, not an exception class, so calling it raised a
        # confusing TypeError instead of NotImplementedError.
        raise NotImplementedError()

    def get_records(self, sql):
        raise NotImplementedError()

    def get_pandas_df(self, sql):
        raise NotImplementedError()

    def run(self, sql):
        raise NotImplementedError()
| apache-2.0 |
JT5D/scikit-learn | sklearn/tree/tree.py | 2 | 31720 | """
This module gathers tree-based methods, including decision, regression and
randomized trees. Single and multi-output problems are both handled.
"""
# Authors: Gilles Louppe <g.louppe@gmail.com>
# Peter Prettenhofer <peter.prettenhofer@gmail.com>
# Brian Holt <bdholt1@gmail.com>
# Noel Dawe <noel@dawe.me>
# Satrajit Gosh <satrajit.ghosh@gmail.com>
# Joly Arnaud <arnaud.v.joly@gmail.com>
# Licence: BSD 3 clause
from __future__ import division
import numbers
import numpy as np
from abc import ABCMeta, abstractmethod
from warnings import warn
from ..base import BaseEstimator, ClassifierMixin, RegressorMixin
from ..externals import six
from ..externals.six.moves import xrange
from ..feature_selection.from_model import _LearntSelectorMixin
from ..utils import array2d, check_random_state
from ..utils.fixes import unique
from ..utils.validation import check_arrays
from ._tree import Criterion
from ._tree import Splitter
from ._tree import DepthFirstTreeBuilder, BestFirstTreeBuilder
from ._tree import Tree
from . import _tree
# Public API of this module.
__all__ = ["DecisionTreeClassifier",
           "DecisionTreeRegressor",
           "ExtraTreeClassifier",
           "ExtraTreeRegressor"]

# =============================================================================
# Types and constants
# =============================================================================

# dtypes expected by the compiled `_tree` code: DTYPE for the input matrix X,
# DOUBLE for targets and sample weights.
DTYPE = _tree.DTYPE
DOUBLE = _tree.DOUBLE

# Split-quality criteria, keyed by the strings accepted by the estimators'
# `criterion` constructor argument.
CRITERIA_CLF = {"gini": _tree.Gini, "entropy": _tree.Entropy}
CRITERIA_REG = {"mse": _tree.MSE, "friedman_mse": _tree.FriedmanMSE}

# Node-splitting strategies, keyed by the `splitter` constructor argument.
SPLITTERS = {"best": _tree.BestSplitter,
             "presort-best": _tree.PresortBestSplitter,
             "random": _tree.RandomSplitter}

# =============================================================================
# Base decision tree
# =============================================================================
class BaseDecisionTree(six.with_metaclass(ABCMeta, BaseEstimator,
                                          _LearntSelectorMixin)):
    """Base class for decision trees.

    Warning: This class should not be used directly.
    Use derived classes instead.
    """

    @abstractmethod
    def __init__(self,
                 criterion,
                 splitter,
                 max_depth,
                 min_samples_split,
                 min_samples_leaf,
                 max_features,
                 max_leaf_nodes,
                 random_state):
        self.criterion = criterion
        self.splitter = splitter
        self.max_depth = max_depth
        self.min_samples_split = min_samples_split
        self.min_samples_leaf = min_samples_leaf
        self.max_features = max_features
        self.random_state = random_state
        self.max_leaf_nodes = max_leaf_nodes

        # Attributes below are populated by fit(); None marks "not fitted".
        self.n_features_ = None
        self.n_outputs_ = None
        self.classes_ = None
        self.n_classes_ = None
        self.tree_ = None
        self.max_features_ = None

    def fit(self, X, y, sample_mask=None, X_argsorted=None, check_input=True,
            sample_weight=None):
        """Build a decision tree from the training set (X, y).

        Parameters
        ----------
        X : array-like, shape = [n_samples, n_features]
            The training input samples. Use ``dtype=np.float32`` for maximum
            efficiency.

        y : array-like, shape = [n_samples] or [n_samples, n_outputs]
            The target values (integers that correspond to classes in
            classification, real numbers in regression).
            Use ``dtype=np.float64`` and ``order='C'`` for maximum
            efficiency.

        sample_mask : deprecated
            Ignored; kept for backward compatibility and will be removed.

        X_argsorted : deprecated
            Ignored; kept for backward compatibility and will be removed.

        sample_weight : array-like, shape = [n_samples] or None
            Sample weights. If None, then samples are equally weighted. Splits
            that would create child nodes with net zero or negative weight are
            ignored while searching for a split in each node. In the case of
            classification, splits are also ignored if they would result in any
            single class carrying a negative weight in either child node.

        check_input : boolean, (default=True)
            Allow to bypass several input checking.
            Don't use this parameter unless you know what you do.

        Returns
        -------
        self : object
            Returns self.
        """
        random_state = check_random_state(self.random_state)

        # Deprecations
        if sample_mask is not None:
            warn("The sample_mask parameter is deprecated as of version 0.14 "
                 "and will be removed in 0.16.", DeprecationWarning)

        if X_argsorted is not None:
            warn("The X_argsorted parameter is deprecated as of version 0.14 "
                 "and will be removed in 0.16.", DeprecationWarning)

        # Convert data
        if check_input:
            X, = check_arrays(X, dtype=DTYPE, sparse_format="dense")

        # Determine output settings
        n_samples, self.n_features_ = X.shape
        is_classification = isinstance(self, ClassifierMixin)

        y = np.atleast_1d(y)

        if y.ndim == 1:
            # reshape is necessary to preserve the data contiguity against vs
            # [:, np.newaxis] that does not.
            y = np.reshape(y, (-1, 1))

        self.n_outputs_ = y.shape[1]

        if is_classification:
            y = np.copy(y)

            self.classes_ = []
            self.n_classes_ = []

            # Re-encode the class labels of each output as indices 0..K-1,
            # remembering the original labels in classes_.
            for k in xrange(self.n_outputs_):
                classes_k, y[:, k] = unique(y[:, k], return_inverse=True)
                self.classes_.append(classes_k)
                self.n_classes_.append(classes_k.shape[0])

        else:
            self.classes_ = [None] * self.n_outputs_
            self.n_classes_ = [1] * self.n_outputs_

        self.n_classes_ = np.array(self.n_classes_, dtype=np.intp)

        if getattr(y, "dtype", None) != DOUBLE or not y.flags.contiguous:
            y = np.ascontiguousarray(y, dtype=DOUBLE)

        # Check parameters
        # Sentinels understood by the Cython tree code: a huge int means
        # "unbounded depth", -1 means "unbounded number of leaves".
        max_depth = (2 ** 31) - 1 if self.max_depth is None else self.max_depth
        max_leaf_nodes = -1 if self.max_leaf_nodes is None else self.max_leaf_nodes

        if isinstance(self.max_features, six.string_types):
            if self.max_features == "auto":
                if is_classification:
                    max_features = max(1, int(np.sqrt(self.n_features_)))
                else:
                    max_features = self.n_features_
            elif self.max_features == "sqrt":
                max_features = max(1, int(np.sqrt(self.n_features_)))
            elif self.max_features == "log2":
                max_features = max(1, int(np.log2(self.n_features_)))
            else:
                raise ValueError(
                    'Invalid value for max_features. Allowed string '
                    'values are "auto", "sqrt" or "log2".')
        elif self.max_features is None:
            max_features = self.n_features_
        elif isinstance(self.max_features, (numbers.Integral, np.integer)):
            max_features = self.max_features
        else:  # float
            max_features = int(self.max_features * self.n_features_)

        self.max_features_ = max_features

        if len(y) != n_samples:
            raise ValueError("Number of labels=%d does not match "
                             "number of samples=%d" % (len(y), n_samples))
        if self.min_samples_split <= 0:
            raise ValueError("min_samples_split must be greater than zero.")
        if self.min_samples_leaf <= 0:
            raise ValueError("min_samples_leaf must be greater than zero.")
        if max_depth <= 0:
            raise ValueError("max_depth must be greater than zero. ")
        if not (0 < max_features <= self.n_features_):
            raise ValueError("max_features must be in (0, n_features]")
        if not isinstance(max_leaf_nodes, (numbers.Integral, np.integer)):
            raise ValueError("max_leaf_nodes must be integral number but was "
                             "%r" % max_leaf_nodes)
        if -1 < max_leaf_nodes < 2:
            raise ValueError(("max_leaf_nodes {0} must be either smaller than 0 or "
                              "larger than 1").format(max_leaf_nodes))

        if sample_weight is not None:
            if (getattr(sample_weight, "dtype", None) != DOUBLE or
                    not sample_weight.flags.contiguous):
                sample_weight = np.ascontiguousarray(
                    sample_weight, dtype=DOUBLE)
            if len(sample_weight.shape) > 1:
                raise ValueError("Sample weights array has more "
                                 "than one dimension: %d" %
                                 len(sample_weight.shape))
            if len(sample_weight) != n_samples:
                raise ValueError("Number of weights=%d does not match "
                                 "number of samples=%d" %
                                 (len(sample_weight), n_samples))

        # Set min_samples_split sensibly
        min_samples_split = max(self.min_samples_split,
                                2 * self.min_samples_leaf)

        # Build tree
        criterion = self.criterion
        if not isinstance(criterion, Criterion):
            if is_classification:
                criterion = CRITERIA_CLF[self.criterion](self.n_outputs_,
                                                         self.n_classes_)
            else:
                criterion = CRITERIA_REG[self.criterion](self.n_outputs_)

        splitter = self.splitter
        if not isinstance(self.splitter, Splitter):
            splitter = SPLITTERS[self.splitter](criterion,
                                                self.max_features_,
                                                self.min_samples_leaf,
                                                random_state)

        self.tree_ = Tree(self.n_features_, self.n_classes_,
                          self.n_outputs_, splitter, max_depth,
                          min_samples_split, self.min_samples_leaf,
                          max_leaf_nodes, random_state)

        # Use BestFirst if max_leaf_nodes given; use DepthFirst otherwise
        if max_leaf_nodes < 0:
            builder = DepthFirstTreeBuilder()
        else:
            builder = BestFirstTreeBuilder()

        builder.build(self.tree_, X, y, sample_weight)

        # For single-output problems, expose scalars/arrays instead of lists.
        if self.n_outputs_ == 1:
            self.n_classes_ = self.n_classes_[0]
            self.classes_ = self.classes_[0]

        return self

    def predict(self, X):
        """Predict class or regression value for X.

        For a classification model, the predicted class for each sample in X is
        returned. For a regression model, the predicted value based on X is
        returned.

        Parameters
        ----------
        X : array-like of shape = [n_samples, n_features]
            The input samples.

        Returns
        -------
        y : array of shape = [n_samples] or [n_samples, n_outputs]
            The predicted classes, or the predict values.
        """
        if getattr(X, "dtype", None) != DTYPE or X.ndim != 2:
            X = array2d(X, dtype=DTYPE)

        n_samples, n_features = X.shape

        if self.tree_ is None:
            raise Exception("Tree not initialized. Perform a fit first")

        if self.n_features_ != n_features:
            raise ValueError("Number of features of the model must "
                             " match the input. Model n_features is %s and "
                             " input n_features is %s "
                             % (self.n_features_, n_features))

        # The compiled tree returns class-count/value arrays; classification
        # maps argmax indices back to the original labels below.
        proba = self.tree_.predict(X)

        # Classification
        if isinstance(self, ClassifierMixin):
            if self.n_outputs_ == 1:
                return self.classes_.take(np.argmax(proba, axis=1), axis=0)

            else:
                predictions = np.zeros((n_samples, self.n_outputs_))

                for k in xrange(self.n_outputs_):
                    predictions[:, k] = self.classes_[k].take(
                        np.argmax(proba[:, k], axis=1),
                        axis=0)

                return predictions

        # Regression
        else:
            if self.n_outputs_ == 1:
                return proba[:, 0]

            else:
                return proba[:, :, 0]

    @property
    def feature_importances_(self):
        """Return the feature importances.

        The importance of a feature is computed as the (normalized) total
        reduction of the criterion brought by that feature.
        It is also known as the Gini importance.

        Returns
        -------
        feature_importances_ : array, shape = [n_features]
        """
        if self.tree_ is None:
            raise ValueError("Estimator not fitted, "
                             "call `fit` before `feature_importances_`.")

        return self.tree_.compute_feature_importances()
# =============================================================================
# Public estimators
# =============================================================================
class DecisionTreeClassifier(BaseDecisionTree, ClassifierMixin):
    """A decision tree classifier.

    Parameters
    ----------
    criterion : string, optional (default="gini")
        The function to measure the quality of a split. Supported criteria are
        "gini" for the Gini impurity and "entropy" for the information gain.

    splitter : string, optional (default="best")
        The strategy used to choose the split at each node. Supported
        strategies are "best" to choose the best split and "random" to choose
        the best random split.

    max_features : int, float, string or None, optional (default=None)
        The number of features to consider when looking for the best split:
          - If int, then consider `max_features` features at each split.
          - If float, then `max_features` is a percentage and
            `int(max_features * n_features)` features are considered at each
            split.
          - If "auto", then `max_features=sqrt(n_features)`.
          - If "sqrt", then `max_features=sqrt(n_features)`.
          - If "log2", then `max_features=log2(n_features)`.
          - If None, then `max_features=n_features`.

    max_depth : int or None, optional (default=None)
        The maximum depth of the tree. If None, then nodes are expanded until
        all leaves are pure or until all leaves contain less than
        min_samples_split samples.
        Ignored if ``max_leaf_nodes`` is not None.

    min_samples_split : int, optional (default=2)
        The minimum number of samples required to split an internal node.

    min_samples_leaf : int, optional (default=1)
        The minimum number of samples required to be at a leaf node.

    max_leaf_nodes : int or None, optional (default=None)
        Grow a tree with ``max_leaf_nodes`` in best-first fashion.
        Best nodes are defined as relative reduction in impurity.
        If None then unlimited number of leaf nodes.
        If not None then ``max_depth`` will be ignored.

    random_state : int, RandomState instance or None, optional (default=None)
        If int, random_state is the seed used by the random number generator;
        If RandomState instance, random_state is the random number generator;
        If None, the random number generator is the RandomState instance used
        by `np.random`.

    Attributes
    ----------
    `tree_` : Tree object
        The underlying Tree object.

    `max_features_` : int,
        The infered value of max_features.

    `classes_` : array of shape = [n_classes] or a list of such arrays
        The classes labels (single output problem),
        or a list of arrays of class labels (multi-output problem).

    `n_classes_` : int or list
        The number of classes (for single output problems),
        or a list containing the number of classes for each
        output (for multi-output problems).

    `feature_importances_` : array of shape = [n_features]
        The feature importances. The higher, the more important the
        feature. The importance of a feature is computed as the (normalized)
        total reduction of the criterion brought by that feature. It is also
        known as the Gini importance [4]_.

    See also
    --------
    DecisionTreeRegressor

    References
    ----------

    .. [1] http://en.wikipedia.org/wiki/Decision_tree_learning

    .. [2] L. Breiman, J. Friedman, R. Olshen, and C. Stone, "Classification
           and Regression Trees", Wadsworth, Belmont, CA, 1984.

    .. [3] T. Hastie, R. Tibshirani and J. Friedman. "Elements of Statistical
           Learning", Springer, 2009.

    .. [4] L. Breiman, and A. Cutler, "Random Forests",
           http://www.stat.berkeley.edu/~breiman/RandomForests/cc_home.htm

    Examples
    --------
    >>> from sklearn.datasets import load_iris
    >>> from sklearn.cross_validation import cross_val_score
    >>> from sklearn.tree import DecisionTreeClassifier
    >>> clf = DecisionTreeClassifier(random_state=0)
    >>> iris = load_iris()
    >>> cross_val_score(clf, iris.data, iris.target, cv=10)
    ... # doctest: +SKIP
    ...
    array([ 1.     ,  0.93...,  0.86...,  0.93...,  0.93...,
            0.93...,  0.93...,  1.     ,  0.93...,  1.      ])
    """
    def __init__(self,
                 criterion="gini",
                 splitter="best",
                 max_depth=None,
                 min_samples_split=2,
                 min_samples_leaf=1,
                 max_features=None,
                 random_state=None,
                 min_density=None,
                 compute_importances=None,
                 max_leaf_nodes=None):
        super(DecisionTreeClassifier, self).__init__(criterion=criterion,
                                                     splitter=splitter,
                                                     max_depth=max_depth,
                                                     min_samples_split=min_samples_split,
                                                     min_samples_leaf=min_samples_leaf,
                                                     max_features=max_features,
                                                     max_leaf_nodes=max_leaf_nodes,
                                                     random_state=random_state)

        # Deprecated arguments: accepted for backward compatibility but
        # only trigger a warning; they have no effect on fitting.
        if min_density is not None:
            warn("The min_density parameter is deprecated as of version 0.14 "
                 "and will be removed in 0.16.", DeprecationWarning)

        if compute_importances is not None:
            warn("Setting compute_importances is no longer required as "
                 "version 0.14. Variable importances are now computed on the "
                 "fly when accessing the feature_importances_ attribute. "
                 "This parameter will be removed in 0.16.",
                 DeprecationWarning)

    def predict_proba(self, X):
        """Predict class probabilities of the input samples X.

        Parameters
        ----------
        X : array-like of shape = [n_samples, n_features]
            The input samples.

        Returns
        -------
        p : array of shape = [n_samples, n_classes], or a list of n_outputs
            such arrays if n_outputs > 1.
            The class probabilities of the input samples. Classes are ordered
            by arithmetical order.
        """
        if getattr(X, "dtype", None) != DTYPE or X.ndim != 2:
            X = array2d(X, dtype=DTYPE)

        n_samples, n_features = X.shape

        if self.tree_ is None:
            raise Exception("Tree not initialized. Perform a fit first.")

        if self.n_features_ != n_features:
            raise ValueError("Number of features of the model must "
                             " match the input. Model n_features is %s and "
                             " input n_features is %s "
                             % (self.n_features_, n_features))

        proba = self.tree_.predict(X)

        if self.n_outputs_ == 1:
            proba = proba[:, :self.n_classes_]
            # Normalize per-leaf class counts into probabilities; guard
            # against division by zero for all-zero rows.
            normalizer = proba.sum(axis=1)[:, np.newaxis]
            normalizer[normalizer == 0.0] = 1.0
            proba /= normalizer

            return proba

        else:
            all_proba = []

            for k in xrange(self.n_outputs_):
                proba_k = proba[:, k, :self.n_classes_[k]]
                normalizer = proba_k.sum(axis=1)[:, np.newaxis]
                normalizer[normalizer == 0.0] = 1.0
                proba_k /= normalizer
                all_proba.append(proba_k)

            return all_proba

    def predict_log_proba(self, X):
        """Predict class log-probabilities of the input samples X.

        Parameters
        ----------
        X : array-like of shape = [n_samples, n_features]
            The input samples.

        Returns
        -------
        p : array of shape = [n_samples, n_classes], or a list of n_outputs
            such arrays if n_outputs > 1.
            The class log-probabilities of the input samples. Classes are
            ordered by arithmetical order.
        """
        proba = self.predict_proba(X)

        if self.n_outputs_ == 1:
            return np.log(proba)

        else:
            for k in xrange(self.n_outputs_):
                proba[k] = np.log(proba[k])

            return proba
class DecisionTreeRegressor(BaseDecisionTree, RegressorMixin):
    """A decision tree regressor.

    Parameters
    ----------
    criterion : string, optional (default="mse")
        The function to measure the quality of a split. The only supported
        criterion is "mse" for the mean squared error.

    splitter : string, optional (default="best")
        The strategy used to choose the split at each node. Supported
        strategies are "best" to choose the best split and "random" to choose
        the best random split.

    max_features : int, float, string or None, optional (default=None)
        The number of features to consider when looking for the best split:
          - If int, then consider `max_features` features at each split.
          - If float, then `max_features` is a percentage and
            `int(max_features * n_features)` features are considered at each
            split.
          - If "auto", then `max_features=n_features`.
          - If "sqrt", then `max_features=sqrt(n_features)`.
          - If "log2", then `max_features=log2(n_features)`.
          - If None, then `max_features=n_features`.

    max_depth : int or None, optional (default=None)
        The maximum depth of the tree. If None, then nodes are expanded until
        all leaves are pure or until all leaves contain less than
        min_samples_split samples.
        Ignored if ``max_leaf_nodes`` is not None.

    min_samples_split : int, optional (default=2)
        The minimum number of samples required to split an internal node.

    min_samples_leaf : int, optional (default=1)
        The minimum number of samples required to be at a leaf node.

    max_leaf_nodes : int or None, optional (default=None)
        Grow a tree with ``max_leaf_nodes`` in best-first fashion.
        Best nodes are defined as relative reduction in impurity.
        If None then unlimited number of leaf nodes.
        If not None then ``max_depth`` will be ignored.

    random_state : int, RandomState instance or None, optional (default=None)
        If int, random_state is the seed used by the random number generator;
        If RandomState instance, random_state is the random number generator;
        If None, the random number generator is the RandomState instance used
        by `np.random`.

    Attributes
    ----------
    `tree_` : Tree object
        The underlying Tree object.

    `max_features_` : int,
        The infered value of max_features.

    `feature_importances_` : array of shape = [n_features]
        The feature importances.
        The higher, the more important the feature.
        The importance of a feature is computed as the
        (normalized) total reduction of the criterion brought
        by that feature. It is also known as the Gini importance [4]_.

    See also
    --------
    DecisionTreeClassifier

    References
    ----------

    .. [1] http://en.wikipedia.org/wiki/Decision_tree_learning

    .. [2] L. Breiman, J. Friedman, R. Olshen, and C. Stone, "Classification
           and Regression Trees", Wadsworth, Belmont, CA, 1984.

    .. [3] T. Hastie, R. Tibshirani and J. Friedman. "Elements of Statistical
           Learning", Springer, 2009.

    .. [4] L. Breiman, and A. Cutler, "Random Forests",
           http://www.stat.berkeley.edu/~breiman/RandomForests/cc_home.htm

    Examples
    --------
    >>> from sklearn.datasets import load_boston
    >>> from sklearn.cross_validation import cross_val_score
    >>> from sklearn.tree import DecisionTreeRegressor
    >>> boston = load_boston()
    >>> regressor = DecisionTreeRegressor(random_state=0)
    >>> cross_val_score(regressor, boston.data, boston.target, cv=10)
    ... # doctest: +SKIP
    ...
    array([ 0.61...,  0.57..., -0.34...,  0.41...,  0.75...,
            0.07...,  0.29...,  0.33..., -1.42..., -1.77...])
    """
    def __init__(self,
                 criterion="mse",
                 splitter="best",
                 max_depth=None,
                 min_samples_split=2,
                 min_samples_leaf=1,
                 max_features=None,
                 random_state=None,
                 min_density=None,
                 compute_importances=None,
                 max_leaf_nodes=None):
        super(DecisionTreeRegressor, self).__init__(criterion=criterion,
                                                    splitter=splitter,
                                                    max_depth=max_depth,
                                                    min_samples_split=min_samples_split,
                                                    min_samples_leaf=min_samples_leaf,
                                                    max_features=max_features,
                                                    max_leaf_nodes=max_leaf_nodes,
                                                    random_state=random_state)

        # Deprecated arguments: accepted for backward compatibility but
        # only trigger a warning; they have no effect on fitting.
        if min_density is not None:
            warn("The min_density parameter is deprecated as of version 0.14 "
                 "and will be removed in 0.16.", DeprecationWarning)

        if compute_importances is not None:
            warn("Setting compute_importances is no longer required as "
                 "version 0.14. Variable importances are now computed on the "
                 "fly when accessing the feature_importances_ attribute. "
                 "This parameter will be removed in 0.16.",
                 DeprecationWarning)
class ExtraTreeClassifier(DecisionTreeClassifier):
    """An extremely randomized tree classifier.

    Extra-trees differ from classic decision trees in the way they are built.
    When looking for the best split to separate the samples of a node into two
    groups, random splits are drawn for each of the `max_features` randomly
    selected features and the best split among those is chosen. When
    `max_features` is set 1, this amounts to building a totally random
    decision tree.

    Warning: Extra-trees should only be used within ensemble methods.

    See also
    --------
    ExtraTreeRegressor, ExtraTreesClassifier, ExtraTreesRegressor

    References
    ----------

    .. [1] P. Geurts, D. Ernst., and L. Wehenkel, "Extremely randomized trees",
           Machine Learning, 63(1), 3-42, 2006.
    """
    def __init__(self,
                 criterion="gini",
                 splitter="random",
                 max_depth=None,
                 min_samples_split=2,
                 min_samples_leaf=1,
                 max_features="auto",
                 random_state=None,
                 min_density=None,
                 compute_importances=None,
                 max_leaf_nodes=None):
        # Identical to DecisionTreeClassifier except for the defaults:
        # splitter="random" and max_features="auto".
        super(ExtraTreeClassifier, self).__init__(criterion=criterion,
                                                  splitter=splitter,
                                                  max_depth=max_depth,
                                                  min_samples_split=min_samples_split,
                                                  min_samples_leaf=min_samples_leaf,
                                                  max_features=max_features,
                                                  max_leaf_nodes=max_leaf_nodes,
                                                  random_state=random_state)

        # Deprecated arguments: warning only, no effect on fitting.
        if min_density is not None:
            warn("The min_density parameter is deprecated as of version 0.14 "
                 "and will be removed in 0.16.", DeprecationWarning)

        if compute_importances is not None:
            warn("Setting compute_importances is no longer required as "
                 "version 0.14. Variable importances are now computed on the "
                 "fly when accessing the feature_importances_ attribute. "
                 "This parameter will be removed in 0.16.",
                 DeprecationWarning)
class ExtraTreeRegressor(DecisionTreeRegressor):
    """An extremely randomized tree regressor.

    Extra-trees differ from classic decision trees in the way they are built.
    When looking for the best split to separate the samples of a node into two
    groups, random splits are drawn for each of the `max_features` randomly
    selected features and the best split among those is chosen. When
    `max_features` is set 1, this amounts to building a totally random
    decision tree.

    Warning: Extra-trees should only be used within ensemble methods.

    See also
    --------
    ExtraTreeClassifier, ExtraTreesClassifier, ExtraTreesRegressor

    References
    ----------

    .. [1] P. Geurts, D. Ernst., and L. Wehenkel, "Extremely randomized trees",
           Machine Learning, 63(1), 3-42, 2006.
    """
    def __init__(self,
                 criterion="mse",
                 splitter="random",
                 max_depth=None,
                 min_samples_split=2,
                 min_samples_leaf=1,
                 max_features="auto",
                 random_state=None,
                 min_density=None,
                 compute_importances=None,
                 max_leaf_nodes=None):
        # Identical to DecisionTreeRegressor except for the defaults:
        # splitter="random" and max_features="auto".
        super(ExtraTreeRegressor, self).__init__(criterion=criterion,
                                                 splitter=splitter,
                                                 max_depth=max_depth,
                                                 min_samples_split=min_samples_split,
                                                 min_samples_leaf=min_samples_leaf,
                                                 max_features=max_features,
                                                 max_leaf_nodes=max_leaf_nodes,
                                                 random_state=random_state)

        # Deprecated arguments: warning only, no effect on fitting.
        if min_density is not None:
            warn("The min_density parameter is deprecated as of version 0.14 "
                 "and will be removed in 0.16.", DeprecationWarning)

        if compute_importances is not None:
            warn("Setting compute_importances is no longer required as "
                 "version 0.14. Variable importances are now computed on the "
                 "fly when accessing the feature_importances_ attribute. "
                 "This parameter will be removed in 0.16.",
                 DeprecationWarning)
| bsd-3-clause |
biocore/qiime | qiime/compare_categories.py | 15 | 7131 | #!/usr/bin/env python
from __future__ import division
__author__ = "Jai Ram Rideout"
__copyright__ = "Copyright 2012, The QIIME project"
__credits__ = ["Jai Ram Rideout", "Michael Dwan", "Logan Knecht",
"Damien Coy", "Levi McCracken"]
__license__ = "GPL"
__version__ = "1.9.1-dev"
__maintainer__ = "Jai Ram Rideout"
__email__ = "jai.rideout@gmail.com"
from os.path import join
from types import ListType
import pandas as pd
from skbio.stats.distance import DistanceMatrix
from skbio.stats.distance import anosim, permanova, bioenv
from qiime.parse import parse_mapping_file_to_dict
from qiime.util import get_qiime_temp_dir, MetadataMap, RExecutor
# Names of the statistical methods accepted by compare_categories()'s
# `method` argument.
methods = ['adonis', 'anosim', 'bioenv', 'morans_i', 'mrpp', 'permanova',
           'permdisp', 'dbrda']
def compare_categories(dm_fp, map_fp, method, categories, num_perms, out_dir):
    """Runs the specified statistical method using the category of interest.

    This method does not return anything; all output is written to results
    files in out_dir.

    Arguments:
        dm_fp - filepath to the input distance matrix
        map_fp - filepath to the input metadata mapping file
        method - name of the statistical method to run; must be one of the
            strings in the module-level ``methods`` list. 'anosim',
            'permanova', and 'bioenv' run in Python via scikit-bio; the
            remaining methods shell out to R scripts via RExecutor
        categories - list of categories in the metadata mapping file to
            consider in the statistical test. Multiple categories will only be
            considered if method is 'bioenv', otherwise only the first category
            will be considered
        num_perms - the number of permutations to use when calculating the
            p-value. If method is 'bioenv' or 'morans_i', this parameter will
            be ignored as they are not permutation-based methods
        out_dir - path to the output directory where results files will be
            written. It is assumed that this directory already exists and we
            have write permissions to it
    """
    # Make sure we were passed a list of categories, not a single string.
    if not isinstance(categories, ListType):
        raise TypeError("The supplied categories must be a list of "
                        "strings.")
    # Special case: we do not allow SampleID as it is not a category, neither
    # in data structure representation nor in terms of a statistical test (no
    # groups are formed since all entries are unique IDs).
    if 'SampleID' in categories:
        raise ValueError("Cannot use SampleID as a category because it is a "
                         "unique identifier for each sample, and thus does "
                         "not create groups of samples (nor can it be used as "
                         "a numeric category in Moran's I or BIO-ENV "
                         "analyses). Please choose a different metadata "
                         "column to perform statistical tests on.")
    dm = DistanceMatrix.read(dm_fp)
    if method in ('anosim', 'permanova', 'bioenv'):
        # scikit-bio-backed methods: build a metadata DataFrame and run
        # entirely in Python, writing a single tab-separated results file.
        with open(map_fp, 'U') as map_f:
            md_dict = parse_mapping_file_to_dict(map_f)[0]
        df = pd.DataFrame.from_dict(md_dict, orient='index')
        out_fp = join(out_dir, '%s_results.txt' % method)
        if method in ('anosim', 'permanova'):
            if method == 'anosim':
                method_fn = anosim
            elif method == 'permanova':
                method_fn = permanova
            # Only the first category is used for the grouping-based tests.
            results = method_fn(dm, df, column=categories[0],
                                permutations=num_perms)
        elif method == 'bioenv':
            results = bioenv(dm, df, columns=categories)
        results.to_csv(out_fp, sep='\t')
    else:
        # Remove any samples from the mapping file that aren't in the distance
        # matrix (important for validation checks). Use strict=True so that an
        # error is raised if the distance matrix contains any samples that
        # aren't in the mapping file.
        with open(map_fp, 'U') as map_f:
            md_map = MetadataMap.parseMetadataMap(map_f)
        md_map.filterSamples(dm.ids, strict=True)
        # These methods are run in R. Input validation must be done here before
        # running the R commands.
        if method in ['adonis', 'morans_i', 'mrpp', 'permdisp', 'dbrda']:
            # Check to make sure all categories passed in are in mapping file
            # and are not all the same value.
            for category in categories:
                if not category in md_map.CategoryNames:
                    raise ValueError("Category '%s' not found in mapping file "
                                     "columns." % category)
                if md_map.hasSingleCategoryValue(category):
                    raise ValueError("All values in category '%s' are the "
                                     "same. The statistical method '%s' "
                                     "cannot operate on a category that "
                                     "creates only a single group of samples "
                                     "(e.g. there are no 'between' distances "
                                     "because there is only a single group)."
                                     % (category, method))
            # Build the command arguments string.
            command_args = ['-d %s -m %s -c %s -o %s'
                            % (dm_fp, map_fp, categories[0], out_dir)]
            if method == 'morans_i':
                # Moran's I requires only numeric categories.
                for category in categories:
                    if not md_map.isNumericCategory(category):
                        raise TypeError("The category '%s' is not numeric. "
                                        "Not all values could be converted to "
                                        "numbers." % category)
            else:
                # The rest require groups of samples, so the category values
                # cannot all be unique.
                for category in categories:
                    if (md_map.hasUniqueCategoryValues(category) and not
                            (method == 'adonis' and
                             md_map.isNumericCategory(category))):
                        raise ValueError("All values in category '%s' are "
                                         "unique. This statistical method "
                                         "cannot operate on a category with "
                                         "unique values (e.g. there are no "
                                         "'within' distances because each "
                                         "group of samples contains only a "
                                         "single sample)." % category)
                # Only Moran's I doesn't accept a number of permutations.
                if num_perms < 0:
                    raise ValueError("The number of permutations must be "
                                     "greater than or equal to zero.")
                command_args[0] += ' -n %d' % num_perms
            # Run the matching R script (e.g. adonis.r) in a temp dir.
            rex = RExecutor(TmpDir=get_qiime_temp_dir())
            rex(command_args, '%s.r' % method)
        else:
            raise ValueError("Unrecognized method '%s'. Valid methods: %r"
                             % (method, methods))
| gpl-2.0 |
szrg/data-science-from-scratch | code/visualizing_data.py | 58 | 5116 | import matplotlib.pyplot as plt
from collections import Counter
def make_chart_simple_line_chart(plt):
    """Draw nominal US GDP over time as a simple line chart."""
    observations = [(1950, 300.2), (1960, 543.3), (1970, 1075.9),
                    (1980, 2862.5), (1990, 5979.6), (2000, 10289.7),
                    (2010, 14958.3)]
    xs = [year for year, _ in observations]
    ys = [gdp for _, gdp in observations]
    # line chart: years on the x-axis, GDP on the y-axis
    plt.plot(xs, ys, color='green', marker='o', linestyle='solid')
    plt.title("Nominal GDP")
    plt.ylabel("Billions of $")
    plt.show()
def make_chart_simple_bar_chart(plt):
    """Draw Academy Award counts for five favorite movies as a bar chart."""
    titles = ["Annie Hall", "Ben-Hur", "Casablanca", "Gandhi", "West Side Story"]
    oscar_counts = [5, 11, 3, 8, 10]
    # default bar width is 0.8, so offsetting each left edge by 0.1
    # keeps every bar centered on its integer position
    left_edges = [index + 0.1 for index in range(len(titles))]
    plt.bar(left_edges, oscar_counts)
    plt.ylabel("# of Academy Awards")
    plt.title("My Favorite Movies")
    # tick labels sit at the horizontal center of each bar
    plt.xticks([index + 0.5 for index in range(len(titles))], titles)
    plt.show()
def make_chart_histogram(plt):
    """Draw exam grades bucketed into deciles as a histogram."""
    grades = [83, 95, 91, 87, 70, 0, 85, 82, 100, 67, 73, 77, 0]
    # bucket each grade down to its decile (83 -> 80, 100 -> 100)
    bucketed = Counter(grade // 10 * 10 for grade in grades)
    # shift bars left by 4 so each 8-wide bar is centered on its decile
    plt.bar([decile - 4 for decile in bucketed.keys()],
            bucketed.values(),
            8)
    plt.axis([-5, 105, 0, 5])                # x from -5 to 105, y from 0 to 5
    plt.xticks([10 * i for i in range(11)])  # labels at 0, 10, ..., 100
    plt.xlabel("Decile")
    plt.ylabel("# of Students")
    plt.title("Distribution of Exam 1 Grades")
    plt.show()
def make_chart_misleading_y_axis(plt, mislead=True):
    """Show how truncating the y-axis exaggerates a tiny increase."""
    mention_counts = [500, 505]
    survey_years = [2013, 2014]
    plt.bar([2012.6, 2013.6], mention_counts, 0.8)
    plt.xticks(survey_years)
    plt.ylabel("# of times I heard someone say 'data science'")
    # without this, matplotlib labels the x-axis 0, 1 and parks a
    # "+2.013e3" offset annotation in the corner
    plt.ticklabel_format(useOffset=False)
    if mislead:
        # showing only the region above 500 makes a +5 bump look enormous
        plt.axis([2012.5, 2014.5, 499, 506])
        plt.title("Look at the 'Huge' Increase!")
    else:
        plt.axis([2012.5, 2014.5, 0, 550])
        plt.title("Not So Huge Anymore.")
    plt.show()
def make_chart_several_line_charts(plt):
variance = [1,2,4,8,16,32,64,128,256]
bias_squared = [256,128,64,32,16,8,4,2,1]
total_error = [x + y for x, y in zip(variance, bias_squared)]
xs = range(len(variance))
# we can make multiple calls to plt.plot
# to show multiple series on the same chart
plt.plot(xs, variance, 'g-', label='variance') # green solid line
plt.plot(xs, bias_squared, 'r-.', label='bias^2') # red dot-dashed line
plt.plot(xs, total_error, 'b:', label='total error') # blue dotted line
# because we've assigned labels to each series
# we can get a legend for free
# loc=9 means "top center"
plt.legend(loc=9)
plt.xlabel("model complexity")
plt.title("The Bias-Variance Tradeoff")
plt.show()
def make_chart_scatter_plot(plt):
    """Scatter daily site minutes against friend count, labeling each point."""
    friend_counts = [70, 65, 72, 63, 71, 64, 60, 64, 67]
    daily_minutes = [175, 170, 205, 120, 220, 130, 105, 145, 190]
    point_labels = ['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i']
    plt.scatter(friend_counts, daily_minutes)
    # tag every point with its label, nudged slightly off the marker
    for tag, x, y in zip(point_labels, friend_counts, daily_minutes):
        plt.annotate(tag,
                     xy=(x, y),            # anchor the label at the point
                     xytext=(5, -5),       # but offset it a little
                     textcoords='offset points')
    plt.title("Daily Minutes vs. Number of Friends")
    plt.xlabel("# of friends")
    plt.ylabel("daily minutes spent on the site")
    plt.show()
def make_chart_scatterplot_axes(plt, equal_axes=False):
    """Scatter grades from two exams; optionally force a 1:1 axis scale."""
    exam_one = [99, 90, 85, 97, 80]
    exam_two = [100, 85, 60, 90, 70]
    plt.scatter(exam_one, exam_two)
    plt.xlabel("test 1 grade")
    plt.ylabel("test 2 grade")
    if equal_axes:
        plt.title("Axes Are Comparable")
        plt.axis("equal")   # same scale on both axes
    else:
        plt.title("Axes Aren't Comparable")
    plt.show()
def make_chart_pie_chart(plt):
    """Draw a tongue-in-cheek pie chart about pie-chart usage."""
    shares = [0.95, 0.05]
    captions = ["Uses pie charts", "Knows better"]
    plt.pie(shares, labels=captions)
    # force a 1:1 aspect ratio so the pie renders as a circle, not an oval
    plt.axis("equal")
    plt.show()
if __name__ == "__main__":
    # Demo driver: render each example chart in sequence.
    # NOTE(review): make_chart_scatter_plot is defined above but is not
    # exercised here — confirm whether that omission is intentional.
    make_chart_simple_line_chart(plt)
    make_chart_simple_bar_chart(plt)
    make_chart_histogram(plt)
    make_chart_misleading_y_axis(plt, mislead=True)
    make_chart_misleading_y_axis(plt, mislead=False)
    make_chart_several_line_charts(plt)
    make_chart_scatterplot_axes(plt, equal_axes=False)
    make_chart_scatterplot_axes(plt, equal_axes=True)
    make_chart_pie_chart(plt)
| unlicense |
B3AU/waveTree | examples/cluster/plot_affinity_propagation.py | 12 | 2282 | """
=================================================
Demo of affinity propagation clustering algorithm
=================================================
Reference:
Brendan J. Frey and Delbert Dueck, "Clustering by Passing Messages
Between Data Points", Science Feb. 2007
"""
print(__doc__)

from sklearn.cluster import AffinityPropagation
from sklearn import metrics
from sklearn.datasets.samples_generator import make_blobs

##############################################################################
# Generate sample data: 300 points drawn around three fixed centers.
centers = [[1, 1], [-1, -1], [1, -1]]
X, labels_true = make_blobs(n_samples=300, centers=centers, cluster_std=0.5,
                            random_state=0)

##############################################################################
# Compute Affinity Propagation (preference=-50 controls how many exemplars
# the algorithm selects; see the AffinityPropagation docs).
af = AffinityPropagation(preference=-50).fit(X)
cluster_centers_indices = af.cluster_centers_indices_
labels = af.labels_
# The number of clusters is implied by the number of exemplars found.
n_clusters_ = len(cluster_centers_indices)

print('Estimated number of clusters: %d' % n_clusters_)
print("Homogeneity: %0.3f" % metrics.homogeneity_score(labels_true, labels))
print("Completeness: %0.3f" % metrics.completeness_score(labels_true, labels))
print("V-measure: %0.3f" % metrics.v_measure_score(labels_true, labels))
print("Adjusted Rand Index: %0.3f"
      % metrics.adjusted_rand_score(labels_true, labels))
print("Adjusted Mutual Information: %0.3f"
      % metrics.adjusted_mutual_info_score(labels_true, labels))
print("Silhouette Coefficient: %0.3f"
      % metrics.silhouette_score(X, labels, metric='sqeuclidean'))

##############################################################################
# Plot result: one color per cluster, exemplar drawn large, and a spoke
# from each exemplar to every member of its cluster.
import pylab as pl
from itertools import cycle

pl.close('all')
pl.figure(1)
pl.clf()

colors = cycle('bgrcmykbgrcmykbgrcmykbgrcmyk')
for k, col in zip(range(n_clusters_), colors):
    class_members = labels == k
    cluster_center = X[cluster_centers_indices[k]]
    pl.plot(X[class_members, 0], X[class_members, 1], col + '.')
    pl.plot(cluster_center[0], cluster_center[1], 'o', markerfacecolor=col,
            markeredgecolor='k', markersize=14)
    for x in X[class_members]:
        pl.plot([cluster_center[0], x[0]], [cluster_center[1], x[1]], col)

pl.title('Estimated number of clusters: %d' % n_clusters_)
pl.show()
| bsd-3-clause |
ubccr/tacc_stats | analyze/process_pickles/aggregate_jobs.py | 1 | 2428 | #!/usr/bin/env python
import analyze_conf
import sys
import datetime, glob, job_stats, os, subprocess, time
import itertools, argparse
import matplotlib
if not 'matplotlib.pyplot' in sys.modules:
matplotlib.use('pdf')
import matplotlib.pyplot as plt
import numpy
import tspl, tspl_utils
import multiprocessing, functools
import pprint
def get_samples(fn,times):
  """Collect the sorted sample timestamps of the job pickled at ``fn``.

  Results are returned by appending one sorted list of timestamps to the
  shared ``times`` list (a multiprocessing.Manager list); files that tspl
  cannot parse are silently skipped.
  """
  try:
    ts=tspl.TSPLSum(fn,['lnet'],['tx_bytes'])
  except tspl.TSPLException as e:
    # unparseable job file: contribute nothing for this input
    return
  times.append(sorted(list(ts.j.times)))
def get_lnet_data_file(fn,k1,k2,samples,histories):
  """Interpolate one job's (k1, k2) counter data onto the common sample grid.

  The interpolated series is stored in the shared ``histories`` dict
  (a multiprocessing.Manager dict) keyed by job id; unparseable files
  are silently skipped.
  """
  try:
    ts=tspl.TSPLSum(fn,k1,k2)
  except tspl.TSPLException as e:
    return
  histories[ts.j.id]=tspl_utils.global_interp_data(ts,samples)
def main():
  """Aggregate per-job counter histories across files and plot the hourly rate.

  Pipeline: (1) a process pool collects every job's sample timestamps,
  (2) the union of timestamps forms a common grid, (3) a second pool
  interpolates each job's (k1, k2) data onto that grid, and (4) the summed
  history is differentiated and saved as a rate plot ('bar' image file).
  """
  parser = argparse.ArgumentParser(description='')
  parser.add_argument('-p', help='Set number of processes',
                      nargs=1, type=int, default=[1])
  parser.add_argument('-k1', help='Set first key',
                      nargs='+', type=str, default=['amd64_sock'])
  parser.add_argument('-k2', help='Set second key',
                      nargs='+', type=str, default=['DRAM'])
  parser.add_argument('-f', help='File, directory, or quoted'
                      ' glob pattern', nargs=1, type=str, default=['jobs'])
  n=parser.parse_args()
  filelist=tspl_utils.getfilelist(n.f[0])
  # never spawn more workers than there are input files
  procs=min(len(filelist),n.p[0])
  m = multiprocessing.Manager()
  histories = m.dict()
  times = m.list()
  print 'Getting samples'
  partial_get_samples=functools.partial(get_samples,times=times)
  pool=multiprocessing.Pool(processes=procs)
  pool.map(partial_get_samples,filelist)
  pool.close()
  pool.join()
  # union of every job's timestamps -> common, sorted sample grid
  samples=set([])
  for t in times:
    samples=samples.union(t)
  samples=numpy.array(sorted(samples))
#  samples=numpy.array(range(1349067600,1352440800+1,3600))
  print len(samples)
  partial_glndf=functools.partial(get_lnet_data_file,k1=n.k1,k2=n.k2,
                                  samples=samples,histories=histories)
  print 'Getting data'
  pool=multiprocessing.Pool(processes=procs)
  pool.map(partial_glndf,filelist)
  pool.close()
  pool.join()
  # sum all interpolated per-job histories onto one accumulator
  accum=numpy.zeros(len(samples))
  for h in histories.values():
    accum+=h
  print 'Plotting'
  fig,ax=plt.subplots(1,1,dpi=80)
  t=numpy.array([float(x) for x in samples])
  t-=t[0]
  # finite-difference rate, with time rescaled from seconds to hours
  ax.plot(t[:-1]/3600.,numpy.diff(accum)/numpy.diff(t))
  fig.savefig('bar')
  plt.close()
| lgpl-2.1 |
benhamner/ASAP-AES | Benchmarks/length_benchmark.py | 2 | 3284 | #!/usr/bin/env python2.7
import re
from sklearn.ensemble import RandomForestRegressor
def add_essay_training(data, essay_set, essay, score):
    """Record one (essay, score) training example under ``essay_set`` in ``data``."""
    bucket = data.setdefault(essay_set, {"essay": [], "score": []})
    bucket["essay"].append(essay)
    bucket["score"].append(score)
def add_essay_test(data, essay_set, essay, prediction_id):
    """Record one (essay, prediction_id) test example under ``essay_set`` in ``data``."""
    bucket = data.setdefault(essay_set, {"essay": [], "prediction_id": []})
    bucket["essay"].append(essay)
    bucket["prediction_id"].append(prediction_id)
def read_training_data(training_file):
    """Parse the tab-separated training file.

    Returns a dict mapping essay-set id to {"essay": [...], "score": [...]}.
    Essay set "2" is scored on two domains, so its rows are split into
    synthetic sets "2_1" (column 6, domain 1) and "2_2" (column 9, domain 2).

    Fix: the original opened the file and never closed it (handle leak);
    a ``with`` block now guarantees the file is closed.
    """
    training_data = {}
    with open(training_file) as f:
        f.readline()  # skip the header row
        for row in f:
            row = row.strip().split("\t")
            essay_set = row[1]
            essay = row[2]
            domain1_score = int(row[6])
            if essay_set == "2":
                essay_set = "2_1"
            add_essay_training(training_data, essay_set, essay, domain1_score)
            if essay_set == "2_1":
                essay_set = "2_2"
                domain2_score = int(row[9])
                add_essay_training(training_data, essay_set, essay,
                                   domain2_score)
    return training_data
def read_test_data(test_file):
    """Parse the tab-separated validation file.

    Returns a dict mapping essay-set id to
    {"essay": [...], "prediction_id": [...]}. Essay set "2" rows carry two
    prediction ids (columns 3 and 4) and are fanned out into "2_1"/"2_2".

    Fix: the original opened the file and never closed it (handle leak);
    a ``with`` block now guarantees the file is closed.
    """
    test_data = {}
    with open(test_file) as f:
        f.readline()  # skip the header row
        for row in f:
            row = row.strip().split("\t")
            essay_set = row[1]
            essay = row[2]
            domain1_predictionid = int(row[3])
            if essay_set == "2":
                domain2_predictionid = int(row[4])
                add_essay_test(test_data, "2_1", essay, domain1_predictionid)
                add_essay_test(test_data, "2_2", essay, domain2_predictionid)
            else:
                add_essay_test(test_data, essay_set, essay,
                               domain1_predictionid)
    return test_data
def get_character_count(essay):
    """Return the number of characters in ``essay``."""
    total_chars = len(essay)
    return total_chars
def get_word_count(essay):
    """Return a crude word count: one more than the number of whitespace chars.

    Splitting on every single whitespace character yields exactly
    (number of whitespace matches) + 1 pieces, matching the original
    ``len(re.findall(r"\s", essay)) + 1`` computation.
    """
    return len(re.split(r"\s", essay))
def extract_features(essays, feature_functions):
    """Apply every feature function to every essay: one row of features per essay."""
    feature_rows = []
    for essay in essays:
        feature_rows.append([feature_fn(essay) for feature_fn in feature_functions])
    return feature_rows
def main():
    """Train one random forest per essay set on length features and write a submission.

    For each essay set: extract character/word-count features from the
    training essays, fit a RandomForestRegressor on the scores, predict
    on the validation essays, and collect rounded scores keyed by
    prediction id. All predictions are written as a two-column CSV.
    """
    print("Reading Training Data")
    training = read_training_data("../Data/training_set_rel3.tsv")
    print("Reading Validation Data")
    test = read_test_data("../Data/valid_set.tsv")
    # the whole "model" is two trivial length features
    feature_functions = [get_character_count, get_word_count]
    essay_sets = sorted(training.keys())
    predictions = {}
    for es_set in essay_sets:
        print("Making Predictions for Essay Set %s" % es_set)
        features = extract_features(training[es_set]["essay"],
                                    feature_functions)
        rf = RandomForestRegressor(n_estimators = 100)
        rf.fit(features,training[es_set]["score"])
        # reuse the same feature extraction on the held-out essays
        features = extract_features(test[es_set]["essay"], feature_functions)
        predicted_scores = rf.predict(features)
        for pred_id, pred_score in zip(test[es_set]["prediction_id"],
                                       predicted_scores):
            # scores must be integers, so round the regressor output
            predictions[pred_id] = round(pred_score)
    output_file = "../Submissions/length_benchmark.csv"
    print("Writing submission to %s" % output_file)
    f = open(output_file, "w")
    f.write("prediction_id,predicted_score\n")
    for key in sorted(predictions.keys()):
        f.write("%d,%d\n" % (key,predictions[key]))
    f.close()
| bsd-2-clause |
joshua-cogliati-inl/raven | framework/SupervisedLearning/NDsplineRom.py | 1 | 4919 | # Copyright 2017 Battelle Energy Alliance, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Created on May 8, 2018
@author: talbpaul
Originally from SupervisedLearning.py, split in PR #650 in July 2018
Specific ROM implementation for NDsplineRom
"""
#for future compatibility with Python 3--------------------------------------------------------------
from __future__ import division, print_function, unicode_literals, absolute_import
#End compatibility block for Python 3----------------------------------------------------------------
#External Modules------------------------------------------------------------------------------------
import math
import copy
import numpy as np
from itertools import product
from sklearn import neighbors
#External Modules End--------------------------------------------------------------------------------
#Internal Modules------------------------------------------------------------------------------------
from utils import utils
interpolationND = utils.findCrowModule("interpolationND")
from .NDinterpolatorRom import NDinterpolatorRom
#Internal Modules End--------------------------------------------------------------------------------
class NDsplineRom(NDinterpolatorRom):
  """
    An N-dimensional Spline model
  """
  ROMtype         = 'NDsplineRom'  # identifier used by the ROM factory

  def __init__(self,messageHandler,**kwargs):
    """
      A constructor that will appropriately intialize a supervised learning object
      @ In, messageHandler, MessageHandler object, it is in charge of raising errors, and printing messages
      @ In, kwargs, dict, an arbitrary list of kwargs
      @ Out, None
    """
    NDinterpolatorRom.__init__(self,messageHandler,**kwargs)
    self.printTag = 'ND-SPLINE ROM'
    # one independent crow NDSpline interpolator per target variable
    for _ in range(len(self.target)):
      self.interpolator.append(interpolationND.NDSpline())

  def __trainLocal__(self,featureVals,targetVals):
    """
      Perform training on samples. This is a specialization of the
      Spline Interpolator (since it will create a Cartesian Grid in case
      the samples are not a tensor)
      @ In, featureVals, {array-like, sparse matrix}, shape=[n_samples, n_features],
        an array of input feature values
      @ Out, targetVals, array, shape = [n_samples], an array of output target
        associated with the corresponding points in featureVals
    """
    # number of grid points per dimension needed for an equivalent tensor grid
    numDiscrPerDimension = int(math.ceil(len(targetVals)**(1./len(self.features))))
    newNumberSamples     = numDiscrPerDimension**len(self.features)
    # get discretizations
    discretizations = [ list(set(featureVals[:,d].tolist())) for d in range(len(self.features))]
    # check if it is a tensor grid or not
    tensorGrid = False if np.prod( [len(d) for d in discretizations] ) != len(targetVals) else True
    if not tensorGrid:
      # the spline needs a Cartesian grid: resample the scattered training
      # data onto one via k-nearest-neighbor regression
      self.raiseAWarning("Training set for NDSpline is not a cartesian grid. The training Tensor Grid is going to be create by interpolation!")
      # isolate training data
      featureVals = copy.deepcopy(featureVals)
      targetVals  = copy.deepcopy(targetVals)
      # new discretization
      newDiscretizations = [np.linspace(min(discretizations[d]), max(discretizations[d]), num=numDiscrPerDimension, dtype=float).tolist() for d in range(len(self.features))]
      # new feature values
      newFeatureVals = np.atleast_2d(np.asarray(list(product(*newDiscretizations))))
      # new valuesContainer
      newTargetVals = np.zeros( (newNumberSamples,len(self.target)) )
      for index in range(len(self.target)):
        # not a tensor grid => interpolate
        nr = neighbors.KNeighborsRegressor(n_neighbors= min(2**len(self.features),len(targetVals)), weights='distance')
        nr.fit(featureVals, targetVals[:,index])
        # new target values
        newTargetVals[:,index] = nr.predict(newFeatureVals)
      targetVals  = newTargetVals
      featureVals = newFeatureVals
    # fit the model
    self.featv, self.targv = featureVals,targetVals
    featv = interpolationND.vectd2d(featureVals[:][:])
    for index, target in enumerate(self.target):
      # each target gets its own spline fit over the shared feature grid
      targv = interpolationND.vectd(targetVals[:,index])
      self.interpolator[index].fit(featv,targv)

  def __resetLocal__(self):
    """
      Reset ROM. After this method the ROM should be described only by the initial parameter settings
      @ In, None
      @ Out, None
    """
    for index in range(len(self.target)):
      self.interpolator[index].reset()
| apache-2.0 |
verilylifesciences/site-selection-tool | bsst/plot.py | 1 | 13466 | # Copyright 2020 Verily Life Sciences LLC
#
# Use of this source code is governed by a BSD-style
# license that can be found in the LICENSE file or at
# https://developers.google.com/open-source/licenses/bsd
"""Functions for plotting incidence, recruitment, and events in the Baseline Site Selection Tool."""
from bsst import colors_config as cc
import matplotlib as mpl
import numpy as np
import pandas as pd
from bsst import plot_utils
import warnings
from bsst import ville_config
# All functions here take an axis argument and modify it in place.
# Functions in plot_utils return arguments.
pd.plotting.register_matplotlib_converters() # need to run on this pandas version
def turn_spines_off(ax, list_of_spines=('bottom', 'top', 'left', 'right')):
  """Turn off sides of the bounding box.

  Args:
    ax: The axis instance to format
    list_of_spines: An iterable of spine names to turn off; defaults to all
      four sides. (An immutable tuple default replaces the original mutable
      list default; callers passing their own list are unaffected.)
  """
  for spine in list_of_spines:
    ax.spines[spine].set_visible(False)
def format_time_axis(ax, num_ticks=4, include_labels=True, date_format='%b'):
  """Formats the x axis to be time in datetime form, with num_ticks.

  Args:
    ax: The axis instance we want to format, we assume time is on the xaxis.
    num_ticks: An int representing the number of ticks to include on the final image.
    include_labels: Bool representing whether to include text labels
    date_format: A string representing how to format the date. '%b' is month
      only.
  """
  # NOTE(review): `range` shadows the builtin of the same name inside this
  # function. Also, `range[0] + [...]` only works when get_xlim() returns
  # numpy scalars (numpy broadcasting); with plain Python floats it would
  # raise TypeError — confirm against the matplotlib version in use.
  range = ax.get_xlim()
  step_size = (range[1] - range[0]) // (num_ticks - 1)
  ticks = range[0] + [i * step_size for i in np.arange(num_ticks)]
  # matplotlib date numbers need conversion; true datetime64 ticks pass through
  dt_ticks = mpl.dates.num2date(ticks) if not np.issubdtype(ticks.dtype, np.datetime64) else ticks
  labels = [pd.to_datetime(x).strftime(date_format) for x in dt_ticks]
  ax.set_xticks(ticks=ticks)
  if include_labels:
    ax.set_xticklabels(labels=labels, rotation=30)
    ax.set_xlabel('date')
  else:
    # keep the tick marks but hide the text labels entirely
    ax.set_xticklabels(labels=[], visible=False)
def format_hist_time_axis(ax, bins, special_bins=[(0, '<'), (-2, '>'),
                                                  (-1, 'Did not\nsucceed')],
                          num_ticks=4, include_labels=True, date_format='%b-%d'):
  """Formats the x axis to be time in datetime form, with num_ticks.

  Args:
    ax: The axis instance we want to format, we assume time is on the xaxis.
    bins: The bins used to plot the histogram
    special_bins: A series of tuples with the first entry representing the
      index of a bins with special values and the second entry
      representing the label to give to the bin.
    num_ticks: An int representing the number of ticks to include on the final image.
    include_labels: Bool representing whether to include text labels
    date_format: A string representing how to format the date. '%b' is month
      only.
  """
  # NOTE(review): special_bins is a mutable default argument; it is only
  # iterated here, never mutated, so this is currently safe.
  # Drop the special bins so only genuine date bins receive date ticks.
  eligible_bins = np.delete(bins, [idx for idx in [special_bins[i][0] for i in range(len(special_bins))]])
  # Don't put a tick at the far right, as we need to see the DnC label
  step_size = len(eligible_bins) // num_ticks
  ticks = eligible_bins[[i * step_size for i in np.arange(num_ticks)]]
  dt_ticks = mpl.dates.num2date(ticks) if not np.issubdtype(ticks.dtype, np.datetime64) else ticks
  labels = [pd.to_datetime(x).strftime(date_format) for x in dt_ticks]
  # Re-append the special bins with their fixed text labels ('<', '>', etc.).
  ticks = np.append(ticks, bins[[special_bins[i][0] for i in range(len(special_bins))]])
  labels = np.append(labels, [special_bins[i][1] for i in range(len(special_bins))])
  ax.set_xticks(ticks=ticks)
  if include_labels:
    ax.set_xticklabels(labels=labels, rotation=30)
    ax.set_xlabel('date')
  else:
    ax.set_xticklabels(labels=[], visible=False)
def array_over_time(ax, array_to_plot, first_plot_day=None, plot_kwargs={'color':'b', 'ls':'-'}):
  """Plot array_to_plot as a function of time.

  If array has a `sample` or `scenario` dimension, then all samples will be plotted with a
  low opacity (alpha) value.

  Args:
    ax: An axes instance to plot our data on.
    array_to_plot: A xr.DataArray with a time dimension, and optionally a sample OR
      scenario dimension
    first_plot_day: Optional, a time coordinate indicating the first date to plot.
    plot_kwargs: Optional, a dictionary with keyword arguments to pass to matplotlib.plot
  """
  # NOTE(review): plot_kwargs is a mutable default dict; it is only unpacked
  # into ax.plot here, never mutated, so this is currently safe.
  time_dim = plot_utils.find_time_dim(array_to_plot)
  # move time to the leading axis so ax.plot sees one column per series
  shaped_data = array_to_plot.transpose(time_dim, ...)
  if first_plot_day in array_to_plot.coords[time_dim].values:
    data = shaped_data.sel({time_dim:slice(first_plot_day, None)})
  else:
    # unknown start date: plot the full time range
    data = shaped_data
  if any(item in data.dims for item in ['sample', 'scenario', 'sample_flattened']):
    # many overlaid draws: fade each one so density reads as shading
    alpha = 0.1
  else:
    alpha = 1.0
  ax.plot(data[time_dim], data.values, **plot_kwargs, alpha=alpha)
def cum_control_events(ax, control_events, first_plot_day, color, linestyle):
  """Plot cumulative control arm events over time or historical_time.

  Args:
    ax: The axis instance we want to plot on.
    control_events: The xr.DataArray that we want to plot. Must have either
      'time' OR 'historical_time' dimension.
    first_plot_day: An int representing the first date to plot.
    color: A mpl color.
    linestyle: A mpl linestyle.
  """
  time_dim = plot_utils.find_time_dim(control_events)
  cum_events = control_events.cumsum(time_dim)
  # Bug fix: this module's plotting helper is named array_over_time (used by
  # the sibling incidence/cum_recruits functions); the previous call to the
  # undefined name plot_array_over_time raised NameError at runtime.
  array_over_time(ax, cum_events, first_plot_day,
                  {'color': color, 'ls': linestyle})
  ax.set_ylabel('Cumulative control events')
def incidence(ax, incidence, first_plot_day, color, linestyle):
  """Plot incidence over time or historical_time.

  Args:
    ax: The axis instance we want to plot on.
    incidence: The xr.DataArray that we want to plot. Must have either
      'time' OR 'historical_time' dimension.
    first_plot_day: An int representing the first date to plot.
    color: A mpl color.
    linestyle: A mpl linestyle.
  """
  # NOTE(review): the parameter deliberately shadows the function's own name;
  # inside this body `incidence` refers to the DataArray argument.
  array_over_time(ax, incidence, first_plot_day, {'color': color, 'ls': linestyle})
  ax.set_ylabel('New cases / population')
def cum_recruits(ax, recruits, first_plot_day, color, linestyle):
  """Plot cumulative recruits over a time dimension.

  Args:
    ax: The axis instance we want to plot on.
    recruits: The xr.DataArray that we want to plot. Must have either
      'time' OR 'historical_time' dimension.
    first_plot_day: An int representing the first date to plot.
    color: A mpl color.
    linestyle: A mpl linestyle.
  """
  # accumulate along whichever time dimension the array carries
  time_dim = plot_utils.find_time_dim(recruits)
  cum_recruits = recruits.cumsum(time_dim)
  array_over_time(ax, cum_recruits, first_plot_day, {'color': color, 'ls': linestyle})
  ax.set_ylabel('Cumulative recruits')
def cum_subrecruits(ax, recruits, first_plot_day, color, linestyle):
  """Plot the cumulative sum of recruits to compare across many populations.

  Args:
    ax: A series of axis instances to plot on (one per participant label).
    recruits: An xr.DataArray that representing the expected or
      observed recruits. Must have a time dimension.
    first_plot_day: An int representing the first date to plot.
    color: A mpl color.
    linestyle: A mpl linestyle.
  """
  sel_recruits = plot_utils.unpack_participant_labels(recruits)
  labels_to_plot = plot_utils.get_labels_to_plot(recruits)
  num_plots = len(ax)
  for i, label in enumerate(labels_to_plot):
    a = ax[i]
    # one subplot per participant label, titled by that label
    participants = sel_recruits.sel(participant_label=label, drop=True)
    a.set_title(label)
    time_dim = plot_utils.find_time_dim(participants)
    array_over_time(a, participants.cumsum(time_dim), first_plot_day,
                    {'color': color, 'ls': linestyle})
    # only the bottom two subplots get date tick labels; the rest share the axis
    if i in [num_plots-2, num_plots-1]:
      format_time_axis(a, 3, date_format='%b-%d')
    else:
      format_time_axis(a, 3, include_labels=False)
def recruits(dim_to_plot, ax, sorted_recruits, color, linestyle='-', label=None):
  """Plot the recruits as a histogram over dim_to_plot.

  Args:
    dim_to_plot: A string representing the sorted_recruits.dim to plot
      along the x-axis.
    ax: An axes instance to plot our data on.
    sorted_recruits: A xr.DataArray representing the recruits to plot
      where <dim> has been sorted into the desired display order.
    color: A mpl color to use as the edgecolor
    linestyle: A mpl linestyle
    label: A string used as a plot label
  """
  # collapse every dimension except the one being displayed
  dims_to_sum = list(sorted_recruits.dims)
  dims_to_sum.remove(dim_to_plot)
  thc = cc.TRANSPARENT_HIST_COLOR
  bh = cc.BAR_HEIGHT
  lw = cc.LINE_WIDTH
  sum_rec = sorted_recruits.sum(dims_to_sum)
  # horizontal bars: one bar per coordinate value of dim_to_plot
  ax.barh(sum_rec[dim_to_plot], sum_rec, height=bh, fc=color, ec=thc,
          alpha=0.3, ls=linestyle, lw=lw, label=label)
def recruit_diffs(dim_to_plot, ax, sorted_recruits, recruits_left,
                  zero_left_edge=False):
  """Plot the difference between two sets of recruits.

  Places vertical lines at the actual recruitment value. Color maps and bar
  height read from colors_config.

  Args:
    dim_to_plot: A string representing the sorted_recruits.dim to plot
      along the x-axis.
    ax: An axes instance to plot our data on.
    sorted_recruits: A xr.DataArray representing the recruits to plot as the
      right edge of the bar chart.
      where <dim> has been sorted into the desired display order.
    recruits_left: A xr.DataArray representing the recruits to plot as the
      left edge of the bar chart.
    zero_left_edge: A boolean. If True, we plot the left edge at 0.
  """
  # collapse every dimension except the one being displayed
  dims_to_sum = list(sorted_recruits.dims)
  dims_to_sum.remove(dim_to_plot)
  sum_rec_right = sorted_recruits.sum(dims_to_sum)
  sum_rec_left = recruits_left.sum(dims_to_sum)
  cmap = cc.BAR_CHART_CMAP
  norm = cc.BAR_CHART_NORM
  bh = cc.BAR_HEIGHT
  ax.set_facecolor(cc.BAR_CHART_FACECOLOR)
  # Sort the left edges to match the right edges
  ydim = sum_rec_right.dims[0]
  sorted_rec_left = sum_rec_left.sel({ydim: sorted_recruits[ydim]})
  diff = sum_rec_right - sorted_rec_left
  if not zero_left_edge:
    # bars span from the baseline (left) to the proposed (right) value,
    # colored by the signed difference
    bar_plot = ax.barh(diff[ydim], diff, left=sorted_rec_left,
                       color=cmap(norm(diff.values)), height=bh)
    # Add vertical lines at the left-most edges
    lh = cc.VLINE_HEIGHT
    lc = cc.VLINE_COLOR
    # ycoord is lower left of box
    ycoords = np.asarray([i.xy[1] for i in bar_plot.get_children()])
    # center the (taller) dashed marker line on each bar
    lower_lim = ycoords - .5 * (lh - bh)
    ax.vlines(sum_rec_right, lower_lim, lower_lim + lh, lc, ls='dashed')
  else:
    ax.barh(diff[ydim], diff, left=None, color=cmap(norm(diff.values)),
            height=bh)
  # Add a line at 0 to guide the eye
  ax.axvline(color='#000000', lw=1.0)
def tts(ax, events, efficacy, color, linestyle):
  """Plot the time to success distributions.

  Args:
    ax: The axis instance we want to plot on.
    events: An xr.DataArray representing the number of events in
      our control arm. Has dimensions (time, location, scenario)
    efficacy: A float representing the assumed vaccine efficacy.
    color: A mpl color for the bar faces
    linestyle: A mpl linestyle for the bar edges
  """
  lw = cc.LINE_WIDTH
  thc = cc.TRANSPARENT_HIST_COLOR
  ax.set_facecolor(thc)
  hist, bins = plot_utils.make_tts_hist(events, efficacy)
  # uniform bin width assumed; bars anchored to each bin's left edge
  bw = bins[1] - bins[0]
  ax.bar(bins[:-1], hist, width=bw, align='edge',
         fc=color, ec=thc, ls=linestyle, lw=lw, alpha=0.3)
  format_hist_time_axis(ax, bins[:-1], date_format='%b-%d')
  # dashed marker separating the date bins from the final special bin
  ax.axvline(x=bins[-2], color='#656565', lw=1.0, ls='--')
def tts_diff(ax, proposed_events, baseline_events, efficacy):
    """Plot the difference in time to success distributions.

    Args:
        ax: The axis instance we want to plot on.
        proposed_events: An xr.DataArray representing the number of events in
            our control arm. Has dimensions (time, location, scenario).
        baseline_events: An xr.DataArray representing the baseline number of
            control events. This becomes the bottom edge of the barh plot.
            Has dimensions (time, location, scenario).
        efficacy: A float representing the assumed vaccine efficacy.
    """
    ax.set_facecolor(cc.BAR_CHART_FACECOLOR)
    cmap = cc.BAR_CHART_CMAP
    norm = cc.DIFF_NORM
    proposed_hist, proposed_bins = plot_utils.make_tts_hist(proposed_events, efficacy)
    baseline_hist, baseline_bins = plot_utils.make_tts_hist(baseline_events, efficacy)
    # Histograms are only comparable bin-for-bin if both share the same times.
    if np.any(proposed_bins[~np.isnan(proposed_bins)] != baseline_bins[~np.isnan(baseline_bins)]):
        # Was an f-string with no placeholders; plain literal is equivalent.
        warnings.warn('Proposed and baseline events have different times.')
    diff = proposed_hist - baseline_hist
    bw = proposed_bins[1] - proposed_bins[0]
    ax.bar(proposed_bins[:-1], diff, width=bw, align='edge',
           color=cmap(norm(diff)))
    # Add a line at 0 to guide the eye
    ax.axhline(color='#000000', lw=1.0)
    # add line on the dns bin
    ax.axvline(x=proposed_bins[-2], color='#656565', lw=1.0, ls='--')
    # Format time axis here
    format_hist_time_axis(ax, proposed_bins[:-1], date_format='%b-%d')
| bsd-3-clause |
vikhyat/dask | dask/dataframe/tests/test_dataframe.py | 1 | 90686 | from itertools import product
from datetime import datetime
from operator import getitem
from distutils.version import LooseVersion
import pandas as pd
import pandas.util.testing as tm
import numpy as np
import pytest
from numpy.testing import assert_array_almost_equal
import dask
from dask.async import get_sync
from dask.utils import raises, ignoring
import dask.dataframe as dd
from dask.dataframe.core import (repartition_divisions, _loc,
_coerce_loc_index, aca, reduction, _concat, _Frame)
from dask.dataframe.utils import eq, assert_dask_graph
# Shared fixtures for the tests below: a three-partition dask DataFrame
# built from an explicit task dict, plus its computed pandas counterpart.
dsk = {('x', 0): pd.DataFrame({'a': [1, 2, 3], 'b': [4, 5, 6]},
                              index=[0, 1, 3]),
       ('x', 1): pd.DataFrame({'a': [4, 5, 6], 'b': [3, 2, 1]},
                              index=[5, 6, 8]),
       ('x', 2): pd.DataFrame({'a': [7, 8, 9], 'b': [0, 0, 0]},
                              index=[9, 9, 9])}
d = dd.DataFrame(dsk, 'x', ['a', 'b'], [0, 4, 9, 9])
full = d.compute()
def test_Dataframe():
    """Smoke-test basic arithmetic, selection and reductions against pandas."""
    result = (d['a'] + 1).compute()
    expected = pd.Series([2, 3, 4, 5, 6, 7, 8, 9, 10],
                         index=[0, 1, 3, 5, 6, 8, 9, 9, 9],
                         name='a')
    assert eq(result, expected)
    assert list(d.columns) == list(['a', 'b'])
    full = d.compute()
    assert eq(d[d['b'] > 2], full[full['b'] > 2])
    assert eq(d[['a', 'b']], full[['a', 'b']])
    assert eq(d.a, full.a)
    assert d.b.mean().compute() == full.b.mean()
    assert np.allclose(d.b.var().compute(), full.b.var())
    assert np.allclose(d.b.std().compute(), full.b.std())
    assert d.index._name == d.index._name  # this is deterministic
    assert repr(d)
def test_head_tail():
    """head/tail should match pandas and produce deterministic graphs."""
    assert eq(d.head(2), full.head(2))
    assert eq(d.head(3), full.head(3))
    # head only needs the first partition
    assert eq(d.head(2), dsk[('x', 0)].head(2))
    assert eq(d['a'].head(2), full['a'].head(2))
    assert eq(d['a'].head(3), full['a'].head(3))
    assert eq(d['a'].head(2), dsk[('x', 0)]['a'].head(2))
    # Graph keys are deterministic for equal calls, distinct for different n.
    assert sorted(d.head(2, compute=False).dask) == \
        sorted(d.head(2, compute=False).dask)
    assert sorted(d.head(2, compute=False).dask) != \
        sorted(d.head(3, compute=False).dask)
    assert eq(d.tail(2), full.tail(2))
    assert eq(d.tail(3), full.tail(3))
    # tail only needs the last partition
    assert eq(d.tail(2), dsk[('x', 2)].tail(2))
    assert eq(d['a'].tail(2), full['a'].tail(2))
    assert eq(d['a'].tail(3), full['a'].tail(3))
    assert eq(d['a'].tail(2), dsk[('x', 2)]['a'].tail(2))
    assert sorted(d.tail(2, compute=False).dask) == \
        sorted(d.tail(2, compute=False).dask)
    assert sorted(d.tail(2, compute=False).dask) != \
        sorted(d.tail(3, compute=False).dask)
def test_Series():
    """Column access yields dd.Series and arithmetic matches pandas."""
    assert isinstance(d.a, dd.Series)
    assert isinstance(d.a + 1, dd.Series)
    assert eq((d + 1), full + 1)
    assert repr(d.a).startswith('dd.Series')
def test_Index():
    """Index access matches pandas for both integer and datetime indexes."""
    for case in [pd.DataFrame(np.random.randn(10, 5), index=list('abcdefghij')),
                 pd.DataFrame(np.random.randn(10, 5),
                              index=pd.date_range('2011-01-01', freq='D', periods=10))]:
        ddf = dd.from_pandas(case, 3)
        assert eq(ddf.index, case.index)
        assert repr(ddf.index).startswith('dd.Index')
        # An Index has no .index of its own
        assert raises(AttributeError, lambda: ddf.index.index)
def test_attributes():
    """Columns are exposed as attributes; unknown names raise."""
    assert 'a' in dir(d)
    assert 'foo' not in dir(d)
    assert raises(AttributeError, lambda: d.foo)
def test_column_names():
    """Column metadata is preserved through selection and arithmetic."""
    assert d.columns == ('a', 'b')
    assert d[['b', 'a']].columns == ('b', 'a')
    assert d['a'].columns == ('a',)
    assert (d['a'] + 1).columns == ('a',)
    # Binary ops between differently-named series lose the name
    assert (d['a'] + d['b']).columns == (None,)
def test_set_index():
    """set_index by column name or series matches pandas."""
    dsk = {('x', 0): pd.DataFrame({'a': [1, 2, 3], 'b': [4, 2, 6]},
                                  index=[0, 1, 3]),
           ('x', 1): pd.DataFrame({'a': [4, 5, 6], 'b': [3, 5, 8]},
                                  index=[5, 6, 8]),
           ('x', 2): pd.DataFrame({'a': [7, 8, 9], 'b': [9, 1, 8]},
                                  index=[9, 9, 9])}
    d = dd.DataFrame(dsk, 'x', ['a', 'b'], [0, 4, 9, 9])
    full = d.compute()
    d2 = d.set_index('b', npartitions=3)
    assert d2.npartitions == 3
    assert eq(d2, full.set_index('b'))
    d3 = d.set_index(d.b, npartitions=3)
    assert d3.npartitions == 3
    assert eq(d3, full.set_index(full.b))
    # Default npartitions
    d4 = d.set_index('b')
    assert eq(d4, full.set_index('b'))
def test_set_index_raises_error_on_bad_input():
    """Multi-column set_index is not implemented and must raise."""
    df = pd.DataFrame({'a': [1, 2, 3, 4, 5, 6, 7],
                       'b': [7, 6, 5, 4, 3, 2, 1]})
    ddf = dd.from_pandas(df, 2)
    assert raises(NotImplementedError, lambda: ddf.set_index(['a', 'b']))
def test_split_apply_combine_on_series():
    """Exercise groupby reductions against pandas for many key types."""
    dsk = {('x', 0): pd.DataFrame({'a': [1, 2, 6], 'b': [4, 2, 7]},
                                  index=[0, 1, 3]),
           ('x', 1): pd.DataFrame({'a': [4, 4, 6], 'b': [3, 3, 1]},
                                  index=[5, 6, 8]),
           ('x', 2): pd.DataFrame({'a': [4, 3, 7], 'b': [1, 1, 3]},
                                  index=[9, 9, 9])}
    d = dd.DataFrame(dsk, 'x', ['a', 'b'], [0, 4, 9, 9])
    full = d.compute()
    # Group a DataFrame by a column name, a series, or a derived series.
    for ddkey, pdkey in [('b', 'b'), (d.b, full.b),
                         (d.b + 1, full.b + 1)]:
        assert eq(d.groupby(ddkey).a.min(), full.groupby(pdkey).a.min())
        assert eq(d.groupby(ddkey).a.max(), full.groupby(pdkey).a.max())
        assert eq(d.groupby(ddkey).a.count(), full.groupby(pdkey).a.count())
        assert eq(d.groupby(ddkey).a.mean(), full.groupby(pdkey).a.mean())
        assert eq(d.groupby(ddkey).a.nunique(), full.groupby(pdkey).a.nunique())
        assert eq(d.groupby(ddkey).sum(), full.groupby(pdkey).sum())
        assert eq(d.groupby(ddkey).min(), full.groupby(pdkey).min())
        assert eq(d.groupby(ddkey).max(), full.groupby(pdkey).max())
        assert eq(d.groupby(ddkey).count(), full.groupby(pdkey).count())
        assert eq(d.groupby(ddkey).mean(), full.groupby(pdkey).mean())
    # Group a Series by another series.
    for ddkey, pdkey in [(d.b, full.b), (d.b + 1, full.b + 1)]:
        assert eq(d.a.groupby(ddkey).sum(), full.a.groupby(pdkey).sum(), check_names=False)
        assert eq(d.a.groupby(ddkey).max(), full.a.groupby(pdkey).max(), check_names=False)
        assert eq(d.a.groupby(ddkey).count(), full.a.groupby(pdkey).count(), check_names=False)
        assert eq(d.a.groupby(ddkey).mean(), full.a.groupby(pdkey).mean(), check_names=False)
        assert eq(d.a.groupby(ddkey).nunique(), full.a.groupby(pdkey).nunique(), check_names=False)
    # Group by a boolean mask at every threshold.
    for i in range(8):
        assert eq(d.groupby(d.b > i).a.sum(), full.groupby(full.b > i).a.sum())
        assert eq(d.groupby(d.b > i).a.min(), full.groupby(full.b > i).a.min())
        assert eq(d.groupby(d.b > i).a.max(), full.groupby(full.b > i).a.max())
        assert eq(d.groupby(d.b > i).a.count(), full.groupby(full.b > i).a.count())
        assert eq(d.groupby(d.b > i).a.mean(), full.groupby(full.b > i).a.mean())
        assert eq(d.groupby(d.b > i).a.nunique(), full.groupby(full.b > i).a.nunique())
        assert eq(d.groupby(d.a > i).b.sum(), full.groupby(full.a > i).b.sum())
        assert eq(d.groupby(d.a > i).b.min(), full.groupby(full.a > i).b.min())
        assert eq(d.groupby(d.a > i).b.max(), full.groupby(full.a > i).b.max())
        assert eq(d.groupby(d.a > i).b.count(), full.groupby(full.a > i).b.count())
        assert eq(d.groupby(d.a > i).b.mean(), full.groupby(full.a > i).b.mean())
        assert eq(d.groupby(d.a > i).b.nunique(), full.groupby(full.a > i).b.nunique())
        assert eq(d.groupby(d.b > i).sum(), full.groupby(full.b > i).sum())
        assert eq(d.groupby(d.b > i).min(), full.groupby(full.b > i).min())
        assert eq(d.groupby(d.b > i).max(), full.groupby(full.b > i).max())
        assert eq(d.groupby(d.b > i).count(), full.groupby(full.b > i).count())
        assert eq(d.groupby(d.b > i).mean(), full.groupby(full.b > i).mean())
        assert eq(d.groupby(d.a > i).sum(), full.groupby(full.a > i).sum())
        assert eq(d.groupby(d.a > i).min(), full.groupby(full.a > i).min())
        assert eq(d.groupby(d.a > i).max(), full.groupby(full.a > i).max())
        assert eq(d.groupby(d.a > i).count(), full.groupby(full.a > i).count())
        assert eq(d.groupby(d.a > i).mean(), full.groupby(full.a > i).mean())
    for ddkey, pdkey in [('a', 'a'), (d.a, full.a),
                         (d.a + 1, full.a + 1), (d.a > 3, full.a > 3)]:
        assert eq(d.groupby(ddkey).b.sum(), full.groupby(pdkey).b.sum())
        assert eq(d.groupby(ddkey).b.min(), full.groupby(pdkey).b.min())
        assert eq(d.groupby(ddkey).b.max(), full.groupby(pdkey).b.max())
        assert eq(d.groupby(ddkey).b.count(), full.groupby(pdkey).b.count())
        assert eq(d.groupby(ddkey).b.mean(), full.groupby(pdkey).b.mean())
        assert eq(d.groupby(ddkey).b.nunique(), full.groupby(pdkey).b.nunique())
        assert eq(d.groupby(ddkey).sum(), full.groupby(pdkey).sum())
        assert eq(d.groupby(ddkey).min(), full.groupby(pdkey).min())
        assert eq(d.groupby(ddkey).max(), full.groupby(pdkey).max())
        assert eq(d.groupby(ddkey).count(), full.groupby(pdkey).count())
        assert eq(d.groupby(ddkey).mean(), full.groupby(pdkey).mean().astype(float))
    # Graph keys must be deterministic across identical calls.
    assert sorted(d.groupby('b').a.sum().dask) == \
        sorted(d.groupby('b').a.sum().dask)
    assert sorted(d.groupby(d.a > 3).b.mean().dask) == \
        sorted(d.groupby(d.a > 3).b.mean().dask)
    # test raises with incorrect key
    assert raises(KeyError, lambda: d.groupby('x'))
    assert raises(KeyError, lambda: d.groupby(['a', 'x']))
    assert raises(KeyError, lambda: d.groupby('a')['x'])
    assert raises(KeyError, lambda: d.groupby('a')['b', 'x'])
    assert raises(KeyError, lambda: d.groupby('a')[['b', 'x']])
    # test graph node labels
    assert_dask_graph(d.groupby('b').a.sum(), 'series-groupby-sum')
    assert_dask_graph(d.groupby('b').a.min(), 'series-groupby-min')
    assert_dask_graph(d.groupby('b').a.max(), 'series-groupby-max')
    assert_dask_graph(d.groupby('b').a.count(), 'series-groupby-count')
    # mean consists from sum and count operations
    assert_dask_graph(d.groupby('b').a.mean(), 'series-groupby-sum')
    assert_dask_graph(d.groupby('b').a.mean(), 'series-groupby-count')
    assert_dask_graph(d.groupby('b').a.nunique(), 'series-groupby-nunique')
    assert_dask_graph(d.groupby('b').sum(), 'dataframe-groupby-sum')
    assert_dask_graph(d.groupby('b').min(), 'dataframe-groupby-min')
    assert_dask_graph(d.groupby('b').max(), 'dataframe-groupby-max')
    assert_dask_graph(d.groupby('b').count(), 'dataframe-groupby-count')
    # mean consists from sum and count operations
    assert_dask_graph(d.groupby('b').mean(), 'dataframe-groupby-sum')
    assert_dask_graph(d.groupby('b').mean(), 'dataframe-groupby-count')
def test_groupby_multilevel_getitem():
    """Groupby with list keys and column selection matches pandas."""
    df = pd.DataFrame({'a': [1, 2, 3, 1, 2, 3],
                       'b': [1, 2, 1, 4, 2, 1],
                       'c': [1, 3, 2, 1, 1, 2],
                       'd': [1, 2, 1, 1, 2, 2]})
    ddf = dd.from_pandas(df, 2)
    cases = [(ddf.groupby('a')['b'], df.groupby('a')['b']),
             (ddf.groupby(['a', 'b']), df.groupby(['a', 'b'])),
             (ddf.groupby(['a', 'b'])['c'], df.groupby(['a', 'b'])['c']),
             (ddf.groupby('a')[['b', 'c']], df.groupby('a')[['b', 'c']]),
             (ddf.groupby('a')[['b']], df.groupby('a')[['b']]),
             (ddf.groupby(['a', 'b', 'c']), df.groupby(['a', 'b', 'c']))]
    for d, p in cases:
        assert isinstance(d, dd.core._GroupBy)
        assert isinstance(p, pd.core.groupby.GroupBy)
        assert eq(d.sum(), p.sum())
        assert eq(d.min(), p.min())
        assert eq(d.max(), p.max())
        assert eq(d.count(), p.count())
        assert eq(d.mean(), p.mean().astype(float))
def test_groupby_get_group():
    """get_group on grouped frames and series matches pandas."""
    dsk = {('x', 0): pd.DataFrame({'a': [1, 2, 6], 'b': [4, 2, 7]},
                                  index=[0, 1, 3]),
           ('x', 1): pd.DataFrame({'a': [4, 2, 6], 'b': [3, 3, 1]},
                                  index=[5, 6, 8]),
           ('x', 2): pd.DataFrame({'a': [4, 3, 7], 'b': [1, 1, 3]},
                                  index=[9, 9, 9])}
    d = dd.DataFrame(dsk, 'x', ['a', 'b'], [0, 4, 9, 9])
    full = d.compute()
    for ddkey, pdkey in [('b', 'b'), (d.b, full.b),
                         (d.b + 1, full.b + 1)]:
        ddgrouped = d.groupby(ddkey)
        pdgrouped = full.groupby(pdkey)
        # DataFrame
        assert eq(ddgrouped.get_group(2), pdgrouped.get_group(2))
        assert eq(ddgrouped.get_group(3), pdgrouped.get_group(3))
        # Series
        assert eq(ddgrouped.a.get_group(3), pdgrouped.a.get_group(3))
        assert eq(ddgrouped.a.get_group(2), pdgrouped.a.get_group(2))
def test_arithmetics():
    """Elementwise arithmetic across partitionings, shared and differing indexes."""
    pdf2 = pd.DataFrame({'a': [1, 2, 3, 4, 5, 6, 7, 8],
                         'b': [5, 6, 7, 8, 1, 2, 3, 4]})
    pdf3 = pd.DataFrame({'a': [5, 6, 7, 8, 4, 3, 2, 1],
                         'b': [2, 4, 5, 3, 4, 2, 1, 0]})
    ddf2 = dd.from_pandas(pdf2, 3)
    ddf3 = dd.from_pandas(pdf3, 2)
    dsk4 = {('y', 0): pd.DataFrame({'a': [3, 2, 1], 'b': [7, 8, 9]},
                                   index=[0, 1, 3]),
            ('y', 1): pd.DataFrame({'a': [5, 2, 8], 'b': [4, 2, 3]},
                                   index=[5, 6, 8]),
            ('y', 2): pd.DataFrame({'a': [1, 4, 10], 'b': [1, 0, 5]},
                                   index=[9, 9, 9])}
    ddf4 = dd.DataFrame(dsk4, 'y', ['a', 'b'], [0, 4, 9, 9])
    pdf4 = ddf4.compute()
    # Arithmetics
    cases = [(d, d, full, full),
             (d, d.repartition([0, 1, 3, 6, 9]), full, full),
             (ddf2, ddf3, pdf2, pdf3),
             (ddf2.repartition([0, 3, 6, 7]), ddf3.repartition([0, 7]),
              pdf2, pdf3),
             (ddf2.repartition([0, 7]), ddf3.repartition([0, 2, 4, 5, 7]),
              pdf2, pdf3),
             (d, ddf4, full, pdf4),
             (d, ddf4.repartition([0, 9]), full, pdf4),
             (d.repartition([0, 3, 9]), ddf4.repartition([0, 5, 9]),
              full, pdf4),
             # dask + pandas
             (d, pdf4, full, pdf4), (ddf2, pdf3, pdf2, pdf3)]
    for (l, r, el, er) in cases:
        check_series_arithmetics(l.a, r.b, el.a, er.b)
        check_frame_arithmetics(l, r, el, er)
    # different index, pandas raises ValueError in comparison ops
    pdf5 = pd.DataFrame({'a': [3, 2, 1, 5, 2, 8, 1, 4, 10],
                         'b': [7, 8, 9, 4, 2, 3, 1, 0, 5]},
                        index=[0, 1, 3, 5, 6, 8, 9, 9, 9])
    ddf5 = dd.from_pandas(pdf5, 2)
    pdf6 = pd.DataFrame({'a': [3, 2, 1, 5, 2, 8, 1, 4, 10],
                         'b': [7, 8, 9, 5, 7, 8, 4, 2, 5]},
                        index=[0, 1, 2, 3, 4, 5, 6, 7, 9])
    ddf6 = dd.from_pandas(pdf6, 4)
    pdf7 = pd.DataFrame({'a': [1, 2, 3, 4, 5, 6, 7, 8],
                         'b': [5, 6, 7, 8, 1, 2, 3, 4]},
                        index=list('aaabcdeh'))
    pdf8 = pd.DataFrame({'a': [5, 6, 7, 8, 4, 3, 2, 1],
                         'b': [2, 4, 5, 3, 4, 2, 1, 0]},
                        index=list('abcdefgh'))
    ddf7 = dd.from_pandas(pdf7, 3)
    ddf8 = dd.from_pandas(pdf8, 4)
    pdf9 = pd.DataFrame({'a': [1, 2, 3, 4, 5, 6, 7, 8],
                         'b': [5, 6, 7, 8, 1, 2, 3, 4],
                         'c': [5, 6, 7, 8, 1, 2, 3, 4]},
                        index=list('aaabcdeh'))
    pdf10 = pd.DataFrame({'b': [5, 6, 7, 8, 4, 3, 2, 1],
                          'c': [2, 4, 5, 3, 4, 2, 1, 0],
                          'd': [2, 4, 5, 3, 4, 2, 1, 0]},
                         index=list('abcdefgh'))
    ddf9 = dd.from_pandas(pdf9, 3)
    ddf10 = dd.from_pandas(pdf10, 4)
    # Arithmetics with different index
    cases = [(ddf5, ddf6, pdf5, pdf6),
             (ddf5.repartition([0, 9]), ddf6, pdf5, pdf6),
             (ddf5.repartition([0, 5, 9]), ddf6.repartition([0, 7, 9]),
              pdf5, pdf6),
             (ddf7, ddf8, pdf7, pdf8),
             (ddf7.repartition(['a', 'c', 'h']), ddf8.repartition(['a', 'h']),
              pdf7, pdf8),
             (ddf7.repartition(['a', 'b', 'e', 'h']),
              ddf8.repartition(['a', 'e', 'h']), pdf7, pdf8),
             (ddf9, ddf10, pdf9, pdf10),
             (ddf9.repartition(['a', 'c', 'h']), ddf10.repartition(['a', 'h']),
              pdf9, pdf10),
             # dask + pandas
             (ddf5, pdf6, pdf5, pdf6), (ddf7, pdf8, pdf7, pdf8),
             (ddf9, pdf10, pdf9, pdf10)]
    for (l, r, el, er) in cases:
        check_series_arithmetics(l.a, r.b, el.a, er.b,
                                 allow_comparison_ops=False)
        check_frame_arithmetics(l, r, el, er,
                                allow_comparison_ops=False)
def test_arithmetics_different_index():
    """Arithmetic between frames whose indexes overlap, are disjoint or nested."""
    # index are different, but overwraps
    pdf1 = pd.DataFrame({'a': [1, 2, 3, 4, 5], 'b': [3, 5, 2, 5, 7]},
                        index=[1, 2, 3, 4, 5])
    ddf1 = dd.from_pandas(pdf1, 2)
    pdf2 = pd.DataFrame({'a': [3, 2, 6, 7, 8], 'b': [9, 4, 2, 6, 2]},
                        index=[3, 4, 5, 6, 7])
    ddf2 = dd.from_pandas(pdf2, 2)
    # index are not overwrapped
    pdf3 = pd.DataFrame({'a': [1, 2, 3, 4, 5], 'b': [3, 5, 2, 5, 7]},
                        index=[1, 2, 3, 4, 5])
    ddf3 = dd.from_pandas(pdf3, 2)
    pdf4 = pd.DataFrame({'a': [3, 2, 6, 7, 8], 'b': [9, 4, 2, 6, 2]},
                        index=[10, 11, 12, 13, 14])
    ddf4 = dd.from_pandas(pdf4, 2)
    # index is included in another
    pdf5 = pd.DataFrame({'a': [1, 2, 3, 4, 5], 'b': [3, 5, 2, 5, 7]},
                        index=[1, 3, 5, 7, 9])
    ddf5 = dd.from_pandas(pdf5, 2)
    pdf6 = pd.DataFrame({'a': [3, 2, 6, 7, 8], 'b': [9, 4, 2, 6, 2]},
                        index=[2, 3, 4, 5, 6])
    ddf6 = dd.from_pandas(pdf6, 2)
    cases = [(ddf1, ddf2, pdf1, pdf2),
             (ddf2, ddf1, pdf2, pdf1),
             (ddf1.repartition([1, 3, 5]), ddf2.repartition([3, 4, 7]),
              pdf1, pdf2),
             (ddf2.repartition([3, 4, 5, 7]), ddf1.repartition([1, 2, 4, 5]),
              pdf2, pdf1),
             (ddf3, ddf4, pdf3, pdf4),
             (ddf4, ddf3, pdf4, pdf3),
             (ddf3.repartition([1, 2, 3, 4, 5]),
              ddf4.repartition([10, 11, 12, 13, 14]), pdf3, pdf4),
             (ddf4.repartition([10, 14]), ddf3.repartition([1, 3, 4, 5]),
              pdf4, pdf3),
             (ddf5, ddf6, pdf5, pdf6),
             (ddf6, ddf5, pdf6, pdf5),
             (ddf5.repartition([1, 7, 8, 9]), ddf6.repartition([2, 3, 4, 6]),
              pdf5, pdf6),
             (ddf6.repartition([2, 6]), ddf5.repartition([1, 3, 7, 9]),
              pdf6, pdf5),
             # dask + pandas
             (ddf1, pdf2, pdf1, pdf2), (ddf2, pdf1, pdf2, pdf1),
             (ddf3, pdf4, pdf3, pdf4), (ddf4, pdf3, pdf4, pdf3),
             (ddf5, pdf6, pdf5, pdf6), (ddf6, pdf5, pdf6, pdf5)]
    for (l, r, el, er) in cases:
        check_series_arithmetics(l.a, r.b, el.a, er.b,
                                 allow_comparison_ops=False)
        check_frame_arithmetics(l, r, el, er,
                                allow_comparison_ops=False)
    pdf7 = pd.DataFrame({'a': [1, 2, 3, 4, 5, 6, 7, 8],
                         'b': [5, 6, 7, 8, 1, 2, 3, 4]},
                        index=[0, 2, 4, 8, 9, 10, 11, 13])
    pdf8 = pd.DataFrame({'a': [5, 6, 7, 8, 4, 3, 2, 1],
                         'b': [2, 4, 5, 3, 4, 2, 1, 0]},
                        index=[1, 3, 4, 8, 9, 11, 12, 13])
    ddf7 = dd.from_pandas(pdf7, 3)
    ddf8 = dd.from_pandas(pdf8, 2)
    pdf9 = pd.DataFrame({'a': [1, 2, 3, 4, 5, 6, 7, 8],
                         'b': [5, 6, 7, 8, 1, 2, 3, 4]},
                        index=[0, 2, 4, 8, 9, 10, 11, 13])
    pdf10 = pd.DataFrame({'a': [5, 6, 7, 8, 4, 3, 2, 1],
                          'b': [2, 4, 5, 3, 4, 2, 1, 0]},
                         index=[0, 3, 4, 8, 9, 11, 12, 13])
    ddf9 = dd.from_pandas(pdf9, 3)
    ddf10 = dd.from_pandas(pdf10, 2)
    cases = [(ddf7, ddf8, pdf7, pdf8),
             (ddf8, ddf7, pdf8, pdf7),
             (ddf7.repartition([0, 13]),
              ddf8.repartition([0, 4, 11, 14], force=True),
              pdf7, pdf8),
             (ddf8.repartition([-5, 10, 15], force=True),
              ddf7.repartition([-1, 4, 11, 14], force=True), pdf8, pdf7),
             (ddf7.repartition([0, 8, 12, 13]),
              ddf8.repartition([0, 2, 8, 12, 13], force=True), pdf7, pdf8),
             (ddf8.repartition([-5, 0, 10, 20], force=True),
              ddf7.repartition([-1, 4, 11, 13], force=True), pdf8, pdf7),
             (ddf9, ddf10, pdf9, pdf10),
             (ddf10, ddf9, pdf10, pdf9),
             # dask + pandas
             (ddf7, pdf8, pdf7, pdf8), (ddf8, pdf7, pdf8, pdf7),
             (ddf9, pdf10, pdf9, pdf10), (ddf10, pdf9, pdf10, pdf9)]
    for (l, r, el, er) in cases:
        check_series_arithmetics(l.a, r.b, el.a, er.b,
                                 allow_comparison_ops=False)
        check_frame_arithmetics(l, r, el, er,
                                allow_comparison_ops=False)
def check_series_arithmetics(l, r, el, er, allow_comparison_ops=True):
    """Check every binary/unary op on a dask series pair against pandas.

    Args:
        l: dask Series under test (left operand).
        r: dask or pandas Series (right operand).
        el, er: the pandas Series expected to match l and r.
        allow_comparison_ops: skip comparison/bitwise ops when the operands'
            indexes differ (pandas raises ValueError there).
    """
    assert isinstance(l, dd.Series)
    assert isinstance(r, (dd.Series, pd.Series))
    assert isinstance(el, pd.Series)
    assert isinstance(er, pd.Series)
    # l, r may be repartitioned, test whether repartition keeps original data
    assert eq(l, el)
    assert eq(r, er)
    assert eq(l + r, el + er)
    assert eq(l * r, el * er)
    assert eq(l - r, el - er)
    assert eq(l / r, el / er)
    assert eq(l // r, el // er)
    assert eq(l ** r, el ** er)
    assert eq(l % r, el % er)
    if allow_comparison_ops:
        # comparison is allowed if data have same index
        assert eq(l & r, el & er)
        assert eq(l | r, el | er)
        assert eq(l ^ r, el ^ er)
        assert eq(l > r, el > er)
        assert eq(l < r, el < er)
        assert eq(l >= r, el >= er)
        assert eq(l <= r, el <= er)
        assert eq(l == r, el == er)
        assert eq(l != r, el != er)
    assert eq(l + 2, el + 2)
    assert eq(l * 2, el * 2)
    assert eq(l - 2, el - 2)
    assert eq(l / 2, el / 2)
    assert eq(l & True, el & True)
    assert eq(l | True, el | True)
    assert eq(l ^ True, el ^ True)
    assert eq(l // 2, el // 2)
    assert eq(l ** 2, el ** 2)
    assert eq(l % 2, el % 2)
    assert eq(l > 2, el > 2)
    assert eq(l < 2, el < 2)
    assert eq(l >= 2, el >= 2)
    assert eq(l <= 2, el <= 2)
    assert eq(l == 2, el == 2)
    assert eq(l != 2, el != 2)
    assert eq(2 + r, 2 + er)
    assert eq(2 * r, 2 * er)
    assert eq(2 - r, 2 - er)
    assert eq(2 / r, 2 / er)
    assert eq(True & r, True & er)
    assert eq(True | r, True | er)
    assert eq(True ^ r, True ^ er)
    assert eq(2 // r, 2 // er)
    assert eq(2 ** r, 2 ** er)
    assert eq(2 % r, 2 % er)
    assert eq(2 > r, 2 > er)
    assert eq(2 < r, 2 < er)
    assert eq(2 >= r, 2 >= er)
    assert eq(2 <= r, 2 <= er)
    assert eq(2 == r, 2 == er)
    assert eq(2 != r, 2 != er)
    assert eq(-l, -el)
    assert eq(abs(l), abs(el))
    if allow_comparison_ops:
        # comparison is allowed if data have same index
        assert eq(~(l == r), ~(el == er))
def check_frame_arithmetics(l, r, el, er, allow_comparison_ops=True):
    """Check every binary/unary op on a dask DataFrame pair against pandas.

    Args:
        l: dask DataFrame under test (left operand).
        r: dask or pandas DataFrame (right operand).
        el, er: the pandas DataFrames expected to match l and r.
        allow_comparison_ops: skip comparison/bitwise ops when the operands'
            indexes differ (pandas raises ValueError there).
    """
    assert isinstance(l, dd.DataFrame)
    assert isinstance(r, (dd.DataFrame, pd.DataFrame))
    assert isinstance(el, pd.DataFrame)
    assert isinstance(er, pd.DataFrame)
    # l, r may be repartitioned, test whether repartition keeps original data
    assert eq(l, el)
    assert eq(r, er)
    assert eq(l + r, el + er)
    assert eq(l * r, el * er)
    assert eq(l - r, el - er)
    assert eq(l / r, el / er)
    assert eq(l // r, el // er)
    assert eq(l ** r, el ** er)
    assert eq(l % r, el % er)
    if allow_comparison_ops:
        # comparison is allowed if data have same index
        assert eq(l & r, el & er)
        assert eq(l | r, el | er)
        assert eq(l ^ r, el ^ er)
        assert eq(l > r, el > er)
        assert eq(l < r, el < er)
        assert eq(l >= r, el >= er)
        assert eq(l <= r, el <= er)
        assert eq(l == r, el == er)
        assert eq(l != r, el != er)
    assert eq(l + 2, el + 2)
    assert eq(l * 2, el * 2)
    assert eq(l - 2, el - 2)
    assert eq(l / 2, el / 2)
    assert eq(l & True, el & True)
    assert eq(l | True, el | True)
    assert eq(l ^ True, el ^ True)
    assert eq(l // 2, el // 2)
    assert eq(l ** 2, el ** 2)
    assert eq(l % 2, el % 2)
    assert eq(l > 2, el > 2)
    assert eq(l < 2, el < 2)
    assert eq(l >= 2, el >= 2)
    assert eq(l <= 2, el <= 2)
    assert eq(l == 2, el == 2)
    assert eq(l != 2, el != 2)
    assert eq(2 + l, 2 + el)
    assert eq(2 * l, 2 * el)
    assert eq(2 - l, 2 - el)
    assert eq(2 / l, 2 / el)
    assert eq(True & l, True & el)
    assert eq(True | l, True | el)
    assert eq(True ^ l, True ^ el)
    assert eq(2 // l, 2 // el)
    assert eq(2 ** l, 2 ** el)
    assert eq(2 % l, 2 % el)
    assert eq(2 > l, 2 > el)
    assert eq(2 < l, 2 < el)
    assert eq(2 >= l, 2 >= el)
    assert eq(2 <= l, 2 <= el)
    assert eq(2 == l, 2 == el)
    assert eq(2 != l, 2 != el)
    assert eq(-l, -el)
    assert eq(abs(l), abs(el))
    if allow_comparison_ops:
        # comparison is allowed if data have same index
        assert eq(~(l == r), ~(el == er))
def test_scalar_arithmetics():
    """Every binary/unary op between two dask Scalars matches plain ints."""
    l = dd.core.Scalar({('l', 0): 10}, 'l')
    r = dd.core.Scalar({('r', 0): 4}, 'r')
    el = 10
    er = 4
    assert isinstance(l, dd.core.Scalar)
    assert isinstance(r, dd.core.Scalar)
    # l, r may be repartitioned, test whether repartition keeps original data
    assert eq(l, el)
    assert eq(r, er)
    assert eq(l + r, el + er)
    assert eq(l * r, el * er)
    assert eq(l - r, el - er)
    assert eq(l / r, el / er)
    assert eq(l // r, el // er)
    assert eq(l ** r, el ** er)
    assert eq(l % r, el % er)
    assert eq(l & r, el & er)
    assert eq(l | r, el | er)
    assert eq(l ^ r, el ^ er)
    assert eq(l > r, el > er)
    assert eq(l < r, el < er)
    assert eq(l >= r, el >= er)
    assert eq(l <= r, el <= er)
    assert eq(l == r, el == er)
    assert eq(l != r, el != er)
    assert eq(l + 2, el + 2)
    assert eq(l * 2, el * 2)
    assert eq(l - 2, el - 2)
    assert eq(l / 2, el / 2)
    assert eq(l & True, el & True)
    assert eq(l | True, el | True)
    assert eq(l ^ True, el ^ True)
    assert eq(l // 2, el // 2)
    assert eq(l ** 2, el ** 2)
    assert eq(l % 2, el % 2)
    assert eq(l > 2, el > 2)
    assert eq(l < 2, el < 2)
    assert eq(l >= 2, el >= 2)
    assert eq(l <= 2, el <= 2)
    assert eq(l == 2, el == 2)
    assert eq(l != 2, el != 2)
    assert eq(2 + r, 2 + er)
    assert eq(2 * r, 2 * er)
    assert eq(2 - r, 2 - er)
    assert eq(2 / r, 2 / er)
    assert eq(True & r, True & er)
    assert eq(True | r, True | er)
    assert eq(True ^ r, True ^ er)
    assert eq(2 // r, 2 // er)
    assert eq(2 ** r, 2 ** er)
    assert eq(2 % r, 2 % er)
    assert eq(2 > r, 2 > er)
    assert eq(2 < r, 2 < er)
    assert eq(2 >= r, 2 >= er)
    assert eq(2 <= r, 2 <= er)
    assert eq(2 == r, 2 == er)
    assert eq(2 != r, 2 != er)
    assert eq(-l, -el)
    assert eq(abs(l), abs(el))
    assert eq(~(l == r), ~(el == er))
def test_scalar_arithmetics_with_dask_instances():
    """Mixing a dask Scalar with pandas/dask containers yields the right type."""
    s = dd.core.Scalar({('s', 0): 10}, 's')
    e = 10
    pds = pd.Series([1, 2, 3, 4, 5, 6, 7])
    dds = dd.from_pandas(pds, 2)
    pdf = pd.DataFrame({'a': [1, 2, 3, 4, 5, 6, 7],
                        'b': [7, 6, 5, 4, 3, 2, 1]})
    ddf = dd.from_pandas(pdf, 2)
    # pandas Series
    result = pds + s   # this result pd.Series (automatically computed)
    assert isinstance(result, pd.Series)
    assert eq(result, pds + e)
    result = s + pds   # this result dd.Series
    assert isinstance(result, dd.Series)
    assert eq(result, pds + e)
    # dask Series
    result = dds + s   # this result dd.Series
    assert isinstance(result, dd.Series)
    assert eq(result, pds + e)
    result = s + dds   # this result dd.Series
    assert isinstance(result, dd.Series)
    assert eq(result, pds + e)
    # pandas DataFrame
    result = pdf + s   # this result pd.DataFrame (automatically computed)
    assert isinstance(result, pd.DataFrame)
    assert eq(result, pdf + e)
    result = s + pdf   # this result dd.DataFrame
    assert isinstance(result, dd.DataFrame)
    assert eq(result, pdf + e)
    # dask DataFrame
    result = ddf + s   # this result dd.DataFrame
    assert isinstance(result, dd.DataFrame)
    assert eq(result, pdf + e)
    result = s + ddf   # this result dd.DataFrame
    assert isinstance(result, dd.DataFrame)
    assert eq(result, pdf + e)
def test_frame_series_arithmetic_methods():
    """Named arithmetic methods (add/sub/...; fill_value and axis) match pandas."""
    pdf1 = pd.DataFrame({'A': np.arange(10),
                         'B': [np.nan, 1, 2, 3, 4] * 2,
                         'C': [np.nan] * 10,
                         'D': np.arange(10)},
                        index=list('abcdefghij'), columns=list('ABCD'))
    pdf2 = pd.DataFrame(np.random.randn(10, 4),
                        index=list('abcdefghjk'), columns=list('ABCX'))
    ps1 = pdf1.A
    ps2 = pdf2.A
    ddf1 = dd.from_pandas(pdf1, 2)
    ddf2 = dd.from_pandas(pdf2, 2)
    ds1 = ddf1.A
    ds2 = ddf2.A
    s = dd.core.Scalar({('s', 0): 4}, 's')
    for l, r, el, er in [(ddf1, ddf2, pdf1, pdf2), (ds1, ds2, ps1, ps2),
                         (ddf1.repartition(['a', 'f', 'j']), ddf2, pdf1, pdf2),
                         (ds1.repartition(['a', 'b', 'f', 'j']), ds2, ps1, ps2),
                         (ddf1, ddf2.repartition(['a', 'k']), pdf1, pdf2),
                         (ds1, ds2.repartition(['a', 'b', 'd', 'h', 'k']), ps1, ps2),
                         (ddf1, 3, pdf1, 3), (ds1, 3, ps1, 3),
                         (ddf1, s, pdf1, 4), (ds1, s, ps1, 4)]:
        # l, r may be repartitioned, test whether repartition keeps original data
        assert eq(l, el)
        assert eq(r, er)
        assert eq(l.add(r, fill_value=0), el.add(er, fill_value=0))
        assert eq(l.sub(r, fill_value=0), el.sub(er, fill_value=0))
        assert eq(l.mul(r, fill_value=0), el.mul(er, fill_value=0))
        assert eq(l.div(r, fill_value=0), el.div(er, fill_value=0))
        assert eq(l.truediv(r, fill_value=0), el.truediv(er, fill_value=0))
        assert eq(l.floordiv(r, fill_value=1), el.floordiv(er, fill_value=1))
        assert eq(l.mod(r, fill_value=0), el.mod(er, fill_value=0))
        assert eq(l.pow(r, fill_value=0), el.pow(er, fill_value=0))
        assert eq(l.radd(r, fill_value=0), el.radd(er, fill_value=0))
        assert eq(l.rsub(r, fill_value=0), el.rsub(er, fill_value=0))
        assert eq(l.rmul(r, fill_value=0), el.rmul(er, fill_value=0))
        assert eq(l.rdiv(r, fill_value=0), el.rdiv(er, fill_value=0))
        assert eq(l.rtruediv(r, fill_value=0), el.rtruediv(er, fill_value=0))
        assert eq(l.rfloordiv(r, fill_value=1), el.rfloordiv(er, fill_value=1))
        assert eq(l.rmod(r, fill_value=0), el.rmod(er, fill_value=0))
        assert eq(l.rpow(r, fill_value=0), el.rpow(er, fill_value=0))
    for l, r, el, er in [(ddf1, ds2, pdf1, ps2), (ddf1, ddf2.X, pdf1, pdf2.X)]:
        assert eq(l, el)
        assert eq(r, er)
        # must specify axis=0 to add Series to each column
        # axis=1 is not supported (add to each row)
        assert eq(l.add(r, axis=0), el.add(er, axis=0))
        assert eq(l.sub(r, axis=0), el.sub(er, axis=0))
        assert eq(l.mul(r, axis=0), el.mul(er, axis=0))
        assert eq(l.div(r, axis=0), el.div(er, axis=0))
        assert eq(l.truediv(r, axis=0), el.truediv(er, axis=0))
        assert eq(l.floordiv(r, axis=0), el.floordiv(er, axis=0))
        assert eq(l.mod(r, axis=0), el.mod(er, axis=0))
        assert eq(l.pow(r, axis=0), el.pow(er, axis=0))
        assert eq(l.radd(r, axis=0), el.radd(er, axis=0))
        assert eq(l.rsub(r, axis=0), el.rsub(er, axis=0))
        assert eq(l.rmul(r, axis=0), el.rmul(er, axis=0))
        assert eq(l.rdiv(r, axis=0), el.rdiv(er, axis=0))
        assert eq(l.rtruediv(r, axis=0), el.rtruediv(er, axis=0))
        assert eq(l.rfloordiv(r, axis=0), el.rfloordiv(er, axis=0))
        assert eq(l.rmod(r, axis=0), el.rmod(er, axis=0))
        assert eq(l.rpow(r, axis=0), el.rpow(er, axis=0))
        assert raises(ValueError, lambda: l.add(r, axis=1))
    for l, r, el, er in [(ddf1, pdf2, pdf1, pdf2), (ddf1, ps2, pdf1, ps2)]:
        assert eq(l, el)
        assert eq(r, er)
        for axis in [0, 1, 'index', 'columns']:
            assert eq(l.add(r, axis=axis), el.add(er, axis=axis))
            assert eq(l.sub(r, axis=axis), el.sub(er, axis=axis))
            assert eq(l.mul(r, axis=axis), el.mul(er, axis=axis))
            assert eq(l.div(r, axis=axis), el.div(er, axis=axis))
            assert eq(l.truediv(r, axis=axis), el.truediv(er, axis=axis))
            assert eq(l.floordiv(r, axis=axis), el.floordiv(er, axis=axis))
            assert eq(l.mod(r, axis=axis), el.mod(er, axis=axis))
            assert eq(l.pow(r, axis=axis), el.pow(er, axis=axis))
            assert eq(l.radd(r, axis=axis), el.radd(er, axis=axis))
            assert eq(l.rsub(r, axis=axis), el.rsub(er, axis=axis))
            assert eq(l.rmul(r, axis=axis), el.rmul(er, axis=axis))
            assert eq(l.rdiv(r, axis=axis), el.rdiv(er, axis=axis))
            assert eq(l.rtruediv(r, axis=axis), el.rtruediv(er, axis=axis))
            assert eq(l.rfloordiv(r, axis=axis), el.rfloordiv(er, axis=axis))
            assert eq(l.rmod(r, axis=axis), el.rmod(er, axis=axis))
            assert eq(l.rpow(r, axis=axis), el.rpow(er, axis=axis))
def test_reductions():
    """Series reductions (incl. NaN-heavy and bool data) match pandas."""
    nans1 = pd.Series([1] + [np.nan] * 4 + [2] + [np.nan] * 3)
    nands1 = dd.from_pandas(nans1, 2)
    nans2 = pd.Series([1] + [np.nan] * 8)
    nands2 = dd.from_pandas(nans2, 2)
    nans3 = pd.Series([np.nan] * 9)
    nands3 = dd.from_pandas(nans3, 2)
    bools = pd.Series([True, False, True, False, True], dtype=bool)
    boolds = dd.from_pandas(bools, 2)
    for dds, pds in [(d.b, full.b), (d.a, full.a),
                     (d['a'], full['a']), (d['b'], full['b']),
                     (nands1, nans1), (nands2, nans2), (nands3, nans3),
                     (boolds, bools)]:
        assert isinstance(dds, dd.Series)
        assert isinstance(pds, pd.Series)
        assert eq(dds.sum(), pds.sum())
        assert eq(dds.min(), pds.min())
        assert eq(dds.max(), pds.max())
        assert eq(dds.count(), pds.count())
        assert eq(dds.std(), pds.std())
        assert eq(dds.var(), pds.var())
        assert eq(dds.std(ddof=0), pds.std(ddof=0))
        assert eq(dds.var(ddof=0), pds.var(ddof=0))
        assert eq(dds.mean(), pds.mean())
        assert eq(dds.nunique(), pds.nunique())
        assert eq(dds.nbytes, pds.nbytes)
    assert_dask_graph(d.b.sum(), 'series-sum')
    assert_dask_graph(d.b.min(), 'series-min')
    assert_dask_graph(d.b.max(), 'series-max')
    assert_dask_graph(d.b.count(), 'series-count')
    assert_dask_graph(d.b.std(), 'series-std(ddof=1)')
    assert_dask_graph(d.b.var(), 'series-var(ddof=1)')
    assert_dask_graph(d.b.std(ddof=0), 'series-std(ddof=0)')
    assert_dask_graph(d.b.var(ddof=0), 'series-var(ddof=0)')
    assert_dask_graph(d.b.mean(), 'series-mean')
    # nunique is performed using drop-duplicates
    assert_dask_graph(d.b.nunique(), 'drop-duplicates')
def test_reduction_series_invalid_axis():
    """Series reductions with axis=1/'columns' must raise, like pandas."""
    for axis in [1, 'columns']:
        for s in [d.a, full.a]:  # both must behave the same
            assert raises(ValueError, lambda: s.sum(axis=axis))
            assert raises(ValueError, lambda: s.min(axis=axis))
            assert raises(ValueError, lambda: s.max(axis=axis))
            # only count doesn't have axis keyword
            assert raises(TypeError, lambda: s.count(axis=axis))
            assert raises(ValueError, lambda: s.std(axis=axis))
            assert raises(ValueError, lambda: s.var(axis=axis))
            assert raises(ValueError, lambda: s.mean(axis=axis))
def test_reductions_non_numeric_dtypes():
    """Reductions on object/categorical/datetime/timedelta series match pandas."""

    # test non-numeric blocks
    def check_raises(d, p, func):
        # Both dask and pandas must reject the reduction the same way.
        assert raises((TypeError, ValueError),
                      lambda: getattr(d, func)().compute())
        assert raises((TypeError, ValueError),
                      lambda: getattr(p, func)())

    pds = pd.Series(['a', 'b', 'c', 'd', 'e'])
    dds = dd.from_pandas(pds, 2)
    assert eq(dds.sum(), pds.sum())
    assert eq(dds.min(), pds.min())
    assert eq(dds.max(), pds.max())
    assert eq(dds.count(), pds.count())
    check_raises(dds, pds, 'std')
    check_raises(dds, pds, 'var')
    check_raises(dds, pds, 'mean')
    assert eq(dds.nunique(), pds.nunique())
    for pds in [pd.Series(pd.Categorical([1, 2, 3, 4, 5], ordered=True)),
                pd.Series(pd.Categorical(list('abcde'), ordered=True)),
                pd.Series(pd.date_range('2011-01-01', freq='D', periods=5))]:
        dds = dd.from_pandas(pds, 2)
        check_raises(dds, pds, 'sum')
        assert eq(dds.min(), pds.min())
        assert eq(dds.max(), pds.max())
        assert eq(dds.count(), pds.count())
        check_raises(dds, pds, 'std')
        check_raises(dds, pds, 'var')
        check_raises(dds, pds, 'mean')
        assert eq(dds.nunique(), pds.nunique())
    pds = pd.Series(pd.timedelta_range('1 days', freq='D', periods=5))
    dds = dd.from_pandas(pds, 2)
    assert eq(dds.sum(), pds.sum())
    assert eq(dds.min(), pds.min())
    assert eq(dds.max(), pds.max())
    assert eq(dds.count(), pds.count())
    # ToDo: pandas supports timedelta std, otherwise dask raises:
    # incompatible type for a datetime/timedelta operation [__pow__]
    # assert eq(dds.std(), pds.std())
    # assert eq(dds.var(), pds.var())
    # ToDo: pandas supports timedelta std, otherwise dask raises:
    # TypeError: unsupported operand type(s) for *: 'float' and 'Timedelta'
    # assert eq(dds.mean(), pds.mean())
    assert eq(dds.nunique(), pds.nunique())
def test_reductions_frame():
    """DataFrame reductions match pandas for both axes and ddof variants.

    Also checks the dask graph key names produced for each reduction.
    """
    assert eq(d.sum(), full.sum())
    assert eq(d.min(), full.min())
    assert eq(d.max(), full.max())
    assert eq(d.count(), full.count())
    assert eq(d.std(), full.std())
    assert eq(d.var(), full.var())
    assert eq(d.std(ddof=0), full.std(ddof=0))
    assert eq(d.var(ddof=0), full.var(ddof=0))
    assert eq(d.mean(), full.mean())

    # both integer and label forms of axis are accepted
    for axis in [0, 1, 'index', 'columns']:
        assert eq(d.sum(axis=axis), full.sum(axis=axis))
        assert eq(d.min(axis=axis), full.min(axis=axis))
        assert eq(d.max(axis=axis), full.max(axis=axis))
        assert eq(d.count(axis=axis), full.count(axis=axis))
        assert eq(d.std(axis=axis), full.std(axis=axis))
        assert eq(d.var(axis=axis), full.var(axis=axis))
        assert eq(d.std(axis=axis, ddof=0), full.std(axis=axis, ddof=0))
        assert eq(d.var(axis=axis, ddof=0), full.var(axis=axis, ddof=0))
        assert eq(d.mean(axis=axis), full.mean(axis=axis))

    assert raises(ValueError, lambda: d.sum(axis='incorrect').compute())

    # axis=0: check reduction task names in the graph
    assert_dask_graph(d.sum(), 'dataframe-sum')
    assert_dask_graph(d.min(), 'dataframe-min')
    assert_dask_graph(d.max(), 'dataframe-max')
    assert_dask_graph(d.count(), 'dataframe-count')
    # std, var, mean consists from sum and count operations
    assert_dask_graph(d.std(), 'dataframe-sum')
    assert_dask_graph(d.std(), 'dataframe-count')
    assert_dask_graph(d.var(), 'dataframe-sum')
    assert_dask_graph(d.var(), 'dataframe-count')
    assert_dask_graph(d.mean(), 'dataframe-sum')
    assert_dask_graph(d.mean(), 'dataframe-count')

    # axis=1: per-row reductions get their own task names
    assert_dask_graph(d.sum(axis=1), 'dataframe-sum(axis=1)')
    assert_dask_graph(d.min(axis=1), 'dataframe-min(axis=1)')
    assert_dask_graph(d.max(axis=1), 'dataframe-max(axis=1)')
    assert_dask_graph(d.count(axis=1), 'dataframe-count(axis=1)')
    assert_dask_graph(d.std(axis=1), 'dataframe-std(axis=1, ddof=1)')
    assert_dask_graph(d.var(axis=1), 'dataframe-var(axis=1, ddof=1)')
    assert_dask_graph(d.mean(axis=1), 'dataframe-mean(axis=1)')
def test_reductions_frame_dtypes():
    """Frame reductions on mixed dtypes (int/float/datetime/str) match pandas."""
    df = pd.DataFrame({'int': [1, 2, 3, 4, 5, 6, 7, 8],
                       'float': [1., 2., 3., 4., np.nan, 6., 7., 8.],
                       'dt': [pd.NaT] + [datetime(2011, i, 1) for i in range(1, 8)],
                       'str': list('abcdefgh')})
    ddf = dd.from_pandas(df, 3)
    assert eq(df.sum(), ddf.sum())
    assert eq(df.min(), ddf.min())
    assert eq(df.max(), ddf.max())
    assert eq(df.count(), ddf.count())
    assert eq(df.std(), ddf.std())
    assert eq(df.var(), ddf.var())
    assert eq(df.std(ddof=0), ddf.std(ddof=0))
    assert eq(df.var(ddof=0), ddf.var(ddof=0))
    assert eq(df.mean(), ddf.mean())

    assert eq(df._get_numeric_data(), ddf._get_numeric_data())

    # when all columns are already numeric, _get_numeric_data must be a
    # no-op that reuses the same graph
    numerics = ddf[['int', 'float']]
    assert numerics._get_numeric_data().dask == numerics.dask
def test_describe():
    """describe() matches pandas when approximate quantiles are exact.

    The data is built so dask's approximate quantiles coincide with the
    exact pandas quantiles; string columns must be excluded as in pandas.
    """
    # prepare test case which approx quantiles will be the same as actuals
    s = pd.Series(list(range(20)) * 4)
    df = pd.DataFrame({'a': list(range(20)) * 4, 'b': list(range(4)) * 20})

    ds = dd.from_pandas(s, 4)
    ddf = dd.from_pandas(df, 4)

    assert eq(s.describe(), ds.describe())
    assert eq(df.describe(), ddf.describe())

    # remove string columns
    df = pd.DataFrame({'a': list(range(20)) * 4, 'b': list(range(4)) * 20,
                       'c': list('abcd') * 20})
    ddf = dd.from_pandas(df, 4)
    assert eq(df.describe(), ddf.describe())
def test_cumulative():
    """cumsum/cumprod/cummin/cummax match pandas on frames (both axes) and series."""
    pdf = pd.DataFrame(np.random.randn(100, 5), columns=list('abcde'))
    ddf = dd.from_pandas(pdf, 5)

    assert eq(ddf.cumsum(), pdf.cumsum())
    assert eq(ddf.cumprod(), pdf.cumprod())
    assert eq(ddf.cummin(), pdf.cummin())
    assert eq(ddf.cummax(), pdf.cummax())

    assert eq(ddf.cumsum(axis=1), pdf.cumsum(axis=1))
    assert eq(ddf.cumprod(axis=1), pdf.cumprod(axis=1))
    assert eq(ddf.cummin(axis=1), pdf.cummin(axis=1))
    assert eq(ddf.cummax(axis=1), pdf.cummax(axis=1))

    assert eq(ddf.a.cumsum(), pdf.a.cumsum())
    assert eq(ddf.a.cumprod(), pdf.a.cumprod())
    assert eq(ddf.a.cummin(), pdf.a.cummin())
    assert eq(ddf.a.cummax(), pdf.a.cummax())
def test_dropna():
    """dropna matches pandas for series and frames, incl. how= and subset=."""
    df = pd.DataFrame({'x': [np.nan, 2, 3, 4, np.nan, 6],
                       'y': [1, 2, np.nan, 4, np.nan, np.nan],
                       'z': [1, 2, 3, 4, np.nan, np.nan]},
                      index=[10, 20, 30, 40, 50, 60])
    ddf = dd.from_pandas(df, 3)

    assert eq(ddf.x.dropna(), df.x.dropna())
    assert eq(ddf.y.dropna(), df.y.dropna())
    assert eq(ddf.z.dropna(), df.z.dropna())

    assert eq(ddf.dropna(), df.dropna())
    assert eq(ddf.dropna(how='all'), df.dropna(how='all'))
    assert eq(ddf.dropna(subset=['x']), df.dropna(subset=['x']))
    assert eq(ddf.dropna(subset=['y', 'z']), df.dropna(subset=['y', 'z']))
    assert eq(ddf.dropna(subset=['y', 'z'], how='all'),
              df.dropna(subset=['y', 'z'], how='all'))
def test_where_mask():
    """where/mask match pandas across aligned, misaligned and mixed inputs.

    Cases cover: same index/columns, different indexes, different column
    sets, repartitioned operands, and plain pandas frames as the condition.
    """
    pdf1 = pd.DataFrame({'a': [1, 2, 3, 4, 5, 6, 7, 8, 9],
                         'b': [3, 5, 2, 5, 7, 2, 4, 2, 4]})
    ddf1 = dd.from_pandas(pdf1, 2)
    pdf2 = pd.DataFrame({'a': [True, False, True] * 3,
                         'b': [False, False, True] * 3})
    ddf2 = dd.from_pandas(pdf2, 2)

    # different index
    pdf3 = pd.DataFrame({'a': [1, 2, 3, 4, 5, 6, 7, 8, 9],
                         'b': [3, 5, 2, 5, 7, 2, 4, 2, 4]},
                        index=[0, 1, 2, 3, 4, 5, 6, 7, 8])
    ddf3 = dd.from_pandas(pdf3, 2)
    pdf4 = pd.DataFrame({'a': [True, False, True] * 3,
                         'b': [False, False, True] * 3},
                        index=[5, 6, 7, 8, 9, 10, 11, 12, 13])
    ddf4 = dd.from_pandas(pdf4, 2)

    # different columns
    pdf5 = pd.DataFrame({'a': [1, 2, 3, 4, 5, 6, 7, 8, 9],
                         'b': [9, 4, 2, 6, 2, 3, 1, 6, 2],
                         'c': [5, 6, 7, 8, 9, 10, 11, 12, 13]},
                        index=[0, 1, 2, 3, 4, 5, 6, 7, 8])
    ddf5 = dd.from_pandas(pdf5, 2)
    pdf6 = pd.DataFrame({'a': [True, False, True] * 3,
                         'b': [False, False, True] * 3,
                         'd': [False] * 9,
                         'e': [True] * 9},
                        index=[5, 6, 7, 8, 9, 10, 11, 12, 13])
    ddf6 = dd.from_pandas(pdf6, 2)

    # (data, condition, expected data, expected condition) quadruples
    cases = [(ddf1, ddf2, pdf1, pdf2),
             (ddf1.repartition([0, 3, 6, 8]), ddf2, pdf1, pdf2),
             (ddf1, ddf4, pdf3, pdf4),
             (ddf3.repartition([0, 4, 6, 8]), ddf4.repartition([5, 9, 10, 13]),
              pdf3, pdf4),
             (ddf5, ddf6, pdf5, pdf6),
             (ddf5.repartition([0, 4, 7, 8]), ddf6, pdf5, pdf6),

             # use pd.DataFrame as cond
             (ddf1, pdf2, pdf1, pdf2),
             (ddf1, pdf4, pdf3, pdf4),
             (ddf5, pdf6, pdf5, pdf6)]

    for ddf, ddcond, pdf, pdcond in cases:
        assert isinstance(ddf, dd.DataFrame)
        assert isinstance(ddcond, (dd.DataFrame, pd.DataFrame))
        assert isinstance(pdf, pd.DataFrame)
        assert isinstance(pdcond, pd.DataFrame)

        assert eq(ddf.where(ddcond), pdf.where(pdcond))
        assert eq(ddf.mask(ddcond), pdf.mask(pdcond))
        assert eq(ddf.where(ddcond, -ddf), pdf.where(pdcond, -pdf))
        assert eq(ddf.mask(ddcond, -ddf), pdf.mask(pdcond, -pdf))

        # ToDo: Should work on pandas 0.17
        # https://github.com/pydata/pandas/pull/10283
        # assert eq(ddf.where(ddcond.a, -ddf), pdf.where(pdcond.a, -pdf))
        # assert eq(ddf.mask(ddcond.a, -ddf), pdf.mask(pdcond.a, -pdf))

        assert eq(ddf.a.where(ddcond.a), pdf.a.where(pdcond.a))
        assert eq(ddf.a.mask(ddcond.a), pdf.a.mask(pdcond.a))
        assert eq(ddf.a.where(ddcond.a, -ddf.a), pdf.a.where(pdcond.a, -pdf.a))
        assert eq(ddf.a.mask(ddcond.a, -ddf.a), pdf.a.mask(pdcond.a, -pdf.a))
def test_map_partitions_multi_argument():
    """map_partitions combines multiple frame arguments and scalar extras."""
    def add_two(a, b):
        return a + b

    def add_three(a, b, c):
        return a + b + c

    expected = full.a + full.b
    assert eq(dd.map_partitions(add_two, None, d.a, d.b), expected)
    assert eq(dd.map_partitions(add_three, None, d.a, d.b, 1), expected + 1)
def test_map_partitions():
    """The map_partitions method forwards results, with or without columns=."""
    assert eq(d.map_partitions(lambda df: df, columns=d.columns), full)
    assert eq(d.map_partitions(lambda df: df), full)
    # columns=None yields a Series-producing map
    result = d.map_partitions(lambda df: df.sum(axis=1), columns=None)
    assert eq(result, full.sum(axis=1))
def test_map_partitions_names():
    """map_partitions graph key names are deterministic.

    Identical calls (same function object / same token) must produce
    identical dask graphs so results can be shared and cached.
    """
    func = lambda x: x
    assert sorted(dd.map_partitions(func, d.columns, d).dask) == \
           sorted(dd.map_partitions(func, d.columns, d).dask)
    # distinct lambdas but an explicit token pin the name
    assert sorted(dd.map_partitions(lambda x: x, d.columns, d, token=1).dask) == \
           sorted(dd.map_partitions(lambda x: x, d.columns, d, token=1).dask)

    func = lambda x, y: x
    assert sorted(dd.map_partitions(func, d.columns, d, d).dask) == \
           sorted(dd.map_partitions(func, d.columns, d, d).dask)
def test_map_partitions_column_info():
    """map_partitions propagates the column/name metadata given by the caller.

    Fixes: use ``is None`` instead of ``== None`` (PEP 8 singleton
    comparison) and drop an exact copy-paste duplicate of the Series
    name-propagation check that added no coverage.
    """
    df = pd.DataFrame({'x': [1, 2, 3, 4], 'y': [5, 6, 7, 8]})
    a = dd.from_pandas(df, npartitions=2)

    # DataFrame in, DataFrame out: columns carried through
    b = dd.map_partitions(lambda x: x, a.columns, a)
    assert b.columns == a.columns
    assert eq(df, b)

    # Series in, Series out: name carried through
    b = dd.map_partitions(lambda x: x, a.x.name, a.x)
    assert b.name == a.x.name
    assert eq(df.x, b)

    # column info of None produces an unnamed Series
    b = dd.map_partitions(lambda df: df.x + df.y, None, a)
    assert b.name is None
    assert isinstance(b, dd.Series)

    # an explicit name string is attached to the resulting Series
    b = dd.map_partitions(lambda df: df.x + 1, 'x', a)
    assert isinstance(b, dd.Series)
    assert b.name == 'x'
def test_map_partitions_method_names():
    """DataFrame.map_partitions honors the ``columns`` keyword.

    Fixes: compare against None with ``is`` rather than ``==`` (PEP 8
    singleton comparison); behavior of the assertions is unchanged.
    """
    df = pd.DataFrame({'x': [1, 2, 3, 4], 'y': [5, 6, 7, 8]})
    a = dd.from_pandas(df, npartitions=2)

    # identity map keeps the DataFrame type and columns
    b = a.map_partitions(lambda x: x)
    assert isinstance(b, dd.DataFrame)
    assert b.columns == a.columns

    # columns=None -> unnamed Series
    b = a.map_partitions(lambda df: df.x + 1, columns=None)
    assert isinstance(b, dd.Series)
    assert b.name is None

    # explicit column name -> named Series
    b = a.map_partitions(lambda df: df.x + 1, columns='x')
    assert isinstance(b, dd.Series)
    assert b.name == 'x'
def test_map_partitions_keeps_kwargs_in_dict():
    """Keyword arguments appear in the graph and contribute to the task name."""
    df = pd.DataFrame({'x': [1, 2, 3, 4], 'y': [5, 6, 7, 8]})
    a = dd.from_pandas(df, npartitions=2)

    def f(s, x=1):
        return s + x

    b = a.x.map_partitions(f, x=5)

    # kwargs must be stored inside the task spec, readable in the graph
    assert "'x': 5" in str(b.dask)
    eq(df.x + 5, b)

    # different kwargs must yield different graph names
    assert a.x.map_partitions(f, x=5)._name != a.x.map_partitions(f, x=6)._name
def test_drop_duplicates():
    """drop_duplicates matches pandas on Series, DataFrame and Index."""
    pairs = [(d.a, full.a), (d, full), (d.index, full.index)]
    for dask_obj, pandas_obj in pairs:
        assert eq(dask_obj.drop_duplicates(), pandas_obj.drop_duplicates())
def test_drop_duplicates_subset():
    """drop_duplicates with keep/take_last and subset= matches pandas.

    Fixes: the pandas version gate used lexicographic string comparison
    (``pd.__version__ < '0.17'``), which misorders versions such as
    '0.9' vs '0.17'; parse the version before comparing.
    """
    from distutils.version import LooseVersion

    df = pd.DataFrame({'x': [1, 2, 3, 1, 2, 3],
                       'y': ['a', 'a', 'b', 'b', 'c', 'c']})
    ddf = dd.from_pandas(df, npartitions=2)

    if LooseVersion(pd.__version__) < '0.17':
        # pandas < 0.17 spelled the keyword ``take_last``
        kwargs = [{'take_last': False}, {'take_last': True}]
    else:
        kwargs = [{'keep': 'first'}, {'keep': 'last'}]

    for kwarg in kwargs:
        assert eq(df.x.drop_duplicates(**kwarg),
                  ddf.x.drop_duplicates(**kwarg))
        for ss in [['x'], 'y', ['x', 'y']]:
            assert eq(df.drop_duplicates(subset=ss, **kwarg),
                      ddf.drop_duplicates(subset=ss, **kwarg))
def test_full_groupby():
    """groupby validates keys, exposes columns, and apply matches pandas."""
    assert raises(Exception, lambda: d.groupby('does_not_exist'))
    assert raises(Exception, lambda: d.groupby('a').does_not_exist)
    # grouped columns are discoverable via dir() for tab-completion
    assert 'b' in dir(d.groupby('a'))

    def func(df):
        # demean column b within each group (mutates the partition copy)
        df['b'] = df.b - df.b.mean()
        return df

    assert eq(d.groupby('a').apply(func), full.groupby('a').apply(func))
    # repeated identical apply calls must produce identical graphs
    assert sorted(d.groupby('a').apply(func).dask) == \
           sorted(d.groupby('a').apply(func).dask)
def test_groupby_on_index():
    """Grouping by a column equals grouping by that column as the index."""
    e = d.set_index('a')
    efull = full.set_index('a')
    assert eq(d.groupby('a').b.mean(), e.groupby(e.index).b.mean())

    def func(df):
        # demean b per group; .loc avoids SettingWithCopy issues
        df.loc[:, 'b'] = df.b - df.b.mean()
        return df

    assert eq(d.groupby('a').apply(func).set_index('a'),
              e.groupby(e.index).apply(func))
    assert eq(d.groupby('a').apply(func), full.groupby('a').apply(func))
    assert eq(d.groupby('a').apply(func).set_index('a'),
              full.groupby('a').apply(func).set_index('a'))
    assert eq(efull.groupby(efull.index).apply(func),
              e.groupby(e.index).apply(func))
def test_set_partition():
    """set_partition on a column sets divisions and matches set_index."""
    result = d.set_partition('b', [0, 2, 9])
    assert result.divisions == (0, 2, 9)
    assert eq(result, full.set_index('b'))
def test_set_partition_compute():
    """set_partition(compute=True) gives the same result with a smaller graph.

    Checked both for a column-name key and for a Series key.
    """
    d2 = d.set_partition('b', [0, 2, 9])
    d3 = d.set_partition('b', [0, 2, 9], compute=True)

    assert eq(d2, d3)
    assert eq(d2, full.set_index('b'))
    assert eq(d3, full.set_index('b'))
    # eager compute should collapse tasks out of the graph
    assert len(d2.dask) > len(d3.dask)

    d4 = d.set_partition(d.b, [0, 2, 9])
    d5 = d.set_partition(d.b, [0, 2, 9], compute=True)
    exp = full.copy()
    exp.index = exp.b
    assert eq(d4, d5)
    assert eq(d4, exp)
    assert eq(d5, exp)
    assert len(d4.dask) > len(d5.dask)
def test_get_division():
    """get_division(i) extracts one partition; the pieces tile the data.

    Verified for DataFrame and Series; out-of-range indexes must raise.
    """
    pdf = pd.DataFrame(np.random.randn(10, 5), columns=list('abcde'))
    ddf = dd.from_pandas(pdf, 3)
    assert ddf.divisions == (0, 4, 8, 9)

    # DataFrame: each division is the half-open row range [div_i, div_{i+1})
    div1 = ddf.get_division(0)
    assert isinstance(div1, dd.DataFrame)
    eq(div1, pdf.loc[0:3])
    div2 = ddf.get_division(1)
    eq(div2, pdf.loc[4:7])
    div3 = ddf.get_division(2)
    eq(div3, pdf.loc[8:9])
    assert len(div1) + len(div2) + len(div3) == len(pdf)

    # Series
    div1 = ddf.a.get_division(0)
    assert isinstance(div1, dd.Series)
    eq(div1, pdf.a.loc[0:3])
    div2 = ddf.a.get_division(1)
    eq(div2, pdf.a.loc[4:7])
    div3 = ddf.a.get_division(2)
    eq(div3, pdf.a.loc[8:9])
    assert len(div1) + len(div2) + len(div3) == len(pdf.a)

    assert raises(ValueError, lambda: ddf.get_division(-1))
    assert raises(ValueError, lambda: ddf.get_division(3))
def test_categorize():
    """categorize converts object columns to pandas category dtype.

    NOTE: this test intentionally shadows the module-level ``d`` and
    ``full`` fixtures with locally constructed objects.
    """
    dsk = {('x', 0): pd.DataFrame({'a': ['Alice', 'Bob', 'Alice'],
                                   'b': ['C', 'D', 'E']},
                                  index=[0, 1, 2]),
           ('x', 1): pd.DataFrame({'a': ['Bob', 'Charlie', 'Charlie'],
                                   'b': ['A', 'A', 'B']},
                                  index=[3, 4, 5])}
    d = dd.DataFrame(dsk, 'x', ['a', 'b'], [0, 3, 5])
    full = d.compute()

    # categorize a single column; the other stays object dtype
    c = d.categorize('a')
    cfull = c.compute()
    assert cfull.dtypes['a'] == 'category'
    assert cfull.dtypes['b'] == 'O'

    assert list(cfull.a.astype('O')) == list(full.a)

    # the dtype must already be categorical at the partition level
    assert (d._get(c.dask, c._keys()[:1])[0].dtypes == cfull.dtypes).all()

    # categorize() with no argument converts every object column
    assert (d.categorize().compute().dtypes == 'category').all()
def test_ndim():
    """ndim is 2 for the frame and 1 for a column and the index."""
    expectations = [(d, 2), (d.a, 1), (d.index, 1)]
    for obj, expected in expectations:
        assert obj.ndim == expected
def test_dtype():
    """Column dtypes of the dask frame agree with the pandas frame."""
    matches = d.dtypes == full.dtypes
    assert matches.all()
def test_cache():
    """cache() rewrites every task into a getitem from the cached store."""
    d2 = d.cache()
    # after caching, each task is just a lookup (getitem) of a stored value
    assert all(task[0] == getitem for task in d2.dask.values())
    assert eq(d2.a, d.a)
def test_value_counts():
    """Series.value_counts matches pandas (names ignored: pandas<0.17 bug)."""
    df = pd.DataFrame({'x': [1, 2, 1, 3, 3, 1, 4]})
    a = dd.from_pandas(df, npartitions=3)
    result = a.x.value_counts()
    expected = df.x.value_counts()
    # because of pandas bug, value_counts doesn't hold name (fixed in 0.17)
    # https://github.com/pydata/pandas/pull/10419
    assert eq(result, expected, check_names=False)
def test_isin():
    """Series.isin with a small membership list matches pandas."""
    values = [0, 1, 2]
    assert eq(d.a.isin(values), full.a.isin(values))
def test_len():
    """len() of the dask frame and of a column equals the pandas lengths."""
    assert len(full) == len(d)
    assert len(full.a) == len(d.a)
def test_quantile():
    """quantile on series and index: type, divisions and value ranges.

    Dask quantiles are approximate, so values are range-checked rather
    than compared exactly against pandas.
    """
    # series / multiple
    result = d.b.quantile([.3, .7])
    exp = full.b.quantile([.3, .7])  # approximate result may differ from pandas
    assert len(result) == 2
    assert result.divisions == (.3, .7)
    assert eq(result.index, exp.index)
    assert isinstance(result, dd.Series)

    result = result.compute()
    assert isinstance(result, pd.Series)
    assert result.iloc[0] == 0
    assert 5 < result.iloc[1] < 6

    # index
    s = pd.Series(np.arange(10), index=np.arange(10))
    ds = dd.from_pandas(s, 2)

    result = ds.index.quantile([.3, .7])
    exp = s.quantile([.3, .7])
    assert len(result) == 2
    assert result.divisions == (.3, .7)
    assert eq(result.index, exp.index)
    assert isinstance(result, dd.Series)

    result = result.compute()
    assert isinstance(result, pd.Series)
    assert 1 < result.iloc[0] < 2
    assert 7 < result.iloc[1] < 8

    # series / single: scalar quantile yields a dask Scalar
    result = d.b.quantile(.5)
    exp = full.b.quantile(.5)  # approximate result may differ from pandas
    assert isinstance(result, dd.core.Scalar)
    result = result.compute()
    assert 4 < result < 6
def test_empty_quantile():
    """quantile([]) yields an empty result with undefined divisions."""
    result = d.b.quantile([])
    exp = full.b.quantile([])
    assert result.divisions == (None, None)

    # because of a pandas bug, name is not preserved
    # https://github.com/pydata/pandas/pull/10881
    assert result.name == 'b'
    assert result.compute().name == 'b'
    assert eq(result, exp, check_names=False)
def test_dataframe_quantile():
    """DataFrame.quantile keeps column order, drops non-numeric columns.

    Values are range-checked (dask quantiles are approximate); axis=1 is
    exact, and list-of-quantiles with axis=1 must raise.
    """
    # column X is for test column order and result division
    df = pd.DataFrame({'A': np.arange(20),
                       'X': np.arange(20, 40),
                       'B': np.arange(10, 30),
                       'C': ['a', 'b', 'c', 'd'] * 5},
                      columns=['A', 'X', 'B', 'C'])
    ddf = dd.from_pandas(df, 3)

    # default (scalar) quantile: one partition, Series result
    result = ddf.quantile()
    assert result.npartitions == 1
    assert result.divisions == ('A', 'X')

    result = result.compute()
    assert isinstance(result, pd.Series)
    tm.assert_index_equal(result.index, pd.Index(['A', 'X', 'B']))
    assert (result > pd.Series([16, 36, 26], index=['A', 'X', 'B'])).all()
    assert (result < pd.Series([17, 37, 27], index=['A', 'X', 'B'])).all()

    # list of quantiles: DataFrame result indexed by the quantiles
    result = ddf.quantile([0.25, 0.75])
    assert result.npartitions == 1
    assert result.divisions == (0.25, 0.75)

    result = result.compute()
    assert isinstance(result, pd.DataFrame)
    tm.assert_index_equal(result.index, pd.Index([0.25, 0.75]))
    tm.assert_index_equal(result.columns, pd.Index(['A', 'X', 'B']))
    minexp = pd.DataFrame([[1, 21, 11], [17, 37, 27]],
                          index=[0.25, 0.75], columns=['A', 'X', 'B'])
    assert (result > minexp).all().all()
    maxexp = pd.DataFrame([[2, 22, 12], [18, 38, 28]],
                          index=[0.25, 0.75], columns=['A', 'X', 'B'])
    assert (result < maxexp).all().all()

    assert eq(ddf.quantile(axis=1), df.quantile(axis=1))
    assert raises(ValueError, lambda: ddf.quantile([0.25, 0.75], axis=1))
def test_index():
    """The dask frame's index matches the pandas frame's index."""
    result = d.index
    assert eq(result, full.index)
def test_loc():
    """loc slicing matches pandas and trims divisions to the slice bounds."""
    assert d.loc[3:8].divisions[0] == 3
    assert d.loc[3:8].divisions[-1] == 8

    # scalar loc collapses divisions to a single point
    assert d.loc[5].divisions == (5, 5)

    assert eq(d.loc[5], full.loc[5])
    assert eq(d.loc[3:8], full.loc[3:8])
    assert eq(d.loc[:8], full.loc[:8])
    assert eq(d.loc[3:], full.loc[3:])

    assert eq(d.a.loc[5], full.a.loc[5])
    assert eq(d.a.loc[3:8], full.a.loc[3:8])
    assert eq(d.a.loc[:8], full.a.loc[:8])
    assert eq(d.a.loc[3:], full.a.loc[3:])

    # missing scalar label raises; out-of-range slices yield empty frames
    assert raises(KeyError, lambda: d.loc[1000])
    assert eq(d.loc[1000:], full.loc[1000:])
    assert eq(d.loc[-2000:-1000], full.loc[-2000:-1000])

    # graph names: same slice -> same graph, different slice -> different
    assert sorted(d.loc[5].dask) == sorted(d.loc[5].dask)
    assert sorted(d.loc[5].dask) != sorted(d.loc[6].dask)
def test_loc_with_text_dates():
    """loc accepts date strings against a datetime-indexed series."""
    A = tm.makeTimeSeries(10).iloc[:5]
    B = tm.makeTimeSeries(10).iloc[5:]
    s = dd.Series({('df', 0): A, ('df', 1): B}, 'df', None,
                  [A.index.min(), A.index.max(), B.index.max()])

    # a slice covering everything keeps divisions and data unchanged
    assert s.loc['2000': '2010'].divisions == s.divisions
    assert eq(s.loc['2000': '2010'], s)
    assert len(s.loc['2000-01-03': '2000-01-05'].compute()) == 3
def test_loc_with_series():
    """Boolean-series loc indexing matches pandas; graph names are stable."""
    assert eq(d.loc[d.a % 2 == 0], full.loc[full.a % 2 == 0])

    # identical predicates share a graph; different predicates do not
    assert sorted(d.loc[d.a % 2].dask) == sorted(d.loc[d.a % 2].dask)
    assert sorted(d.loc[d.a % 2].dask) != sorted(d.loc[d.a % 3].dask)
def test_iloc_raises():
    """Positional indexing via iloc is unsupported and must raise."""
    def use_iloc():
        return d.iloc[:5]

    assert raises(NotImplementedError, use_iloc)
def test_getitem():
    """__getitem__ on columns, column lists and boolean series matches pandas.

    NOTE(review): the ``raises`` assertions below exercise the *pandas*
    frame ``df``, not the dask frame ``ddf`` — presumably a typo for
    ``ddf``; confirm this dask version validates keys eagerly before
    changing them.
    """
    df = pd.DataFrame({'A': [1, 2, 3, 4, 5, 6, 7, 8, 9],
                       'B': [9, 8, 7, 6, 5, 4, 3, 2, 1],
                       'C': [True, False, True] * 3},
                      columns=list('ABC'))
    ddf = dd.from_pandas(df, 2)
    assert eq(ddf['A'], df['A'])
    assert eq(ddf[['A', 'B']], df[['A', 'B']])
    assert eq(ddf[ddf.C], df[df.C])
    # a boolean mask with different partitioning still aligns correctly
    assert eq(ddf[ddf.C.repartition([0, 2, 5, 8])], df[df.C])

    assert raises(KeyError, lambda: df['X'])
    assert raises(KeyError, lambda: df[['A', 'X']])
    assert raises(AttributeError, lambda: df.X)

    # not str/unicode: integer column labels
    df = pd.DataFrame(np.random.randn(10, 5))
    ddf = dd.from_pandas(df, 2)
    assert eq(ddf[0], df[0])
    assert eq(ddf[[1, 2]], df[[1, 2]])

    assert raises(KeyError, lambda: df[8])
    assert raises(KeyError, lambda: df[[1, 8]])
def test_assign():
    """assign with columns derived from existing ones matches pandas."""
    result = d.assign(c=d.a + 1, e=d.a + d.b)
    expected = full.assign(c=full.a + 1, e=full.a + full.b)
    assert eq(result, expected)
def test_map():
    """Series.map with a python callable matches pandas."""
    def increment(value):
        return value + 1

    assert eq(d.a.map(increment), full.a.map(increment))
def test_concat():
    """_concat of empty frames keeps the columns and yields length 0."""
    x = _concat([pd.DataFrame(columns=['a', 'b']),
                 pd.DataFrame(columns=['a', 'b'])])
    assert list(x.columns) == ['a', 'b']
    assert len(x) == 0
def test_args():
    """Objects can be reconstructed from their ``_args`` tuple.

    Round-trips DataFrame, Series and Scalar through ``type(x)(*x._args)``.
    """
    e = d.assign(c=d.a + 1)
    f = type(e)(*e._args)
    assert eq(e, f)
    assert eq(d.a, type(d.a)(*d.a._args))
    assert eq(d.a.sum(), type(d.a.sum())(*d.a.sum()._args))
def test_known_divisions():
    """known_divisions is True only when all divisions are specified.

    Fixes a bug: the final assertion re-checked the module-level ``d``
    (already asserted above) instead of the freshly constructed ``df``
    built with divisions [0, 1] — the case the test meant to cover.
    """
    assert d.known_divisions

    # all-None divisions -> unknown
    df = dd.DataFrame({('x', 0): 'foo', ('x', 1): 'bar'}, 'x',
                      ['a', 'b'], divisions=[None, None, None])
    assert not df.known_divisions

    # concrete divisions -> known
    df = dd.DataFrame({('x', 0): 'foo'}, 'x',
                      ['a', 'b'], divisions=[0, 1])
    assert df.known_divisions
def test_unknown_divisions():
    """Element-wise ops work without divisions, but loc must raise.

    NOTE: intentionally shadows the module-level ``d`` and ``full``.
    """
    dsk = {('x', 0): pd.DataFrame({'a': [1, 2, 3], 'b': [4, 5, 6]}),
           ('x', 1): pd.DataFrame({'a': [4, 5, 6], 'b': [3, 2, 1]}),
           ('x', 2): pd.DataFrame({'a': [7, 8, 9], 'b': [0, 0, 0]})}
    d = dd.DataFrame(dsk, 'x', ['a', 'b'], [None, None, None, None])
    full = d.compute(get=dask.get)

    assert eq(d.a.sum(), full.a.sum())
    assert eq(d.a + d.b + 1, full.a + full.b + 1)

    # label-based selection needs known divisions
    assert raises(ValueError, lambda: d.loc[3])
def test_concat2():
    """concat of frames with unknown divisions stacks partitions.

    Checks partition counts, the all-None divisions of the result, graph
    determinism, join='inner', and that axis=1 is rejected when divisions
    are unknown.
    """
    dsk = {('x', 0): pd.DataFrame({'a': [1, 2, 3], 'b': [4, 5, 6]}),
           ('x', 1): pd.DataFrame({'a': [4, 5, 6], 'b': [3, 2, 1]}),
           ('x', 2): pd.DataFrame({'a': [7, 8, 9], 'b': [0, 0, 0]})}
    a = dd.DataFrame(dsk, 'x', ['a', 'b'], [None, None])
    dsk = {('y', 0): pd.DataFrame({'a': [10, 20, 30], 'b': [40, 50, 60]}),
           ('y', 1): pd.DataFrame({'a': [40, 50, 60], 'b': [30, 20, 10]}),
           ('y', 2): pd.DataFrame({'a': [70, 80, 90], 'b': [0, 0, 0]})}
    b = dd.DataFrame(dsk, 'y', ['a', 'b'], [None, None])

    dsk = {('y', 0): pd.DataFrame({'b': [10, 20, 30], 'c': [40, 50, 60]}),
           ('y', 1): pd.DataFrame({'b': [40, 50, 60], 'c': [30, 20, 10]})}
    c = dd.DataFrame(dsk, 'y', ['b', 'c'], [None, None])

    dsk = {('y', 0): pd.DataFrame({'b': [10, 20, 30], 'c': [40, 50, 60],
                                   'd': [70, 80, 90]}),
           ('y', 1): pd.DataFrame({'b': [40, 50, 60], 'c': [30, 20, 10],
                                   'd': [90, 80, 70]},
                                  index=[3, 4, 5])}
    d = dd.DataFrame(dsk, 'y', ['b', 'c', 'd'], [0, 3, 5])

    cases = [[a, b], [a, c], [a, d]]
    assert dd.concat([a]) is a
    for case in cases:
        result = dd.concat(case)
        pdcase = [c.compute() for c in case]

        assert result.npartitions == case[0].npartitions + case[1].npartitions
        assert result.divisions == (None, ) * (result.npartitions + 1)
        assert eq(pd.concat(pdcase), result)
        # identical concat calls must build identical graphs
        assert result.dask == dd.concat(case).dask

        result = dd.concat(case, join='inner')
        assert result.npartitions == case[0].npartitions + case[1].npartitions
        assert result.divisions == (None, ) * (result.npartitions + 1)
        assert eq(pd.concat(pdcase, join='inner'), result)
        assert result.dask == dd.concat(case, join='inner').dask

        msg = ('Unable to concatenate DataFrame with unknown division '
               'specifying axis=1')
        with tm.assertRaisesRegexp(ValueError, msg):
            dd.concat(case, axis=1)
def test_concat3():
    """concat of ordered, known-division frames keeps combined divisions."""
    pdf1 = pd.DataFrame(np.random.randn(6, 5),
                        columns=list('ABCDE'), index=list('abcdef'))
    pdf2 = pd.DataFrame(np.random.randn(6, 5),
                        columns=list('ABCFG'), index=list('ghijkl'))
    pdf3 = pd.DataFrame(np.random.randn(6, 5),
                        columns=list('ABCHI'), index=list('mnopqr'))
    ddf1 = dd.from_pandas(pdf1, 2)
    ddf2 = dd.from_pandas(pdf2, 3)
    ddf3 = dd.from_pandas(pdf3, 2)

    result = dd.concat([ddf1, ddf2])
    # divisions join at the boundary: drop the last of the first frame
    assert result.divisions == ddf1.divisions[:-1] + ddf2.divisions
    assert result.npartitions == ddf1.npartitions + ddf2.npartitions
    assert eq(result, pd.concat([pdf1, pdf2]))

    assert eq(dd.concat([ddf1, ddf2], interleave_partitions=True),
              pd.concat([pdf1, pdf2]))

    result = dd.concat([ddf1, ddf2, ddf3])
    assert result.divisions == (ddf1.divisions[:-1] + ddf2.divisions[:-1] +
                                ddf3.divisions)
    assert result.npartitions == (ddf1.npartitions + ddf2.npartitions +
                                  ddf3.npartitions)
    assert eq(result, pd.concat([pdf1, pdf2, pdf3]))

    assert eq(dd.concat([ddf1, ddf2, ddf3], interleave_partitions=True),
              pd.concat([pdf1, pdf2, pdf3]))
def test_concat4_interleave_partitions():
    """Overlapping known divisions require interleave_partitions=True.

    NOTE: the 'cannnot' typo below is intentional — the regexp must match
    the library's actual error message verbatim.
    """
    pdf1 = pd.DataFrame(np.random.randn(10, 5),
                        columns=list('ABCDE'), index=list('abcdefghij'))
    pdf2 = pd.DataFrame(np.random.randn(13, 5),
                        columns=list('ABCDE'), index=list('fghijklmnopqr'))
    pdf3 = pd.DataFrame(np.random.randn(13, 6),
                        columns=list('CDEXYZ'), index=list('fghijklmnopqr'))

    ddf1 = dd.from_pandas(pdf1, 2)
    ddf2 = dd.from_pandas(pdf2, 3)
    ddf3 = dd.from_pandas(pdf3, 2)

    msg = ('All inputs have known divisions which cannnot be '
           'concatenated in order. Specify '
           'interleave_partitions=True to ignore order')

    cases = [[ddf1, ddf1], [ddf1, ddf2], [ddf1, ddf3], [ddf2, ddf1],
             [ddf2, ddf3], [ddf3, ddf1], [ddf3, ddf2]]
    for case in cases:
        pdcase = [c.compute() for c in case]

        # without interleaving the overlap is an error ...
        with tm.assertRaisesRegexp(ValueError, msg):
            dd.concat(case)

        # ... with interleaving it matches pandas
        assert eq(dd.concat(case, interleave_partitions=True),
                  pd.concat(pdcase))
        assert eq(dd.concat(case, join='inner', interleave_partitions=True),
                  pd.concat(pdcase, join='inner'))

    msg = "'join' must be 'inner' or 'outer'"
    with tm.assertRaisesRegexp(ValueError, msg):
        dd.concat([ddf1, ddf1], join='invalid', interleave_partitions=True)
def test_concat5():
    """concat across mixed shapes: frames, series, and dask+pandas inputs.

    Exercises axis=0 with interleaving, axis=1 with outer/inner joins,
    and mixed dask/pandas argument lists.
    """
    pdf1 = pd.DataFrame(np.random.randn(7, 5),
                        columns=list('ABCDE'), index=list('abcdefg'))
    pdf2 = pd.DataFrame(np.random.randn(7, 6),
                        columns=list('FGHIJK'), index=list('abcdefg'))
    pdf3 = pd.DataFrame(np.random.randn(7, 6),
                        columns=list('FGHIJK'), index=list('cdefghi'))
    pdf4 = pd.DataFrame(np.random.randn(7, 5),
                        columns=list('FGHAB'), index=list('cdefghi'))
    pdf5 = pd.DataFrame(np.random.randn(7, 5),
                        columns=list('FGHAB'), index=list('fklmnop'))

    ddf1 = dd.from_pandas(pdf1, 2)
    ddf2 = dd.from_pandas(pdf2, 3)
    ddf3 = dd.from_pandas(pdf3, 2)
    ddf4 = dd.from_pandas(pdf4, 2)
    ddf5 = dd.from_pandas(pdf5, 3)

    cases = [[ddf1, ddf2], [ddf1, ddf3], [ddf1, ddf4], [ddf1, ddf5],
             [ddf3, ddf4], [ddf3, ddf5], [ddf5, ddf1, ddf4], [ddf5, ddf3],
             [ddf1.A, ddf4.A], [ddf2.F, ddf3.F], [ddf4.A, ddf5.A],
             [ddf1.A, ddf4.F], [ddf2.F, ddf3.H], [ddf4.A, ddf5.B],
             [ddf1, ddf4.A], [ddf3.F, ddf2], [ddf5, ddf1.A, ddf2]]

    for case in cases:
        pdcase = [c.compute() for c in case]

        assert eq(dd.concat(case, interleave_partitions=True),
                  pd.concat(pdcase))

        assert eq(dd.concat(case, join='inner', interleave_partitions=True),
                  pd.concat(pdcase, join='inner'))

        assert eq(dd.concat(case, axis=1), pd.concat(pdcase, axis=1))

        assert eq(dd.concat(case, axis=1, join='inner'),
                  pd.concat(pdcase, axis=1, join='inner'))

    # Dask + pandas
    cases = [[ddf1, pdf2], [ddf1, pdf3], [pdf1, ddf4],
             [pdf1.A, ddf4.A], [ddf2.F, pdf3.F],
             [ddf1, pdf4.A], [ddf3.F, pdf2], [ddf2, pdf1, ddf3.F]]

    for case in cases:
        pdcase = [c.compute() if isinstance(c, _Frame) else c for c in case]

        assert eq(dd.concat(case, interleave_partitions=True),
                  pd.concat(pdcase))

        assert eq(dd.concat(case, join='inner', interleave_partitions=True),
                  pd.concat(pdcase, join='inner'))

        assert eq(dd.concat(case, axis=1), pd.concat(pdcase, axis=1))

        assert eq(dd.concat(case, axis=1, join='inner'),
                  pd.concat(pdcase, axis=1, join='inner'))
def test_append():
    """append matches pandas for frames, series, and mixed dask/pandas.

    Also checks that appending with overlapping (unordered) divisions
    raises with the documented message.
    """
    df = pd.DataFrame({'a': [1, 2, 3, 4, 5, 6],
                       'b': [1, 2, 3, 4, 5, 6]})
    df2 = pd.DataFrame({'a': [1, 2, 3, 4, 5, 6],
                        'b': [1, 2, 3, 4, 5, 6]},
                       index=[6, 7, 8, 9, 10, 11])
    df3 = pd.DataFrame({'b': [1, 2, 3, 4, 5, 6],
                        'c': [1, 2, 3, 4, 5, 6]},
                       index=[6, 7, 8, 9, 10, 11])

    ddf = dd.from_pandas(df, 2)
    ddf2 = dd.from_pandas(df2, 2)
    ddf3 = dd.from_pandas(df3, 2)

    assert eq(ddf.append(ddf2), df.append(df2))
    assert eq(ddf.a.append(ddf2.a), df.a.append(df2.a))
    # different columns
    assert eq(ddf.append(ddf3), df.append(df3))
    assert eq(ddf.a.append(ddf3.b), df.a.append(df3.b))

    # dask + pandas
    assert eq(ddf.append(df2), df.append(df2))
    assert eq(ddf.a.append(df2.a), df.a.append(df2.a))

    assert eq(ddf.append(df3), df.append(df3))
    assert eq(ddf.a.append(df3.b), df.a.append(df3.b))

    # appending a pandas Series as a row
    s = pd.Series([7, 8], name=6, index=['a', 'b'])
    assert eq(ddf.append(s), df.append(s))

    # overlapping index ranges cannot be appended with known divisions
    df4 = pd.DataFrame({'a': [1, 2, 3, 4, 5, 6],
                        'b': [1, 2, 3, 4, 5, 6]},
                       index=[4, 5, 6, 7, 8, 9])
    ddf4 = dd.from_pandas(df4, 2)
    msg = ("Unable to append two dataframes to each other with known "
           "divisions if those divisions are not ordered. "
           "The divisions/index of the second dataframe must be "
           "greater than the divisions/index of the first dataframe.")
    with tm.assertRaisesRegexp(ValueError, msg):
        ddf.append(ddf4)
def test_append2():
    """append with unknown divisions matches pandas in every combination.

    Covers frame+frame, series+frame, differing column sets, and dask
    appended with computed (pandas) operands.
    """
    dsk = {('x', 0): pd.DataFrame({'a': [1, 2, 3], 'b': [4, 5, 6]}),
           ('x', 1): pd.DataFrame({'a': [4, 5, 6], 'b': [3, 2, 1]}),
           ('x', 2): pd.DataFrame({'a': [7, 8, 9], 'b': [0, 0, 0]})}
    ddf1 = dd.DataFrame(dsk, 'x', ['a', 'b'], [None, None])

    dsk = {('y', 0): pd.DataFrame({'a': [10, 20, 30], 'b': [40, 50, 60]}),
           ('y', 1): pd.DataFrame({'a': [40, 50, 60], 'b': [30, 20, 10]}),
           ('y', 2): pd.DataFrame({'a': [70, 80, 90], 'b': [0, 0, 0]})}
    ddf2 = dd.DataFrame(dsk, 'y', ['a', 'b'], [None, None])

    dsk = {('y', 0): pd.DataFrame({'b': [10, 20, 30], 'c': [40, 50, 60]}),
           ('y', 1): pd.DataFrame({'b': [40, 50, 60], 'c': [30, 20, 10]})}
    ddf3 = dd.DataFrame(dsk, 'y', ['b', 'c'], [None, None])

    assert eq(ddf1.append(ddf2), ddf1.compute().append(ddf2.compute()))
    assert eq(ddf2.append(ddf1), ddf2.compute().append(ddf1.compute()))
    # Series + DataFrame
    assert eq(ddf1.a.append(ddf2), ddf1.a.compute().append(ddf2.compute()))
    assert eq(ddf2.a.append(ddf1), ddf2.a.compute().append(ddf1.compute()))

    # different columns
    assert eq(ddf1.append(ddf3), ddf1.compute().append(ddf3.compute()))
    assert eq(ddf3.append(ddf1), ddf3.compute().append(ddf1.compute()))
    # Series + DataFrame
    assert eq(ddf1.a.append(ddf3), ddf1.a.compute().append(ddf3.compute()))
    assert eq(ddf3.b.append(ddf1), ddf3.b.compute().append(ddf1.compute()))

    # Dask + pandas
    assert eq(ddf1.append(ddf2.compute()), ddf1.compute().append(ddf2.compute()))
    assert eq(ddf2.append(ddf1.compute()), ddf2.compute().append(ddf1.compute()))
    # Series + DataFrame
    assert eq(ddf1.a.append(ddf2.compute()), ddf1.a.compute().append(ddf2.compute()))
    assert eq(ddf2.a.append(ddf1.compute()), ddf2.a.compute().append(ddf1.compute()))

    # different columns
    assert eq(ddf1.append(ddf3.compute()), ddf1.compute().append(ddf3.compute()))
    assert eq(ddf3.append(ddf1.compute()), ddf3.compute().append(ddf1.compute()))
    # Series + DataFrame
    assert eq(ddf1.a.append(ddf3.compute()), ddf1.a.compute().append(ddf3.compute()))
    assert eq(ddf3.b.append(ddf1.compute()), ddf3.b.compute().append(ddf1.compute()))
def test_dataframe_series_are_dillable():
    """A groupby result survives a dill round-trip.

    NOTE: silently skips (returns) when dill is not installed.
    """
    try:
        import dill
    except ImportError:
        return
    e = d.groupby(d.a).b.sum()
    f = dill.loads(dill.dumps(e))
    assert eq(e, f)
def test_dataframe_series_are_pickleable():
    """A groupby result dumped with cloudpickle loads with plain pickle.

    NOTE: silently skips (returns) when cloudpickle is not installed.
    """
    try:
        import cloudpickle
        import pickle
    except ImportError:
        return

    dumps = cloudpickle.dumps
    loads = pickle.loads

    e = d.groupby(d.a).b.sum()
    f = loads(dumps(e))
    assert eq(e, f)
def test_random_partitions():
    """random_split yields DataFrames whose row counts sum to the original."""
    a, b = d.random_split([0.5, 0.5])
    assert isinstance(a, dd.DataFrame)
    assert isinstance(b, dd.DataFrame)

    # the two halves partition the rows exactly
    assert len(a.compute()) + len(b.compute()) == len(full)
def test_series_nunique():
    """Series.nunique matches pandas."""
    pandas_series = pd.Series(list('aaabbccccdddeee'), name='a')
    dask_series = dd.from_pandas(pandas_series, npartitions=3)
    assert eq(dask_series.nunique(), pandas_series.nunique())
def test_dataframe_groupby_nunique():
    """groupby(...)[col].nunique matches pandas on random float data."""
    strings = list('aaabbccccdddeee')
    data = np.random.randn(len(strings))
    ps = pd.DataFrame(dict(strings=strings, data=data))
    s = dd.from_pandas(ps, npartitions=3)
    expected = ps.groupby('strings')['data'].nunique()
    assert eq(s.groupby('strings')['data'].nunique(), expected)
def test_dataframe_groupby_nunique_across_group_same_value():
    """groupby nunique is correct when values repeat across groups."""
    strings = list('aaabbccccdddeee')
    # deliberately reuses the same small ints in multiple groups
    data = list(map(int, '123111223323412'))
    ps = pd.DataFrame(dict(strings=strings, data=data))
    s = dd.from_pandas(ps, npartitions=3)
    expected = ps.groupby('strings')['data'].nunique()
    assert eq(s.groupby('strings')['data'].nunique(), expected)
@pytest.mark.parametrize(['npartitions', 'freq', 'closed', 'label'],
                         list(product([2, 5], ['30T', 'h', 'd', 'w', 'M'],
                                      ['right', 'left'], ['right', 'left'])))
def test_series_resample(npartitions, freq, closed, label):
    """resample matches pandas over a gapped hourly index.

    Parametrized across partition counts, frequencies, and the
    closed/label edge conventions; checks both Series ('mean') and
    DataFrame ('ohlc') outputs, plus the result divisions.
    """
    index = pd.date_range('1-1-2000', '2-15-2000', freq='h')
    # leave a gap in the middle of the range
    index = index.union(pd.date_range('4-15-2000', '5-15-2000', freq='h'))
    df = pd.Series(range(len(index)), index=index)
    ds = dd.from_pandas(df, npartitions=npartitions)

    # Series output
    result = ds.resample(freq, how='mean', closed=closed, label=label).compute()
    expected = df.resample(freq, how='mean', closed=closed, label=label)
    tm.assert_series_equal(result, expected, check_dtype=False)

    # Frame output
    resampled = ds.resample(freq, how='ohlc', closed=closed, label=label)
    divisions = resampled.divisions
    result = resampled.compute()
    expected = df.resample(freq, how='ohlc', closed=closed, label=label)
    tm.assert_frame_equal(result, expected, check_dtype=False)

    # divisions must span the resampled index exactly
    assert expected.index[0] == divisions[0]
    assert expected.index[-1] == divisions[-1]
def test_series_resample_not_implemented():
index = pd.date_range(start='20120102', periods=100, freq='T')
s = pd.Series(range(len(index)), index=index)
ds = dd.from_pandas(s, npartitions=5)
# Frequency doesn't evenly divide day
assert raises(NotImplementedError, lambda: ds.resample('57T'))
# Kwargs not implemented
kwargs = {'fill_method': 'bfill', 'limit': 2, 'loffset': 2, 'base': 2,
'convention': 'end', 'kind': 'period'}
for k, v in kwargs.items():
assert raises(NotImplementedError, lambda: ds.resample('6h', **{k: v}))
def test_set_partition_2():
df = pd.DataFrame({'x': [1, 2, 3, 4, 5, 6], 'y': list('abdabd')})
ddf = dd.from_pandas(df, 2)
result = ddf.set_partition('y', ['a', 'c', 'd'])
assert result.divisions == ('a', 'c', 'd')
assert list(result.compute(get=get_sync).index[-2:]) == ['d', 'd']
def test_repartition():
    # Exhaustive checks of repartition(divisions=...) for frames and series,
    # with and without force=True (division expansion).

    def _check_split_data(orig, d):
        """Check data is split properly"""
        keys = [k for k in d.dask if k[0].startswith('repartition-split')]
        keys = sorted(keys)
        sp = pd.concat([d._get(d.dask, k) for k in keys])
        assert eq(orig, sp)
        assert eq(orig, d)

    df = pd.DataFrame({'x': [1, 2, 3, 4, 5, 6], 'y': list('abdabd')},
                      index=[10, 20, 30, 40, 50, 60])
    a = dd.from_pandas(df, 2)

    b = a.repartition(divisions=[10, 20, 50, 60])
    assert b.divisions == (10, 20, 50, 60)
    assert eq(a, b)
    # first output partition should hold exactly the first row
    assert eq(a._get(b.dask, (b._name, 0)), df.iloc[:1])

    # invalid division lists must be rejected
    for div in [[20, 60], [10, 50], [1],  # first / last element mismatch
                [0, 60], [10, 70],  # do not allow to expand divisions by default
                [10, 50, 20, 60],  # not sorted
                [10, 10, 20, 60]]:  # not unique (last element can be duplicated)
        assert raises(ValueError, lambda: a.repartition(divisions=div))

    pdf = pd.DataFrame(np.random.randn(7, 5), columns=list('abxyz'))
    for p in range(1, 7):
        ddf = dd.from_pandas(pdf, p)
        assert eq(ddf, pdf)
        for div in [[0, 6], [0, 6, 6], [0, 5, 6], [0, 4, 6, 6],
                    [0, 2, 6], [0, 2, 6, 6],
                    [0, 2, 3, 6, 6], [0, 1, 2, 3, 4, 5, 6, 6]]:
            rddf = ddf.repartition(divisions=div)
            _check_split_data(ddf, rddf)
            assert rddf.divisions == tuple(div)
            assert eq(pdf, rddf)

            rds = ddf.x.repartition(divisions=div)
            _check_split_data(ddf.x, rds)
            assert rds.divisions == tuple(div)
            assert eq(pdf.x, rds)

        # expand divisions
        for div in [[-5, 10], [-2, 3, 5, 6], [0, 4, 5, 9, 10]]:
            rddf = ddf.repartition(divisions=div, force=True)
            _check_split_data(ddf, rddf)
            assert rddf.divisions == tuple(div)
            assert eq(pdf, rddf)

            rds = ddf.x.repartition(divisions=div, force=True)
            _check_split_data(ddf.x, rds)
            assert rds.divisions == tuple(div)
            assert eq(pdf.x, rds)

    # same battery of checks on a string-indexed frame
    pdf = pd.DataFrame({'x': [0, 1, 2, 3, 4, 5, 6, 7, 8, 9],
                        'y': [9, 8, 7, 6, 5, 4, 3, 2, 1, 0]},
                       index=list('abcdefghij'))
    for p in range(1, 7):
        ddf = dd.from_pandas(pdf, p)
        assert eq(ddf, pdf)
        for div in [list('aj'), list('ajj'), list('adj'),
                    list('abfj'), list('ahjj'), list('acdj'), list('adfij'),
                    list('abdefgij'), list('abcdefghij')]:
            rddf = ddf.repartition(divisions=div)
            _check_split_data(ddf, rddf)
            assert rddf.divisions == tuple(div)
            assert eq(pdf, rddf)

            rds = ddf.x.repartition(divisions=div)
            _check_split_data(ddf.x, rds)
            assert rds.divisions == tuple(div)
            assert eq(pdf.x, rds)

        # expand divisions
        for div in [list('Yadijm'), list('acmrxz'), list('Yajz')]:
            rddf = ddf.repartition(divisions=div, force=True)
            _check_split_data(ddf, rddf)
            assert rddf.divisions == tuple(div)
            assert eq(pdf, rddf)

            rds = ddf.x.repartition(divisions=div, force=True)
            _check_split_data(ddf.x, rds)
            assert rds.divisions == tuple(div)
            assert eq(pdf.x, rds)


def test_repartition_divisions():
    # Inspect the raw task graph produced by repartition_divisions.
    result = repartition_divisions([1, 3, 7], [1, 4, 6, 7], 'a', 'b', 'c')  # doctest: +SKIP
    assert result == {('b', 0): (_loc, ('a', 0), 1, 3, False),
                      ('b', 1): (_loc, ('a', 1), 3, 4, False),
                      ('b', 2): (_loc, ('a', 1), 4, 6, False),
                      ('b', 3): (_loc, ('a', 1), 6, 7, True),
                      ('c', 0): (pd.concat, (list, [('b', 0), ('b', 1)])),
                      ('c', 1): ('b', 2),
                      ('c', 2): ('b', 3)}


def test_repartition_on_pandas_dataframe():
    # dd.repartition must also accept plain pandas objects as input.
    df = pd.DataFrame({'x': [1, 2, 3, 4, 5, 6], 'y': list('abdabd')},
                      index=[10, 20, 30, 40, 50, 60])
    ddf = dd.repartition(df, divisions=[10, 20, 50, 60])
    assert isinstance(ddf, dd.DataFrame)
    assert ddf.divisions == (10, 20, 50, 60)
    assert eq(ddf, df)

    ddf = dd.repartition(df.y, divisions=[10, 20, 50, 60])
    assert isinstance(ddf, dd.Series)
    assert ddf.divisions == (10, 20, 50, 60)
    assert eq(ddf, df.y)
def test_embarrassingly_parallel_operations():
    # Elementwise/partition-local operations must match pandas exactly.
    df = pd.DataFrame({'x': [1, 2, 3, 4, None, 6], 'y': list('abdabd')},
                      index=[10, 20, 30, 40, 50, 60])
    a = dd.from_pandas(df, 2)

    assert eq(a.x.astype('float32'), df.x.astype('float32'))
    assert a.x.astype('float32').compute().dtype == 'float32'

    assert eq(a.x.dropna(), df.x.dropna())

    assert eq(a.x.fillna(100), df.x.fillna(100))
    assert eq(a.fillna(100), df.fillna(100))

    assert eq(a.x.between(2, 4), df.x.between(2, 4))

    assert eq(a.x.clip(2, 4), df.x.clip(2, 4))

    assert eq(a.x.notnull(), df.x.notnull())

    # sampling half the rows should strictly shrink the frame
    assert len(a.sample(0.5).compute()) < len(df)


def test_sample():
    df = pd.DataFrame({'x': [1, 2, 3, 4, None, 6], 'y': list('abdabd')},
                      index=[10, 20, 30, 40, 50, 60])
    a = dd.from_pandas(df, 2)
    b = a.sample(0.5)

    assert eq(b, b)

    # a fixed random_state must make sampling reproducible ...
    c = a.sample(0.5, random_state=1234)
    d = a.sample(0.5, random_state=1234)
    assert eq(c, d)

    # ... while unseeded calls get distinct graph keys
    assert a.sample(0.5)._name != a.sample(0.5)._name


def test_datetime_accessor():
    df = pd.DataFrame({'x': [1, 2, 3, 4]})
    df['x'] = df.x.astype('M8[us]')

    a = dd.from_pandas(df, 2)

    assert 'date' in dir(a.x.dt)

    # pandas loses Series.name via datetime accessor
    # see https://github.com/pydata/pandas/issues/10712
    assert eq(a.x.dt.date, df.x.dt.date, check_names=False)
    assert (a.x.dt.to_pydatetime().compute() == df.x.dt.to_pydatetime()).all()

    # accessor results must be deterministic (same graph on repeat calls)
    assert a.x.dt.date.dask == a.x.dt.date.dask
    assert a.x.dt.to_pydatetime().dask == a.x.dt.to_pydatetime().dask


def test_str_accessor():
    df = pd.DataFrame({'x': ['a', 'b', 'c', 'D']})

    a = dd.from_pandas(df, 2)

    assert 'upper' in dir(a.x.str)
    assert eq(a.x.str.upper(), df.x.str.upper())

    # deterministic graph for repeated accessor calls
    assert a.x.str.upper().dask == a.x.str.upper().dask
def test_empty_max():
    """max() must skip empty partitions instead of failing on them."""
    # FIX: removed an unused local ``df`` the original built and never read.
    # Two partitions: one with a single row, one completely empty.
    a = dd.DataFrame({('x', 0): pd.DataFrame({'x': [1]}),
                      ('x', 1): pd.DataFrame({'x': []})}, 'x',
                     ['x'], [None, None, None])
    assert eq(a.x.max(), 1)
def test_loc_on_numpy_datetimes():
    # .loc slicing with string labels over np.datetime64 divisions.
    df = pd.DataFrame({'x': [1, 2, 3]},
                      index=list(map(np.datetime64, ['2014', '2015', '2016'])))
    a = dd.from_pandas(df, 2)
    # force the divisions to be numpy datetimes, matching the index dtype
    a.divisions = list(map(np.datetime64, a.divisions))

    assert eq(a.loc['2014': '2015'], a.loc['2014': '2015'])


def test_loc_on_pandas_datetimes():
    # Same as above, but with pd.Timestamp divisions.
    df = pd.DataFrame({'x': [1, 2, 3]},
                      index=list(map(pd.Timestamp, ['2014', '2015', '2016'])))
    a = dd.from_pandas(df, 2)
    a.divisions = list(map(pd.Timestamp, a.divisions))

    assert eq(a.loc['2014': '2015'], a.loc['2014': '2015'])


def test_coerce_loc_index():
    # String labels are coerced to the division's datetime type.
    for t in [pd.Timestamp, np.datetime64]:
        assert isinstance(_coerce_loc_index([t('2014')], '2014'), t)


def test_nlargest_series():
    s = pd.Series([1, 3, 5, 2, 4, 6])
    ss = dd.from_pandas(s, npartitions=2)

    assert eq(ss.nlargest(2), s.nlargest(2))
def test_categorical_set_index():
    """set_index on a categorical column must preserve the index contents."""
    df = pd.DataFrame({'x': [1, 2, 3, 4], 'y': ['a', 'b', 'b', 'c']})
    df['y'] = df.y.astype('category')
    a = dd.from_pandas(df, npartitions=2)

    with dask.set_options(get=get_sync):
        b = a.set_index('y')
        df2 = df.set_index('y')
        # BUG FIX: the original wrote ``assert x, y`` which treats y as the
        # assertion *message*, so the check always passed.  Compare explicitly.
        assert list(b.index.compute()) == list(df2.index)

        b = a.set_index(a.y)
        df2 = df.set_index(df.y)
        assert list(b.index.compute()) == list(df2.index)
def test_query():
    # DataFrame.query delegates to numexpr when available; tolerate its absence.
    df = pd.DataFrame({'x': [1, 2, 3, 4], 'y': [5, 6, 7, 8]})
    a = dd.from_pandas(df, npartitions=2)
    q = a.query('x**2 > y')
    with ignoring(ImportError):
        assert eq(q, df.query('x**2 > y'))


def test_deterministic_arithmetic_names():
    # Identical expressions produce identical graphs; different ones differ.
    df = pd.DataFrame({'x': [1, 2, 3, 4], 'y': [5, 6, 7, 8]})
    a = dd.from_pandas(df, npartitions=2)

    assert sorted((a.x + a.y ** 2).dask) == sorted((a.x + a.y ** 2).dask)
    assert sorted((a.x + a.y ** 2).dask) != sorted((a.x + a.y ** 3).dask)
    assert sorted((a.x + a.y ** 2).dask) != sorted((a.x - a.y ** 2).dask)


def test_deterministic_reduction_names():
    # Reductions must be given deterministic task names.
    df = pd.DataFrame({'x': [1, 2, 3, 4], 'y': [5, 6, 7, 8]})
    a = dd.from_pandas(df, npartitions=2)

    assert a.x.sum()._name == a.x.sum()._name
    assert a.x.mean()._name == a.x.mean()._name
    assert a.x.var()._name == a.x.var()._name
    assert a.x.min()._name == a.x.min()._name
    assert a.x.max()._name == a.x.max()._name
    assert a.x.count()._name == a.x.count()._name

    # Test reduction without token string
    assert sorted(reduction(a.x, len, np.sum).dask) !=\
        sorted(reduction(a.x, np.sum, np.sum).dask)
    assert sorted(reduction(a.x, len, np.sum).dask) ==\
        sorted(reduction(a.x, len, np.sum).dask)


def test_deterministic_apply_concat_apply_names():
    # apply-concat-apply (aca) operations must also name tasks deterministically.
    df = pd.DataFrame({'x': [1, 2, 3, 4], 'y': [5, 6, 7, 8]})
    a = dd.from_pandas(df, npartitions=2)

    assert sorted(a.x.nlargest(2).dask) == sorted(a.x.nlargest(2).dask)
    assert sorted(a.x.nlargest(2).dask) != sorted(a.x.nlargest(3).dask)
    assert sorted(a.x.drop_duplicates().dask) == \
        sorted(a.x.drop_duplicates().dask)
    assert sorted(a.groupby('x').y.mean().dask) == \
        sorted(a.groupby('x').y.mean().dask)

    # Test aca without passing in token string
    f = lambda a: a.nlargest(5)
    f2 = lambda a: a.nlargest(3)
    assert sorted(aca(a.x, f, f, a.x.name).dask) !=\
        sorted(aca(a.x, f2, f2, a.x.name).dask)
    assert sorted(aca(a.x, f, f, a.x.name).dask) ==\
        sorted(aca(a.x, f, f, a.x.name).dask)
def test_gh_517():
    # Regression test: index.nunique must de-duplicate across partitions.
    arr = np.random.randn(100, 2)
    df = pd.DataFrame(arr, columns=['a', 'b'])
    ddf = dd.from_pandas(df, 2)
    assert ddf.index.nunique().compute() == 100

    # concatenated copy: 200 rows but still only 100 distinct index values
    ddf2 = dd.from_pandas(pd.concat([df, df]), 5)
    assert ddf2.index.nunique().compute() == 100


def test_drop_axis_1():
    df = pd.DataFrame({'x': [1, 2, 3, 4], 'y': [5, 6, 7, 8]})
    a = dd.from_pandas(df, npartitions=2)

    assert eq(a.drop('y', axis=1), df.drop('y', axis=1))


def test_gh580():
    # Regression test: numpy ufuncs applied to dask series.
    df = pd.DataFrame({'x': np.arange(10, dtype=float)})
    ddf = dd.from_pandas(df, 2)
    assert eq(np.cos(df['x']), np.cos(ddf['x']))
    # repeated on purpose: the second call must also work (deterministic graph)
    assert eq(np.cos(df['x']), np.cos(ddf['x']))


def test_rename_dict():
    # column rename via mapping
    renamer = {'a': 'A', 'b': 'B'}
    assert eq(d.rename(columns=renamer),
              full.rename(columns=renamer))


def test_rename_function():
    # column rename via callable
    renamer = lambda x: x.upper()
    assert eq(d.rename(columns=renamer),
              full.rename(columns=renamer))


def test_rename_index():
    # renaming the index is not supported and must raise
    renamer = {0: 1}
    assert raises(ValueError, lambda: d.rename(index=renamer))


def test_to_frame():
    s = pd.Series([1, 2, 3], name='foo')
    a = dd.from_pandas(s, npartitions=2)

    assert eq(s.to_frame(), a.to_frame())
    assert eq(s.to_frame('bar'), a.to_frame('bar'))
def test_series_groupby_propagates_names():
    # groupby(...).apply with columns= must propagate the series name.
    df = pd.DataFrame({'x': [1, 2, 3], 'y': [4, 5, 6]})
    ddf = dd.from_pandas(df, 2)
    func = lambda df: df['y'].sum()
    result = ddf.groupby('x').apply(func, columns='y')
    expected = df.groupby('x').apply(func)
    # pandas does not set the name itself; align before comparing
    expected.name = 'y'
    tm.assert_series_equal(result.compute(), expected)
def test_series_groupby():
    """Grouping a dask Series by itself (and a derived key) matches pandas."""
    s = pd.Series([1, 2, 2, 1, 1])
    pd_group = s.groupby(s)
    ss = dd.from_pandas(s, npartitions=2)
    dask_group = ss.groupby(ss)
    pd_group2 = s.groupby(s + 1)
    dask_group2 = ss.groupby(ss + 1)
    # CONSISTENCY FIX: the second pair was listed as (pandas, dask), silently
    # swapping the roles of `dg`/`pdg` versus the first pair; both pairs are
    # now (dask, pandas).  eq() compares values, so results are unchanged.
    for dg, pdg in [(dask_group, pd_group), (dask_group2, pd_group2)]:
        assert eq(dg.count(), pdg.count())
        assert eq(dg.sum(), pdg.sum())
        assert eq(dg.min(), pdg.min())
        assert eq(dg.max(), pdg.max())

    # grouping by a plain list is invalid
    assert raises(TypeError, lambda: ss.groupby([1, 2]))

    # grouping by a series with a different partitioning is unsupported
    sss = dd.from_pandas(s, npartitions=3)
    assert raises(NotImplementedError, lambda: ss.groupby(sss))
def test_apply():
    # Series.apply and DataFrame.apply(axis=1) must mirror pandas; axis=0
    # and missing metadata raise.
    df = pd.DataFrame({'x': [1, 2, 3, 4], 'y': [10, 20, 30, 40]})
    a = dd.from_pandas(df, npartitions=2)
    func = lambda row: row['x'] + row['y']
    eq(a.x.apply(lambda x: x + 1), df.x.apply(lambda x: x + 1))
    eq(a.apply(lambda xy: xy[0] + xy[1], axis=1, columns=None),
       df.apply(lambda xy: xy[0] + xy[1], axis=1))

    # axis=0 apply is unsupported
    assert raises(NotImplementedError, lambda: a.apply(lambda xy: xy, axis=0))
    # axis=1 without columns metadata is an error
    assert raises(ValueError, lambda: a.apply(lambda xy: xy, axis=1))

    func = lambda x: pd.Series([x, x])
    eq(a.x.apply(func, name=[0, 1]), df.x.apply(func))


def test_index_time_properties():
    # datetime index attributes (.day, .month) work through dask.
    i = tm.makeTimeSeries()
    a = dd.from_pandas(i, npartitions=3)

    assert (i.index.day == a.index.day.compute()).all()
    assert (i.index.month == a.index.month.compute()).all()


@pytest.mark.skipif(LooseVersion(pd.__version__) <= '0.16.2',
                    reason="nlargest not in pandas pre 0.16.2")
def test_nlargest():
    from string import ascii_lowercase
    df = pd.DataFrame({'a': np.random.permutation(10),
                       'b': list(ascii_lowercase[:10])})
    ddf = dd.from_pandas(df, npartitions=2)

    res = ddf.nlargest(5, 'a')
    exp = df.nlargest(5, 'a')
    eq(res, exp)


@pytest.mark.skipif(LooseVersion(pd.__version__) <= '0.16.2',
                    reason="nlargest not in pandas pre 0.16.2")
def test_nlargest_multiple_columns():
    from string import ascii_lowercase
    df = pd.DataFrame({'a': np.random.permutation(10),
                       'b': list(ascii_lowercase[:10]),
                       'c': np.random.permutation(10).astype('float64')})
    ddf = dd.from_pandas(df, npartitions=2)

    result = ddf.nlargest(5, ['a', 'b'])
    expected = df.nlargest(5, ['a', 'b'])
    eq(result, expected)


def test_groupby_index_array():
    # grouping by an index-derived array (month) matches pandas
    df = tm.makeTimeDataFrame()
    ddf = dd.from_pandas(df, npartitions=2)
    eq(df.A.groupby(df.index.month).nunique(),
       ddf.A.groupby(ddf.index.month).nunique(), check_names=False)


def test_groupby_set_index():
    # as_index=False is not implemented for dask groupby
    df = tm.makeTimeDataFrame()
    ddf = dd.from_pandas(df, npartitions=2)
    assert raises(NotImplementedError,
                  lambda: ddf.groupby(df.index.month, as_index=False))


def test_reset_index():
    df = pd.DataFrame({'x': [1, 2, 3, 4], 'y': [10, 20, 30, 40]})
    ddf = dd.from_pandas(df, npartitions=2)

    res = ddf.reset_index()
    exp = df.reset_index()

    assert len(res.index.compute()) == len(exp.index)
    assert res.columns == tuple(exp.columns)
    assert_array_almost_equal(res.compute().values, exp.values)


def test_dataframe_compute_forward_kwargs():
    # unknown compute kwargs must be forwarded/tolerated, not crash
    x = dd.from_pandas(pd.DataFrame({'a': range(10)}), npartitions=2).a.sum()
    x.compute(bogus_keyword=10)


def test_series_iteritems():
    df = pd.DataFrame({'x': [1, 2, 3, 4]})
    ddf = dd.from_pandas(df, npartitions=2)
    for (a, b) in zip(df['x'].iteritems(), ddf['x'].iteritems()):
        assert a == b


def test_dataframe_iterrows():
    df = pd.DataFrame({'x': [1, 2, 3, 4], 'y': [10, 20, 30, 40]})
    ddf = dd.from_pandas(df, npartitions=2)

    for (a, b) in zip(df.iterrows(), ddf.iterrows()):
        tm.assert_series_equal(a[1], b[1])


def test_dataframe_itertuples():
    df = pd.DataFrame({'x': [1, 2, 3, 4], 'y': [10, 20, 30, 40]})
    ddf = dd.from_pandas(df, npartitions=2)

    for (a, b) in zip(df.itertuples(), ddf.itertuples()):
        assert a == b
| bsd-3-clause |
Barmaley-exe/scikit-learn | sklearn/qda.py | 21 | 7639 | """
Quadratic Discriminant Analysis
"""
# Author: Matthieu Perrot <matthieu.perrot@gmail.com>
#
# License: BSD 3 clause
import warnings
import numpy as np
from .base import BaseEstimator, ClassifierMixin
from .externals.six.moves import xrange
from .utils import check_array, check_X_y
from .utils.validation import check_is_fitted
from .utils.fixes import bincount
__all__ = ['QDA']
class QDA(BaseEstimator, ClassifierMixin):
    """
    Quadratic Discriminant Analysis (QDA)

    A classifier with a quadratic decision boundary, generated
    by fitting class conditional densities to the data
    and using Bayes' rule.

    The model fits a Gaussian density to each class.

    Parameters
    ----------
    priors : array, optional, shape = [n_classes]
        Priors on classes

    reg_param : float, optional
        Regularizes the covariance estimate as
        ``(1-reg_param)*Sigma + reg_param*np.eye(n_features)``

    Attributes
    ----------
    covariances_ : list of array-like, shape = [n_features, n_features]
        Covariance matrices of each class.

    means_ : array-like, shape = [n_classes, n_features]
        Class means.

    priors_ : array-like, shape = [n_classes]
        Class priors (sum to 1).

    rotations_ : list of arrays
        For each class k an array of shape [n_features, n_k], with
        ``n_k = min(n_features, number of elements in class k)``
        It is the rotation of the Gaussian distribution, i.e. its
        principal axis.

    scalings_ : list of arrays
        For each class k an array of shape [n_k]. It contains the scaling
        of the Gaussian distributions along its principal axes, i.e. the
        variance in the rotated coordinate system.

    Examples
    --------
    >>> from sklearn.qda import QDA
    >>> import numpy as np
    >>> X = np.array([[-1, -1], [-2, -1], [-3, -2], [1, 1], [2, 1], [3, 2]])
    >>> y = np.array([1, 1, 1, 2, 2, 2])
    >>> clf = QDA()
    >>> clf.fit(X, y)
    QDA(priors=None, reg_param=0.0)
    >>> print(clf.predict([[-0.8, -1]]))
    [1]

    See also
    --------
    sklearn.lda.LDA: Linear discriminant analysis
    """

    def __init__(self, priors=None, reg_param=0.):
        # NOTE(review): converting priors to an ndarray in __init__ means
        # get_params() returns the converted value, not the caller's object.
        self.priors = np.asarray(priors) if priors is not None else None
        self.reg_param = reg_param

    def fit(self, X, y, store_covariances=False, tol=1.0e-4):
        """
        Fit the QDA model according to the given training data and parameters.

        Parameters
        ----------
        X : array-like, shape = [n_samples, n_features]
            Training vector, where n_samples in the number of samples and
            n_features is the number of features.

        y : array, shape = [n_samples]
            Target values (integers)

        store_covariances : boolean
            If True the covariance matrices are computed and stored in the
            `self.covariances_` attribute.

        tol : float, optional, default 1.0e-4
            Threshold used for rank estimation.
        """
        X, y = check_X_y(X, y)
        # remap labels to 0..n_classes-1
        self.classes_, y = np.unique(y, return_inverse=True)
        n_samples, n_features = X.shape
        n_classes = len(self.classes_)
        if n_classes < 2:
            raise ValueError('y has less than 2 classes')
        if self.priors is None:
            # empirical priors from class frequencies
            self.priors_ = bincount(y) / float(n_samples)
        else:
            self.priors_ = self.priors

        cov = None
        if store_covariances:
            cov = []
        means = []
        scalings = []
        rotations = []
        for ind in xrange(n_classes):
            Xg = X[y == ind, :]
            meang = Xg.mean(0)
            means.append(meang)
            if len(Xg) == 1:
                raise ValueError('y has only 1 sample in class %s, covariance '
                                 'is ill defined.' % str(self.classes_[ind]))
            Xgc = Xg - meang
            # SVD of the centered class data gives the per-class Gaussian:
            # Xgc = U * S * V.T
            U, S, Vt = np.linalg.svd(Xgc, full_matrices=False)
            rank = np.sum(S > tol)
            if rank < n_features:
                warnings.warn("Variables are collinear")
            # eigenvalues of the class covariance (variances along V's axes)
            S2 = (S ** 2) / (len(Xg) - 1)
            # shrinkage regularization toward the identity
            S2 = ((1 - self.reg_param) * S2) + self.reg_param
            if store_covariances:
                # cov = V * (S^2 / (n-1)) * V.T
                cov.append(np.dot(S2 * Vt.T, Vt))
            scalings.append(S2)
            rotations.append(Vt.T)
        if store_covariances:
            self.covariances_ = cov
        self.means_ = np.asarray(means)
        self.scalings_ = scalings
        self.rotations_ = rotations
        return self

    def _decision_function(self, X):
        # Per-class log posterior (up to a shared constant):
        # -0.5 * (Mahalanobis distance + log|Sigma_k|) + log prior_k
        check_is_fitted(self, 'classes_')

        X = check_array(X)
        norm2 = []
        for i in range(len(self.classes_)):
            R = self.rotations_[i]
            S = self.scalings_[i]
            Xm = X - self.means_[i]
            # whiten: rotate then scale by 1/sqrt(variance)
            X2 = np.dot(Xm, R * (S ** (-0.5)))
            norm2.append(np.sum(X2 ** 2, 1))
        norm2 = np.array(norm2).T   # shape = [len(X), n_classes]
        # log-determinant of each class covariance
        u = np.asarray([np.sum(np.log(s)) for s in self.scalings_])
        return (-0.5 * (norm2 + u) + np.log(self.priors_))

    def decision_function(self, X):
        """Apply decision function to an array of samples.

        Parameters
        ----------
        X : array-like, shape = [n_samples, n_features]
            Array of samples (test vectors).

        Returns
        -------
        C : array, shape = [n_samples, n_classes] or [n_samples,]
            Decision function values related to each class, per sample.
            In the two-class case, the shape is [n_samples,], giving the
            log likelihood ratio of the positive class.
        """
        dec_func = self._decision_function(X)
        # handle special case of two classes
        if len(self.classes_) == 2:
            return dec_func[:, 1] - dec_func[:, 0]
        return dec_func

    def predict(self, X):
        """Perform classification on an array of test vectors X.

        The predicted class C for each sample in X is returned.

        Parameters
        ----------
        X : array-like, shape = [n_samples, n_features]

        Returns
        -------
        C : array, shape = [n_samples]
        """
        d = self._decision_function(X)
        # argmax over classes, mapped back to original labels
        y_pred = self.classes_.take(d.argmax(1))
        return y_pred

    def predict_proba(self, X):
        """Return posterior probabilities of classification.

        Parameters
        ----------
        X : array-like, shape = [n_samples, n_features]
            Array of samples/test vectors.

        Returns
        -------
        C : array, shape = [n_samples, n_classes]
            Posterior probabilities of classification per class.
        """
        values = self._decision_function(X)
        # compute the likelihood of the underlying gaussian models
        # up to a multiplicative constant.
        # subtract the row max before exponentiating for numerical stability
        likelihood = np.exp(values - values.max(axis=1)[:, np.newaxis])
        # compute posterior probabilities
        return likelihood / likelihood.sum(axis=1)[:, np.newaxis]

    def predict_log_proba(self, X):
        """Return posterior probabilities of classification.

        Parameters
        ----------
        X : array-like, shape = [n_samples, n_features]
            Array of samples/test vectors.

        Returns
        -------
        C : array, shape = [n_samples, n_classes]
            Posterior log-probabilities of classification per class.
        """
        # XXX : can do better to avoid precision overflows
        probas_ = self.predict_proba(X)
        return np.log(probas_)
| bsd-3-clause |
mattgiguere/scikit-learn | sklearn/ensemble/tests/test_bagging.py | 2 | 21999 | """
Testing for the bagging ensemble module (sklearn.ensemble.bagging).
"""
# Author: Gilles Louppe
# License: BSD 3 clause
import numpy as np
from sklearn.base import BaseEstimator
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_false
from sklearn.utils.testing import assert_warns
from sklearn.dummy import DummyClassifier, DummyRegressor
from sklearn.grid_search import GridSearchCV, ParameterGrid
from sklearn.ensemble import BaggingClassifier, BaggingRegressor
from sklearn.linear_model import Perceptron, LogisticRegression
from sklearn.neighbors import KNeighborsClassifier, KNeighborsRegressor
from sklearn.tree import DecisionTreeClassifier, DecisionTreeRegressor
from sklearn.svm import SVC, SVR
from sklearn.pipeline import make_pipeline
from sklearn.feature_selection import SelectKBest
from sklearn.cross_validation import train_test_split
from sklearn.datasets import load_boston, load_iris
from sklearn.utils import check_random_state
from scipy.sparse import csc_matrix, csr_matrix
# Module-level fixtures shared by every test below.  A single seeded RNG
# keeps the permutations (and thus the tests) reproducible.
rng = check_random_state(0)

# also load the iris dataset
# and randomly permute it
iris = load_iris()
perm = rng.permutation(iris.target.size)
iris.data = iris.data[perm]
iris.target = iris.target[perm]

# also load the boston dataset
# and randomly permute it
boston = load_boston()
perm = rng.permutation(boston.target.size)
boston.data = boston.data[perm]
boston.target = boston.target[perm]
def test_classification():
    # Check classification for various parameter settings.
    # Smoke test: fit+predict must run for every estimator/param combination.
    rng = check_random_state(0)
    X_train, X_test, y_train, y_test = train_test_split(iris.data,
                                                        iris.target,
                                                        random_state=rng)
    grid = ParameterGrid({"max_samples": [0.5, 1.0],
                          "max_features": [1, 2, 4],
                          "bootstrap": [True, False],
                          "bootstrap_features": [True, False]})

    for base_estimator in [None,
                           DummyClassifier(),
                           Perceptron(),
                           DecisionTreeClassifier(),
                           KNeighborsClassifier(),
                           SVC()]:
        for params in grid:
            BaggingClassifier(base_estimator=base_estimator,
                              random_state=rng,
                              **params).fit(X_train, y_train).predict(X_test)


def test_sparse_classification():
    # Check classification for various parameter settings on sparse input.

    class CustomSVC(SVC):
        """SVC variant that records the nature of the training set"""

        def fit(self, X, y):
            super(CustomSVC, self).fit(X, y)
            # remember whether we were trained on sparse or dense data
            self.data_type_ = type(X)
            return self

    rng = check_random_state(0)
    X_train, X_test, y_train, y_test = train_test_split(iris.data,
                                                        iris.target,
                                                        random_state=rng)
    parameter_sets = [
        {"max_samples": 0.5,
         "max_features": 2,
         "bootstrap": True,
         "bootstrap_features": True},
        {"max_samples": 1.0,
         "max_features": 4,
         "bootstrap": True,
         "bootstrap_features": True},
        {"max_features": 2,
         "bootstrap": False,
         "bootstrap_features": True},
        {"max_samples": 0.5,
         "bootstrap": True,
         "bootstrap_features": False},
    ]

    for sparse_format in [csc_matrix, csr_matrix]:
        X_train_sparse = sparse_format(X_train)
        X_test_sparse = sparse_format(X_test)
        for params in parameter_sets:
            # Trained on sparse format
            sparse_classifier = BaggingClassifier(
                base_estimator=CustomSVC(),
                random_state=1,
                **params
            ).fit(X_train_sparse, y_train)
            sparse_results = sparse_classifier.predict(X_test_sparse)

            # Trained on dense format
            dense_results = BaggingClassifier(
                base_estimator=CustomSVC(),
                random_state=1,
                **params
            ).fit(X_train, y_train).predict(X_test)

            # sparse and dense training must give identical predictions,
            # and the base estimators must actually have seen sparse data
            sparse_type = type(X_train_sparse)
            types = [i.data_type_ for i in sparse_classifier.estimators_]

            assert_array_equal(sparse_results, dense_results)
            assert all([t == sparse_type for t in types])
def test_regression():
    # Check regression for various parameter settings.
    # Smoke test on a small boston subset to keep the grid affordable.
    rng = check_random_state(0)
    X_train, X_test, y_train, y_test = train_test_split(boston.data[:50],
                                                        boston.target[:50],
                                                        random_state=rng)
    grid = ParameterGrid({"max_samples": [0.5, 1.0],
                          "max_features": [0.5, 1.0],
                          "bootstrap": [True, False],
                          "bootstrap_features": [True, False]})

    for base_estimator in [None,
                           DummyRegressor(),
                           DecisionTreeRegressor(),
                           KNeighborsRegressor(),
                           SVR()]:
        for params in grid:
            BaggingRegressor(base_estimator=base_estimator,
                             random_state=rng,
                             **params).fit(X_train, y_train).predict(X_test)
def test_sparse_regression():
    """Bagging regression on sparse input must match the dense results."""
    rng = check_random_state(0)
    X_train, X_test, y_train, y_test = train_test_split(boston.data[:50],
                                                        boston.target[:50],
                                                        random_state=rng)

    class CustomSVR(SVR):
        # FIX: docstring said "SVC variant" — this subclasses SVR.
        """SVR variant that records the nature of the training set"""

        def fit(self, X, y):
            super(CustomSVR, self).fit(X, y)
            # remember whether we were trained on sparse or dense data
            self.data_type_ = type(X)
            return self

    parameter_sets = [
        {"max_samples": 0.5,
         "max_features": 2,
         "bootstrap": True,
         "bootstrap_features": True},
        {"max_samples": 1.0,
         "max_features": 4,
         "bootstrap": True,
         "bootstrap_features": True},
        {"max_features": 2,
         "bootstrap": False,
         "bootstrap_features": True},
        {"max_samples": 0.5,
         "bootstrap": True,
         "bootstrap_features": False},
    ]

    for sparse_format in [csc_matrix, csr_matrix]:
        X_train_sparse = sparse_format(X_train)
        X_test_sparse = sparse_format(X_test)
        for params in parameter_sets:
            # Trained on sparse format
            sparse_classifier = BaggingRegressor(
                base_estimator=CustomSVR(),
                random_state=1,
                **params
            ).fit(X_train_sparse, y_train)
            sparse_results = sparse_classifier.predict(X_test_sparse)

            # Trained on dense format
            dense_results = BaggingRegressor(
                base_estimator=CustomSVR(),
                random_state=1,
                **params
            ).fit(X_train, y_train).predict(X_test)

            sparse_type = type(X_train_sparse)
            types = [i.data_type_ for i in sparse_classifier.estimators_]

            # FIX: removed a second, duplicated assert_array_equal on the
            # same pair of arrays that followed this check in the original.
            assert_array_equal(sparse_results, dense_results)
            assert all([t == sparse_type for t in types])
def test_bootstrap_samples():
    # Test that bootstraping samples generate non-perfect base estimators.
    rng = check_random_state(0)
    X_train, X_test, y_train, y_test = train_test_split(boston.data,
                                                        boston.target,
                                                        random_state=rng)

    base_estimator = DecisionTreeRegressor().fit(X_train, y_train)

    # without bootstrap, all trees are perfect on the training set
    ensemble = BaggingRegressor(base_estimator=DecisionTreeRegressor(),
                                max_samples=1.0,
                                bootstrap=False,
                                random_state=rng).fit(X_train, y_train)

    assert_equal(base_estimator.score(X_train, y_train),
                 ensemble.score(X_train, y_train))

    # with bootstrap, trees are no longer perfect on the training set
    ensemble = BaggingRegressor(base_estimator=DecisionTreeRegressor(),
                                max_samples=1.0,
                                bootstrap=True,
                                random_state=rng).fit(X_train, y_train)

    assert_greater(base_estimator.score(X_train, y_train),
                   ensemble.score(X_train, y_train))


def test_bootstrap_features():
    # Test that bootstraping features may generate dupplicate features.
    rng = check_random_state(0)
    X_train, X_test, y_train, y_test = train_test_split(boston.data,
                                                        boston.target,
                                                        random_state=rng)

    # without feature bootstrap every estimator sees all distinct features
    ensemble = BaggingRegressor(base_estimator=DecisionTreeRegressor(),
                                max_features=1.0,
                                bootstrap_features=False,
                                random_state=rng).fit(X_train, y_train)

    for features in ensemble.estimators_features_:
        assert_equal(boston.data.shape[1], np.unique(features).shape[0])

    # with feature bootstrap some features repeat, so fewer distinct ones
    ensemble = BaggingRegressor(base_estimator=DecisionTreeRegressor(),
                                max_features=1.0,
                                bootstrap_features=True,
                                random_state=rng).fit(X_train, y_train)

    for features in ensemble.estimators_features_:
        assert_greater(boston.data.shape[1], np.unique(features).shape[0])


def test_probability():
    # Predict probabilities.
    rng = check_random_state(0)
    X_train, X_test, y_train, y_test = train_test_split(iris.data,
                                                        iris.target,
                                                        random_state=rng)

    # some base estimators may emit 0-probability rows -> silence div warnings
    with np.errstate(divide="ignore", invalid="ignore"):
        # Normal case
        ensemble = BaggingClassifier(base_estimator=DecisionTreeClassifier(),
                                     random_state=rng).fit(X_train, y_train)

        # rows of predict_proba must sum to 1
        assert_array_almost_equal(np.sum(ensemble.predict_proba(X_test),
                                         axis=1),
                                  np.ones(len(X_test)))

        # predict_log_proba must be the log of predict_proba
        assert_array_almost_equal(ensemble.predict_proba(X_test),
                                  np.exp(ensemble.predict_log_proba(X_test)))

        # Degenerate case, where some classes are missing
        ensemble = BaggingClassifier(base_estimator=LogisticRegression(),
                                     random_state=rng,
                                     max_samples=5).fit(X_train, y_train)

        assert_array_almost_equal(np.sum(ensemble.predict_proba(X_test),
                                         axis=1),
                                  np.ones(len(X_test)))

        assert_array_almost_equal(ensemble.predict_proba(X_test),
                                  np.exp(ensemble.predict_log_proba(X_test)))
def test_oob_score_classification():
    # Check that oob prediction is a good estimation of the generalization
    # error.
    rng = check_random_state(0)
    X_train, X_test, y_train, y_test = train_test_split(iris.data,
                                                        iris.target,
                                                        random_state=rng)

    for base_estimator in [DecisionTreeClassifier(), SVC()]:
        clf = BaggingClassifier(base_estimator=base_estimator,
                                n_estimators=100,
                                bootstrap=True,
                                oob_score=True,
                                random_state=rng).fit(X_train, y_train)

        test_score = clf.score(X_test, y_test)

        # oob estimate should track the held-out score within 0.1
        assert_less(abs(test_score - clf.oob_score_), 0.1)

        # Test with few estimators
        # too few estimators leave samples with no oob prediction -> warning
        assert_warns(UserWarning,
                     BaggingClassifier(base_estimator=base_estimator,
                                       n_estimators=1,
                                       bootstrap=True,
                                       oob_score=True,
                                       random_state=rng).fit,
                     X_train,
                     y_train)


def test_oob_score_regression():
    # Check that oob prediction is a good estimation of the generalization
    # error.
    rng = check_random_state(0)
    X_train, X_test, y_train, y_test = train_test_split(boston.data,
                                                        boston.target,
                                                        random_state=rng)

    clf = BaggingRegressor(base_estimator=DecisionTreeRegressor(),
                           n_estimators=50,
                           bootstrap=True,
                           oob_score=True,
                           random_state=rng).fit(X_train, y_train)

    test_score = clf.score(X_test, y_test)

    assert_less(abs(test_score - clf.oob_score_), 0.1)

    # Test with few estimators
    assert_warns(UserWarning,
                 BaggingRegressor(base_estimator=DecisionTreeRegressor(),
                                  n_estimators=1,
                                  bootstrap=True,
                                  oob_score=True,
                                  random_state=rng).fit,
                 X_train,
                 y_train)


def test_single_estimator():
    # Check singleton ensembles.
    # A 1-estimator bag without any resampling must equal the bare estimator.
    rng = check_random_state(0)
    X_train, X_test, y_train, y_test = train_test_split(boston.data,
                                                        boston.target,
                                                        random_state=rng)

    clf1 = BaggingRegressor(base_estimator=KNeighborsRegressor(),
                            n_estimators=1,
                            bootstrap=False,
                            bootstrap_features=False,
                            random_state=rng).fit(X_train, y_train)

    clf2 = KNeighborsRegressor().fit(X_train, y_train)

    assert_array_equal(clf1.predict(X_test), clf2.predict(X_test))
def test_error():
    # Test that it gives proper exception on deficient input.
    X, y = iris.data, iris.target
    base = DecisionTreeClassifier()

    # Test max_samples: must be in (0, 1] as a float or <= n_samples as int
    assert_raises(ValueError,
                  BaggingClassifier(base, max_samples=-1).fit, X, y)
    assert_raises(ValueError,
                  BaggingClassifier(base, max_samples=0.0).fit, X, y)
    assert_raises(ValueError,
                  BaggingClassifier(base, max_samples=2.0).fit, X, y)
    assert_raises(ValueError,
                  BaggingClassifier(base, max_samples=1000).fit, X, y)
    assert_raises(ValueError,
                  BaggingClassifier(base, max_samples="foobar").fit, X, y)

    # Test max_features: must be in (0, 1] as a float or <= n_features as int
    assert_raises(ValueError,
                  BaggingClassifier(base, max_features=-1).fit, X, y)
    assert_raises(ValueError,
                  BaggingClassifier(base, max_features=0.0).fit, X, y)
    assert_raises(ValueError,
                  BaggingClassifier(base, max_features=2.0).fit, X, y)
    assert_raises(ValueError,
                  BaggingClassifier(base, max_features=5).fit, X, y)
    assert_raises(ValueError,
                  BaggingClassifier(base, max_features="foobar").fit, X, y)

    # Test support of decision_function
    # a tree base estimator has no decision_function, so the bag must not either
    assert_false(hasattr(BaggingClassifier(base).fit(X, y), 'decision_function'))
def test_parallel_classification():
    """predict_proba / decision_function must not depend on n_jobs."""
    rng = check_random_state(0)
    X_tr, X_te, y_tr, y_te = train_test_split(iris.data,
                                              iris.target,
                                              random_state=rng)

    # predict_proba: same model queried serially and in parallel.
    bagging = BaggingClassifier(DecisionTreeClassifier(),
                                n_jobs=3,
                                random_state=0).fit(X_tr, y_tr)
    bagging.set_params(n_jobs=1)
    proba_serial = bagging.predict_proba(X_te)
    bagging.set_params(n_jobs=2)
    proba_parallel = bagging.predict_proba(X_te)
    assert_array_almost_equal(proba_serial, proba_parallel)

    # A model trained single-threaded must agree as well.
    single_job = BaggingClassifier(DecisionTreeClassifier(),
                                   n_jobs=1,
                                   random_state=0).fit(X_tr, y_tr)
    assert_array_almost_equal(proba_serial, single_job.predict_proba(X_te))

    # decision_function: same three-way comparison with an SVC base.
    bagging = BaggingClassifier(SVC(),
                                n_jobs=3,
                                random_state=0).fit(X_tr, y_tr)
    bagging.set_params(n_jobs=1)
    dec_serial = bagging.decision_function(X_te)
    bagging.set_params(n_jobs=2)
    dec_parallel = bagging.decision_function(X_te)
    assert_array_almost_equal(dec_serial, dec_parallel)

    single_job = BaggingClassifier(SVC(),
                                   n_jobs=1,
                                   random_state=0).fit(X_tr, y_tr)
    assert_array_almost_equal(dec_serial, single_job.decision_function(X_te))
def test_parallel_regression():
    """Regression predictions must not depend on n_jobs."""
    rng = check_random_state(0)
    X_tr, X_te, y_tr, y_te = train_test_split(boston.data,
                                              boston.target,
                                              random_state=rng)

    bagging = BaggingRegressor(DecisionTreeRegressor(),
                               n_jobs=3,
                               random_state=0).fit(X_tr, y_tr)
    bagging.set_params(n_jobs=1)
    pred_serial = bagging.predict(X_te)
    bagging.set_params(n_jobs=2)
    pred_parallel = bagging.predict(X_te)
    assert_array_almost_equal(pred_serial, pred_parallel)

    # A model trained single-threaded must give the same predictions.
    single_job = BaggingRegressor(DecisionTreeRegressor(),
                                  n_jobs=1,
                                  random_state=0).fit(X_tr, y_tr)
    assert_array_almost_equal(pred_serial, single_job.predict(X_te))
def test_gridsearch():
    """Bagging ensembles must be grid-searchable over nested parameters."""
    # Transform iris into a binary classification task.  Work on a copy:
    # the original code did `y = iris.target; y[y == 2] = 1`, which mutated
    # the shared module-level iris dataset in place and leaked the relabeled
    # targets into every test that runs afterwards.
    X, y = iris.data, iris.target.copy()
    y[y == 2] = 1

    # Grid search with scoring based on decision_function; both the ensemble
    # size and the nested SVC's C parameter are searched.
    parameters = {'n_estimators': (1, 2),
                  'base_estimator__C': (1, 2)}

    GridSearchCV(BaggingClassifier(SVC()),
                 parameters,
                 scoring="roc_auc").fit(X, y)
def test_base_estimator():
    """base_estimator=None defaults to a decision tree; others are kept."""
    rng = check_random_state(0)

    # Classification: None -> DecisionTreeClassifier; explicit bases kept.
    X_tr, X_te, y_tr, y_te = train_test_split(iris.data,
                                              iris.target,
                                              random_state=rng)
    clf_cases = ((None, DecisionTreeClassifier),
                 (DecisionTreeClassifier(), DecisionTreeClassifier),
                 (Perceptron(), Perceptron))
    for base, expected_cls in clf_cases:
        ensemble = BaggingClassifier(base,
                                     n_jobs=3,
                                     random_state=0).fit(X_tr, y_tr)
        assert_true(isinstance(ensemble.base_estimator_, expected_cls))

    # Regression: None -> DecisionTreeRegressor; explicit bases kept.
    X_tr, X_te, y_tr, y_te = train_test_split(boston.data,
                                              boston.target,
                                              random_state=rng)
    reg_cases = ((None, DecisionTreeRegressor),
                 (DecisionTreeRegressor(), DecisionTreeRegressor),
                 (SVR(), SVR))
    for base, expected_cls in reg_cases:
        ensemble = BaggingRegressor(base,
                                    n_jobs=3,
                                    random_state=0).fit(X_tr, y_tr)
        assert_true(isinstance(ensemble.base_estimator_, expected_cls))
def test_bagging_with_pipeline():
    """Bagging with feature subsampling must accept a Pipeline base."""
    pipeline = make_pipeline(SelectKBest(k=1),
                             DecisionTreeClassifier())
    bagging = BaggingClassifier(pipeline, max_features=2)
    bagging.fit(iris.data, iris.target)
class DummyZeroEstimator(BaseEstimator):
    """Minimal estimator: every prediction is the first class seen in fit."""

    def fit(self, X, y):
        self.classes_ = np.unique(y)
        return self

    def predict(self, X):
        # An all-zero index array maps every sample to classes_[0].
        return self.classes_[np.zeros(X.shape[0], dtype=int)]
def test_bagging_sample_weight_unsupported_but_passed():
    """Forwarding sample_weight to a base estimator without support must fail."""
    bagging = BaggingClassifier(DummyZeroEstimator())
    rng = check_random_state(0)

    # Fitting without weights works...
    bagging.fit(iris.data, iris.target).predict(iris.data)

    # ...but passing unsupported sample weights must raise.
    weights = rng.randint(10, size=(iris.data.shape[0]))
    assert_raises(ValueError, bagging.fit, iris.data, iris.target,
                  sample_weight=weights)
# Allow running this test module directly through the nose test runner.
if __name__ == "__main__":
    import nose
    nose.runmodule()
| bsd-3-clause |
mojoboss/scikit-learn | examples/plot_multioutput_face_completion.py | 330 | 3019 | """
==============================================
Face completion with multi-output estimators
==============================================
This example shows the use of multi-output estimators to complete images.
The goal is to predict the lower half of a face given its upper half.
The first column of images shows true faces. The next columns illustrate
how extremely randomized trees, k nearest neighbors, linear
regression and ridge regression complete the lower half of those faces.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn.datasets import fetch_olivetti_faces
from sklearn.utils.validation import check_random_state
from sklearn.ensemble import ExtraTreesRegressor
from sklearn.neighbors import KNeighborsRegressor
from sklearn.linear_model import LinearRegression
from sklearn.linear_model import RidgeCV
# Load the faces datasets
data = fetch_olivetti_faces()
targets = data.target

# Flatten each image to a single feature vector.
data = data.images.reshape((len(data.images), -1))
train = data[targets < 30]
test = data[targets >= 30]  # Test on independent people

# Test on a subset of people
n_faces = 5
rng = check_random_state(4)
face_ids = rng.randint(test.shape[0], size=(n_faces, ))
test = test[face_ids, :]

n_pixels = data.shape[1]
# np.ceil / np.floor return floats, but slice indices must be integers
# (float indices raise TypeError on modern NumPy), so cast explicitly.
upper_half = int(np.ceil(0.5 * n_pixels))
lower_half = int(np.floor(0.5 * n_pixels))
X_train = train[:, :upper_half]  # Upper half of the faces
y_train = train[:, lower_half:]  # Lower half of the faces
X_test = test[:, :upper_half]
y_test = test[:, lower_half:]
# Fit estimators
ESTIMATORS = {
    "Extra trees": ExtraTreesRegressor(n_estimators=10, max_features=32,
                                       random_state=0),
    "K-nn": KNeighborsRegressor(),
    "Linear regression": LinearRegression(),
    "Ridge": RidgeCV(),
}

# Each estimator learns to predict the lower face half from the upper half.
y_test_predict = {}
for name, estimator in ESTIMATORS.items():
    y_test_predict[name] = estimator.fit(X_train, y_train).predict(X_test)
# Plot the completed faces
image_shape = (64, 64)

n_cols = 1 + len(ESTIMATORS)
plt.figure(figsize=(2. * n_cols, 2.26 * n_faces))
plt.suptitle("Face completion with multi-output estimators", size=16)

for i in range(n_faces):
    # Left-most column: the ground-truth face (titled on the first row only).
    true_face = np.hstack((X_test[i], y_test[i]))
    title_kw = {} if i else {"title": "true faces"}
    sub = plt.subplot(n_faces, n_cols, i * n_cols + 1, **title_kw)
    sub.axis("off")
    sub.imshow(true_face.reshape(image_shape),
               cmap=plt.cm.gray,
               interpolation="nearest")

    # One column per estimator, in alphabetical order.
    for j, est in enumerate(sorted(ESTIMATORS)):
        completed_face = np.hstack((X_test[i], y_test_predict[est][i]))
        title_kw = {} if i else {"title": est}
        sub = plt.subplot(n_faces, n_cols, i * n_cols + 2 + j, **title_kw)
        sub.axis("off")
        sub.imshow(completed_face.reshape(image_shape),
                   cmap=plt.cm.gray,
                   interpolation="nearest")

plt.show()
| bsd-3-clause |
djgagne/scikit-learn | sklearn/linear_model/tests/test_base.py | 101 | 12205 | # Author: Alexandre Gramfort <alexandre.gramfort@inria.fr>
# Fabian Pedregosa <fabian.pedregosa@inria.fr>
#
# License: BSD 3 clause
import numpy as np
from scipy import sparse
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_equal
from sklearn.linear_model.base import LinearRegression
from sklearn.linear_model.base import center_data, sparse_center_data, _rescale_data
from sklearn.utils import check_random_state
from sklearn.utils.testing import assert_raise_message
from sklearn.utils.testing import assert_greater
from sklearn.datasets.samples_generator import make_sparse_uncorrelated
from sklearn.datasets.samples_generator import make_regression
def test_linear_regression():
    """LinearRegression recovers an exact linear fit on toy data."""
    # Two points on the identity line: slope 1, intercept 0.
    reg = LinearRegression()
    reg.fit([[1], [2]], [1, 2])
    assert_array_almost_equal(reg.coef_, [1])
    assert_array_almost_equal(reg.intercept_, [0])
    assert_array_almost_equal(reg.predict([[1], [2]]), [1, 2])

    # Degenerate input: a single sample with target zero.
    reg = LinearRegression()
    reg.fit([[1]], [0])
    assert_array_almost_equal(reg.coef_, [0])
    assert_array_almost_equal(reg.intercept_, [0])
    assert_array_almost_equal(reg.predict([[1]]), [0])
def test_linear_regression_sample_weights():
    """Check LinearRegression fits with per-sample weights on random data."""
    rng = np.random.RandomState(0)

    # One over-determined (6x5) and one under-determined (5x10) problem.
    for n_samples, n_features in ((6, 5), (5, 10)):
        y = rng.randn(n_samples)
        X = rng.randn(n_samples, n_features)
        sample_weight = 1.0 + rng.rand(n_samples)

        clf = LinearRegression()
        clf.fit(X, y, sample_weight)
        coefs1 = clf.coef_

        assert_equal(clf.coef_.shape, (X.shape[1], ))
        assert_greater(clf.score(X, y), 0.9)
        # Both problems admit an (near-)exact fit, so predictions match y.
        assert_array_almost_equal(clf.predict(X), y)

        # Sample weight can be implemented via a simple rescaling
        # for the square loss.
        scaled_y = y * np.sqrt(sample_weight)
        scaled_X = X * np.sqrt(sample_weight)[:, np.newaxis]
        # NOTE(review): scaled_X / scaled_y are computed but never used --
        # the fit below uses the *unscaled* data, so the rescaling
        # equivalence described above is not actually exercised.  It looks
        # like this was meant to be clf.fit(scaled_X, scaled_y); confirm
        # (intercept handling affects the equivalence) before changing.
        clf.fit(X, y)
        coefs2 = clf.coef_
        assert_array_almost_equal(coefs1, coefs2)
def test_raises_value_error_if_sample_weights_greater_than_1d():
    """Scalar and 1-D sample weights must be accepted by fit."""
    # Sample weights must be either scalar or 1D
    rng = np.random.RandomState(42)

    for n_samples, n_features in ((2, 3), (3, 2)):
        X = rng.randn(n_samples, n_features)
        y = rng.randn(n_samples)
        sample_weights_ok = rng.randn(n_samples) ** 2 + 1

        # make sure the "OK" sample weights actually work
        reg = LinearRegression()
        reg.fit(X, y, sample_weights_ok)
        reg.fit(X, y, 1.)
        reg.fit(X, y, 2.)
def test_fit_intercept():
    """coef_ shape and ndim must not depend on fit_intercept."""
    X2 = np.array([[0.38349978, 0.61650022],
                   [0.58853682, 0.41146318]])
    X3 = np.array([[0.27677969, 0.70693172, 0.01628859],
                   [0.08385139, 0.20692515, 0.70922346]])
    y = np.array([1, 1])

    # Fit every (n_features, fit_intercept) combination once.
    fits = {(n, flag): LinearRegression(fit_intercept=flag).fit(X, y)
            for n, X in ((2, X2), (3, X3))
            for flag in (False, True)}

    assert_equal(fits[(2, True)].coef_.shape, fits[(2, False)].coef_.shape)
    assert_equal(fits[(3, True)].coef_.shape, fits[(3, False)].coef_.shape)
    assert_equal(fits[(2, False)].coef_.ndim, fits[(3, False)].coef_.ndim)
def test_linear_regression_sparse(random_state=0):
    """Linear regression must also work with sparse (identity) data."""
    rng = check_random_state(random_state)
    for _ in range(10):
        n = 100
        X = sparse.eye(n, n)
        beta = rng.rand(n)
        y = X * beta[:, np.newaxis]

        ols = LinearRegression()
        ols.fit(X, y.ravel())
        # With X = I the fit reproduces beta (split between coef/intercept)
        # and leaves no residual.
        assert_array_almost_equal(beta, ols.coef_ + ols.intercept_)
        assert_array_almost_equal(ols.residues_, 0)
def test_linear_regression_multiple_outcome(random_state=0):
    """A duplicated-target multi-output fit must equal the 1-D fit."""
    X, y = make_regression(random_state=random_state)
    Y = np.vstack((y, y)).T
    n_features = X.shape[1]

    reg = LinearRegression(fit_intercept=True)
    reg.fit(X, Y)
    assert_equal(reg.coef_.shape, (2, n_features))
    Y_pred = reg.predict(X)

    reg.fit(X, y)
    y_pred = reg.predict(X)
    # Stacked single-output predictions must match the multi-output ones.
    assert_array_almost_equal(np.vstack((y_pred, y_pred)).T, Y_pred, decimal=3)
def test_linear_regression_sparse_multiple_outcome(random_state=0):
    """Sparse multi-output fits must equal the stacked 1-D fit."""
    rng = check_random_state(random_state)
    X, y = make_sparse_uncorrelated(random_state=rng)
    X = sparse.coo_matrix(X)
    Y = np.vstack((y, y)).T
    n_features = X.shape[1]

    ols = LinearRegression()
    ols.fit(X, Y)
    assert_equal(ols.coef_.shape, (2, n_features))
    Y_pred = ols.predict(X)

    ols.fit(X, y.ravel())
    y_pred = ols.predict(X)
    # Stacked single-output predictions must match the multi-output ones.
    assert_array_almost_equal(np.vstack((y_pred, y_pred)).T, Y_pred, decimal=3)
def test_center_data():
    """center_data honours every fit_intercept / normalize combination."""
    n_samples, n_features = 200, 2
    rng = check_random_state(0)
    X = rng.rand(n_samples, n_features)
    y = rng.rand(n_samples)

    expected_X_mean = np.mean(X, axis=0)
    # XXX: currently scaled to variance=n_samples
    expected_X_std = np.std(X, axis=0) * np.sqrt(X.shape[0])
    expected_y_mean = np.mean(y, axis=0)

    # (fit_intercept, normalize) -> expected (X_mean, y_mean, X_std, Xt, yt)
    cases = [
        ((False, False),
         (np.zeros(n_features), 0, np.ones(n_features), X, y)),
        ((True, False),
         (expected_X_mean, expected_y_mean, np.ones(n_features),
          X - expected_X_mean, y - expected_y_mean)),
        ((True, True),
         (expected_X_mean, expected_y_mean, expected_X_std,
          (X - expected_X_mean) / expected_X_std, y - expected_y_mean)),
    ]
    for (fit_intercept, normalize), expected in cases:
        Xt, yt, X_mean, y_mean, X_std = center_data(
            X, y, fit_intercept=fit_intercept, normalize=normalize)
        exp_X_mean, exp_y_mean, exp_X_std, exp_Xt, exp_yt = expected
        assert_array_almost_equal(X_mean, exp_X_mean)
        assert_array_almost_equal(y_mean, exp_y_mean)
        assert_array_almost_equal(X_std, exp_X_std)
        assert_array_almost_equal(Xt, exp_Xt)
        assert_array_almost_equal(yt, exp_yt)
def test_center_data_multioutput():
    """Dense and sparse centering must handle 2-D targets."""
    n_samples, n_features, n_outputs = 200, 3, 2
    rng = check_random_state(0)
    X = rng.rand(n_samples, n_features)
    y = rng.rand(n_samples, n_outputs)
    expected_y_mean = np.mean(y, axis=0)

    for center, X_arg in ((center_data, X),
                          (sparse_center_data, sparse.csc_matrix(X))):
        # Without an intercept the targets pass through untouched.
        _, yt, _, y_mean, _ = center(X_arg, y, fit_intercept=False,
                                     normalize=False)
        assert_array_almost_equal(y_mean, np.zeros(n_outputs))
        assert_array_almost_equal(yt, y)

        # With an intercept the per-output means are removed, regardless
        # of whether normalization is requested.
        for normalize in (False, True):
            _, yt, _, y_mean, _ = center(X_arg, y, fit_intercept=True,
                                         normalize=normalize)
            assert_array_almost_equal(y_mean, expected_y_mean)
            assert_array_almost_equal(yt, y - y_mean)
def test_center_data_weighted():
    """Weighted centering must remove the weighted (not plain) mean."""
    n_samples, n_features = 200, 2
    rng = check_random_state(0)
    X = rng.rand(n_samples, n_features)
    y = rng.rand(n_samples)
    sample_weight = rng.rand(n_samples)

    expected_X_mean = np.average(X, axis=0, weights=sample_weight)
    expected_y_mean = np.average(y, axis=0, weights=sample_weight)

    # XXX: if normalize=True, should we expect a weighted standard deviation?
    # Currently not weighted, but calculated with respect to weighted mean
    # XXX: currently scaled to variance=n_samples
    expected_X_std = (np.sqrt(X.shape[0]) *
                      np.mean((X - expected_X_mean) ** 2, axis=0) ** .5)

    for normalize in (False, True):
        Xt, yt, X_mean, y_mean, X_std = center_data(
            X, y, fit_intercept=True, normalize=normalize,
            sample_weight=sample_weight)
        assert_array_almost_equal(X_mean, expected_X_mean)
        assert_array_almost_equal(y_mean, expected_y_mean)
        if normalize:
            assert_array_almost_equal(X_std, expected_X_std)
            assert_array_almost_equal(Xt,
                                      (X - expected_X_mean) / expected_X_std)
        else:
            assert_array_almost_equal(X_std, np.ones(n_features))
            assert_array_almost_equal(Xt, X - expected_X_mean)
        assert_array_almost_equal(yt, y - expected_y_mean)
def test_sparse_center_data():
    """Sparse centering reports means/stds but never densifies X."""
    n_samples, n_features = 200, 2
    rng = check_random_state(0)

    # random_state not supported yet in sparse.rand
    X = sparse.rand(n_samples, n_features, density=.5)  # , random_state=rng
    X = X.tolil()
    y = rng.rand(n_samples)
    XA = X.toarray()
    # XXX: currently scaled to variance=n_samples
    expected_X_std = np.std(XA, axis=0) * np.sqrt(X.shape[0])

    # fit_intercept=False: nothing is touched at all.
    Xt, yt, X_mean, y_mean, X_std = sparse_center_data(X, y,
                                                       fit_intercept=False,
                                                       normalize=False)
    assert_array_almost_equal(X_mean, np.zeros(n_features))
    assert_array_almost_equal(y_mean, 0)
    assert_array_almost_equal(X_std, np.ones(n_features))
    assert_array_almost_equal(Xt.A, XA)
    assert_array_almost_equal(yt, y)

    # fit_intercept=True: means are reported and y is centered, but the
    # sparse X is left uncentered (only scaled when normalizing).
    for normalize in (False, True):
        Xt, yt, X_mean, y_mean, X_std = sparse_center_data(
            X, y, fit_intercept=True, normalize=normalize)
        assert_array_almost_equal(X_mean, np.mean(XA, axis=0))
        assert_array_almost_equal(y_mean, np.mean(y, axis=0))
        if normalize:
            assert_array_almost_equal(X_std, expected_X_std)
            assert_array_almost_equal(Xt.A, XA / expected_X_std)
        else:
            assert_array_almost_equal(X_std, np.ones(n_features))
            assert_array_almost_equal(Xt.A, XA)
        assert_array_almost_equal(yt, y - np.mean(y, axis=0))
def test_csr_sparse_center_data():
    """sparse_center_data must keep CSR input in CSR format."""
    X, y = make_regression()
    X[X < 2.5] = 0.0
    csr = sparse.csr_matrix(X)
    csr_centered, y, _, _, _ = sparse_center_data(csr, y, True)
    assert_equal(csr_centered.getformat(), 'csr')
def test_rescale_data():
    """_rescale_data multiplies rows of X and y by sqrt(sample_weight)."""
    rng = np.random.RandomState(0)
    n_samples, n_features = 200, 2
    sample_weight = 1.0 + rng.rand(n_samples)
    X = rng.rand(n_samples, n_features)
    y = rng.rand(n_samples)

    rescaled_X, rescaled_y = _rescale_data(X, y, sample_weight)

    sqrt_w = np.sqrt(sample_weight)
    assert_array_almost_equal(rescaled_X, X * sqrt_w[:, np.newaxis])
    assert_array_almost_equal(rescaled_y, y * sqrt_w)
| bsd-3-clause |
liyu1990/sklearn | sklearn/model_selection/_validation.py | 4 | 35605 | """
The :mod:`sklearn.model_selection._validation` module includes classes and
functions to validate the model.
"""
# Author: Alexandre Gramfort <alexandre.gramfort@inria.fr>,
# Gael Varoquaux <gael.varoquaux@normalesup.org>,
# Olivier Grisel <olivier.grisel@ensta.org>
# License: BSD 3 clause
from __future__ import print_function
from __future__ import division
import warnings
import numbers
import time
import numpy as np
import scipy.sparse as sp
from ..base import is_classifier, clone
from ..utils import indexable, check_random_state, safe_indexing
from ..utils.fixes import astype
from ..utils.validation import _is_arraylike, _num_samples
from ..externals.joblib import Parallel, delayed, logger
from ..metrics.scorer import check_scoring
from ..exceptions import FitFailedWarning
from ._split import KFold
from ._split import LabelKFold
from ._split import LeaveOneLabelOut
from ._split import LeaveOneOut
from ._split import LeavePLabelOut
from ._split import LeavePOut
from ._split import ShuffleSplit
from ._split import LabelShuffleSplit
from ._split import StratifiedKFold
from ._split import StratifiedShuffleSplit
from ._split import PredefinedSplit
from ._split import check_cv, _safe_split
# Public API of this module.
__all__ = ['cross_val_score', 'cross_val_predict', 'permutation_test_score',
           'learning_curve', 'validation_curve']

# Registry mapping splitter class names to their classes.
ALL_CVS = {'KFold': KFold,
           'LabelKFold': LabelKFold,
           'LeaveOneLabelOut': LeaveOneLabelOut,
           'LeaveOneOut': LeaveOneOut,
           'LeavePLabelOut': LeavePLabelOut,
           'LeavePOut': LeavePOut,
           'ShuffleSplit': ShuffleSplit,
           'LabelShuffleSplit': LabelShuffleSplit,
           'StratifiedKFold': StratifiedKFold,
           'StratifiedShuffleSplit': StratifiedShuffleSplit,
           'PredefinedSplit': PredefinedSplit}

# Subset of ALL_CVS -- presumably the splitters that use group labels
# (judging by their names; confirm against _split).
LABEL_CVS = {'LabelKFold': LabelKFold,
             'LeaveOneLabelOut': LeaveOneLabelOut,
             'LeavePLabelOut': LeavePLabelOut,
             'LabelShuffleSplit': LabelShuffleSplit}
def cross_val_score(estimator, X, y=None, labels=None, scoring=None, cv=None,
                    n_jobs=1, verbose=0, fit_params=None,
                    pre_dispatch='2*n_jobs'):
    """Evaluate a score by cross-validation.

    Read more in the :ref:`User Guide <validate>`.

    Parameters
    ----------
    estimator : estimator object implementing 'fit'
        The object to use to fit the data.

    X : array-like
        The data to fit. Can be, for example a list, or an array at least 2d.

    y : array-like, optional, default: None
        The target variable to try to predict in the case of
        supervised learning.

    labels : array-like, with shape (n_samples,), optional
        Group labels for the samples used while splitting the dataset into
        train/test set.

    scoring : string, callable or None, optional, default: None
        A string (see model evaluation documentation) or a scorer callable
        object / function with signature ``scorer(estimator, X, y)``.

    cv : int, cross-validation generator or an iterable, optional
        Determines the cross-validation splitting strategy: None for the
        default 3-fold cross-validation, an integer number of folds, a CV
        splitter object, or an iterable of (train, test) splits.  For
        integer/None inputs, ``StratifiedKFold`` is used for classification
        tasks, when ``y`` is binary or multiclass.

    n_jobs : integer, optional
        The number of CPUs to use to do the computation. -1 means
        'all CPUs'.

    verbose : integer, optional
        The verbosity level.

    fit_params : dict, optional
        Parameters to pass to the fit method of the estimator.

    pre_dispatch : int, or string, optional
        Controls the number of jobs that get dispatched during parallel
        execution; None, an int, or an expression in n_jobs such as
        '2*n_jobs'.

    Returns
    -------
    scores : array of float, shape=(len(list(cv)),)
        Array of scores of the estimator for each run of the cross validation.
    """
    X, y, labels = indexable(X, y, labels)
    cv = check_cv(cv, y, classifier=is_classifier(estimator))
    scorer = check_scoring(estimator, scoring=scoring)

    # Each fold fits its own clone so that the folds stay independent and
    # the per-fold work is pickle-able.
    parallel = Parallel(n_jobs=n_jobs, verbose=verbose,
                        pre_dispatch=pre_dispatch)
    fold_results = parallel(
        delayed(_fit_and_score)(clone(estimator), X, y, scorer,
                                train, test, verbose, None, fit_params)
        for train, test in cv.split(X, y, labels))

    # _fit_and_score returns [test_score, n_test_samples, scoring_time];
    # keep only the scores.
    return np.array(fold_results)[:, 0]
def _fit_and_score(estimator, X, y, scorer, train, test, verbose,
                   parameters, fit_params, return_train_score=False,
                   return_parameters=False, error_score='raise'):
    """Fit estimator and compute scores for a given dataset split.

    Parameters
    ----------
    estimator : estimator object implementing 'fit'
        The object to use to fit the data.

    X : array-like of shape at least 2D
        The data to fit.

    y : array-like, optional, default: None
        The target variable to try to predict in the case of
        supervised learning.

    scorer : callable
        A scorer callable object / function with signature
        ``scorer(estimator, X, y)``.

    train : array-like, shape (n_train_samples,)
        Indices of training samples.

    test : array-like, shape (n_test_samples,)
        Indices of test samples.

    verbose : integer
        The verbosity level.

    error_score : 'raise' (default) or numeric
        Value to assign to the score if an error occurs in estimator fitting.
        If set to 'raise', the error is raised. If a numeric value is given,
        FitFailedWarning is raised. This parameter does not affect the refit
        step, which will always raise the error.

    parameters : dict or None
        Parameters to be set on the estimator.

    fit_params : dict or None
        Parameters that will be passed to ``estimator.fit``.

    return_train_score : boolean, optional, default: False
        Compute and return score on training set.

    return_parameters : boolean, optional, default: False
        Return parameters that has been used for the estimator.

    Returns
    -------
    train_score : float, optional
        Score on training set, returned only if `return_train_score` is `True`.

    test_score : float
        Score on test set.

    n_test_samples : int
        Number of test samples.

    scoring_time : float
        Time spent for fitting and scoring in seconds.

    parameters : dict or None, optional
        The parameters that have been evaluated.
    """
    if verbose > 1:
        if parameters is None:
            msg = "no parameters to be set"
        else:
            msg = '%s' % (', '.join('%s=%s' % (k, v)
                          for k, v in parameters.items()))
        print("[CV] %s %s" % (msg, (64 - len(msg)) * '.'))

    # Adjust length of sample weights: per-sample fit parameters (e.g.
    # sample_weight) are subset to the training indices.
    fit_params = fit_params if fit_params is not None else {}
    fit_params = dict([(k, _index_param_value(X, v, train))
                      for k, v in fit_params.items()])

    if parameters is not None:
        estimator.set_params(**parameters)

    start_time = time.time()

    X_train, y_train = _safe_split(estimator, X, y, train)
    X_test, y_test = _safe_split(estimator, X, y, test, train)

    try:
        if y_train is None:
            estimator.fit(X_train, **fit_params)
        else:
            estimator.fit(X_train, y_train, **fit_params)
    except Exception as e:
        # Fit failures are either fatal (error_score='raise') or replaced
        # by a fixed numeric score with a warning.
        if error_score == 'raise':
            raise
        elif isinstance(error_score, numbers.Number):
            test_score = error_score
            if return_train_score:
                train_score = error_score
            warnings.warn("Classifier fit failed. The score on this train-test"
                          " partition for these parameters will be set to %f. "
                          "Details: \n%r" % (error_score, e), FitFailedWarning)
        else:
            raise ValueError("error_score must be the string 'raise' or a"
                             " numeric value. (Hint: if using 'raise', please"
                             " make sure that it has been spelled correctly.)")
    else:
        test_score = _score(estimator, X_test, y_test, scorer)
        if return_train_score:
            train_score = _score(estimator, X_train, y_train, scorer)

    # scoring_time covers fitting *and* scoring.
    scoring_time = time.time() - start_time

    if verbose > 2:
        msg += ", score=%f" % test_score
    if verbose > 1:
        end_msg = "%s -%s" % (msg, logger.short_format_time(scoring_time))
        print("[CV] %s %s" % ((64 - len(end_msg)) * '.', end_msg))

    # Assemble the return list in the documented order.
    ret = [train_score] if return_train_score else []
    ret.extend([test_score, _num_samples(X_test), scoring_time])
    if return_parameters:
        ret.append(parameters)
    return ret
def _score(estimator, X_test, y_test, scorer):
"""Compute the score of an estimator on a given test set."""
if y_test is None:
score = scorer(estimator, X_test)
else:
score = scorer(estimator, X_test, y_test)
if not isinstance(score, numbers.Number):
raise ValueError("scoring must return a number, got %s (%s) instead."
% (str(score), type(score)))
return score
def cross_val_predict(estimator, X, y=None, labels=None, cv=None, n_jobs=1,
                      verbose=0, fit_params=None, pre_dispatch='2*n_jobs'):
    """Generate cross-validated estimates for each input data point.

    Read more in the :ref:`User Guide <validate>`.

    Parameters
    ----------
    estimator : estimator object implementing 'fit' and 'predict'
        The object to use to fit the data.

    X : array-like
        The data to fit. Can be, for example a list, or an array at least 2d.

    y : array-like, optional, default: None
        The target variable to try to predict in the case of
        supervised learning.

    labels : array-like, with shape (n_samples,), optional
        Group labels for the samples used while splitting the dataset into
        train/test set.

    cv : int, cross-validation generator or an iterable, optional
        Determines the cross-validation splitting strategy: None for the
        default 3-fold cross-validation, an integer number of folds, a CV
        splitter object, or an iterable of (train, test) splits.  For
        integer/None inputs, ``StratifiedKFold`` is used for classification
        tasks, when ``y`` is binary or multiclass.

    n_jobs : integer, optional
        The number of CPUs to use to do the computation. -1 means
        'all CPUs'.

    verbose : integer, optional
        The verbosity level.

    fit_params : dict, optional
        Parameters to pass to the fit method of the estimator.

    pre_dispatch : int, or string, optional
        Controls the number of jobs that get dispatched during parallel
        execution; None, an int, or an expression in n_jobs such as
        '2*n_jobs'.

    Returns
    -------
    predictions : ndarray
        This is the result of calling 'predict'
    """
    X, y, labels = indexable(X, y, labels)
    cv = check_cv(cv, y, classifier=is_classifier(estimator))

    # Each fold fits its own clone so that the folds stay independent and
    # the per-fold work is pickle-able.
    parallel = Parallel(n_jobs=n_jobs, verbose=verbose,
                        pre_dispatch=pre_dispatch)
    prediction_blocks = parallel(
        delayed(_fit_and_predict)(clone(estimator), X, y, train, test,
                                  verbose, fit_params)
        for train, test in cv.split(X, y, labels))

    # Split the per-fold (predictions, test indices) pairs.
    predictions = [block for block, _ in prediction_blocks]
    test_indices = np.concatenate([indices for _, indices in
                                   prediction_blocks])

    # The test folds must exactly partition the samples, otherwise there is
    # no single prediction per input point.
    if not _check_is_permutation(test_indices, _num_samples(X)):
        raise ValueError('cross_val_predict only works for partitions')

    # Invert the fold ordering back to the original sample order.
    inv_test_indices = np.empty(len(test_indices), dtype=int)
    inv_test_indices[test_indices] = np.arange(len(test_indices))

    # Concatenate the (possibly sparse) per-fold predictions.
    if sp.issparse(predictions[0]):
        predictions = sp.vstack(predictions, format=predictions[0].format)
    else:
        predictions = np.concatenate(predictions)
    return predictions[inv_test_indices]
def _fit_and_predict(estimator, X, y, train, test, verbose, fit_params):
    """Fit estimator on one split and predict values for the other.

    Read more in the :ref:`User Guide <validate>`.

    Parameters
    ----------
    estimator : estimator object implementing 'fit' and 'predict'
        The object to use to fit the data.

    X : array-like of shape at least 2D
        The data to fit.

    y : array-like, optional, default: None
        The target variable to try to predict in the case of
        supervised learning.

    train : array-like, shape (n_train_samples,)
        Indices of training samples.

    test : array-like, shape (n_test_samples,)
        Indices of test samples.

    verbose : integer
        The verbosity level.

    fit_params : dict or None
        Parameters that will be passed to ``estimator.fit``.

    Returns
    -------
    predictions : sequence
        Result of calling 'estimator.predict' on the test split.

    test : array-like
        The value of the ``test`` parameter, returned unchanged.
    """
    # Adjust length of sample weights: per-sample fit parameters are subset
    # to the training indices.
    if fit_params is None:
        fit_params = {}
    fit_params = {k: _index_param_value(X, v, train)
                  for k, v in fit_params.items()}

    X_train, y_train = _safe_split(estimator, X, y, train)
    X_test, _ = _safe_split(estimator, X, y, test, train)

    if y_train is None:
        estimator.fit(X_train, **fit_params)
    else:
        estimator.fit(X_train, y_train, **fit_params)
    return estimator.predict(X_test), test
def _check_is_permutation(indices, n_samples):
"""Check whether indices is a reordering of the array np.arange(n_samples)
Parameters
----------
indices : ndarray
integer array to test
n_samples : int
number of expected elements
Returns
-------
is_partition : bool
True iff sorted(locs) is range(n)
"""
if len(indices) != n_samples:
return False
hit = np.zeros(n_samples, bool)
hit[indices] = True
if not np.all(hit):
return False
return True
def _index_param_value(X, v, indices):
    """Private helper function for parameter value indexing."""
    if _is_arraylike(v) and _num_samples(v) == _num_samples(X):
        # v is per-sample (one entry per row of X): subset it like X.
        # Sparse values are converted to CSR first for row indexing.
        return safe_indexing(v.tocsr() if sp.issparse(v) else v, indices)
    # Not aligned with the samples: pass through unchanged.
    return v
def permutation_test_score(estimator, X, y, labels=None, cv=None,
                           n_permutations=100, n_jobs=1, random_state=0,
                           verbose=0, scoring=None):
    """Evaluate the significance of a cross-validated score with permutations.

    The cross-validated score of ``estimator`` on ``(X, y)`` is compared
    against the scores obtained on ``n_permutations`` datasets in which the
    targets have been shuffled (within groups, when ``labels`` is given).

    Read more in the :ref:`User Guide <validate>`.

    Parameters
    ----------
    estimator : estimator object implementing 'fit'
        The object to use to fit the data.
    X : array-like of shape at least 2D
        The data to fit.
    y : array-like
        The target variable to try to predict.
    labels : array-like, with shape (n_samples,), optional
        Group labels; targets are permuted only within each group.
    scoring : string, callable or None, optional, default: None
        A string (see model evaluation documentation) or a scorer callable
        with signature ``scorer(estimator, X, y)``.
    cv : int, cross-validation generator or an iterable, optional
        Determines the cross-validation splitting strategy (None -> 3-fold;
        integer -> number of folds of a (Stratified)KFold; or a CV
        generator / iterable of train-test splits). See the
        :mod:`sklearn.model_selection` module for available strategies.
    n_permutations : integer, optional
        Number of times to permute ``y``.
    n_jobs : integer, optional
        The number of CPUs to use (-1 means 'all CPUs').
    random_state : RandomState or an int seed (0 by default)
        Generator state used for the random permutations.
    verbose : integer, optional
        The verbosity level.

    Returns
    -------
    score : float
        The true score without permuting targets.
    permutation_scores : array, shape (n_permutations,)
        The scores obtained for each permutation.
    pvalue : float
        Smoothed fraction of permutation scores at least as good as the
        true score. If ``scoring`` is a loss (lower is better), this is
        the complement of the p-value: 1 - p-value.

    Notes
    -----
    This function implements Test 1 in:
    Ojala and Garriga. Permutation Tests for Studying Classifier
    Performance. The Journal of Machine Learning Research (2010) vol. 11
    """
    X, y, labels = indexable(X, y, labels)
    cv = check_cv(cv, y, classifier=is_classifier(estimator))
    scorer = check_scoring(estimator, scoring=scoring)
    random_state = check_random_state(random_state)

    # Clone so every fit is independent and the estimator stays picklable.
    score = _permutation_test_score(clone(estimator), X, y, labels, cv, scorer)
    permutation_scores = np.array(
        Parallel(n_jobs=n_jobs, verbose=verbose)(
            delayed(_permutation_test_score)(
                clone(estimator), X, _shuffle(y, labels, random_state),
                labels, cv, scorer)
            for _ in range(n_permutations)))
    # Add-one smoothing: the unpermuted score counts as one permutation.
    n_at_least = np.sum(permutation_scores >= score)
    pvalue = (n_at_least + 1.0) / (n_permutations + 1)
    return score, permutation_scores, pvalue
permutation_test_score.__test__ = False  # to avoid a pb with nosetests
def _permutation_test_score(estimator, X, y, labels, cv, scorer):
"""Auxiliary function for permutation_test_score"""
avg_score = []
for train, test in cv.split(X, y, labels):
estimator.fit(X[train], y[train])
avg_score.append(scorer(estimator, X[test], y[test]))
return np.mean(avg_score)
def _shuffle(y, labels, random_state):
"""Return a shuffled copy of y eventually shuffle among same labels."""
if labels is None:
indices = random_state.permutation(len(y))
else:
indices = np.arange(len(labels))
for label in np.unique(labels):
this_mask = (labels == label)
indices[this_mask] = random_state.permutation(indices[this_mask])
return y[indices]
def learning_curve(estimator, X, y, labels=None,
                   train_sizes=np.linspace(0.1, 1.0, 5), cv=None, scoring=None,
                   exploit_incremental_learning=False, n_jobs=1,
                   pre_dispatch="all", verbose=0):
    """Learning curve.

    Determines cross-validated training and test scores for different
    training set sizes. A cross-validation generator splits the whole
    dataset k times into training and test data; subsets of the training
    set with varying sizes are used to train the estimator, and a score for
    each training subset size and the test set is computed. The scores are
    then averaged over all k runs for each training subset size.

    Read more in the :ref:`User Guide <validate>`.

    Parameters
    ----------
    estimator : object type that implements the "fit" and "predict" methods
        An object of that type which is cloned for each validation.
    X : array-like, shape (n_samples, n_features)
        Training vector.
    y : array-like, shape (n_samples) or (n_samples, n_features), optional
        Target relative to X; None for unsupervised learning.
    labels : array-like, with shape (n_samples,), optional
        Group labels for the samples used while splitting the dataset.
    train_sizes : array-like, shape (n_ticks,), dtype float or int
        Relative (float in (0, 1]) or absolute (int) numbers of training
        examples used to generate the curve.
        (default: np.linspace(0.1, 1.0, 5))
    cv : int, cross-validation generator or an iterable, optional
        Determines the cross-validation splitting strategy (None -> 3-fold;
        integer -> number of folds of a (Stratified)KFold; or a CV
        generator / iterable of train-test splits).
    scoring : string, callable or None, optional, default: None
        A string (see model evaluation documentation) or a scorer callable
        with signature ``scorer(estimator, X, y)``.
    exploit_incremental_learning : boolean, optional, default: False
        If the estimator supports incremental learning, use ``partial_fit``
        to speed up fitting for different training set sizes.
    n_jobs : integer, optional
        Number of jobs to run in parallel (default 1).
    pre_dispatch : integer or string, optional
        Number of predispatched jobs for parallel execution (e.g. '2*n_jobs').
    verbose : integer, optional
        Controls the verbosity: the higher, the more messages.

    Returns
    -------
    train_sizes_abs : array, shape = (n_unique_ticks,), dtype int
        Numbers of training examples used (duplicates removed).
    train_scores : array, shape (n_ticks, n_cv_folds)
        Scores on training sets.
    test_scores : array, shape (n_ticks, n_cv_folds)
        Scores on test set.

    Raises
    ------
    ValueError
        If ``exploit_incremental_learning`` is set but the estimator has no
        ``partial_fit`` method.
    """
    if exploit_incremental_learning and not hasattr(estimator, "partial_fit"):
        raise ValueError("An estimator must support the partial_fit interface "
                         "to exploit incremental learning")
    X, y, labels = indexable(X, y, labels)
    cv = check_cv(cv, y, classifier=is_classifier(estimator))
    # Materialize the splits ONCE: they are iterated multiple times below,
    # and a randomized splitter could yield different folds on a second
    # call to ``cv.split``.
    cv_iter = list(cv.split(X, y, labels))
    scorer = check_scoring(estimator, scoring=scoring)

    n_max_training_samples = len(cv_iter[0][0])
    # Because the lengths of folds can be significantly different, it is
    # not guaranteed that we use all of the available training data when we
    # use the first 'n_max_training_samples' samples.
    train_sizes_abs = _translate_train_sizes(train_sizes,
                                             n_max_training_samples)
    n_unique_ticks = train_sizes_abs.shape[0]
    if verbose > 0:
        print("[learning_curve] Training set sizes: " + str(train_sizes_abs))

    parallel = Parallel(n_jobs=n_jobs, pre_dispatch=pre_dispatch,
                        verbose=verbose)
    if exploit_incremental_learning:
        classes = np.unique(y) if is_classifier(estimator) else None
        # BUGFIX: iterate the cached ``cv_iter`` rather than calling
        # ``cv.split`` again, which may produce splits different from the
        # ones used to compute ``train_sizes_abs`` above.
        out = parallel(delayed(_incremental_fit_estimator)(
            clone(estimator), X, y, classes, train, test, train_sizes_abs,
            scorer, verbose) for train, test in cv_iter)
    else:
        out = parallel(delayed(_fit_and_score)(
            clone(estimator), X, y, scorer, train[:n_train_samples], test,
            verbose, parameters=None, fit_params=None, return_train_score=True)
            for train, test in cv_iter
            for n_train_samples in train_sizes_abs)
        out = np.array(out)[:, :2]
        n_cv_folds = out.shape[0] // n_unique_ticks
        out = out.reshape(n_cv_folds, n_unique_ticks, 2)
    # (folds, ticks, 2) -> (2, ticks, folds): row 0 train, row 1 test.
    out = np.asarray(out).transpose((2, 1, 0))
    return train_sizes_abs, out[0], out[1]
def _translate_train_sizes(train_sizes, n_max_training_samples):
"""Determine absolute sizes of training subsets and validate 'train_sizes'.
Examples:
_translate_train_sizes([0.5, 1.0], 10) -> [5, 10]
_translate_train_sizes([5, 10], 10) -> [5, 10]
Parameters
----------
train_sizes : array-like, shape (n_ticks,), dtype float or int
Numbers of training examples that will be used to generate the
learning curve. If the dtype is float, it is regarded as a
fraction of 'n_max_training_samples', i.e. it has to be within (0, 1].
n_max_training_samples : int
Maximum number of training samples (upper bound of 'train_sizes').
Returns
-------
train_sizes_abs : array, shape (n_unique_ticks,), dtype int
Numbers of training examples that will be used to generate the
learning curve. Note that the number of ticks might be less
than n_ticks because duplicate entries will be removed.
"""
train_sizes_abs = np.asarray(train_sizes)
n_ticks = train_sizes_abs.shape[0]
n_min_required_samples = np.min(train_sizes_abs)
n_max_required_samples = np.max(train_sizes_abs)
if np.issubdtype(train_sizes_abs.dtype, np.float):
if n_min_required_samples <= 0.0 or n_max_required_samples > 1.0:
raise ValueError("train_sizes has been interpreted as fractions "
"of the maximum number of training samples and "
"must be within (0, 1], but is within [%f, %f]."
% (n_min_required_samples,
n_max_required_samples))
train_sizes_abs = astype(train_sizes_abs * n_max_training_samples,
dtype=np.int, copy=False)
train_sizes_abs = np.clip(train_sizes_abs, 1,
n_max_training_samples)
else:
if (n_min_required_samples <= 0 or
n_max_required_samples > n_max_training_samples):
raise ValueError("train_sizes has been interpreted as absolute "
"numbers of training samples and must be within "
"(0, %d], but is within [%d, %d]."
% (n_max_training_samples,
n_min_required_samples,
n_max_required_samples))
train_sizes_abs = np.unique(train_sizes_abs)
if n_ticks > train_sizes_abs.shape[0]:
warnings.warn("Removed duplicate entries from 'train_sizes'. Number "
"of ticks will be less than than the size of "
"'train_sizes' %d instead of %d)."
% (train_sizes_abs.shape[0], n_ticks), RuntimeWarning)
return train_sizes_abs
def _incremental_fit_estimator(estimator, X, y, classes, train, test,
                               train_sizes, scorer, verbose):
    """Train estimator on training subsets incrementally and compute scores.

    For each cumulative size in ``train_sizes``, feeds only the *new*
    samples to ``estimator.partial_fit`` (the model accumulates state
    across iterations) and records the train/test scores at that size.
    Returns an array of shape (n_ticks, 2): column 0 train, column 1 test.
    """
    train_scores, test_scores = [], []
    # np.split at the tick offsets yields the increment of samples added
    # between consecutive ticks; the final piece (beyond the last tick) is
    # dropped. Each increment is paired with its cumulative size.
    partitions = zip(train_sizes, np.split(train, train_sizes)[:-1])
    for n_train_samples, partial_train in partitions:
        # Cumulative subset used only for scoring; fitting sees increments.
        train_subset = train[:n_train_samples]
        X_train, y_train = _safe_split(estimator, X, y, train_subset)
        X_partial_train, y_partial_train = _safe_split(estimator, X, y,
                                                       partial_train)
        X_test, y_test = _safe_split(estimator, X, y, test, train_subset)
        if y_partial_train is None:
            # Unsupervised: no targets to pass.
            estimator.partial_fit(X_partial_train, classes=classes)
        else:
            estimator.partial_fit(X_partial_train, y_partial_train,
                                  classes=classes)
        train_scores.append(_score(estimator, X_train, y_train, scorer))
        test_scores.append(_score(estimator, X_test, y_test, scorer))
    return np.array((train_scores, test_scores)).T
def validation_curve(estimator, X, y, param_name, param_range, labels=None,
                     cv=None, scoring=None, n_jobs=1, pre_dispatch="all",
                     verbose=0):
    """Validation curve.

    Determine training and test scores for varying values of one estimator
    parameter. This is similar to grid search over a single parameter, but
    also computes training scores, and is merely a utility for plotting.

    Read more in the :ref:`User Guide <validate>`.

    Parameters
    ----------
    estimator : object type that implements the "fit" and "predict" methods
        An object of that type which is cloned for each validation.
    X : array-like, shape (n_samples, n_features)
        Training vector.
    y : array-like, shape (n_samples) or (n_samples, n_features), optional
        Target relative to X; None for unsupervised learning.
    param_name : string
        Name of the parameter that will be varied.
    param_range : array-like, shape (n_values,)
        The values of the parameter that will be evaluated.
    labels : array-like, with shape (n_samples,), optional
        Group labels for the samples used while splitting the dataset.
    cv : int, cross-validation generator or an iterable, optional
        Determines the cross-validation splitting strategy (None -> 3-fold;
        integer -> number of folds of a (Stratified)KFold; or a CV
        generator / iterable of train-test splits).
    scoring : string, callable or None, optional, default: None
        A string (see model evaluation documentation) or a scorer callable
        with signature ``scorer(estimator, X, y)``.
    n_jobs : integer, optional
        Number of jobs to run in parallel (default 1).
    pre_dispatch : integer or string, optional
        Number of predispatched jobs for parallel execution (e.g. '2*n_jobs').
    verbose : integer, optional
        Controls the verbosity: the higher, the more messages.

    Returns
    -------
    train_scores : array, shape (n_ticks, n_cv_folds)
        Scores on training sets.
    test_scores : array, shape (n_ticks, n_cv_folds)
        Scores on test set.
    """
    X, y, labels = indexable(X, y, labels)
    cv = check_cv(cv, y, classifier=is_classifier(estimator))
    scorer = check_scoring(estimator, scoring=scoring)

    parallel = Parallel(n_jobs=n_jobs, pre_dispatch=pre_dispatch,
                        verbose=verbose)
    # One fit per (fold, parameter value); each yields (train, test) scores.
    out = parallel(
        delayed(_fit_and_score)(
            estimator, X, y, scorer, train, test, verbose,
            parameters={param_name: v}, fit_params=None,
            return_train_score=True)
        for train, test in cv.split(X, y, labels)
        for v in param_range)

    scores = np.asarray(out)[:, :2]
    n_params = len(param_range)
    n_cv_folds = scores.shape[0] // n_params
    # (folds, params, 2) -> (2, params, folds): row 0 train, row 1 test.
    scores = scores.reshape(n_cv_folds, n_params, 2).transpose((2, 1, 0))
    return scores[0], scores[1]
| bsd-3-clause |
birm/Elemental | python/lapack_like/spectral.py | 1 | 53459 | #
# Copyright (c) 2009-2015, Jack Poulson
# All rights reserved.
#
# This file is part of Elemental and is under the BSD 2-Clause License,
# which can be found in the LICENSE file in the root directory, or at
# http://opensource.org/licenses/BSD-2-Clause
#
from ..core import *
from ..blas_like import Copy, EntrywiseMap
from ..io import ProcessEvents
import ctypes
# Hermitian tridiagonal eigensolvers
# ==================================
class HermitianEigSubset_s(ctypes.Structure):
  # Mirrors Elemental's C struct for selecting a subset of eigenpairs:
  # either by a half-open index window [lowerIndex, upperIndex] (when
  # indexSubset is true) or by a single-precision value range
  # (lowerBound, upperBound] (when rangeSubset is true).
  _fields_ = [("indexSubset",bType),
              ("lowerIndex",iType),("upperIndex",iType),
              ("rangeSubset",bType),
              ("lowerBound",sType),("upperBound",sType)]
class HermitianEigSubset_d(ctypes.Structure):
  # Double-precision variant of HermitianEigSubset_s: bounds are doubles.
  _fields_ = [("indexSubset",bType),
              ("lowerIndex",iType),("upperIndex",iType),
              ("rangeSubset",bType),
              ("lowerBound",dType),("upperBound",dType)]
# ctypes signatures for the Hermitian tridiagonal eigensolvers.
# Eigenvalues only: (d, dSub, w, sort) -- four arguments.
lib.ElHermitianTridiagEig_s.argtypes = \
lib.ElHermitianTridiagEig_d.argtypes = \
lib.ElHermitianTridiagEig_c.argtypes = \
lib.ElHermitianTridiagEig_z.argtypes = \
lib.ElHermitianTridiagEigDist_s.argtypes = \
lib.ElHermitianTridiagEigDist_d.argtypes = \
lib.ElHermitianTridiagEigDist_c.argtypes = \
lib.ElHermitianTridiagEigDist_z.argtypes = \
  [c_void_p,c_void_p,c_void_p,c_uint]
# Eigenpairs: (d, dSub, w, X, sort) -- five arguments.
lib.ElHermitianTridiagEigPair_s.argtypes = \
lib.ElHermitianTridiagEigPair_d.argtypes = \
lib.ElHermitianTridiagEigPair_c.argtypes = \
lib.ElHermitianTridiagEigPair_z.argtypes = \
lib.ElHermitianTridiagEigPairDist_s.argtypes = \
lib.ElHermitianTridiagEigPairDist_d.argtypes = \
lib.ElHermitianTridiagEigPairDist_c.argtypes = \
lib.ElHermitianTridiagEigPairDist_z.argtypes = \
  [c_void_p,c_void_p,c_void_p,c_void_p,c_uint]
# Subset ("Partial") variants append a HermitianEigSubset struct whose
# precision matches the datatype's real field (s/c single, d/z double).
lib.ElHermitianTridiagEigPartial_s.argtypes = \
lib.ElHermitianTridiagEigPartial_c.argtypes = \
lib.ElHermitianTridiagEigPartialDist_s.argtypes = \
lib.ElHermitianTridiagEigPartialDist_c.argtypes = \
  [c_void_p,c_void_p,c_void_p,c_uint,HermitianEigSubset_s]
lib.ElHermitianTridiagEigPartial_d.argtypes = \
lib.ElHermitianTridiagEigPartial_z.argtypes = \
lib.ElHermitianTridiagEigPartialDist_d.argtypes = \
lib.ElHermitianTridiagEigPartialDist_z.argtypes = \
  [c_void_p,c_void_p,c_void_p,c_uint,HermitianEigSubset_d]
lib.ElHermitianTridiagEigPairPartial_s.argtypes = \
lib.ElHermitianTridiagEigPairPartial_c.argtypes = \
lib.ElHermitianTridiagEigPairPartialDist_s.argtypes = \
lib.ElHermitianTridiagEigPairPartialDist_c.argtypes = \
  [c_void_p,c_void_p,c_void_p,c_void_p,c_uint,HermitianEigSubset_s]
lib.ElHermitianTridiagEigPairPartial_d.argtypes = \
lib.ElHermitianTridiagEigPairPartial_z.argtypes = \
lib.ElHermitianTridiagEigPairPartialDist_d.argtypes = \
lib.ElHermitianTridiagEigPairPartialDist_z.argtypes = \
  [c_void_p,c_void_p,c_void_p,c_void_p,c_uint,HermitianEigSubset_d]
def HermitianTridiagEig(d,dSub,vectors=False,sort=ASCENDING,subset=None):
  """Eigensolver for the Hermitian tridiagonal matrix given by its main
  diagonal ``d`` and subdiagonal ``dSub``.

  Parameters: ``d``/``dSub`` are Matrix or DistMatrix objects (both of the
  same container type); ``vectors`` requests eigenvectors; ``sort`` is the
  eigenvalue ordering; ``subset`` is an optional HermitianEigSubset struct
  restricting which eigenpairs are computed.

  Returns ``w`` (eigenvalues), or ``(w, X)`` when ``vectors`` is True.
  """
  if type(d) is Matrix:
    w = Matrix(d.tag)
    if vectors:
      X = Matrix(dSub.tag)
      if subset is None:
        args = [d.obj,dSub.obj,w.obj,X.obj,sort]
        if dSub.tag == sTag: lib.ElHermitianTridiagEigPair_s(*args)
        elif dSub.tag == dTag: lib.ElHermitianTridiagEigPair_d(*args)
        elif dSub.tag == cTag: lib.ElHermitianTridiagEigPair_c(*args)
        elif dSub.tag == zTag: lib.ElHermitianTridiagEigPair_z(*args)
        else: DataExcept()
      else:
        args = [d.obj,dSub.obj,w.obj,X.obj,sort,subset]
        if dSub.tag == sTag: lib.ElHermitianTridiagEigPairPartial_s(*args)
        elif dSub.tag == dTag: lib.ElHermitianTridiagEigPairPartial_d(*args)
        elif dSub.tag == cTag: lib.ElHermitianTridiagEigPairPartial_c(*args)
        elif dSub.tag == zTag: lib.ElHermitianTridiagEigPairPartial_z(*args)
        else: DataExcept()
      return w, X
    else:
      # BUGFIX: the eigenvalues-only path previously invoked the "Pair"
      # bindings, whose declared argtypes take five arguments, with a
      # four-element args list; route to the non-"Pair" bindings instead.
      if subset is None:
        args = [d.obj,dSub.obj,w.obj,sort]
        if dSub.tag == sTag: lib.ElHermitianTridiagEig_s(*args)
        elif dSub.tag == dTag: lib.ElHermitianTridiagEig_d(*args)
        elif dSub.tag == cTag: lib.ElHermitianTridiagEig_c(*args)
        elif dSub.tag == zTag: lib.ElHermitianTridiagEig_z(*args)
        else: DataExcept()
      else:
        args = [d.obj,dSub.obj,w.obj,sort,subset]
        if dSub.tag == sTag: lib.ElHermitianTridiagEigPartial_s(*args)
        elif dSub.tag == dTag: lib.ElHermitianTridiagEigPartial_d(*args)
        elif dSub.tag == cTag: lib.ElHermitianTridiagEigPartial_c(*args)
        elif dSub.tag == zTag: lib.ElHermitianTridiagEigPartial_z(*args)
        else: DataExcept()
      return w
  elif type(d) is DistMatrix:
    w = DistMatrix(d.tag,STAR,STAR,d.Grid())
    if vectors:
      X = DistMatrix(dSub.tag,STAR,VR,dSub.Grid())
      if subset is None:
        args = [d.obj,dSub.obj,w.obj,X.obj,sort]
        if dSub.tag == sTag: lib.ElHermitianTridiagEigPairDist_s(*args)
        elif dSub.tag == dTag: lib.ElHermitianTridiagEigPairDist_d(*args)
        elif dSub.tag == cTag: lib.ElHermitianTridiagEigPairDist_c(*args)
        elif dSub.tag == zTag: lib.ElHermitianTridiagEigPairDist_z(*args)
        else: DataExcept()
      else:
        args = [d.obj,dSub.obj,w.obj,X.obj,sort,subset]
        if dSub.tag == sTag: lib.ElHermitianTridiagEigPairPartialDist_s(*args)
        elif dSub.tag == dTag: lib.ElHermitianTridiagEigPairPartialDist_d(*args)
        elif dSub.tag == cTag: lib.ElHermitianTridiagEigPairPartialDist_c(*args)
        elif dSub.tag == zTag: lib.ElHermitianTridiagEigPairPartialDist_z(*args)
        else: DataExcept()
      return w, X
    else:
      # BUGFIX: same eigenvalues-only routing fix for the distributed case.
      if subset is None:
        args = [d.obj,dSub.obj,w.obj,sort]
        if dSub.tag == sTag: lib.ElHermitianTridiagEigDist_s(*args)
        elif dSub.tag == dTag: lib.ElHermitianTridiagEigDist_d(*args)
        elif dSub.tag == cTag: lib.ElHermitianTridiagEigDist_c(*args)
        elif dSub.tag == zTag: lib.ElHermitianTridiagEigDist_z(*args)
        else: DataExcept()
      else:
        args = [d.obj,dSub.obj,w.obj,sort,subset]
        if dSub.tag == sTag: lib.ElHermitianTridiagEigPartialDist_s(*args)
        elif dSub.tag == dTag: lib.ElHermitianTridiagEigPartialDist_d(*args)
        elif dSub.tag == cTag: lib.ElHermitianTridiagEigPartialDist_c(*args)
        elif dSub.tag == zTag: lib.ElHermitianTridiagEigPartialDist_z(*args)
        else: DataExcept()
      return w
  else: TypeExcept()
# Hermitian eigensolvers
# ======================
# ctypes signatures for the dense Hermitian eigensolvers.
# Eigenvalues only: (uplo, A, w, sort) -- four arguments.
lib.ElHermitianEig_s.argtypes = \
lib.ElHermitianEig_d.argtypes = \
lib.ElHermitianEig_c.argtypes = \
lib.ElHermitianEig_z.argtypes = \
lib.ElHermitianEigDist_s.argtypes = \
lib.ElHermitianEigDist_d.argtypes = \
lib.ElHermitianEigDist_c.argtypes = \
lib.ElHermitianEigDist_z.argtypes = \
  [c_uint,c_void_p,c_void_p,c_uint]
# Eigenpairs: (uplo, A, w, X, sort) -- five arguments.
lib.ElHermitianEigPair_s.argtypes = \
lib.ElHermitianEigPair_d.argtypes = \
lib.ElHermitianEigPair_c.argtypes = \
lib.ElHermitianEigPair_z.argtypes = \
lib.ElHermitianEigPairDist_s.argtypes = \
lib.ElHermitianEigPairDist_d.argtypes = \
lib.ElHermitianEigPairDist_c.argtypes = \
lib.ElHermitianEigPairDist_z.argtypes = \
  [c_uint,c_void_p,c_void_p,c_void_p,c_uint]
# Subset ("Partial") variants append a HermitianEigSubset struct whose
# precision matches the datatype's real field (s/c single, d/z double).
lib.ElHermitianEigPartial_s.argtypes = \
lib.ElHermitianEigPartial_c.argtypes = \
lib.ElHermitianEigPartialDist_s.argtypes = \
lib.ElHermitianEigPartialDist_c.argtypes = \
  [c_uint,c_void_p,c_void_p,c_uint,HermitianEigSubset_s]
lib.ElHermitianEigPartial_d.argtypes = \
lib.ElHermitianEigPartial_z.argtypes = \
lib.ElHermitianEigPartialDist_d.argtypes = \
lib.ElHermitianEigPartialDist_z.argtypes = \
  [c_uint,c_void_p,c_void_p,c_uint,HermitianEigSubset_d]
lib.ElHermitianEigPairPartial_s.argtypes = \
lib.ElHermitianEigPairPartial_c.argtypes = \
lib.ElHermitianEigPairPartialDist_s.argtypes = \
lib.ElHermitianEigPairPartialDist_c.argtypes = \
  [c_uint,c_void_p,c_void_p,c_void_p,c_uint,HermitianEigSubset_s]
lib.ElHermitianEigPairPartial_d.argtypes = \
lib.ElHermitianEigPairPartial_z.argtypes = \
lib.ElHermitianEigPairPartialDist_d.argtypes = \
lib.ElHermitianEigPairPartialDist_z.argtypes = \
  [c_uint,c_void_p,c_void_p,c_void_p,c_uint,HermitianEigSubset_d]
def HermitianEig(uplo,A,vectors=False,sort=ASCENDING,subset=None):
  """Eigensolver for the Hermitian matrix ``A`` (only the ``uplo`` triangle
  is accessed).

  Parameters: ``A`` is a Matrix or DistMatrix; ``vectors`` requests
  eigenvectors; ``sort`` is the eigenvalue ordering; ``subset`` is an
  optional HermitianEigSubset struct restricting which eigenpairs are
  computed.

  Returns ``w`` (real eigenvalues), or ``(w, X)`` when ``vectors`` is True.
  """
  if type(A) is Matrix:
    # BUGFIX: the eigenvalue/eigenvector tags were swapped relative to the
    # DistMatrix branch below -- eigenvalues are real (Base(A.tag)) while
    # eigenvectors share A's field (A.tag).
    w = Matrix(Base(A.tag))
    if vectors:
      X = Matrix(A.tag)
      if subset is None:
        args = [uplo,A.obj,w.obj,X.obj,sort]
        if A.tag == sTag: lib.ElHermitianEigPair_s(*args)
        elif A.tag == dTag: lib.ElHermitianEigPair_d(*args)
        elif A.tag == cTag: lib.ElHermitianEigPair_c(*args)
        elif A.tag == zTag: lib.ElHermitianEigPair_z(*args)
        else: DataExcept()
      else:
        args = [uplo,A.obj,w.obj,X.obj,sort,subset]
        if A.tag == sTag: lib.ElHermitianEigPairPartial_s(*args)
        elif A.tag == dTag: lib.ElHermitianEigPairPartial_d(*args)
        elif A.tag == cTag: lib.ElHermitianEigPairPartial_c(*args)
        elif A.tag == zTag: lib.ElHermitianEigPairPartial_z(*args)
        else: DataExcept()
      return w, X
    else:
      # BUGFIX: the eigenvalues-only path previously invoked the "Pair"
      # bindings (five declared arguments) with four arguments; call the
      # non-"Pair" bindings instead.
      if subset is None:
        args = [uplo,A.obj,w.obj,sort]
        if A.tag == sTag: lib.ElHermitianEig_s(*args)
        elif A.tag == dTag: lib.ElHermitianEig_d(*args)
        elif A.tag == cTag: lib.ElHermitianEig_c(*args)
        elif A.tag == zTag: lib.ElHermitianEig_z(*args)
        else: DataExcept()
      else:
        args = [uplo,A.obj,w.obj,sort,subset]
        if A.tag == sTag: lib.ElHermitianEigPartial_s(*args)
        elif A.tag == dTag: lib.ElHermitianEigPartial_d(*args)
        elif A.tag == cTag: lib.ElHermitianEigPartial_c(*args)
        elif A.tag == zTag: lib.ElHermitianEigPartial_z(*args)
        else: DataExcept()
      return w
  elif type(A) is DistMatrix:
    w = DistMatrix(Base(A.tag),STAR,STAR,A.Grid())
    if vectors:
      X = DistMatrix(A.tag,MC,MR,A.Grid())
      if subset is None:
        args = [uplo,A.obj,w.obj,X.obj,sort]
        if A.tag == sTag: lib.ElHermitianEigPairDist_s(*args)
        elif A.tag == dTag: lib.ElHermitianEigPairDist_d(*args)
        elif A.tag == cTag: lib.ElHermitianEigPairDist_c(*args)
        elif A.tag == zTag: lib.ElHermitianEigPairDist_z(*args)
        else: DataExcept()
      else:
        args = [uplo,A.obj,w.obj,X.obj,sort,subset]
        if A.tag == sTag: lib.ElHermitianEigPairPartialDist_s(*args)
        elif A.tag == dTag: lib.ElHermitianEigPairPartialDist_d(*args)
        elif A.tag == cTag: lib.ElHermitianEigPairPartialDist_c(*args)
        elif A.tag == zTag: lib.ElHermitianEigPairPartialDist_z(*args)
        else: DataExcept()
      return w, X
    else:
      # BUGFIX: same eigenvalues-only routing fix for the distributed case.
      if subset is None:
        args = [uplo,A.obj,w.obj,sort]
        if A.tag == sTag: lib.ElHermitianEigDist_s(*args)
        elif A.tag == dTag: lib.ElHermitianEigDist_d(*args)
        elif A.tag == cTag: lib.ElHermitianEigDist_c(*args)
        elif A.tag == zTag: lib.ElHermitianEigDist_z(*args)
        else: DataExcept()
      else:
        args = [uplo,A.obj,w.obj,sort,subset]
        if A.tag == sTag: lib.ElHermitianEigPartialDist_s(*args)
        elif A.tag == dTag: lib.ElHermitianEigPartialDist_d(*args)
        elif A.tag == cTag: lib.ElHermitianEigPartialDist_c(*args)
        elif A.tag == zTag: lib.ElHermitianEigPartialDist_z(*args)
        else: DataExcept()
      return w
  else: TypeExcept()
# Skew-Hermitian eigensolvers
# ===========================
# ctypes signatures for the skew-Hermitian eigensolvers.
# Eigenvalues only: (uplo, A, w, sort) -- four arguments.
lib.ElSkewHermitianEig_s.argtypes = \
lib.ElSkewHermitianEig_d.argtypes = \
lib.ElSkewHermitianEig_c.argtypes = \
lib.ElSkewHermitianEig_z.argtypes = \
lib.ElSkewHermitianEigDist_s.argtypes = \
lib.ElSkewHermitianEigDist_d.argtypes = \
lib.ElSkewHermitianEigDist_c.argtypes = \
lib.ElSkewHermitianEigDist_z.argtypes = \
  [c_uint,c_void_p,c_void_p,c_uint]
# Eigenpairs: (uplo, A, w, X, sort) -- five arguments.
lib.ElSkewHermitianEigPair_s.argtypes = \
lib.ElSkewHermitianEigPair_d.argtypes = \
lib.ElSkewHermitianEigPair_c.argtypes = \
lib.ElSkewHermitianEigPair_z.argtypes = \
lib.ElSkewHermitianEigPairDist_s.argtypes = \
lib.ElSkewHermitianEigPairDist_d.argtypes = \
lib.ElSkewHermitianEigPairDist_c.argtypes = \
lib.ElSkewHermitianEigPairDist_z.argtypes = \
  [c_uint,c_void_p,c_void_p,c_void_p,c_uint]
# Subset ("Partial") variants append a HermitianEigSubset struct whose
# precision matches the datatype's real field (s/c single, d/z double).
lib.ElSkewHermitianEigPartial_s.argtypes = \
lib.ElSkewHermitianEigPartial_c.argtypes = \
lib.ElSkewHermitianEigPartialDist_s.argtypes = \
lib.ElSkewHermitianEigPartialDist_c.argtypes = \
  [c_uint,c_void_p,c_void_p,c_uint,HermitianEigSubset_s]
lib.ElSkewHermitianEigPartial_d.argtypes = \
lib.ElSkewHermitianEigPartial_z.argtypes = \
lib.ElSkewHermitianEigPartialDist_d.argtypes = \
lib.ElSkewHermitianEigPartialDist_z.argtypes = \
  [c_uint,c_void_p,c_void_p,c_uint,HermitianEigSubset_d]
lib.ElSkewHermitianEigPairPartial_s.argtypes = \
lib.ElSkewHermitianEigPairPartial_c.argtypes = \
lib.ElSkewHermitianEigPairPartialDist_s.argtypes = \
lib.ElSkewHermitianEigPairPartialDist_c.argtypes = \
  [c_uint,c_void_p,c_void_p,c_void_p,c_uint,HermitianEigSubset_s]
lib.ElSkewHermitianEigPairPartial_d.argtypes = \
lib.ElSkewHermitianEigPairPartial_z.argtypes = \
lib.ElSkewHermitianEigPairPartialDist_d.argtypes = \
lib.ElSkewHermitianEigPairPartialDist_z.argtypes = \
  [c_uint,c_void_p,c_void_p,c_void_p,c_uint,HermitianEigSubset_d]
def SkewHermitianEig(uplo,A,vectors=False,sort=ASCENDING,subset=None):
  """Eigensolver for the skew-Hermitian matrix ``A`` (only the ``uplo``
  triangle is accessed).

  Parameters: ``A`` is a Matrix or DistMatrix; ``vectors`` requests
  eigenvectors; ``sort`` is the eigenvalue ordering; ``subset`` is an
  optional HermitianEigSubset struct restricting which eigenpairs are
  computed.

  Returns ``w``, or ``(w, X)`` when ``vectors`` is True.
  """
  if type(A) is Matrix:
    # BUGFIX: tags were swapped relative to the DistMatrix branch below --
    # mirror it: w uses Base(A.tag) and X uses A.tag.
    # NOTE(review): for real A the eigenvectors of a skew-symmetric matrix
    # are complex; whether the C binding expects a complex X here cannot be
    # confirmed from this file -- verify against the C API.
    w = Matrix(Base(A.tag))
    if vectors:
      X = Matrix(A.tag)
      if subset is None:
        args = [uplo,A.obj,w.obj,X.obj,sort]
        if A.tag == sTag: lib.ElSkewHermitianEigPair_s(*args)
        elif A.tag == dTag: lib.ElSkewHermitianEigPair_d(*args)
        elif A.tag == cTag: lib.ElSkewHermitianEigPair_c(*args)
        elif A.tag == zTag: lib.ElSkewHermitianEigPair_z(*args)
        else: DataExcept()
      else:
        args = [uplo,A.obj,w.obj,X.obj,sort,subset]
        if A.tag == sTag: lib.ElSkewHermitianEigPairPartial_s(*args)
        elif A.tag == dTag: lib.ElSkewHermitianEigPairPartial_d(*args)
        elif A.tag == cTag: lib.ElSkewHermitianEigPairPartial_c(*args)
        elif A.tag == zTag: lib.ElSkewHermitianEigPairPartial_z(*args)
        else: DataExcept()
      return w, X
    else:
      # BUGFIX: the eigenvalues-only path previously invoked the "Pair"
      # bindings (five declared arguments) with four arguments; call the
      # non-"Pair" bindings instead.
      if subset is None:
        args = [uplo,A.obj,w.obj,sort]
        if A.tag == sTag: lib.ElSkewHermitianEig_s(*args)
        elif A.tag == dTag: lib.ElSkewHermitianEig_d(*args)
        elif A.tag == cTag: lib.ElSkewHermitianEig_c(*args)
        elif A.tag == zTag: lib.ElSkewHermitianEig_z(*args)
        else: DataExcept()
      else:
        args = [uplo,A.obj,w.obj,sort,subset]
        if A.tag == sTag: lib.ElSkewHermitianEigPartial_s(*args)
        elif A.tag == dTag: lib.ElSkewHermitianEigPartial_d(*args)
        elif A.tag == cTag: lib.ElSkewHermitianEigPartial_c(*args)
        elif A.tag == zTag: lib.ElSkewHermitianEigPartial_z(*args)
        else: DataExcept()
      return w
  elif type(A) is DistMatrix:
    w = DistMatrix(Base(A.tag),STAR,STAR,A.Grid())
    if vectors:
      X = DistMatrix(A.tag,MC,MR,A.Grid())
      if subset is None:
        args = [uplo,A.obj,w.obj,X.obj,sort]
        if A.tag == sTag: lib.ElSkewHermitianEigPairDist_s(*args)
        elif A.tag == dTag: lib.ElSkewHermitianEigPairDist_d(*args)
        elif A.tag == cTag: lib.ElSkewHermitianEigPairDist_c(*args)
        elif A.tag == zTag: lib.ElSkewHermitianEigPairDist_z(*args)
        else: DataExcept()
      else:
        args = [uplo,A.obj,w.obj,X.obj,sort,subset]
        if A.tag == sTag: lib.ElSkewHermitianEigPairPartialDist_s(*args)
        elif A.tag == dTag: lib.ElSkewHermitianEigPairPartialDist_d(*args)
        elif A.tag == cTag: lib.ElSkewHermitianEigPairPartialDist_c(*args)
        elif A.tag == zTag: lib.ElSkewHermitianEigPairPartialDist_z(*args)
        else: DataExcept()
      return w, X
    else:
      # BUGFIX: same eigenvalues-only routing fix for the distributed case.
      if subset is None:
        args = [uplo,A.obj,w.obj,sort]
        if A.tag == sTag: lib.ElSkewHermitianEigDist_s(*args)
        elif A.tag == dTag: lib.ElSkewHermitianEigDist_d(*args)
        elif A.tag == cTag: lib.ElSkewHermitianEigDist_c(*args)
        elif A.tag == zTag: lib.ElSkewHermitianEigDist_z(*args)
        else: DataExcept()
      else:
        args = [uplo,A.obj,w.obj,sort,subset]
        if A.tag == sTag: lib.ElSkewHermitianEigPartialDist_s(*args)
        elif A.tag == dTag: lib.ElSkewHermitianEigPartialDist_d(*args)
        elif A.tag == cTag: lib.ElSkewHermitianEigPartialDist_c(*args)
        elif A.tag == zTag: lib.ElSkewHermitianEigPartialDist_z(*args)
        else: DataExcept()
      return w
  else: TypeExcept()
# Hermitian generalized-definite eigensolvers
# ===========================================
# ctypes signatures for the Hermitian generalized-definite eigensolvers.
# Eigenvalues only: (pencil, uplo, A, B, w, sort) -- six arguments.
lib.ElHermitianGenDefEig_s.argtypes = \
lib.ElHermitianGenDefEig_d.argtypes = \
lib.ElHermitianGenDefEig_c.argtypes = \
lib.ElHermitianGenDefEig_z.argtypes = \
lib.ElHermitianGenDefEigDist_s.argtypes = \
lib.ElHermitianGenDefEigDist_d.argtypes = \
lib.ElHermitianGenDefEigDist_c.argtypes = \
lib.ElHermitianGenDefEigDist_z.argtypes = \
  [c_uint,c_uint,c_void_p,c_void_p,c_void_p,c_uint]
# Eigenpairs: (pencil, uplo, A, B, w, X, sort) -- seven arguments.
lib.ElHermitianGenDefEigPair_s.argtypes = \
lib.ElHermitianGenDefEigPair_d.argtypes = \
lib.ElHermitianGenDefEigPair_c.argtypes = \
lib.ElHermitianGenDefEigPair_z.argtypes = \
lib.ElHermitianGenDefEigPairDist_s.argtypes = \
lib.ElHermitianGenDefEigPairDist_d.argtypes = \
lib.ElHermitianGenDefEigPairDist_c.argtypes = \
lib.ElHermitianGenDefEigPairDist_z.argtypes = \
  [c_uint,c_uint,c_void_p,c_void_p,c_void_p,c_void_p,c_uint]
# Subset ("Partial") variants append a HermitianEigSubset struct whose
# precision matches the datatype's real field (s/c single, d/z double).
lib.ElHermitianGenDefEigPartial_s.argtypes = \
lib.ElHermitianGenDefEigPartial_c.argtypes = \
lib.ElHermitianGenDefEigPartialDist_s.argtypes = \
lib.ElHermitianGenDefEigPartialDist_c.argtypes = \
  [c_uint,c_uint,c_void_p,c_void_p,c_void_p,c_uint,HermitianEigSubset_s]
lib.ElHermitianGenDefEigPartial_d.argtypes = \
lib.ElHermitianGenDefEigPartial_z.argtypes = \
lib.ElHermitianGenDefEigPartialDist_d.argtypes = \
lib.ElHermitianGenDefEigPartialDist_z.argtypes = \
  [c_uint,c_uint,c_void_p,c_void_p,c_void_p,c_uint,HermitianEigSubset_d]
lib.ElHermitianGenDefEigPairPartial_s.argtypes = \
lib.ElHermitianGenDefEigPairPartial_c.argtypes = \
lib.ElHermitianGenDefEigPairPartialDist_s.argtypes = \
lib.ElHermitianGenDefEigPairPartialDist_c.argtypes = \
  [c_uint,c_uint,c_void_p,c_void_p,c_void_p,c_void_p,c_uint,
   HermitianEigSubset_s]
lib.ElHermitianGenDefEigPairPartial_d.argtypes = \
lib.ElHermitianGenDefEigPairPartial_z.argtypes = \
lib.ElHermitianGenDefEigPairPartialDist_d.argtypes = \
lib.ElHermitianGenDefEigPairPartialDist_z.argtypes = \
  [c_uint,c_uint,c_void_p,c_void_p,c_void_p,c_void_p,c_uint,
   HermitianEigSubset_d]
def HermitianGenDefEig(pencil,uplo,A,B,vectors=False,sort=ASCENDING,subset=None):
  """Solve the Hermitian generalized-definite eigenproblem for the pencil
  (A,B).

  BUG FIX (review): the previous revision's signature omitted ``pencil``
  and ``B`` even though the body referenced both (a guaranteed NameError
  on every call), routed the eigenvalue-only paths through the *Pair*
  wrappers with the wrong argument count, and swapped the tags of w/X in
  the sequential branch (w must have the real base type, X the type of A).

  pencil  : pencil-type enum passed straight through to libEl
  uplo    : which triangle of A and B is stored
  A, B    : the pencil matrices (both Matrix or both DistMatrix)
  vectors : if True, also compute and return the eigenvectors X
  sort    : eigenvalue sort order
  subset  : optional HermitianEigSubset_{s,d} selecting a subset
  Returns w (and X when ``vectors`` is True).
  """
  if type(A) is Matrix:
    w = Matrix(Base(A.tag))  # eigenvalues are always real
    if vectors:
      X = Matrix(A.tag)      # eigenvectors share A's (possibly complex) type
      if subset is None:
        args = [pencil,uplo,A.obj,B.obj,w.obj,X.obj,sort]
        if A.tag == sTag: lib.ElHermitianGenDefEigPair_s(*args)
        elif A.tag == dTag: lib.ElHermitianGenDefEigPair_d(*args)
        elif A.tag == cTag: lib.ElHermitianGenDefEigPair_c(*args)
        elif A.tag == zTag: lib.ElHermitianGenDefEigPair_z(*args)
        else: DataExcept()
      else:
        args = [pencil,uplo,A.obj,B.obj,w.obj,X.obj,sort,subset]
        if A.tag == sTag: lib.ElHermitianGenDefEigPairPartial_s(*args)
        elif A.tag == dTag: lib.ElHermitianGenDefEigPairPartial_d(*args)
        elif A.tag == cTag: lib.ElHermitianGenDefEigPairPartial_c(*args)
        elif A.tag == zTag: lib.ElHermitianGenDefEigPairPartial_z(*args)
        else: DataExcept()
      return w, X
    else:
      if subset is None:
        args = [pencil,uplo,A.obj,B.obj,w.obj,sort]
        if A.tag == sTag: lib.ElHermitianGenDefEig_s(*args)
        elif A.tag == dTag: lib.ElHermitianGenDefEig_d(*args)
        elif A.tag == cTag: lib.ElHermitianGenDefEig_c(*args)
        elif A.tag == zTag: lib.ElHermitianGenDefEig_z(*args)
        else: DataExcept()
      else:
        args = [pencil,uplo,A.obj,B.obj,w.obj,sort,subset]
        if A.tag == sTag: lib.ElHermitianGenDefEigPartial_s(*args)
        elif A.tag == dTag: lib.ElHermitianGenDefEigPartial_d(*args)
        elif A.tag == cTag: lib.ElHermitianGenDefEigPartial_c(*args)
        elif A.tag == zTag: lib.ElHermitianGenDefEigPartial_z(*args)
        else: DataExcept()
      return w
  elif type(A) is DistMatrix:
    w = DistMatrix(Base(A.tag),STAR,STAR,A.Grid())
    if vectors:
      X = DistMatrix(A.tag,MC,MR,A.Grid())
      if subset is None:
        args = [pencil,uplo,A.obj,B.obj,w.obj,X.obj,sort]
        if A.tag == sTag: lib.ElHermitianGenDefEigPairDist_s(*args)
        elif A.tag == dTag: lib.ElHermitianGenDefEigPairDist_d(*args)
        elif A.tag == cTag: lib.ElHermitianGenDefEigPairDist_c(*args)
        elif A.tag == zTag: lib.ElHermitianGenDefEigPairDist_z(*args)
        else: DataExcept()
      else:
        args = [pencil,uplo,A.obj,B.obj,w.obj,X.obj,sort,subset]
        if A.tag == sTag: lib.ElHermitianGenDefEigPairPartialDist_s(*args)
        elif A.tag == dTag: lib.ElHermitianGenDefEigPairPartialDist_d(*args)
        elif A.tag == cTag: lib.ElHermitianGenDefEigPairPartialDist_c(*args)
        elif A.tag == zTag: lib.ElHermitianGenDefEigPairPartialDist_z(*args)
        else: DataExcept()
      return w, X
    else:
      if subset is None:
        args = [pencil,uplo,A.obj,B.obj,w.obj,sort]
        if A.tag == sTag: lib.ElHermitianGenDefEigDist_s(*args)
        elif A.tag == dTag: lib.ElHermitianGenDefEigDist_d(*args)
        elif A.tag == cTag: lib.ElHermitianGenDefEigDist_c(*args)
        elif A.tag == zTag: lib.ElHermitianGenDefEigDist_z(*args)
        else: DataExcept()
      else:
        args = [pencil,uplo,A.obj,B.obj,w.obj,sort,subset]
        if A.tag == sTag: lib.ElHermitianGenDefEigPartialDist_s(*args)
        elif A.tag == dTag: lib.ElHermitianGenDefEigPartialDist_d(*args)
        elif A.tag == cTag: lib.ElHermitianGenDefEigPartialDist_c(*args)
        elif A.tag == zTag: lib.ElHermitianGenDefEigPartialDist_z(*args)
        else: DataExcept()
      return w
  else: TypeExcept()
# Hermitian SVD
# =============
# Singular values only: (uplo, A, s)
lib.ElHermitianSingularValues_s.argtypes = \
lib.ElHermitianSingularValues_d.argtypes = \
lib.ElHermitianSingularValues_c.argtypes = \
lib.ElHermitianSingularValues_z.argtypes = \
lib.ElHermitianSingularValuesDist_s.argtypes = \
lib.ElHermitianSingularValuesDist_d.argtypes = \
lib.ElHermitianSingularValuesDist_c.argtypes = \
lib.ElHermitianSingularValuesDist_z.argtypes = \
  [c_uint,c_void_p,c_void_p]
# Full SVD: (uplo, A, s, U, V)
lib.ElHermitianSVD_s.argtypes = \
lib.ElHermitianSVD_d.argtypes = \
lib.ElHermitianSVD_c.argtypes = \
lib.ElHermitianSVD_z.argtypes = \
lib.ElHermitianSVDDist_s.argtypes = \
lib.ElHermitianSVDDist_d.argtypes = \
lib.ElHermitianSVDDist_c.argtypes = \
lib.ElHermitianSVDDist_z.argtypes = \
  [c_uint,c_void_p,c_void_p,c_void_p,c_void_p]
def HermitianSVD(uplo,A,vectors=True):
  """Hermitian singular value decomposition of A.

  uplo    : which triangle of A is stored
  A       : Matrix or DistMatrix
  vectors : when True, return (U, s, V); otherwise only the singular
            values s are computed.
  """
  matType = type(A)
  if matType is Matrix:
    s = Matrix(Base(A.tag))
    if vectors:
      U = Matrix(A.tag)
      V = Matrix(A.tag)
      funcs = { sTag: lib.ElHermitianSVD_s, dTag: lib.ElHermitianSVD_d,
                cTag: lib.ElHermitianSVD_c, zTag: lib.ElHermitianSVD_z }
      if A.tag not in funcs: DataExcept()
      funcs[A.tag](uplo,A.obj,s.obj,U.obj,V.obj)
      return U, s, V
    funcs = { sTag: lib.ElHermitianSingularValues_s,
              dTag: lib.ElHermitianSingularValues_d,
              cTag: lib.ElHermitianSingularValues_c,
              zTag: lib.ElHermitianSingularValues_z }
    if A.tag not in funcs: DataExcept()
    funcs[A.tag](uplo,A.obj,s.obj)
    return s
  elif matType is DistMatrix:
    s = DistMatrix(Base(A.tag),STAR,STAR,A.Grid())
    if vectors:
      U = DistMatrix(A.tag,MC,MR,A.Grid())
      V = DistMatrix(A.tag,MC,MR,A.Grid())
      funcs = { sTag: lib.ElHermitianSVDDist_s, dTag: lib.ElHermitianSVDDist_d,
                cTag: lib.ElHermitianSVDDist_c, zTag: lib.ElHermitianSVDDist_z }
      if A.tag not in funcs: DataExcept()
      funcs[A.tag](uplo,A.obj,s.obj,U.obj,V.obj)
      return U, s, V
    funcs = { sTag: lib.ElHermitianSingularValuesDist_s,
              dTag: lib.ElHermitianSingularValuesDist_d,
              cTag: lib.ElHermitianSingularValuesDist_c,
              zTag: lib.ElHermitianSingularValuesDist_z }
    if A.tag not in funcs: DataExcept()
    funcs[A.tag](uplo,A.obj,s.obj)
    return s
  else: TypeExcept()
# Polar decomposition
# ===================
# In-place unitary factor only: (A)
lib.ElPolar_s.argtypes = \
lib.ElPolar_d.argtypes = \
lib.ElPolar_c.argtypes = \
lib.ElPolar_z.argtypes = \
lib.ElPolarDist_s.argtypes = \
lib.ElPolarDist_d.argtypes = \
lib.ElPolarDist_c.argtypes = \
lib.ElPolarDist_z.argtypes = \
  [c_void_p]
# Full decomposition, also forming the Hermitian factor: (A, P)
lib.ElPolarDecomp_s.argtypes = \
lib.ElPolarDecomp_d.argtypes = \
lib.ElPolarDecomp_c.argtypes = \
lib.ElPolarDecomp_z.argtypes = \
lib.ElPolarDecompDist_s.argtypes = \
lib.ElPolarDecompDist_d.argtypes = \
lib.ElPolarDecompDist_c.argtypes = \
lib.ElPolarDecompDist_z.argtypes = \
  [c_void_p,c_void_p]
def Polar(A,fullDecomp=False):
  """Polar decomposition of A, computed in place.

  A is overwritten with the unitary factor.  When fullDecomp is True the
  Hermitian factor P is also formed and returned; otherwise None.
  """
  matType = type(A)
  if matType is Matrix:
    decompFuncs = { sTag: lib.ElPolarDecomp_s, dTag: lib.ElPolarDecomp_d,
                    cTag: lib.ElPolarDecomp_c, zTag: lib.ElPolarDecomp_z }
    polarFuncs = { sTag: lib.ElPolar_s, dTag: lib.ElPolar_d,
                   cTag: lib.ElPolar_c, zTag: lib.ElPolar_z }
    makeP = Matrix
  elif matType is DistMatrix:
    decompFuncs = { sTag: lib.ElPolarDecompDist_s,
                    dTag: lib.ElPolarDecompDist_d,
                    cTag: lib.ElPolarDecompDist_c,
                    zTag: lib.ElPolarDecompDist_z }
    polarFuncs = { sTag: lib.ElPolarDist_s, dTag: lib.ElPolarDist_d,
                   cTag: lib.ElPolarDist_c, zTag: lib.ElPolarDist_z }
    makeP = DistMatrix
  else: TypeExcept()
  if A.tag not in polarFuncs: DataExcept()
  if fullDecomp:
    P = makeP(A.tag)
    decompFuncs[A.tag](A.obj,P.obj)
    return P
  else:
    polarFuncs[A.tag](A.obj)
# Hermitian polar decomposition: (uplo, A) and (uplo, A, P)
lib.ElHermitianPolar_s.argtypes = \
lib.ElHermitianPolar_d.argtypes = \
lib.ElHermitianPolar_c.argtypes = \
lib.ElHermitianPolar_z.argtypes = \
lib.ElHermitianPolarDist_s.argtypes = \
lib.ElHermitianPolarDist_d.argtypes = \
lib.ElHermitianPolarDist_c.argtypes = \
lib.ElHermitianPolarDist_z.argtypes = \
  [c_uint,c_void_p]
lib.ElHermitianPolarDecomp_s.argtypes = \
lib.ElHermitianPolarDecomp_d.argtypes = \
lib.ElHermitianPolarDecomp_c.argtypes = \
lib.ElHermitianPolarDecomp_z.argtypes = \
lib.ElHermitianPolarDecompDist_s.argtypes = \
lib.ElHermitianPolarDecompDist_d.argtypes = \
lib.ElHermitianPolarDecompDist_c.argtypes = \
lib.ElHermitianPolarDecompDist_z.argtypes = \
  [c_uint,c_void_p,c_void_p]
def HermitianPolar(uplo,A,fullDecomp=False):
  """Hermitian polar decomposition of A, computed in place.

  BUG FIX (review): the DistMatrix fullDecomp branch previously built a
  *sequential* Matrix for P, which would then be handed to the
  distributed-memory C routine; it now builds a DistMatrix, matching
  the structure of Polar().

  uplo       : which triangle of A is stored
  A          : Matrix or DistMatrix, overwritten with the unitary factor
  fullDecomp : when True, also form and return the Hermitian factor P
  """
  if type(A) is Matrix:
    if fullDecomp:
      P = Matrix(A.tag)
      args = [uplo,A.obj,P.obj]
      if A.tag == sTag: lib.ElHermitianPolarDecomp_s(*args)
      elif A.tag == dTag: lib.ElHermitianPolarDecomp_d(*args)
      elif A.tag == cTag: lib.ElHermitianPolarDecomp_c(*args)
      elif A.tag == zTag: lib.ElHermitianPolarDecomp_z(*args)
      else: DataExcept()
      return P
    else:
      args = [uplo,A.obj]
      if A.tag == sTag: lib.ElHermitianPolar_s(*args)
      elif A.tag == dTag: lib.ElHermitianPolar_d(*args)
      elif A.tag == cTag: lib.ElHermitianPolar_c(*args)
      elif A.tag == zTag: lib.ElHermitianPolar_z(*args)
      else: DataExcept()
  elif type(A) is DistMatrix:
    if fullDecomp:
      P = DistMatrix(A.tag)  # was Matrix(A.tag): wrong container for Dist path
      args = [uplo,A.obj,P.obj]
      if A.tag == sTag: lib.ElHermitianPolarDecompDist_s(*args)
      elif A.tag == dTag: lib.ElHermitianPolarDecompDist_d(*args)
      elif A.tag == cTag: lib.ElHermitianPolarDecompDist_c(*args)
      elif A.tag == zTag: lib.ElHermitianPolarDecompDist_z(*args)
      else: DataExcept()
      return P
    else:
      args = [uplo,A.obj]
      if A.tag == sTag: lib.ElHermitianPolarDist_s(*args)
      elif A.tag == dTag: lib.ElHermitianPolarDist_d(*args)
      elif A.tag == cTag: lib.ElHermitianPolarDist_c(*args)
      elif A.tag == zTag: lib.ElHermitianPolarDist_z(*args)
      else: DataExcept()
  else: TypeExcept()
# Schur decomposition
# ===================
# Emulate an enum for the sign scaling
(SIGN_SCALE_NONE,SIGN_SCALE_DET,SIGN_SCALE_FROB)=(0,1,2)
lib.ElSignCtrlDefault_s.argtypes = [c_void_p]
class SignCtrl_s(ctypes.Structure):
  """Single-precision control structure for the matrix sign function;
  defaults are filled in by libEl at construction."""
  _fields_ = [("maxIts",iType),
              ("tol",sType),
              ("power",sType),
              ("scaling",c_uint)]
  def __init__(self):
    lib.ElSignCtrlDefault_s(pointer(self))
lib.ElSignCtrlDefault_d.argtypes = [c_void_p]
class SignCtrl_d(ctypes.Structure):
  """Double-precision counterpart of SignCtrl_s."""
  _fields_ = [("maxIts",iType),
              ("tol",dType),
              ("power",dType),
              ("scaling",c_uint)]
  def __init__(self):
    lib.ElSignCtrlDefault_d(pointer(self))
lib.ElHessQRCtrlDefault.argtypes = [c_void_p]
class HessQRCtrl(ctypes.Structure):
  """Control structure for the Hessenberg QR algorithm; defaults come
  from libEl."""
  _fields_ = [("distAED",bType),
              ("blockHeight",iType),("blockWidth",iType)]
  def __init__(self):
    lib.ElHessQRCtrlDefault(pointer(self))
lib.ElSDCCtrlDefault_s.argtypes = [c_void_p]
class SDCCtrl_s(ctypes.Structure):
  """Single-precision control structure for spectral divide-and-conquer;
  embeds a SignCtrl_s for the underlying sign-function iteration."""
  _fields_ = [("cutoff",iType),
              ("maxInnerIts",iType),("maxOuterIts",iType),
              ("tol",sType),
              ("spreadFactor",sType),
              ("random",bType),
              ("progress",bType),
              ("signCtrl",SignCtrl_s)]
  def __init__(self):
    lib.ElSDCCtrlDefault_s(pointer(self))
lib.ElSDCCtrlDefault_d.argtypes = [c_void_p]
class SDCCtrl_d(ctypes.Structure):
  """Double-precision counterpart of SDCCtrl_s."""
  _fields_ = [("cutoff",iType),
              ("maxInnerIts",iType),("maxOuterIts",iType),
              ("tol",dType),
              ("spreadFactor",dType),
              ("random",bType),
              ("progress",bType),
              ("signCtrl",SignCtrl_d)]
  def __init__(self):
    lib.ElSDCCtrlDefault_d(pointer(self))
lib.ElSchurCtrlDefault_s.argtypes = [c_void_p]
class SchurCtrl_s(ctypes.Structure):
  """Single-precision Schur control: choose QR (qrCtrl) or spectral
  divide-and-conquer (sdcCtrl) via the useSDC flag."""
  _fields_ = [("useSDC",bType),
              ("qrCtrl",HessQRCtrl),
              ("sdcCtrl",SDCCtrl_s)]
  def __init__(self):
    lib.ElSchurCtrlDefault_s(pointer(self))
lib.ElSchurCtrlDefault_d.argtypes = [c_void_p]
class SchurCtrl_d(ctypes.Structure):
  """Double-precision counterpart of SchurCtrl_s."""
  _fields_ = [("useSDC",bType),
              ("qrCtrl",HessQRCtrl),
              ("sdcCtrl",SDCCtrl_d)]
  def __init__(self):
    lib.ElSchurCtrlDefault_d(pointer(self))
# Schur form: (A, w, fullTriangle) / with Schur vectors: (A, w, Q, fullTriangle)
lib.ElSchur_s.argtypes = \
lib.ElSchur_d.argtypes = \
lib.ElSchur_c.argtypes = \
lib.ElSchur_z.argtypes = \
lib.ElSchurDist_s.argtypes = \
lib.ElSchurDist_d.argtypes = \
lib.ElSchurDist_c.argtypes = \
lib.ElSchurDist_z.argtypes = \
  [c_void_p,c_void_p,bType]
lib.ElSchurDecomp_s.argtypes = \
lib.ElSchurDecomp_d.argtypes = \
lib.ElSchurDecomp_c.argtypes = \
lib.ElSchurDecomp_z.argtypes = \
lib.ElSchurDecompDist_s.argtypes = \
lib.ElSchurDecompDist_d.argtypes = \
lib.ElSchurDecompDist_c.argtypes = \
lib.ElSchurDecompDist_z.argtypes = \
  [c_void_p,c_void_p,c_void_p,bType]
def Schur(A,fullTriangle=True,vectors=False):
  """Reduce A (in place) to Schur form.

  Returns the complex eigenvalues w and, when vectors=True, also the
  Schur vectors Q.  fullTriangle is forwarded to libEl and controls
  whether the full (quasi-)triangular factor is formed.
  """
  matType = type(A)
  if matType is Matrix:
    w = Matrix(Complexify(A.tag))
    if vectors:
      Q = Matrix(A.tag)
      funcs = { sTag: lib.ElSchurDecomp_s, dTag: lib.ElSchurDecomp_d,
                cTag: lib.ElSchurDecomp_c, zTag: lib.ElSchurDecomp_z }
      if A.tag not in funcs: DataExcept()
      funcs[A.tag](A.obj,w.obj,Q.obj,fullTriangle)
      return w, Q
    funcs = { sTag: lib.ElSchur_s, dTag: lib.ElSchur_d,
              cTag: lib.ElSchur_c, zTag: lib.ElSchur_z }
    if A.tag not in funcs: DataExcept()
    funcs[A.tag](A.obj,w.obj,fullTriangle)
    return w
  elif matType is DistMatrix:
    w = DistMatrix(Complexify(A.tag),STAR,STAR,A.Grid())
    if vectors:
      Q = DistMatrix(A.tag,MC,MR,A.Grid())
      funcs = { sTag: lib.ElSchurDecompDist_s, dTag: lib.ElSchurDecompDist_d,
                cTag: lib.ElSchurDecompDist_c, zTag: lib.ElSchurDecompDist_z }
      if A.tag not in funcs: DataExcept()
      funcs[A.tag](A.obj,w.obj,Q.obj,fullTriangle)
      return w, Q
    funcs = { sTag: lib.ElSchurDist_s, dTag: lib.ElSchurDist_d,
              cTag: lib.ElSchurDist_c, zTag: lib.ElSchurDist_z }
    if A.tag not in funcs: DataExcept()
    funcs[A.tag](A.obj,w.obj,fullTriangle)
    return w
  else: TypeExcept()
# Singular value decomposition
# ============================
# Values and right vectors: (A, s, V); values only: (A, s)
lib.ElSVD_s.argtypes = \
lib.ElSVD_d.argtypes = \
lib.ElSVD_c.argtypes = \
lib.ElSVD_z.argtypes = \
lib.ElSVDDist_s.argtypes = \
lib.ElSVDDist_d.argtypes = \
lib.ElSVDDist_c.argtypes = \
lib.ElSVDDist_z.argtypes = \
  [c_void_p,c_void_p,c_void_p]
lib.ElSingularValues_s.argtypes = \
lib.ElSingularValues_d.argtypes = \
lib.ElSingularValues_c.argtypes = \
lib.ElSingularValues_z.argtypes = \
lib.ElSingularValuesDist_s.argtypes = \
lib.ElSingularValuesDist_d.argtypes = \
lib.ElSingularValuesDist_c.argtypes = \
lib.ElSingularValuesDist_z.argtypes = \
  [c_void_p,c_void_p]
def SVD(A,vectors=False):
  """Singular value decomposition of A, computed in place.

  Returns the singular values s, plus the right singular vectors V when
  vectors=True.  A itself is overwritten by the C routine (presumably
  with the left vectors -- TODO confirm against libEl's docs).
  """
  matType = type(A)
  if matType is Matrix:
    s = Matrix(Base(A.tag))
    if vectors:
      V = Matrix(A.tag)
      funcs = { sTag: lib.ElSVD_s, dTag: lib.ElSVD_d,
                cTag: lib.ElSVD_c, zTag: lib.ElSVD_z }
      if A.tag not in funcs: DataExcept()
      funcs[A.tag](A.obj,s.obj,V.obj)
      return s, V
    funcs = { sTag: lib.ElSingularValues_s, dTag: lib.ElSingularValues_d,
              cTag: lib.ElSingularValues_c, zTag: lib.ElSingularValues_z }
    if A.tag not in funcs: DataExcept()
    funcs[A.tag](A.obj,s.obj)
    return s
  elif matType is DistMatrix:
    s = DistMatrix(Base(A.tag),STAR,STAR,A.Grid())
    if vectors:
      V = DistMatrix(A.tag,MC,MR,A.Grid())
      funcs = { sTag: lib.ElSVDDist_s, dTag: lib.ElSVDDist_d,
                cTag: lib.ElSVDDist_c, zTag: lib.ElSVDDist_z }
      if A.tag not in funcs: DataExcept()
      funcs[A.tag](A.obj,s.obj,V.obj)
      return s, V
    funcs = { sTag: lib.ElSingularValuesDist_s,
              dTag: lib.ElSingularValuesDist_d,
              cTag: lib.ElSingularValuesDist_c,
              zTag: lib.ElSingularValuesDist_z }
    if A.tag not in funcs: DataExcept()
    funcs[A.tag](A.obj,s.obj)
    return s
  else: TypeExcept()
# Product Lanczos
# ===============
# Lanczos on the product A^H A: (A, T, basisSize)
lib.ElProductLanczosSparse_s.argtypes = \
lib.ElProductLanczosSparse_d.argtypes = \
lib.ElProductLanczosSparse_c.argtypes = \
lib.ElProductLanczosSparse_z.argtypes = \
lib.ElProductLanczosDistSparse_s.argtypes = \
lib.ElProductLanczosDistSparse_d.argtypes = \
lib.ElProductLanczosDistSparse_c.argtypes = \
lib.ElProductLanczosDistSparse_z.argtypes = \
  [c_void_p,c_void_p,iType]
def ProductLanczos(A,basisSize=20):
  """Run a Lanczos process on the product A^H A and return the resulting
  (real) tridiagonal matrix T of dimension at most basisSize."""
  T = Matrix(Base(A.tag))
  if type(A) is SparseMatrix:
    funcs = { sTag: lib.ElProductLanczosSparse_s,
              dTag: lib.ElProductLanczosSparse_d,
              cTag: lib.ElProductLanczosSparse_c,
              zTag: lib.ElProductLanczosSparse_z }
  elif type(A) is DistSparseMatrix:
    funcs = { sTag: lib.ElProductLanczosDistSparse_s,
              dTag: lib.ElProductLanczosDistSparse_d,
              cTag: lib.ElProductLanczosDistSparse_c,
              zTag: lib.ElProductLanczosDistSparse_z }
  else: TypeExcept()
  if A.tag in funcs: funcs[A.tag](A.obj,T.obj,basisSize)
  else: DataExcept()
  return T
# Decomposition variant also returns the basis V, residual v and beta:
# (A, V, T, v, &beta, basisSize); beta's precision follows the base type.
lib.ElProductLanczosDecompSparse_s.argtypes = \
lib.ElProductLanczosDecompSparse_c.argtypes = \
lib.ElProductLanczosDecompDistSparse_s.argtypes = \
lib.ElProductLanczosDecompDistSparse_c.argtypes = \
  [c_void_p,c_void_p,c_void_p,c_void_p,POINTER(sType),iType]
lib.ElProductLanczosDecompSparse_d.argtypes = \
lib.ElProductLanczosDecompSparse_z.argtypes = \
lib.ElProductLanczosDecompDistSparse_d.argtypes = \
lib.ElProductLanczosDecompDistSparse_z.argtypes = \
  [c_void_p,c_void_p,c_void_p,c_void_p,POINTER(dType),iType]
def ProductLanczosDecomp(A,basisSize=20):
  """Product-Lanczos decomposition of A^H A.

  Returns (V, T, v, beta): the Lanczos basis V, tridiagonal T, the next
  basis vector v, and the scalar beta (unwrapped from its ctypes cell).
  V/v are Matrix for SparseMatrix input and DistMultiVec for
  DistSparseMatrix input.
  """
  T = Matrix(Base(A.tag))
  # beta is an output scalar, so allocate a ctypes cell of the base type
  # and pass its address to the C routine.
  beta = TagToType(Base(A.tag))()
  if type(A) is SparseMatrix:
    V = Matrix(A.tag)
    v = Matrix(A.tag)
    args = [A.obj,V.obj,T.obj,v.obj,pointer(beta),basisSize]
    if A.tag == sTag: lib.ElProductLanczosDecompSparse_s(*args)
    elif A.tag == dTag: lib.ElProductLanczosDecompSparse_d(*args)
    elif A.tag == cTag: lib.ElProductLanczosDecompSparse_c(*args)
    elif A.tag == zTag: lib.ElProductLanczosDecompSparse_z(*args)
    else: DataExcept()
    return V, T, v, beta.value
  elif type(A) is DistSparseMatrix:
    V = DistMultiVec(A.tag,A.Comm())
    v = DistMultiVec(A.tag,A.Comm())
    args = [A.obj,V.obj,T.obj,v.obj,pointer(beta),basisSize]
    if A.tag == sTag: lib.ElProductLanczosDecompDistSparse_s(*args)
    elif A.tag == dTag: lib.ElProductLanczosDecompDistSparse_d(*args)
    elif A.tag == cTag: lib.ElProductLanczosDecompDistSparse_c(*args)
    elif A.tag == zTag: lib.ElProductLanczosDecompDistSparse_z(*args)
    else: DataExcept()
    return V, T, v, beta.value
  else: TypeExcept()
# Extremal singular value estimation
# ==================================
# (A, basisSize, &sigMin, &sigMax); scalar precision follows the base type.
lib.ElExtremalSingValEstSparse_s.argtypes = \
lib.ElExtremalSingValEstSparse_c.argtypes = \
lib.ElExtremalSingValEstDistSparse_s.argtypes = \
lib.ElExtremalSingValEstDistSparse_c.argtypes = \
  [c_void_p,iType,POINTER(sType),POINTER(sType)]
lib.ElExtremalSingValEstSparse_d.argtypes = \
lib.ElExtremalSingValEstSparse_z.argtypes = \
lib.ElExtremalSingValEstDistSparse_d.argtypes = \
lib.ElExtremalSingValEstDistSparse_z.argtypes = \
  [c_void_p,iType,POINTER(dType),POINTER(dType)]
def ExtremalSingValEst(A,basisSize=20):
  """Estimate the smallest and largest singular values of sparse A using
  a Krylov basis of the given size.  Returns the (sigMin, sigMax) ctypes
  scalars."""
  sigMin = TagToType(Base(A.tag))()
  sigMax = TagToType(Base(A.tag))()
  if type(A) is SparseMatrix:
    funcs = { sTag: lib.ElExtremalSingValEstSparse_s,
              dTag: lib.ElExtremalSingValEstSparse_d,
              cTag: lib.ElExtremalSingValEstSparse_c,
              zTag: lib.ElExtremalSingValEstSparse_z }
  elif type(A) is DistSparseMatrix:
    funcs = { sTag: lib.ElExtremalSingValEstDistSparse_s,
              dTag: lib.ElExtremalSingValEstDistSparse_d,
              cTag: lib.ElExtremalSingValEstDistSparse_c,
              zTag: lib.ElExtremalSingValEstDistSparse_z }
  else: TypeExcept()
  if A.tag in funcs: funcs[A.tag](A.obj,basisSize,pointer(sigMin),pointer(sigMax))
  else: DataExcept()
  return sigMin, sigMax
# Pseudospectra
# =============
# Note: both the default-initializer and destructor take a struct pointer.
lib.ElSnapshotCtrlDefault.argtypes = \
lib.ElSnapshotCtrlDestroy.argtypes = \
  [c_void_p]
class SnapshotCtrl(ctypes.Structure):
  """Controls how pseudospectral snapshots are saved/displayed.

  BUG FIX (review): __init__ previously called
  lib.ElSnaphsotCtrlDefault (note the 'hs' typo), which would raise
  AttributeError on every construction; the symbol registered above is
  ElSnapshotCtrlDefault.
  """
  _fields_ = [("realSize",iType),("imagSize",iType),
              ("imgSaveFreq",iType),("numSaveFreq",iType),
              ("imgDispFreq",iType),
              ("imgSaveCount",iType),("numSaveCount",iType),
              ("imgDispCount",iType),
              ("imgBase",c_char_p),("numBase",c_char_p),
              ("imgFormat",c_uint),("numFormat",c_uint),
              ("itCounts",bType)]
  def __init__(self):
    lib.ElSnapshotCtrlDefault(pointer(self))
  def Destroy(self):
    # Frees the C-side string members; must be called explicitly.
    lib.ElSnapshotCtrlDestroy(pointer(self))
# Emulate an enum for the pseudospectral norm
(PS_TWO_NORM,PS_ONE_NORM)=(0,1)
lib.ElPseudospecCtrlDefault_s.argtypes = \
lib.ElPseudospecCtrlDestroy_s.argtypes = \
  [c_void_p]
class PseudospecCtrl_s(ctypes.Structure):
  """Single-precision pseudospectra control structure; defaults are
  filled in by libEl and Destroy() must be called to release C-side
  resources."""
  _fields_ = [("norm",c_uint),
              ("blockWidth",iType),
              ("schur",bType),
              ("forceComplexSchur",bType),
              ("forceComplexPs",bType),
              ("schurCtrl",SchurCtrl_s),
              ("maxIts",iType),
              ("tol",sType),
              ("deflate",bType),
              ("arnoldi",bType),
              ("basisSize",iType),
              ("reorthog",bType),
              ("progress",bType),
              ("snapCtrl",SnapshotCtrl),
              ("center",cType),
              ("realWidth",sType),
              ("imagWidth",sType)]
  def __init__(self):
    lib.ElPseudospecCtrlDefault_s(pointer(self))
  def Destroy(self):
    lib.ElPseudospecCtrlDestroy_s(pointer(self))
lib.ElPseudospecCtrlDefault_d.argtypes = \
lib.ElPseudospecCtrlDestroy_d.argtypes = \
  [c_void_p]
class PseudospecCtrl_d(ctypes.Structure):
  """Double-precision counterpart of PseudospecCtrl_s."""
  _fields_ = [("norm",c_uint),
              ("blockWidth",iType),
              ("schur",bType),
              ("forceComplexSchur",bType),
              ("forceComplexPs",bType),
              ("schurCtrl",SchurCtrl_d),
              ("maxIts",iType),
              ("tol",dType),
              ("deflate",bType),
              ("arnoldi",bType),
              ("basisSize",iType),
              ("reorthog",bType),
              ("progress",bType),
              ("snapCtrl",SnapshotCtrl),
              ("center",zType),
              ("realWidth",dType),
              ("imagWidth",dType)]
  def __init__(self):
    lib.ElPseudospecCtrlDefault_d(pointer(self))
  def Destroy(self):
    lib.ElPseudospecCtrlDestroy_d(pointer(self))
class SpectralBox_s(ctypes.Structure):
  """Single-precision complex-plane window returned by the spectral
  portrait routines (center plus real/imaginary widths)."""
  _fields_ = [("center",cType),
              ("realWidth",sType),
              ("imagWidth",sType)]
class SpectralBox_d(ctypes.Structure):
  """Double-precision counterpart of SpectralBox_s."""
  _fields_ = [("center",zType),
              ("realWidth",dType),
              ("imagWidth",dType)]
def DisplayPortrait(portrait,box,title='',tryPython=True):
  """Display a pseudospectral portrait, preferring matplotlib when
  tryPython is True and falling back to libEl's built-in Display.

  portrait : Matrix or DistMatrix of inverse resolvent norms
  box      : SpectralBox_{s,d} giving the complex-plane window
  NOTE(review): the matplotlib path maps the portrait through log10
  in place, so `portrait` is mutated by this call.
  """
  import math
  if tryPython:
    if type(portrait) is Matrix:
      EntrywiseMap(portrait,math.log10)
      try:
        import numpy as np
        import matplotlib as mpl
        import matplotlib.pyplot as plt
        isInline = 'inline' in mpl.get_backend()
        # A 1 x n or n x 1 portrait is plotted as a curve, not an image.
        isVec = min(portrait.Height(),portrait.Width()) == 1
        fig = plt.figure()
        axis = fig.add_axes([0.1,0.1,0.8,0.8])
        if isVec:
          axis.plot(np.squeeze(portrait.ToNumPy()),'bo-')
        else:
          lBound = box.center.real - box.realWidth/2
          rBound = box.center.real + box.realWidth/2
          bBound = box.center.imag - box.imagWidth/2
          tBound = box.center.imag + box.imagWidth/2
          im = axis.imshow(portrait.ToNumPy(),
                           extent=[lBound,rBound,bBound,tBound])
          fig.colorbar(im,ax=axis)
        plt.title(title)
        plt.draw()
        if not isInline:
          plt.show(block=False)
        return
      # NOTE(review): bare except deliberately swallows any matplotlib
      # failure so we can fall through to the built-in Display below,
      # but it also hides unrelated errors -- consider narrowing.
      except:
        print 'Could not import matplotlib.pyplot'
    elif type(portrait) is DistMatrix:
      # Gather the distributed portrait onto one rank, display there.
      portrait_CIRC_CIRC = DistMatrix(portrait.tag,CIRC,CIRC,portrait.Grid())
      Copy(portrait,portrait_CIRC_CIRC)
      if portrait_CIRC_CIRC.CrossRank() == portrait_CIRC_CIRC.Root():
        DisplayPortrait(portrait_CIRC_CIRC.Matrix(),box,title,True)
      return
  # Fall back to the built-in Display if we have not succeeded
  if not tryPython or type(portrait) is not Matrix:
    EntrywiseMap(portrait,math.log10)
  args = [portrait.obj,title]
  numMsExtra = 200
  if type(portrait) is Matrix:
    if portrait.tag == sTag: lib.ElDisplay_s(*args)
    elif portrait.tag == dTag: lib.ElDisplay_d(*args)
    else: DataExcept()
    ProcessEvents(numMsExtra)
  elif type(portrait) is DistMatrix:
    if portrait.tag == sTag: lib.ElDisplayDist_s(*args)
    elif portrait.tag == dTag: lib.ElDisplayDist_d(*args)
    else: DataExcept()
    ProcessEvents(numMsExtra)
  else: TypeExcept()
# (Pseudo-)Spectral portrait
# --------------------------
# The choice is based upon a few different norms of the Schur factor, as simply
# using the spectral radius would be insufficient for highly non-normal
# matrices, e.g., a Jordan block with eigenvalue zero
# (A, invNormMap, realSize, imagSize, &box[, ctrl]); the box/ctrl precision
# follows the base type (_s/_c single, _d/_z double).
lib.ElSpectralPortrait_s.argtypes = \
lib.ElSpectralPortrait_c.argtypes = \
lib.ElSpectralPortraitDist_s.argtypes = \
lib.ElSpectralPortraitDist_c.argtypes = \
  [c_void_p,c_void_p,iType,iType,POINTER(SpectralBox_s)]
lib.ElSpectralPortrait_d.argtypes = \
lib.ElSpectralPortrait_z.argtypes = \
lib.ElSpectralPortraitDist_d.argtypes = \
lib.ElSpectralPortraitDist_z.argtypes = \
  [c_void_p,c_void_p,iType,iType,POINTER(SpectralBox_d)]
lib.ElSpectralPortraitX_s.argtypes = \
lib.ElSpectralPortraitX_c.argtypes = \
lib.ElSpectralPortraitXDist_s.argtypes = \
lib.ElSpectralPortraitXDist_c.argtypes = \
  [c_void_p,c_void_p,iType,iType,POINTER(SpectralBox_s),PseudospecCtrl_s]
lib.ElSpectralPortraitX_d.argtypes = \
lib.ElSpectralPortraitX_z.argtypes = \
lib.ElSpectralPortraitXDist_d.argtypes = \
lib.ElSpectralPortraitXDist_z.argtypes = \
  [c_void_p,c_void_p,iType,iType,POINTER(SpectralBox_d),PseudospecCtrl_d]
def SpectralPortrait(A,realSize=200,imagSize=200,ctrl=None):
  """Compute a spectral portrait of A over an automatically chosen window.

  Returns (invNormMap, box): a realSize x imagSize map of inverse
  resolvent norms and the SpectralBox describing the window.  When
  ``ctrl`` is given, the 'X' (expert) entry points are used.

  Fixed PEP 8 E711: ``ctrl == None`` comparisons replaced by
  ``ctrl is None`` (behavior-identical here, since ctypes structures do
  not compare equal to None).
  """
  if type(A) is Matrix:
    invNormMap = Matrix(Base(A.tag))
    if A.tag == sTag:
      box = SpectralBox_s()  # single-precision window for _s/_c
      args = [A.obj,invNormMap.obj,realSize,imagSize,pointer(box)]
      argsCtrl = [A.obj,invNormMap.obj,realSize,imagSize,pointer(box),ctrl]
      if ctrl is None: lib.ElSpectralPortrait_s(*args)
      else: lib.ElSpectralPortraitX_s(*argsCtrl)
    elif A.tag == dTag:
      box = SpectralBox_d()  # double-precision window for _d/_z
      args = [A.obj,invNormMap.obj,realSize,imagSize,pointer(box)]
      argsCtrl = [A.obj,invNormMap.obj,realSize,imagSize,pointer(box),ctrl]
      if ctrl is None: lib.ElSpectralPortrait_d(*args)
      else: lib.ElSpectralPortraitX_d(*argsCtrl)
    elif A.tag == cTag:
      box = SpectralBox_s()
      args = [A.obj,invNormMap.obj,realSize,imagSize,pointer(box)]
      argsCtrl = [A.obj,invNormMap.obj,realSize,imagSize,pointer(box),ctrl]
      if ctrl is None: lib.ElSpectralPortrait_c(*args)
      else: lib.ElSpectralPortraitX_c(*argsCtrl)
    elif A.tag == zTag:
      box = SpectralBox_d()
      args = [A.obj,invNormMap.obj,realSize,imagSize,pointer(box)]
      argsCtrl = [A.obj,invNormMap.obj,realSize,imagSize,pointer(box),ctrl]
      if ctrl is None: lib.ElSpectralPortrait_z(*args)
      else: lib.ElSpectralPortraitX_z(*argsCtrl)
    else: DataExcept()
    return invNormMap, box
  elif type(A) is DistMatrix:
    invNormMap = DistMatrix(Base(A.tag),MC,MR,A.Grid())
    if A.tag == sTag:
      box = SpectralBox_s()
      args = [A.obj,invNormMap.obj,realSize,imagSize,pointer(box)]
      argsCtrl = [A.obj,invNormMap.obj,realSize,imagSize,pointer(box),ctrl]
      if ctrl is None: lib.ElSpectralPortraitDist_s(*args)
      else: lib.ElSpectralPortraitXDist_s(*argsCtrl)
    elif A.tag == dTag:
      box = SpectralBox_d()
      args = [A.obj,invNormMap.obj,realSize,imagSize,pointer(box)]
      argsCtrl = [A.obj,invNormMap.obj,realSize,imagSize,pointer(box),ctrl]
      if ctrl is None: lib.ElSpectralPortraitDist_d(*args)
      else: lib.ElSpectralPortraitXDist_d(*argsCtrl)
    elif A.tag == cTag:
      box = SpectralBox_s()
      args = [A.obj,invNormMap.obj,realSize,imagSize,pointer(box)]
      argsCtrl = [A.obj,invNormMap.obj,realSize,imagSize,pointer(box),ctrl]
      if ctrl is None: lib.ElSpectralPortraitDist_c(*args)
      else: lib.ElSpectralPortraitXDist_c(*argsCtrl)
    elif A.tag == zTag:
      box = SpectralBox_d()
      args = [A.obj,invNormMap.obj,realSize,imagSize,pointer(box)]
      argsCtrl = [A.obj,invNormMap.obj,realSize,imagSize,pointer(box),ctrl]
      if ctrl is None: lib.ElSpectralPortraitDist_z(*args)
      else: lib.ElSpectralPortraitXDist_z(*argsCtrl)
    else: DataExcept()
    return invNormMap, box
  else: TypeExcept()
# (Pseudo-)Spectral window
# ------------------------
# (A, invNormMap, center, realWidth, imagWidth, realSize, imagSize[, ctrl]);
# the center/width/ctrl precision follows the base type.
lib.ElSpectralWindow_s.argtypes = \
lib.ElSpectralWindowDist_s.argtypes = \
  [c_void_p,c_void_p,cType,sType,sType,iType,iType]
lib.ElSpectralWindow_d.argtypes = \
lib.ElSpectralWindowDist_d.argtypes = \
  [c_void_p,c_void_p,zType,dType,dType,iType,iType]
lib.ElSpectralWindow_c.argtypes = \
lib.ElSpectralWindowDist_c.argtypes = \
  [c_void_p,c_void_p,cType,sType,sType,iType,iType]
lib.ElSpectralWindow_z.argtypes = \
lib.ElSpectralWindowDist_z.argtypes = \
  [c_void_p,c_void_p,zType,dType,dType,iType,iType]
lib.ElSpectralWindowX_s.argtypes = \
lib.ElSpectralWindowXDist_s.argtypes = \
  [c_void_p,c_void_p,cType,sType,sType,iType,iType,PseudospecCtrl_s]
lib.ElSpectralWindowX_d.argtypes = \
lib.ElSpectralWindowXDist_d.argtypes = \
  [c_void_p,c_void_p,zType,dType,dType,iType,iType,PseudospecCtrl_d]
lib.ElSpectralWindowX_c.argtypes = \
lib.ElSpectralWindowXDist_c.argtypes = \
  [c_void_p,c_void_p,cType,sType,sType,iType,iType,PseudospecCtrl_s]
lib.ElSpectralWindowX_z.argtypes = \
lib.ElSpectralWindowXDist_z.argtypes = \
  [c_void_p,c_void_p,zType,dType,dType,iType,iType,PseudospecCtrl_d]
def SpectralWindow(A,centerPre,realWidth,imagWidth,
                   realSize=200,imagSize=200,ctrl=None):
  """Compute a spectral portrait of A over a user-specified window.

  centerPre            : window center (converted to A's complex type)
  realWidth, imagWidth : extents of the window in the complex plane
  realSize, imagSize   : resolution of the returned map
  ctrl                 : optional PseudospecCtrl, selects the expert API
  Returns invNormMap, the map of inverse resolvent norms.

  Fixed PEP 8 E711: ``ctrl == None`` replaced by ``ctrl is None``
  (behavior-identical for ctypes control structures).
  """
  center = TagToType(Complexify(A.tag))(centerPre)
  if type(A) is Matrix:
    invNormMap = Matrix(Base(A.tag))
    args = [A.obj,invNormMap.obj,center,realWidth,imagWidth,realSize,imagSize]
    argsCtrl = [A.obj,invNormMap.obj,center,realWidth,imagWidth,
                realSize,imagSize,ctrl]
    if A.tag == sTag:
      if ctrl is None: lib.ElSpectralWindow_s(*args)
      else: lib.ElSpectralWindowX_s(*argsCtrl)
    elif A.tag == dTag:
      if ctrl is None: lib.ElSpectralWindow_d(*args)
      else: lib.ElSpectralWindowX_d(*argsCtrl)
    elif A.tag == cTag:
      if ctrl is None: lib.ElSpectralWindow_c(*args)
      else: lib.ElSpectralWindowX_c(*argsCtrl)
    elif A.tag == zTag:
      if ctrl is None: lib.ElSpectralWindow_z(*args)
      else: lib.ElSpectralWindowX_z(*argsCtrl)
    else: DataExcept()
    return invNormMap
  elif type(A) is DistMatrix:
    invNormMap = DistMatrix(Base(A.tag),MC,MR,A.Grid())
    args = [A.obj,invNormMap.obj,center,realWidth,imagWidth,realSize,imagSize]
    argsCtrl = [A.obj,invNormMap.obj,center,realWidth,imagWidth,
                realSize,imagSize,ctrl]
    if A.tag == sTag:
      if ctrl is None: lib.ElSpectralWindowDist_s(*args)
      else: lib.ElSpectralWindowXDist_s(*argsCtrl)
    elif A.tag == dTag:
      if ctrl is None: lib.ElSpectralWindowDist_d(*args)
      else: lib.ElSpectralWindowXDist_d(*argsCtrl)
    elif A.tag == cTag:
      if ctrl is None: lib.ElSpectralWindowDist_c(*args)
      else: lib.ElSpectralWindowXDist_c(*argsCtrl)
    elif A.tag == zTag:
      if ctrl is None: lib.ElSpectralWindowDist_z(*args)
      else: lib.ElSpectralWindowXDist_z(*argsCtrl)
    else: DataExcept()
    return invNormMap
  else: TypeExcept()
# (Pseudo-)Spectral cloud
# -----------------------
# (A, shifts, invNorms[, ctrl]); the ctrl precision follows the base type.
lib.ElSpectralCloud_s.argtypes = \
lib.ElSpectralCloud_d.argtypes = \
lib.ElSpectralCloud_c.argtypes = \
lib.ElSpectralCloud_z.argtypes = \
lib.ElSpectralCloudDist_s.argtypes = \
lib.ElSpectralCloudDist_d.argtypes = \
lib.ElSpectralCloudDist_c.argtypes = \
lib.ElSpectralCloudDist_z.argtypes = \
  [c_void_p,c_void_p,c_void_p]
lib.ElSpectralCloudX_s.argtypes = \
lib.ElSpectralCloudX_c.argtypes = \
lib.ElSpectralCloudXDist_s.argtypes = \
lib.ElSpectralCloudXDist_c.argtypes = \
  [c_void_p,c_void_p,c_void_p,PseudospecCtrl_s]
lib.ElSpectralCloudX_d.argtypes = \
lib.ElSpectralCloudX_z.argtypes = \
lib.ElSpectralCloudXDist_d.argtypes = \
lib.ElSpectralCloudXDist_z.argtypes = \
  [c_void_p,c_void_p,c_void_p,PseudospecCtrl_d]
def SpectralCloud(A,shifts,ctrl=None):
  """Evaluate inverse resolvent norms of A at an arbitrary cloud of
  complex shifts.

  shifts : vector of evaluation points (Matrix / DistMatrix to match A)
  ctrl   : optional PseudospecCtrl, selects the expert ('X') API
  Returns invNorms, one value per shift.

  Fixed PEP 8 E711: ``ctrl == None`` replaced by ``ctrl is None``
  (behavior-identical for ctypes control structures).
  """
  if type(A) is Matrix:
    invNorms = Matrix(Base(A.tag))
    args = [A.obj,shifts.obj,invNorms.obj]
    argsCtrl = [A.obj,shifts.obj,invNorms.obj,ctrl]
    if A.tag == sTag:
      if ctrl is None: lib.ElSpectralCloud_s(*args)
      else: lib.ElSpectralCloudX_s(*argsCtrl)
    elif A.tag == dTag:
      if ctrl is None: lib.ElSpectralCloud_d(*args)
      else: lib.ElSpectralCloudX_d(*argsCtrl)
    elif A.tag == cTag:
      if ctrl is None: lib.ElSpectralCloud_c(*args)
      else: lib.ElSpectralCloudX_c(*argsCtrl)
    elif A.tag == zTag:
      if ctrl is None: lib.ElSpectralCloud_z(*args)
      else: lib.ElSpectralCloudX_z(*argsCtrl)
    else: DataExcept()
    return invNorms
  elif type(A) is DistMatrix:
    # invNorms is distributed over a [VR,*] column to match the shifts.
    invNorms = DistMatrix(Base(A.tag),VR,STAR,A.Grid())
    args = [A.obj,shifts.obj,invNorms.obj]
    argsCtrl = [A.obj,shifts.obj,invNorms.obj,ctrl]
    if A.tag == sTag:
      if ctrl is None: lib.ElSpectralCloudDist_s(*args)
      else: lib.ElSpectralCloudXDist_s(*argsCtrl)
    elif A.tag == dTag:
      if ctrl is None: lib.ElSpectralCloudDist_d(*args)
      else: lib.ElSpectralCloudXDist_d(*argsCtrl)
    elif A.tag == cTag:
      if ctrl is None: lib.ElSpectralCloudDist_c(*args)
      else: lib.ElSpectralCloudXDist_c(*argsCtrl)
    elif A.tag == zTag:
      if ctrl is None: lib.ElSpectralCloudDist_z(*args)
      else: lib.ElSpectralCloudXDist_z(*argsCtrl)
    else: DataExcept()
    return invNorms
  else: TypeExcept()
| bsd-3-clause |
ak681443/mana-deep | evaluation/allmods/mainfolder/eval.py | 1 | 4180 |
# coding: utf-8
# In[1]:
import os
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
from keras.layers import Input, Dense, Convolution2D, MaxPooling2D, UpSampling2D
from keras.models import Model
from keras.callbacks import TensorBoard
from keras.models import model_from_json
from keras.models import load_model
from keras import regularizers
from os import listdir
from os.path import isfile, join
import numpy as np
from matplotlib import pyplot as plt
import cv2
import scipy.misc
from scipy import spatial
from PIL import Image
import heapq
import sys
# Command-line binarization parameters:
#   th -- grayscale threshold; v -- value written to pixels below th.
th = int(sys.argv[1])
v = int(sys.argv[2])
# In[2]:
# Load one sample image only to discover the network's input height/width.
img1 = cv2.imread('/home/arvind/MyStuff/Desktop/Manatee_dataset/cleaned_data/test/op_U372_A.jpg.tif')
# In[3]:
# Encoder half of a convolutional autoencoder; `encoded` is the bottleneck
# feature map used below as the image descriptor for retrieval.
input_img = Input(shape=(int(img1.shape[0]), int(img1.shape[1]),1))
x = Convolution2D(16, 3, 3, activation='relu', border_mode='same', input_shape=(224,224,1))(input_img)
x = MaxPooling2D((2, 2), border_mode='same')(x)
x = Convolution2D(8, 3, 3, activation='relu', border_mode='same')(x)
x = MaxPooling2D((2, 2), border_mode='same')(x)
x = Convolution2D(8, 3, 3, activation='relu', border_mode='same', activity_regularizer=regularizers.activity_l1(10e-5))(x)
encoded = MaxPooling2D((2, 2), border_mode='same')(x)
model = Model(input_img, encoded)
model.compile(loss='binary_crossentropy', optimizer='adagrad')
# In[16]:
# Weights file is given as the third CLI argument; by_name=True matches
# layers by name so a partially compatible checkpoint still loads.
model.load_weights(sys.argv[3], by_name=True)
# In[5]:
def push_pqueue(queue, priority, value, maxlen=20):
    """Push ``(priority, value)`` onto a bounded min-heap of at most ``maxlen`` items.

    Keeps the ``maxlen`` highest-priority entries seen so far: once the heap
    is full, ``heappushpop`` inserts the candidate and evicts the current
    minimum in a single O(log n) step.

    :param queue: heapified list of ``(priority, value)`` tuples (mutated in place)
    :param priority: comparable sort key; larger survives eviction
    :param value: payload associated with the priority
    :param maxlen: capacity bound (default 20, the original hard-coded limit)
    """
    if len(queue) >= maxlen:
        # The original test was `> 20`, which let the heap grow to 21
        # entries before capping; `>=` enforces the intended size.
        heapq.heappushpop(queue, (priority, value))
    else:
        heapq.heappush(queue, (priority, value))
# In[25]:
mypath1 = '/home/arvind/MyStuff/Desktop/Manatee_dataset/cleaned_data/test/'
files1 = [f for f in listdir(mypath1) if isfile(join(mypath1, f))]
X_test = []
for filen1 in files1:
img1 = cv2.imread(mypath1+filen1)
img1 = cv2.cvtColor(img1, cv2.COLOR_BGR2GRAY)
img1 = cv2.resize(img1, (int(img1.shape[0] ), int(img1.shape[1])))
img1[img1<th] = v
img1[img1>=th] = 0
X_test.append(np.array([img1]))
X_test = np.array(X_test).astype('float32')#/ float(np.max(X))
X_test = np.reshape(X_test, (len(X_test), int(img1.shape[0]), int(img1.shape[1]), 1))
X_test_pred = model.predict(X_test, verbose=0)
# In[27]:
mypath1 = '/home/arvind/MyStuff/Desktop/Manatee_dataset/cleaned_data/train/'
files1 = [f for f in listdir(mypath1) if isfile(join(mypath1, f))]
X_train = []
for filen1 in files1:
img1 = cv2.imread(mypath1+filen1)
img1 = cv2.cvtColor(img1, cv2.COLOR_BGR2GRAY)
img1 = cv2.resize(img1, (int(img1.shape[0]), int(img1.shape[1] )))
img1[img1<th] = v
img1[img1>=th] = 0
X_train.append(np.array([img1]))
X_train = np.array(X_train).astype('float32')#/ float(np.max(X))
X_train = np.reshape(X_train, (len(X_train), int(img1.shape[0]), int(img1.shape[1]), 1))
X_train_pred = model.predict(X_train, verbose=0)
# In[28]:
mypath1 = '/home/arvind/MyStuff/Desktop/Manatee_dataset/cleaned_data/test/'
files1 = [f for f in listdir(mypath1) if isfile(join(mypath1, f))]
top10_correct = 0
top20_correct = 0
top5_correct = 0
top1_correct = 0
run_count = 0
mp = {}
for i in np.arange(0, len(files1)):
filen1 = files1[i]
pred = X_test_pred[i]
mypath = '/home/arvind/MyStuff/Desktop/Manatee_dataset/cleaned_data/train/'
files = [f for f in listdir(mypath) if isfile(join(mypath, f))]
pqueue = []
count = 0
for j in np.arange(0, len(files)):
filen = files[j]
tpred = X_train_pred[j]
score = 1 - spatial.distance.cosine(tpred.sum(axis=2).flatten(), pred.sum(axis=2).flatten())
push_pqueue(pqueue, score, filen)
# print len(files), count
i = 0
for top20 in pqueue:
i += 1
if top20[1].split('_')[1].split('.')[0] == filen1.split('_')[1].split('.')[0]:
if i>10:
top20_correct+=1
elif i>5:
top10_correct+=1
elif i>=1:
top5_correct+=1
elif i>=0:
top1_correct+=1
break
mp[filen1] = pqueue[0][1]
run_count+=1
print top20_correct/float(len(files1)),top10_correct/float(len(files1)),top5_correct/float(len(files1)),top1_correct
| apache-2.0 |
soft-matter/mr | doc/sphinxext/docscrape_sphinx.py | 4 | 7745 | import re
import inspect
import textwrap
import pydoc
import sphinx
from docscrape import NumpyDocString, FunctionDoc, ClassDoc
class SphinxDocString(NumpyDocString):
def __init__(self, docstring, config={}):
self.use_plots = config.get('use_plots', False)
NumpyDocString.__init__(self, docstring, config=config)
# string conversion routines
def _str_header(self, name, symbol='`'):
return ['.. rubric:: ' + name, '']
def _str_field_list(self, name):
return [':' + name + ':']
def _str_indent(self, doc, indent=4):
out = []
for line in doc:
out += [' ' * indent + line]
return out
def _str_signature(self):
return ['']
if self['Signature']:
return ['``%s``' % self['Signature']] + ['']
else:
return ['']
def _str_summary(self):
return self['Summary'] + ['']
def _str_extended_summary(self):
return self['Extended Summary'] + ['']
def _str_param_list(self, name):
out = []
if self[name]:
out += self._str_field_list(name)
out += ['']
for param, param_type, desc in self[name]:
out += self._str_indent(['**%s** : %s' % (param.strip(),
param_type)])
out += ['']
out += self._str_indent(desc, 8)
out += ['']
return out
@property
def _obj(self):
if hasattr(self, '_cls'):
return self._cls
elif hasattr(self, '_f'):
return self._f
return None
def _str_member_list(self, name):
"""
Generate a member listing, autosummary:: table where possible,
and a table where not.
"""
out = []
if self[name]:
out += ['.. rubric:: %s' % name, '']
prefix = getattr(self, '_name', '')
if prefix:
prefix = '~%s.' % prefix
autosum = []
others = []
for param, param_type, desc in self[name]:
param = param.strip()
if not self._obj or hasattr(self._obj, param):
autosum += [" %s%s" % (prefix, param)]
else:
others.append((param, param_type, desc))
if autosum:
out += ['.. autosummary::', ' :toctree:', '']
out += autosum
if others:
maxlen_0 = max([len(x[0]) for x in others])
maxlen_1 = max([len(x[1]) for x in others])
hdr = "=" * maxlen_0 + " " + "=" * maxlen_1 + " " + "=" * 10
fmt = '%%%ds %%%ds ' % (maxlen_0, maxlen_1)
n_indent = maxlen_0 + maxlen_1 + 4
out += [hdr]
for param, param_type, desc in others:
out += [fmt % (param.strip(), param_type)]
out += self._str_indent(desc, n_indent)
out += [hdr]
out += ['']
return out
def _str_section(self, name):
out = []
if self[name]:
out += self._str_header(name)
out += ['']
content = textwrap.dedent("\n".join(self[name])).split("\n")
out += content
out += ['']
return out
def _str_see_also(self, func_role):
out = []
if self['See Also']:
see_also = super(SphinxDocString, self)._str_see_also(func_role)
out = ['.. seealso::', '']
out += self._str_indent(see_also[2:])
return out
def _str_warnings(self):
out = []
if self['Warnings']:
out = ['.. warning::', '']
out += self._str_indent(self['Warnings'])
return out
def _str_index(self):
idx = self['index']
out = []
if len(idx) == 0:
return out
out += ['.. index:: %s' % idx.get('default', '')]
for section, references in idx.iteritems():
if section == 'default':
continue
elif section == 'refguide':
out += [' single: %s' % (', '.join(references))]
else:
out += [' %s: %s' % (section, ','.join(references))]
return out
def _str_references(self):
out = []
if self['References']:
out += self._str_header('References')
if isinstance(self['References'], str):
self['References'] = [self['References']]
out.extend(self['References'])
out += ['']
# Latex collects all references to a separate bibliography,
# so we need to insert links to it
if sphinx.__version__ >= "0.6":
out += ['.. only:: latex', '']
else:
out += ['.. latexonly::', '']
items = []
for line in self['References']:
m = re.match(r'.. \[([a-z0-9._-]+)\]', line, re.I)
if m:
items.append(m.group(1))
out += [' ' + ", ".join(["[%s]_" % item for item in items]), '']
return out
def _str_examples(self):
examples_str = "\n".join(self['Examples'])
if (self.use_plots and 'import matplotlib' in examples_str
and 'plot::' not in examples_str):
out = []
out += self._str_header('Examples')
out += ['.. plot::', '']
out += self._str_indent(self['Examples'])
out += ['']
return out
else:
return self._str_section('Examples')
def __str__(self, indent=0, func_role="obj"):
out = []
out += self._str_signature()
out += self._str_index() + ['']
out += self._str_summary()
out += self._str_extended_summary()
for param_list in ('Parameters', 'Returns', 'Raises'):
out += self._str_param_list(param_list)
out += self._str_warnings()
out += self._str_see_also(func_role)
out += self._str_section('Notes')
out += self._str_references()
out += self._str_examples()
for param_list in ('Attributes', 'Methods'):
out += self._str_member_list(param_list)
out = self._str_indent(out, indent)
return '\n'.join(out)
class SphinxFunctionDoc(SphinxDocString, FunctionDoc):
def __init__(self, obj, doc=None, config={}):
self.use_plots = config.get('use_plots', False)
FunctionDoc.__init__(self, obj, doc=doc, config=config)
class SphinxClassDoc(SphinxDocString, ClassDoc):
def __init__(self, obj, doc=None, func_doc=None, config={}):
self.use_plots = config.get('use_plots', False)
ClassDoc.__init__(self, obj, doc=doc, func_doc=None, config=config)
class SphinxObjDoc(SphinxDocString):
def __init__(self, obj, doc=None, config={}):
self._f = obj
SphinxDocString.__init__(self, doc, config=config)
def get_doc_object(obj, what=None, doc=None, config={}):
    """Build the appropriate Sphinx docstring wrapper for ``obj``.

    When ``what`` is omitted it is inferred from the object: class,
    module, function (any callable), or generic object. Classes get a
    SphinxClassDoc, functions/methods a SphinxFunctionDoc, and everything
    else a SphinxObjDoc (pulling the docstring via pydoc if needed).
    """
    if what is None:
        if inspect.isclass(obj):
            what = 'class'
        elif inspect.ismodule(obj):
            what = 'module'
        elif callable(obj):
            what = 'function'
        else:
            what = 'object'
    if what == 'class':
        return SphinxClassDoc(obj, func_doc=SphinxFunctionDoc, doc=doc,
                              config=config)
    elif what in ('function', 'method'):
        return SphinxFunctionDoc(obj, doc=doc, config=config)
    else:
        if doc is None:
            # Fall back to the object's own docstring.
            doc = pydoc.getdoc(obj)
        return SphinxObjDoc(obj, doc, config=config)
| gpl-3.0 |
michrawson/nyu_ml_lectures | notebooks/figures/plot_kneighbors_regularization.py | 25 | 1363 | import numpy as np
import matplotlib.pyplot as plt
from sklearn.neighbors import KNeighborsRegressor
def make_dataset(n_samples=100):
    """Generate a noisy 1-D regression dataset.

    Samples ``n_samples`` points evenly on [-3, 3], evaluates the target
    curve sin(4x) + x, and adds standard-normal noise drawn from a fixed
    seed (42), so repeated calls return identical data.
    """
    rng = np.random.RandomState(42)
    grid = np.linspace(-3, 3, n_samples)
    noise = rng.normal(size=len(grid))
    return grid, np.sin(4 * grid) + grid + noise
def plot_regression_datasets():
    """Scatter-plot the synthetic regression data at three sample sizes.

    One subplot per size (10, 100, 1000), all drawn from the same
    fixed-seed curve produced by ``make_dataset``.
    """
    fig, axes = plt.subplots(1, 3, figsize=(15, 5))
    for n_samples, ax in zip([10, 100, 1000], axes):
        x, y = make_dataset(n_samples)
        ax.plot(x, y, 'o', alpha=.6)
def plot_kneighbors_regularization():
    """Illustrate how ``n_neighbors`` regularizes k-NN regression.

    Fits KNeighborsRegressor with k = 2, 5, 20 on the same fixed-seed
    noisy sin(4x) + x data and plots, per subplot, the true function,
    the noisy samples, and the model's prediction on a dense grid.
    """
    rnd = np.random.RandomState(42)
    # Same data-generating process as make_dataset(100), inlined here so
    # y_no_noise is available for plotting the true function.
    x = np.linspace(-3, 3, 100)
    y_no_noise = np.sin(4 * x) + x
    y = y_no_noise + rnd.normal(size=len(x))
    X = x[:, np.newaxis]
    fig, axes = plt.subplots(1, 3, figsize=(15, 5))
    # Dense evaluation grid so the prediction curve looks smooth.
    x_test = np.linspace(-3, 3, 1000)
    for n_neighbors, ax in zip([2, 5, 20], axes.ravel()):
        kneighbor_regression = KNeighborsRegressor(n_neighbors=n_neighbors)
        kneighbor_regression.fit(X, y)
        ax.plot(x, y_no_noise, label="true function")
        ax.plot(x, y, "o", label="data")
        ax.plot(x_test, kneighbor_regression.predict(x_test[:, np.newaxis]),
                label="prediction")
        ax.legend()
        ax.set_title("n_neighbors = %d" % n_neighbors)
if __name__ == "__main__":
plot_kneighbors_regularization()
plt.show()
| cc0-1.0 |
arthurmensch/cogspaces | exps/reduce.py | 1 | 3396 | import os
from os.path import join
from typing import List, Union
import numpy as np
import pandas as pd
from joblib import Parallel, delayed, dump, load
from nilearn.input_data import NiftiMasker
from sklearn.utils import gen_batches
from cogspaces.datasets import fetch_mask, fetch_atlas_modl, \
fetch_contrasts, STUDY_LIST
from cogspaces.datasets.utils import get_data_dir
from cogspaces.raw_datasets.contrast import fetch_all
idx = pd.IndexSlice
def single_mask(masker, imgs):
    """Apply a fitted masker to one batch of images (joblib worker helper)."""
    masked = masker.transform(imgs)
    return masked
def single_reduce(components, data, lstsq=False):
    """Project a batch of masked maps onto dictionary components.

    :param components: ndarray, shape (n_components, n_voxels), dictionary
        atoms (one per row) in voxel space
    :param data: ndarray, shape (n_samples, n_voxels), masked contrast maps
    :param lstsq: if False (default), use the plain linear projection
        ``data @ components.T``; if True, solve the least-squares problem
        ``components.T @ X.T = data.T`` for the loadings that best
        reconstruct the data
    :return: ndarray, shape (n_samples, n_components), per-sample loadings
    """
    if not lstsq:
        return data.dot(components.T)
    # rcond=None opts into the NumPy >= 1.14 cutoff and silences the
    # FutureWarning emitted when rcond is left unspecified.
    loadings, _, _, _ = np.linalg.lstsq(components.T, data.T, rcond=None)
    return loadings.T
def mask_contrasts(studies: Union[str, List[str]] = 'all',
                   output_dir: str = 'masked',
                   use_raw=False,
                   n_jobs: int = 1):
    """Mask contrast z-maps study by study and dump them to disk.

    For each study, the z-maps are smoothed (4mm FWHM) and flattened to
    voxel vectors with a shared NiftiMasker, in parallel batches of 10,
    then saved as ``data_<study>.pt`` (a joblib dump of
    ``(masked_array, targets_dataframe)``) in ``output_dir``.

    :param studies: 'all' or a list of study names to fetch
    :param output_dir: directory receiving one dump per study (created if absent)
    :param use_raw: with studies='all', fetch the raw dataset via fetch_all()
        instead of fetch_contrasts()
    :param n_jobs: number of parallel masking workers
    """
    batch_size = 10
    if not os.path.exists(output_dir):
        os.makedirs(output_dir)
    if use_raw and studies == 'all':
        data = fetch_all()
    else:
        data = fetch_contrasts(studies)
    mask = fetch_mask()
    masker = NiftiMasker(smoothing_fwhm=4, mask_img=mask,
                         verbose=0, memory_level=1, memory=None).fit()
    for study, this_data in data.groupby('study'):
        imgs = this_data['z_map'].values
        # Keep the index columns (study/subject/contrast labels) alongside
        # the masked data so downstream consumers can recover targets.
        targets = this_data.reset_index()
        n_samples = this_data.shape[0]
        batches = list(gen_batches(n_samples, batch_size))
        this_data = Parallel(n_jobs=n_jobs, verbose=10,
                             backend='multiprocessing', mmap_mode='r')(
            delayed(single_mask)(masker, imgs[batch]) for batch in batches)
        this_data = np.concatenate(this_data, axis=0)
        dump((this_data, targets), join(output_dir, 'data_%s.pt' % study))
def reduce_contrasts(components: str = 'components_453_gm',
                     studies: Union[str, List[str]] = 'all',
                     masked_dir='unmasked', output_dir='reduced',
                     n_jobs=1, lstsq=False, ):
    """Project previously masked contrasts onto a dictionary of components.

    Loads each study's ``data_<study>.pt`` dump from ``masked_dir``,
    reduces it against the chosen MODL atlas (by projection or least
    squares, see ``single_reduce``) in parallel batches of 200, and dumps
    ``(reduced_array, targets)`` under the same filename in ``output_dir``.

    :param components: key of the atlas image inside fetch_atlas_modl()
    :param studies: 'all' (expands to STUDY_LIST) or a list of study names
    :param masked_dir: directory holding the mask_contrasts() outputs
    :param output_dir: destination directory (created if absent)
    :param n_jobs: number of parallel reduction workers
    :param lstsq: forwarded to single_reduce (least-squares vs projection)
    """
    batch_size = 200
    if not os.path.exists(output_dir):
        os.makedirs(output_dir)
    if studies == 'all':
        studies = STUDY_LIST
    modl_atlas = fetch_atlas_modl()
    mask = fetch_mask()
    dictionary = modl_atlas[components]
    # Flatten the dictionary image to (n_components, n_voxels) with the
    # same mask used for the data.
    masker = NiftiMasker(mask_img=mask).fit()
    components = masker.transform(dictionary)
    for study in studies:
        this_data, targets = load(join(masked_dir, 'data_%s.pt' % study))
        n_samples = this_data.shape[0]
        batches = list(gen_batches(n_samples, batch_size))
        this_data = Parallel(n_jobs=n_jobs, verbose=10,
                             backend='multiprocessing', mmap_mode='r')(
            delayed(single_reduce)(components,
                                   this_data[batch], lstsq=lstsq)
            for batch in batches)
        this_data = np.concatenate(this_data, axis=0)
        dump((this_data, targets), join(output_dir,
                                        'data_%s.pt' % study))
# Driver: mask every raw contrast map, then project onto the 453-component
# grey-matter dictionary, using 65 parallel workers.
n_jobs = 65
# NOTE(review): masking writes into '<data_dir>/loadings', but
# reduce_contrasts below reads from '<data_dir>/masked' -- these
# directories look inconsistent; confirm which one is intended.
mask_contrasts(studies='all', use_raw=True, output_dir=join(get_data_dir(), 'loadings'), n_jobs=n_jobs)
reduce_contrasts(studies='all',
                 masked_dir=join(get_data_dir(), 'masked'),
                 output_dir=join(get_data_dir(), 'loadings'),
                 components='components_453_gm', n_jobs=n_jobs, lstsq=False)
| bsd-2-clause |
harshaneelhg/scikit-learn | sklearn/metrics/scorer.py | 211 | 13141 | """
The :mod:`sklearn.metrics.scorer` submodule implements a flexible
interface for model selection and evaluation using
arbitrary score functions.
A scorer object is a callable that can be passed to
:class:`sklearn.grid_search.GridSearchCV` or
:func:`sklearn.cross_validation.cross_val_score` as the ``scoring`` parameter,
to specify how a model should be evaluated.
The signature of the call is ``(estimator, X, y)`` where ``estimator``
is the model to be evaluated, ``X`` is the test data and ``y`` is the
ground truth labeling (or ``None`` in the case of unsupervised models).
"""
# Authors: Andreas Mueller <amueller@ais.uni-bonn.de>
# Lars Buitinck <L.J.Buitinck@uva.nl>
# Arnaud Joly <arnaud.v.joly@gmail.com>
# License: Simplified BSD
from abc import ABCMeta, abstractmethod
from functools import partial
import numpy as np
from . import (r2_score, median_absolute_error, mean_absolute_error,
mean_squared_error, accuracy_score, f1_score,
roc_auc_score, average_precision_score,
precision_score, recall_score, log_loss)
from .cluster import adjusted_rand_score
from ..utils.multiclass import type_of_target
from ..externals import six
from ..base import is_regressor
class _BaseScorer(six.with_metaclass(ABCMeta, object)):
    """Abstract base for the scorer callables produced by ``make_scorer``.

    Stores the wrapped metric, a sign (+1 for scores, -1 for losses, so
    that "greater is better" always holds for the returned value), and
    extra keyword arguments forwarded to the metric on every call.
    """

    def __init__(self, score_func, sign, kwargs):
        self._kwargs = kwargs
        self._score_func = score_func
        self._sign = sign

    @abstractmethod
    def __call__(self, estimator, X, y, sample_weight=None):
        # Subclasses decide how to obtain predictions from the estimator
        # (predict / predict_proba / decision_function).
        pass

    def __repr__(self):
        # Reconstruct a make_scorer(...) call string for debugging.
        kwargs_string = "".join([", %s=%s" % (str(k), str(v))
                                 for k, v in self._kwargs.items()])
        return ("make_scorer(%s%s%s%s)"
                % (self._score_func.__name__,
                   "" if self._sign > 0 else ", greater_is_better=False",
                   self._factory_args(), kwargs_string))

    def _factory_args(self):
        """Return non-default make_scorer arguments for repr."""
        return ""
class _PredictScorer(_BaseScorer):
def __call__(self, estimator, X, y_true, sample_weight=None):
"""Evaluate predicted target values for X relative to y_true.
Parameters
----------
estimator : object
Trained estimator to use for scoring. Must have a predict_proba
method; the output of that is used to compute the score.
X : array-like or sparse matrix
Test data that will be fed to estimator.predict.
y_true : array-like
Gold standard target values for X.
sample_weight : array-like, optional (default=None)
Sample weights.
Returns
-------
score : float
Score function applied to prediction of estimator on X.
"""
y_pred = estimator.predict(X)
if sample_weight is not None:
return self._sign * self._score_func(y_true, y_pred,
sample_weight=sample_weight,
**self._kwargs)
else:
return self._sign * self._score_func(y_true, y_pred,
**self._kwargs)
class _ProbaScorer(_BaseScorer):
def __call__(self, clf, X, y, sample_weight=None):
"""Evaluate predicted probabilities for X relative to y_true.
Parameters
----------
clf : object
Trained classifier to use for scoring. Must have a predict_proba
method; the output of that is used to compute the score.
X : array-like or sparse matrix
Test data that will be fed to clf.predict_proba.
y : array-like
Gold standard target values for X. These must be class labels,
not probabilities.
sample_weight : array-like, optional (default=None)
Sample weights.
Returns
-------
score : float
Score function applied to prediction of estimator on X.
"""
y_pred = clf.predict_proba(X)
if sample_weight is not None:
return self._sign * self._score_func(y, y_pred,
sample_weight=sample_weight,
**self._kwargs)
else:
return self._sign * self._score_func(y, y_pred, **self._kwargs)
def _factory_args(self):
return ", needs_proba=True"
class _ThresholdScorer(_BaseScorer):
def __call__(self, clf, X, y, sample_weight=None):
"""Evaluate decision function output for X relative to y_true.
Parameters
----------
clf : object
Trained classifier to use for scoring. Must have either a
decision_function method or a predict_proba method; the output of
that is used to compute the score.
X : array-like or sparse matrix
Test data that will be fed to clf.decision_function or
clf.predict_proba.
y : array-like
Gold standard target values for X. These must be class labels,
not decision function values.
sample_weight : array-like, optional (default=None)
Sample weights.
Returns
-------
score : float
Score function applied to prediction of estimator on X.
"""
y_type = type_of_target(y)
if y_type not in ("binary", "multilabel-indicator"):
raise ValueError("{0} format is not supported".format(y_type))
if is_regressor(clf):
y_pred = clf.predict(X)
else:
try:
y_pred = clf.decision_function(X)
# For multi-output multi-class estimator
if isinstance(y_pred, list):
y_pred = np.vstack(p for p in y_pred).T
except (NotImplementedError, AttributeError):
y_pred = clf.predict_proba(X)
if y_type == "binary":
y_pred = y_pred[:, 1]
elif isinstance(y_pred, list):
y_pred = np.vstack([p[:, -1] for p in y_pred]).T
if sample_weight is not None:
return self._sign * self._score_func(y, y_pred,
sample_weight=sample_weight,
**self._kwargs)
else:
return self._sign * self._score_func(y, y_pred, **self._kwargs)
def _factory_args(self):
return ", needs_threshold=True"
def get_scorer(scoring):
    """Resolve a scoring spec to a scorer callable.

    Strings are looked up in the SCORERS registry (raising ValueError with
    the list of valid names on a miss); any non-string is assumed to
    already be a scorer and is returned unchanged.
    """
    if isinstance(scoring, six.string_types):
        try:
            scorer = SCORERS[scoring]
        except KeyError:
            raise ValueError('%r is not a valid scoring value. '
                             'Valid options are %s'
                             % (scoring, sorted(SCORERS.keys())))
    else:
        scorer = scoring
    return scorer
def _passthrough_scorer(estimator, *args, **kwargs):
"""Function that wraps estimator.score"""
return estimator.score(*args, **kwargs)
def check_scoring(estimator, scoring=None, allow_none=False):
"""Determine scorer from user options.
A TypeError will be thrown if the estimator cannot be scored.
Parameters
----------
estimator : estimator object implementing 'fit'
The object to use to fit the data.
scoring : string, callable or None, optional, default: None
A string (see model evaluation documentation) or
a scorer callable object / function with signature
``scorer(estimator, X, y)``.
allow_none : boolean, optional, default: False
If no scoring is specified and the estimator has no score function, we
can either return None or raise an exception.
Returns
-------
scoring : callable
A scorer callable object / function with signature
``scorer(estimator, X, y)``.
"""
has_scoring = scoring is not None
if not hasattr(estimator, 'fit'):
raise TypeError("estimator should a be an estimator implementing "
"'fit' method, %r was passed" % estimator)
elif has_scoring:
return get_scorer(scoring)
elif hasattr(estimator, 'score'):
return _passthrough_scorer
elif allow_none:
return None
else:
raise TypeError(
"If no scoring is specified, the estimator passed should "
"have a 'score' method. The estimator %r does not." % estimator)
def make_scorer(score_func, greater_is_better=True, needs_proba=False,
needs_threshold=False, **kwargs):
"""Make a scorer from a performance metric or loss function.
This factory function wraps scoring functions for use in GridSearchCV
and cross_val_score. It takes a score function, such as ``accuracy_score``,
``mean_squared_error``, ``adjusted_rand_index`` or ``average_precision``
and returns a callable that scores an estimator's output.
Read more in the :ref:`User Guide <scoring>`.
Parameters
----------
score_func : callable,
Score function (or loss function) with signature
``score_func(y, y_pred, **kwargs)``.
greater_is_better : boolean, default=True
Whether score_func is a score function (default), meaning high is good,
or a loss function, meaning low is good. In the latter case, the
scorer object will sign-flip the outcome of the score_func.
needs_proba : boolean, default=False
Whether score_func requires predict_proba to get probability estimates
out of a classifier.
needs_threshold : boolean, default=False
Whether score_func takes a continuous decision certainty.
This only works for binary classification using estimators that
have either a decision_function or predict_proba method.
For example ``average_precision`` or the area under the roc curve
can not be computed using discrete predictions alone.
**kwargs : additional arguments
Additional parameters to be passed to score_func.
Returns
-------
scorer : callable
Callable object that returns a scalar score; greater is better.
Examples
--------
>>> from sklearn.metrics import fbeta_score, make_scorer
>>> ftwo_scorer = make_scorer(fbeta_score, beta=2)
>>> ftwo_scorer
make_scorer(fbeta_score, beta=2)
>>> from sklearn.grid_search import GridSearchCV
>>> from sklearn.svm import LinearSVC
>>> grid = GridSearchCV(LinearSVC(), param_grid={'C': [1, 10]},
... scoring=ftwo_scorer)
"""
sign = 1 if greater_is_better else -1
if needs_proba and needs_threshold:
raise ValueError("Set either needs_proba or needs_threshold to True,"
" but not both.")
if needs_proba:
cls = _ProbaScorer
elif needs_threshold:
cls = _ThresholdScorer
else:
cls = _PredictScorer
return cls(score_func, sign, kwargs)
# Standard regression scores
r2_scorer = make_scorer(r2_score)
mean_squared_error_scorer = make_scorer(mean_squared_error,
greater_is_better=False)
mean_absolute_error_scorer = make_scorer(mean_absolute_error,
greater_is_better=False)
median_absolute_error_scorer = make_scorer(median_absolute_error,
greater_is_better=False)
# Standard Classification Scores
accuracy_scorer = make_scorer(accuracy_score)
f1_scorer = make_scorer(f1_score)
# Score functions that need decision values
roc_auc_scorer = make_scorer(roc_auc_score, greater_is_better=True,
needs_threshold=True)
average_precision_scorer = make_scorer(average_precision_score,
needs_threshold=True)
precision_scorer = make_scorer(precision_score)
recall_scorer = make_scorer(recall_score)
# Score function for probabilistic classification
log_loss_scorer = make_scorer(log_loss, greater_is_better=False,
needs_proba=True)
# Clustering scores
adjusted_rand_scorer = make_scorer(adjusted_rand_score)
SCORERS = dict(r2=r2_scorer,
median_absolute_error=median_absolute_error_scorer,
mean_absolute_error=mean_absolute_error_scorer,
mean_squared_error=mean_squared_error_scorer,
accuracy=accuracy_scorer, roc_auc=roc_auc_scorer,
average_precision=average_precision_scorer,
log_loss=log_loss_scorer,
adjusted_rand_score=adjusted_rand_scorer)
for name, metric in [('precision', precision_score),
('recall', recall_score), ('f1', f1_score)]:
SCORERS[name] = make_scorer(metric)
for average in ['macro', 'micro', 'samples', 'weighted']:
qualified_name = '{0}_{1}'.format(name, average)
SCORERS[qualified_name] = make_scorer(partial(metric, pos_label=None,
average=average))
| bsd-3-clause |
DES-SL/EasyLens | easylens/Data/image_analysis.py | 1 | 7851 | __author__ = 'sibirrer'
import numpy as np
import scipy.ndimage.interpolation as interp
import astropy.io.fits as pyfits
import pyextract.pysex as pysex
import easylens.util as util
class ImageAnalysis(object):
    """
    Analysis routines acting on a single exposure image: background
    estimation and empirical PSF reconstruction from SExtractor-selected
    stars.
    """
    def __init__(self):
        pass

    def estimate_bkg(self, image):
        """Estimate the image background from the SExtractor run.

        :param image: 2d numpy array
        :return: (mean, rms) of the background estimate
        """
        HDUFile = self._get_cat(image)
        mean, rms = self._get_background(HDUFile)
        return mean, rms

    def estimate_psf(self, path2exposure, kernel_size=21, kwargs_cut=None, restrict_psf=None):
        """Estimate a PSF kernel by stacking star cutouts of an exposure.

        :param path2exposure: path to the FITS file of the exposure
        :param kernel_size: side length of the returned (square) kernel
        :param kwargs_cut: star-selection thresholds; when None or empty,
            estimated from the catalogue via _estimate_star_thresholds
        :param restrict_psf: optional per-star boolean selection list,
            reused across bands; None selects every in-bounds candidate
        :return: (kernel, restrict_psf, x_list, y_list, mask, mag, size,
            kwargs_cut); kernel is all zeros if no usable star was found
        """
        fits = pyfits.open(path2exposure)
        image = fits[0].data
        fits.close()
        HDUFile = self._get_cat(image)
        cat = self._get_source_cat(HDUFile)
        # Mutable-default fix: the original signature used kwargs_cut={};
        # an empty dict still triggers automatic threshold estimation.
        if not kwargs_cut:
            kwargs_cut = self._estimate_star_thresholds(cat)
        mask = self._find_objects(cat, kwargs_cut)
        mag = np.array(cat.data['MAG_BEST'], dtype=float)
        size = np.array(cat.data['FLUX_RADIUS'], dtype=float)
        x_list, y_list, restrict_psf = self._get_coordinates(image, cat, mask, numPix=41, restrict_psf=restrict_psf)
        if len(x_list) == 0:
            # No star survived the cuts: return an empty kernel plus the
            # diagnostics so the caller can inspect the selection.
            return np.zeros((kernel_size, kernel_size)), restrict_psf, x_list, y_list, mask, mag, size, kwargs_cut
        star_list = self._get_objects_image(image, x_list, y_list, numPix=41)
        kernel = self._stacking(star_list, x_list, y_list)
        kernel = util.cut_edges(kernel, kernel_size)
        kernel = util.kernel_norm(kernel)
        return kernel, restrict_psf, x_list, y_list, mask, mag, size, kwargs_cut

    def _get_cat(self, image, conf_args=None):
        """Run SExtractor on the image and return the resulting HDU list.

        :param image: 2d numpy array
        :param conf_args: optional SExtractor configuration overrides
        :return: HDU list produced by pysex.run
        """
        if conf_args is None:
            conf_args = {}
        params = ['NUMBER', 'FLAGS', 'X_IMAGE', 'Y_IMAGE', 'FLUX_BEST', 'FLUXERR_BEST', 'MAG_BEST', 'MAGERR_BEST',
                  'FLUX_RADIUS', 'CLASS_STAR', 'A_IMAGE', 'B_IMAGE', 'THETA_IMAGE', 'ELLIPTICITY']
        HDUFile = pysex.run(image=image, params=params, conf_file=None, conf_args=conf_args, keepcat=False, rerun=False, catdir=None)
        return HDUFile

    def _get_source_cat(self, HDUFile):
        """Return the source catalogue extension (HDU index 2).

        :param HDUFile: HDU list from _get_cat
        :return: catalogue HDU
        """
        return HDUFile[2]

    def _get_background(self, HDUFile):
        """Extract the background mean and rms computed by SExtractor.

        Scans the header card list in HDU 1 for the SEXBKGND / SEXBKDEV
        keywords (with or without a trailing '=').

        :param HDUFile: HDU list from _get_cat
        :return: (mean, rms)
        :raises ValueError: if either keyword is missing
        """
        mean, rms = 0, 0
        mean_found = False
        rms_found = False
        # Renamed from `list` to avoid shadowing the builtin.
        cards = HDUFile[1].data[0][0]
        for line in cards:
            line = line.strip()
            line = line.split()
            if line[0] == 'SEXBKGND' or line[0] == 'SEXBKGND=':
                mean = float(line[1])
                mean_found = True
            if line[0] == 'SEXBKDEV' or line[0] == 'SEXBKDEV=':
                rms = float(line[1])
                rms_found = True
        if not (mean_found and rms_found):
            raise ValueError('no mean and rms value found in list.')
        return mean, rms

    def _estimate_star_thresholds(self, cat):
        """Estimate star-selection cuts from the catalogue distributions.

        Magnitude cuts keep the brighter 70% of the (clipped) magnitude
        range; the size window is anchored at the smallest object among
        the bright half, which for stars traces the PSF width.

        :param cat: SExtractor catalogue HDU
        :return: dict of thresholds consumed by _find_objects
        """
        mag = np.array(cat.data['MAG_BEST'], dtype=float)
        size = np.array(cat.data['FLUX_RADIUS'], dtype=float)
        kwargs_cuts = {}
        # Clip pathological faint magnitudes (sextractor sentinel values).
        mag_max = min(np.max(mag), 34)
        mag_min = np.min(mag)
        delta_mag = mag_max - mag_min
        kwargs_cuts['MagMaxThresh'] = mag_max - 0.7 * delta_mag
        kwargs_cuts['MagMinThresh'] = mag_min
        bright = (mag < mag_max - 0.5 * delta_mag)
        kwargs_cuts['SizeMinThresh'] = max(0, np.min(size[bright]))
        kwargs_cuts['SizeMaxThresh'] = max(0, np.min(size[bright]) + 4)
        kwargs_cuts['EllipticityThresh'] = 0.1
        kwargs_cuts['ClassStarMax'] = 1.
        kwargs_cuts['ClassStarMin'] = 0.5
        return kwargs_cuts

    def _find_objects(self, cat, kwargs_cut):
        """Select star-like objects from the catalogue.

        :param cat: SExtractor catalogue HDU
        :param kwargs_cut: thresholds (see _estimate_star_thresholds)
        :return: boolean mask over the catalogue rows
        """
        mag = np.array(cat.data['MAG_BEST'], dtype=float)
        size = np.array(cat.data['FLUX_RADIUS'], dtype=float)
        ellipticity = cat.data['ELLIPTICITY']
        classStar = cat.data['CLASS_STAR']
        mask = ((size < kwargs_cut['SizeMaxThresh'])
                & (size > kwargs_cut['SizeMinThresh'])
                & (ellipticity < kwargs_cut['EllipticityThresh'])
                & (mag < kwargs_cut['MagMaxThresh'])
                & (mag > kwargs_cut['MagMinThresh'])
                & (classStar < kwargs_cut['ClassStarMax'])
                & (classStar > kwargs_cut['ClassStarMin']))
        return mask

    def _get_coordinates(self, image, cat, mask, numPix=10, restrict_psf=None):
        """Collect centroid coordinates of selected objects, dropping any
        whose cutout window would fall off the image edge.

        :param image: 2d numpy array
        :param cat: SExtractor catalogue HDU
        :param mask: boolean selection from _find_objects
        :param numPix: half-size of the cutout window used for the bounds check
        :param restrict_psf: optional per-object boolean list; None keeps all
        :return: (x_list, y_list, restrict_psf)
        """
        nx, ny = image.shape
        x_center = np.array(cat.data['X_IMAGE'], dtype=float)
        y_center = np.array(cat.data['Y_IMAGE'], dtype=float)
        x_center_mask = x_center[mask]
        y_center_mask = y_center[mask]
        num_objects = len(x_center_mask)
        if restrict_psf is None:
            restrict_psf = [True] * num_objects
        x_list = []
        y_list = []
        for i in range(num_objects):
            xc, yc = x_center_mask[i], y_center_mask[i]
            # NOTE(review): bounds compare xc against nx (first axis) and yc
            # against ny, while numpy images index [row, col] = [y, x];
            # confirm the intended axis convention against _get_objects_image.
            if (int(xc) - numPix > 0) and (int(xc) + numPix < nx) and (int(yc) - numPix > 0) and (int(yc) + numPix < ny):
                if restrict_psf[i]:
                    x_list.append(xc)
                    y_list.append(yc)
        return x_list, y_list, restrict_psf

    def _get_objects_image(self, image, x_list, y_list, numPix=10):
        """Cut out square stamps around the selected object positions.

        :param image: 2d numpy array
        :param x_list: centroid first-axis coordinates
        :param y_list: centroid second-axis coordinates
        :param numPix: half-size of each stamp
        :return: list of 2d cutouts (at most 10 -- hard cap below)
        """
        num_objects = len(x_list)
        cutout_list = []
        print("number of objects: ", num_objects)
        # Only the first 10 stars are stacked; _stacking iterates over this
        # list, so the cap keeps the two in sync.
        for i in range(np.minimum(10, num_objects)):
            xc, yc = x_list[i], y_list[i]
            cutout = image[int(xc) - numPix - 1:int(xc) + numPix, int(yc) - numPix - 1:int(yc) + numPix]
            cutout_list.append(cutout)
        return cutout_list

    def _stacking(self, star_list, x_list, y_list):
        """Co-add star cutouts into a PSF estimate.

        Each stamp is shifted by its sub-pixel centroid offset (bilinear
        interpolation), summed, thresholded at 1e-6 of the peak to remove
        interpolation noise, and normalised to unit flux.

        :param star_list: list of 2d star cutouts
        :param x_list: centroid first-axis coordinates (same order)
        :param y_list: centroid second-axis coordinates (same order)
        :return: normalised 2d kernel
        """
        n_stars = len(star_list)
        shifteds = []
        for i in range(n_stars):
            xc, yc = x_list[i], y_list[i]
            data = star_list[i]
            x_shift = int(xc) - xc
            y_shift = int(yc) - yc
            shifted = interp.shift(data, [-y_shift, -x_shift], order=1)
            shifteds.append(shifted)
        # (Per-star debug printing/plotting removed: it imported matplotlib
        # inside the loop and blocked on plt.show() for every star.)
        combined = sum(shifteds)
        # Suppress pixels below 1e-6 of the peak and keep the rest as-is,
        # then renormalise.
        threshold = np.max(combined) / 10**6
        new = np.empty_like(combined)
        above = combined >= threshold
        new[~above] = 0
        new[above] = combined[above]
        kernel = util.kernel_norm(new)
        return kernel
| mit |
zimuxin/AliMusicPrediction | 阿里音乐流行趋势预测项目_Group13/AliMusicPrediction/music_prediction-master/features/song.py | 1 | 3785 | from matplotlib.legend_handler import HandlerLine2D
import numpy as np
import csv
import matplotlib.pyplot as plt
#--------stable-------------------
import os,sys
path = os.getcwd()
parent_path = os.path.dirname(path)
sys.path.append(parent_path)
import static_data as sd
CURRENT_PATH=sd.CURRENT_PATH
ARTIST_FOLDER=sd.ARTIST_FOLDER
ARTIST=sd.ARTIST
SONGS=sd.SONGS
SONG_P_D_C=sd.SONG_P_D_C
ARTIST_P_D_C=sd.ARTIST_P_D_C
SONG_FAN=sd.SONG_FAN
ARTIST_FAN=sd.ARTIST_FAN
DAYS=sd.DAYS
START_UNIX =sd.START_UNIX
DAY_SECOND =sd.DAY_SECOND
START_WEEK=sd.START_WEEK
#--------stable-------------------
'''
songs structure:
{songs1:(mu,sigma),songs2:{mu,sigma},songs3:{mu,sigma}...}
'''
def mean_sigma(songs):
    """Fill ``songs`` with (mean, std) of the daily play counts per song.

    Scans SONG_P_D_C, where each record is 4 lines (song id, then the
    comma-separated play / download / collect series), and for every id
    present in ``songs`` replaces its value with ``(mu, sigma)`` of the
    play series. Stops early once all requested songs are found.

    :param songs: dict keyed by song id; values are overwritten in place
    :return: the same (mutated) dict
    """
    songs_num=len(songs)
    with open(SONG_P_D_C, "r") as fr:
        songs_id=fr.readline().strip("\n")
        while songs_id and songs_num>0:
            play = list(map(int, fr.readline().strip("\n").split(",")))
            # download/collect lines are read only to advance the cursor.
            download = list(map(int, fr.readline().strip("\n").split(",")))
            collect = list(map(int, fr.readline().strip("\n").split(",")))
            if songs_id in songs:
                play=np.array(play)
                mu=np.mean(play)
                # sigma = sqrt(E[x^2] - mu^2); assumes len(play) == DAYS --
                # TODO confirm the series always spans exactly DAYS days.
                sigma=np.sqrt((play*play).sum()/DAYS-mu*mu)
                songs[songs_id]=(mu,sigma)
                songs_num-=1
            songs_id=fr.readline().strip("\n")
    return songs
def sum_all():
    """Return the per-day total play count summed over every song in SONG_P_D_C."""
    return_play=[0 for i in range(DAYS)]
    with open(SONG_P_D_C, "r") as fr:
        songs_id=fr.readline().strip("\n")
        while songs_id:
            play = list(map(int, fr.readline().strip("\n").split(",")))
            # download/collect lines are read only to advance the cursor.
            download = list(map(int, fr.readline().strip("\n").split(",")))
            collect = list(map(int, fr.readline().strip("\n").split(",")))
            for i in range(DAYS):
                return_play[i]+=play[i]
            songs_id=fr.readline().strip("\n")
    return return_play
'''
songs structure:
{songs1:True,repeat,repeat,...}
'''
def sum_play(songs):
    """Return the per-day total play count summed over the given songs only.

    :param songs: dict/set whose keys are the song ids to include
    :return: list of DAYS daily totals
    """
    songs_num=len(songs)
    return_play=[0 for i in range(DAYS)]
    with open(SONG_P_D_C, "r") as fr:
        songs_id=fr.readline().strip("\n")
        # Early exit once every requested song has been accumulated.
        while songs_id and songs_num>0:
            play = list(map(int, fr.readline().strip("\n").split(",")))
            # download/collect lines are read only to advance the cursor.
            download = list(map(int, fr.readline().strip("\n").split(",")))
            collect = list(map(int, fr.readline().strip("\n").split(",")))
            if songs_id in songs:
                songs_num-=1
                for i in range(DAYS):
                    return_play[i]+=play[i]
            songs_id=fr.readline().strip("\n")
    return return_play
def plot_nor_ms(songs):
    """Plot the summed daily play counts of the given songs.

    Reads the per-song play/download/collect series from SONG_P_D_C,
    accumulates the play counts of every id present in ``songs``, and
    shows the aggregated curve.

    :param songs: dict/set whose keys are the song ids to include
    """
    songs_num=len(songs)
    sum_play=np.array([0 for i in range(DAYS)])
    # download/collect accumulators are kept for the commented-out plots below.
    sum_download=np.array([0 for i in range(DAYS)])
    sum_collect=np.array([0 for i in range(DAYS)])
    with open(SONG_P_D_C,'r') as fr:
        songs_id=fr.readline().strip("\n")
        while songs_id and songs_num>0:
            play = list(map(int, fr.readline().strip("\n").split(",")))
            download = list(map(int, fr.readline().strip("\n").split(",")))
            collect = list(map(int, fr.readline().strip("\n").split(",")))
            if songs_id in songs:
                play=np.array(play)
                sum_play+=play
                songs_num-=1
            songs_id=fr.readline().strip("\n")
    p = plt.plot(sum_play, "bo", sum_play, "b-", marker="o")
    #d = plt.plot(download, "ro", download, "r-", marker="o")
    #c = plt.plot(collect, "go", collect, "g-", marker="o")
    #plt.legend([p[1], d[1],c[1]], ["play", "download","collect"])
    # Fixed typo: plt.lengend -> plt.legend (AttributeError at runtime);
    # legend expects a sequence of handles, so wrap p[1] in a list.
    plt.legend([p[1]], ["play"])
    plt.title('SUM OF THE NORMAL MUSIC')
    plt.xlabel('days')
    plt.ylabel('times')
    #plt.savefig(os.path.join(self.SONG_PLAY_FOLDER, songs_id+".png"))
    plt.show()
| mit |
lbishal/scikit-learn | examples/calibration/plot_compare_calibration.py | 241 | 5008 | """
========================================
Comparison of Calibration of Classifiers
========================================
Well calibrated classifiers are probabilistic classifiers for which the output
of the predict_proba method can be directly interpreted as a confidence level.
For instance a well calibrated (binary) classifier should classify the samples
such that among the samples to which it gave a predict_proba value close to
0.8, approx. 80% actually belong to the positive class.
LogisticRegression returns well calibrated predictions as it directly
optimizes log-loss. In contrast, the other methods return biased probabilities,
with different biases per method:
* GaussianNaiveBayes tends to push probabilities to 0 or 1 (note the counts in
the histograms). This is mainly because it makes the assumption that features
are conditionally independent given the class, which is not the case in this
dataset which contains 2 redundant features.
* RandomForestClassifier shows the opposite behavior: the histograms show
peaks at approx. 0.2 and 0.9 probability, while probabilities close to 0 or 1
are very rare. An explanation for this is given by Niculescu-Mizil and Caruana
[1]: "Methods such as bagging and random forests that average predictions from
a base set of models can have difficulty making predictions near 0 and 1
because variance in the underlying base models will bias predictions that
should be near zero or one away from these values. Because predictions are
restricted to the interval [0,1], errors caused by variance tend to be one-
sided near zero and one. For example, if a model should predict p = 0 for a
case, the only way bagging can achieve this is if all bagged trees predict
zero. If we add noise to the trees that bagging is averaging over, this noise
will cause some trees to predict values larger than 0 for this case, thus
moving the average prediction of the bagged ensemble away from 0. We observe
this effect most strongly with random forests because the base-level trees
trained with random forests have relatively high variance due to feature
subsetting." As a result, the calibration curve shows a characteristic sigmoid
shape, indicating that the classifier could trust its "intuition" more and
return probabilities closer to 0 or 1 typically.
* Support Vector Classification (SVC) shows an even more sigmoid curve than
the RandomForestClassifier, which is typical for maximum-margin methods
(compare Niculescu-Mizil and Caruana [1]), which focus on hard samples
that are close to the decision boundary (the support vectors).
.. topic:: References:
.. [1] Predicting Good Probabilities with Supervised Learning,
A. Niculescu-Mizil & R. Caruana, ICML 2005
"""
print(__doc__)

# Author: Jan Hendrik Metzen <jhm@informatik.uni-bremen.de>
# License: BSD Style.

import numpy as np
np.random.seed(0)  # reproducible data generation and model fitting

import matplotlib.pyplot as plt

from sklearn import datasets
from sklearn.naive_bayes import GaussianNB
from sklearn.linear_model import LogisticRegression
from sklearn.ensemble import RandomForestClassifier
from sklearn.svm import LinearSVC
from sklearn.calibration import calibration_curve

# Synthetic binary problem: 2 informative + 2 redundant features out of 20.
X, y = datasets.make_classification(n_samples=100000, n_features=20,
                                    n_informative=2, n_redundant=2)

train_samples = 100  # Samples used for training the models

# Deliberately tiny training set; the large remainder is used to
# estimate the calibration curves accurately.
X_train = X[:train_samples]
X_test = X[train_samples:]
y_train = y[:train_samples]
y_test = y[train_samples:]

# Create classifiers
lr = LogisticRegression()
gnb = GaussianNB()
svc = LinearSVC(C=1.0)
rfc = RandomForestClassifier(n_estimators=100)


###############################################################################
# Plot calibration plots

plt.figure(figsize=(10, 10))
ax1 = plt.subplot2grid((3, 1), (0, 0), rowspan=2)  # reliability curves
ax2 = plt.subplot2grid((3, 1), (2, 0))             # score histograms

ax1.plot([0, 1], [0, 1], "k:", label="Perfectly calibrated")
for clf, name in [(lr, 'Logistic'),
                  (gnb, 'Naive Bayes'),
                  (svc, 'Support Vector Classification'),
                  (rfc, 'Random Forest')]:
    clf.fit(X_train, y_train)
    if hasattr(clf, "predict_proba"):
        prob_pos = clf.predict_proba(X_test)[:, 1]
    else:  # use decision function
        # LinearSVC has no predict_proba; min-max scale the decision
        # values into [0, 1] so they can be binned like probabilities.
        prob_pos = clf.decision_function(X_test)
        prob_pos = \
            (prob_pos - prob_pos.min()) / (prob_pos.max() - prob_pos.min())
    fraction_of_positives, mean_predicted_value = \
        calibration_curve(y_test, prob_pos, n_bins=10)
    ax1.plot(mean_predicted_value, fraction_of_positives, "s-",
             label="%s" % (name, ))
    ax2.hist(prob_pos, range=(0, 1), bins=10, label=name,
             histtype="step", lw=2)

ax1.set_ylabel("Fraction of positives")
ax1.set_ylim([-0.05, 1.05])
ax1.legend(loc="lower right")
ax1.set_title('Calibration plots (reliability curve)')

ax2.set_xlabel("Mean predicted value")
ax2.set_ylabel("Count")
ax2.legend(loc="upper center", ncol=2)

plt.tight_layout()
plt.show()
| bsd-3-clause |
pixki/redesestocasticas | ctmc.py | 1 | 4299 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Author: Jairo Sánchez
# @Date: 2015-12-02 12:03:55
# @Last Modified by: Jairo Sánchez
# @Last Modified time: 2016-01-19 15:46:28
import numpy as np
from scipy import misc as sc
from scipy.stats import expon
import argparse
from mpl_toolkits.mplot3d import Axes3D
from matplotlib import cm
from matplotlib.ticker import LinearLocator, FormatStrFormatter
import matplotlib.pyplot as plt
import colormaps as cmaps
def bcc_recursive(S, lambd, mu):
    """Closed-form stationary distribution of an M/M/S/S (Erlang-B) chain.

    Parameters
    ----------
    S : int
        Number of servers; states run from 0 to S busy servers.
    lambd : float
        Arrival rate.
    mu : float
        Per-server service rate.

    Returns
    -------
    numpy.ndarray of shape (S+1,) holding the truncated-Poisson
    probabilities pi[k] = (a**k / k!) / sum_j(a**j / j!) with a = lambd/mu.
    """
    # FIX: scipy.misc.factorial was deprecated and has been removed from
    # SciPy; scipy.special.factorial is the drop-in replacement.
    from scipy.special import factorial
    a = lambd / mu
    j = np.arange(S + 1)
    # Normalising constant: sum of a**k / k! for k = 0..S.
    d = np.sum(a**j / factorial(j))
    pi = np.empty(S + 1)
    pi[0] = 1. / d
    for k in range(1, S + 1):
        pi[k] = pi[0] * (a**k / factorial(k))
    return pi
def bcc_gauss(S, lambd, mu):
    """Stationary distribution of the Erlang-B chain via Gauss-Seidel.

    Repeatedly sweeps the balance equations of the M/M/S/S birth-death
    chain in place, renormalising after every sweep, until two
    consecutive iterates differ by less than 1e-6 in L1 norm.
    """
    pi = np.array([1. / (S + 1)] * (S + 1))
    delta = np.inf
    sweeps = 0
    while delta >= 1e-6:
        previous = np.array(pi)
        for state in range(S + 1):
            if state == 0:
                # Left boundary: lambd * pi[0] = mu * pi[1]
                pi[0] = mu * pi[1] / (1. * lambd)
            elif state == S:
                # Right boundary: lambd * pi[S-1] = S * mu * pi[S]
                pi[S] = (lambd * 1. * pi[S - 1]) / (state * mu * 1.)
            else:
                # Interior balance: inflow from neighbours / outflow rate.
                inflow = 1. * lambd * pi[state - 1] + (state + 1) * mu * 1. * pi[state + 1]
                pi[state] = inflow / (lambd * 1. + state * mu * 1.)
        pi = pi / np.sum(pi)
        delta = np.sum(np.abs(pi - previous))
        sweeps += 1
    return pi
def bcc_sim(S, lambd, mu, simtime):
    """Estimate the Erlang-B stationary distribution by simulation.

    Simulates the continuous-time trajectory of the M/M/S/S chain for
    `simtime` time units and returns the fraction of time spent in each
    state 0..S as an array of length S+1.

    NOTE(review): draws via scipy.stats.expon.rvs using the global numpy
    RNG -- results are stochastic unless the caller seeds numpy first.
    """
    remaining = simtime
    i = 0  # current state: number of busy servers
    ts = 0
    time = np.zeros(S+1)
    while remaining > 0:
        # Draw the time to the next arrival (T1) and next departure (T2);
        # at the boundaries only one transition is possible.
        if i == 0:
            T1 = expon.rvs(scale=1./lambd, size=1)
            T2 = np.inf
        elif i == S:
            T1 = np.inf
            T2 = expon.rvs(scale=1./(i*mu), size=1)
        else:
            T1 = expon.rvs(scale=1./lambd, size=1)
            T2 = expon.rvs(scale=1./(i*mu), size=1)
        if np.all(T1 < T2):
            # Arrival fires first: accumulate sojourn time, move up.
            ts = T1
            time[i] = time[i] + ts
            i = i+1
        else:
            # Departure fires first: accumulate sojourn time, move down.
            ts = T2
            time[i] = time[i] + ts
            i = i-1
        # ts is a length-1 array from expon.rvs; take its scalar.
        remaining = remaining - ts[0]
        progress = (simtime - remaining) / simtime
        # print "{0}% --> {1} remaining".format(progress*100.0, remaining)
    return time/simtime
def main():
    """Solve the Erlang-B chain over a grid of (S, a) values with the
    chosen method and plot the blocking probability P[S] as a 3-D surface.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('-m', '--method', type=str, required=True,
                        help='Metodo a usar para resolver el sistema',
                        choices=['gauss', 'simulation', 'recursive'])
    parser.add_argument('-l', '--lambd', type=float, required=True,
                        help='Tasa de arribos, mu se calcula dado el cociente')
    args = parser.parse_args()
    np.set_printoptions(precision=7, suppress=True)
    # 'viridis' is supplied by the bundled colormaps module for old matplotlib.
    plt.register_cmap(name='viridis', cmap=cmaps.viridis)
    fig = plt.figure()
    ax = fig.gca(projection='3d')
    # With more samples the interactive viewer starts to lag.
    X = np.arange(1, 51)  # number of servers, values 1..50
    Y = np.linspace(0.1, 4.9999, num=50)  # a = lambda/mu
    Z = np.array([0.]*X.shape[0]*Y.shape[0])
    Z.shape = (X.shape[0], Y.shape[0])
    for i in range(X.shape[0]):
        for j in range(Y.shape[0]):
            # mu is derived so that lambda/mu equals the grid value Y[j].
            mu = args.lambd / Y[j]
            if 'gauss' in args.method:
                P = bcc_gauss(X[i], args.lambd, mu)
            elif 'simulation' in args.method:
                P = bcc_sim(X[i], args.lambd, mu, 1000)
            elif 'recursive' in args.method:
                P = bcc_recursive(X[i], args.lambd, mu)
            # NOTE(review): Python 2 print statements -- this script targets py2.
            print 'P[S]=', P[-1], ' lambda=', args.lambd, ' mu=',
            print mu, ', S=', X[i]
            # P[-1] is the blocking probability (all S servers busy).
            Z[i][j] = P[-1]
    X, Y = np.meshgrid(X, Y)
    plt.xlabel('S')
    plt.ylabel('a=lambda/mu')
    surf = ax.plot_surface(X, Y, Z, rstride=1, cstride=1, cmap=cmaps.viridis,
                           linewidth=0, antialiased=True, alpha=1.0,
                           shade=False)
    # ax.set_zlim(0, 1.0)
    ax.zaxis.set_major_locator(LinearLocator(10))
    ax.zaxis.set_major_formatter(FormatStrFormatter('%.02f'))
    fig.colorbar(surf, shrink=0.5, aspect=5)
    plt.show()


if __name__ == '__main__':
    main()
| gpl-2.0 |
alexisrosuel/PyEWA | pyewa/distributions.py | 1 | 1217 |
"""
EWA.prior
A collection of common distributions
"""
import numpy as np
import matplotlib.pyplot as plt
class Distribution:
    """A discrete distribution: a support grid plus the pdf values on it."""

    def __init__(self, support=None, pdf=None):
        """Create a distribution.

        Parameters
        ----------
        support : ndarray, optional
            Grid of points; defaults to np.linspace(0, 1, 10).
        pdf : ndarray, optional
            Density values on the grid; defaults to np.ones(10).
        """
        # BUG FIX: the previous defaults were module-level numpy arrays
        # shared between every instance created with defaults, so an
        # in-place edit on one instance leaked into all others.  Build
        # fresh arrays per call via None sentinels instead.
        self.support = np.linspace(0, 1, 10) if support is None else support
        self.pdf = np.ones(shape=10) if pdf is None else pdf

    def update_pdf(self, pdf):
        """Replace the stored pdf values."""
        self.pdf = pdf

    def plot_distribution(self):
        """Plot the pdf: scatter for 1-D support, heatmap for 2-D."""
        if len(self.support.shape) == 1:
            plt.scatter(x=self.support, y=self.pdf)
            plt.xlabel('$\\theta$')
            plt.ylabel('$f(\\theta)$')
            plt.title('Distribution')
            plt.grid(True)
            plt.show()
        elif len(self.support.shape) == 2:
            extent = [self.support.min(), self.support.max(),
                      self.support.min(), self.support.max()]
            plt.imshow(self.pdf, cmap='Reds',
                       interpolation='nearest', extent=extent, origin='lower')
            plt.show()
        else:
            print('impossible to print, dimension of input > 2')
class Uniform:
    """Uniform distribution: equal density at every support point."""

    def __init__(self, support=np.linspace(0, 1, 10)):
        self.support = support
        # Each of the N grid points carries mass 1/N.
        n_points = np.prod(self.support.shape)
        self.densite = 1. / n_points
        self.pdf = np.full(self.support.shape, self.densite)
| mit |
vshtanko/scikit-learn | examples/gaussian_process/gp_diabetes_dataset.py | 223 | 1976 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
========================================================================
Gaussian Processes regression: goodness-of-fit on the 'diabetes' dataset
========================================================================
In this example, we fit a Gaussian Process model onto the diabetes
dataset.
We determine the correlation parameters with maximum likelihood
estimation (MLE). We use an anisotropic squared exponential
correlation model with a constant regression model. We also use a
nugget of 1e-2 to account for the (strong) noise in the targets.
We compute a cross-validation estimate of the coefficient of
determination (R2) without reperforming MLE, using the set of correlation
parameters found on the whole dataset.
"""
print(__doc__)

# Author: Vincent Dubourg <vincent.dubourg@gmail.com>
# Licence: BSD 3 clause

from sklearn import datasets
from sklearn.gaussian_process import GaussianProcess
from sklearn.cross_validation import cross_val_score, KFold

# Load the dataset from scikit's data sets
diabetes = datasets.load_diabetes()
X, y = diabetes.data, diabetes.target

# Instantiate a GP model: anisotropic absolute-exponential correlation
# (one theta per feature, 10 features) with a nugget for noisy targets.
gp = GaussianProcess(regr='constant', corr='absolute_exponential',
                     theta0=[1e-4] * 10, thetaL=[1e-12] * 10,
                     thetaU=[1e-2] * 10, nugget=1e-2, optimizer='Welch')

# Fit the GP model to the data performing maximum likelihood estimation
gp.fit(X, y)

# Deactivate maximum likelihood estimation for the cross-validation loop
gp.theta0 = gp.theta_  # Given correlation parameter = MLE
gp.thetaL, gp.thetaU = None, None  # None bounds deactivate MLE

# Perform a cross-validation estimate of the coefficient of determination using
# the cross_validation module using all CPUs available on the machine
# NOTE(review): n_jobs=1 below actually uses a single CPU, contradicting
# the comment above -- confirm which is intended.
K = 20  # folds
R2 = cross_val_score(gp, X, y=y, cv=KFold(y.size, K), n_jobs=1).mean()
print("The %d-Folds estimate of the coefficient of determination is R2 = %s"
      % (K, R2))
| bsd-3-clause |
murali-munna/scikit-learn | sklearn/feature_extraction/dict_vectorizer.py | 234 | 12267 | # Authors: Lars Buitinck
# Dan Blanchard <dblanchard@ets.org>
# License: BSD 3 clause
from array import array
from collections import Mapping
from operator import itemgetter
import numpy as np
import scipy.sparse as sp
from ..base import BaseEstimator, TransformerMixin
from ..externals import six
from ..externals.six.moves import xrange
from ..utils import check_array, tosequence
from ..utils.fixes import frombuffer_empty
def _tosequence(X):
    """Turn X into a sequence or ndarray, avoiding a copy if possible."""
    # A lone mapping is a single sample: wrap it in a one-element list.
    if isinstance(X, Mapping):
        return [X]
    return tosequence(X)
class DictVectorizer(BaseEstimator, TransformerMixin):
    """Transforms lists of feature-value mappings to vectors.

    This transformer turns lists of mappings (dict-like objects) of feature
    names to feature values into Numpy arrays or scipy.sparse matrices for use
    with scikit-learn estimators.

    When feature values are strings, this transformer will do a binary one-hot
    (aka one-of-K) coding: one boolean-valued feature is constructed for each
    of the possible string values that the feature can take on. For instance,
    a feature "f" that can take on the values "ham" and "spam" will become two
    features in the output, one signifying "f=ham", the other "f=spam".

    Features that do not occur in a sample (mapping) will have a zero value
    in the resulting array/matrix.

    Read more in the :ref:`User Guide <dict_feature_extraction>`.

    Parameters
    ----------
    dtype : callable, optional
        The type of feature values. Passed to Numpy array/scipy.sparse matrix
        constructors as the dtype argument.
    separator: string, optional
        Separator string used when constructing new features for one-hot
        coding.
    sparse: boolean, optional.
        Whether transform should produce scipy.sparse matrices.
        True by default.
    sort: boolean, optional.
        Whether ``feature_names_`` and ``vocabulary_`` should be sorted when fitting.
        True by default.

    Attributes
    ----------
    vocabulary_ : dict
        A dictionary mapping feature names to feature indices.

    feature_names_ : list
        A list of length n_features containing the feature names (e.g., "f=ham"
        and "f=spam").

    Examples
    --------
    >>> from sklearn.feature_extraction import DictVectorizer
    >>> v = DictVectorizer(sparse=False)
    >>> D = [{'foo': 1, 'bar': 2}, {'foo': 3, 'baz': 1}]
    >>> X = v.fit_transform(D)
    >>> X
    array([[ 2.,  0.,  1.],
           [ 0.,  1.,  3.]])
    >>> v.inverse_transform(X) == \
        [{'bar': 2.0, 'foo': 1.0}, {'baz': 1.0, 'foo': 3.0}]
    True
    >>> v.transform({'foo': 4, 'unseen_feature': 3})
    array([[ 0.,  0.,  4.]])

    See also
    --------
    FeatureHasher : performs vectorization using only a hash function.
    sklearn.preprocessing.OneHotEncoder : handles nominal/categorical features
      encoded as columns of integers.
    """

    def __init__(self, dtype=np.float64, separator="=", sparse=True,
                 sort=True):
        self.dtype = dtype
        self.separator = separator
        self.sparse = sparse
        self.sort = sort

    def fit(self, X, y=None):
        """Learn a list of feature name -> indices mappings.

        Parameters
        ----------
        X : Mapping or iterable over Mappings
            Dict(s) or Mapping(s) from feature names (arbitrary Python
            objects) to feature values (strings or convertible to dtype).
        y : (ignored)

        Returns
        -------
        self
        """
        feature_names = []
        vocab = {}

        for x in X:
            for f, v in six.iteritems(x):
                # String values are one-hot encoded: the feature name
                # becomes "<name><separator><value>".
                if isinstance(v, six.string_types):
                    f = "%s%s%s" % (f, self.separator, v)
                if f not in vocab:
                    feature_names.append(f)
                    vocab[f] = len(vocab)

        if self.sort:
            feature_names.sort()
            vocab = dict((f, i) for i, f in enumerate(feature_names))

        self.feature_names_ = feature_names
        self.vocabulary_ = vocab

        return self

    def _transform(self, X, fitting):
        # Shared worker for fit_transform (fitting=True) and transform
        # (fitting=False): builds a CSR matrix incrementally.
        # Sanity check: Python's array has no way of explicitly requesting the
        # signed 32-bit integers that scipy.sparse needs, so we use the next
        # best thing: typecode "i" (int). However, if that gives larger or
        # smaller integers than 32-bit ones, np.frombuffer screws up.
        assert array("i").itemsize == 4, (
            "sizeof(int) != 4 on your platform; please report this at"
            " https://github.com/scikit-learn/scikit-learn/issues and"
            " include the output from platform.platform() in your bug report")

        dtype = self.dtype
        if fitting:
            feature_names = []
            vocab = {}
        else:
            feature_names = self.feature_names_
            vocab = self.vocabulary_

        # Process everything as sparse regardless of setting
        X = [X] if isinstance(X, Mapping) else X

        indices = array("i")
        indptr = array("i", [0])
        # XXX we could change values to an array.array as well, but it
        # would require (heuristic) conversion of dtype to typecode...
        values = []

        # collect all the possible feature names and build sparse matrix at
        # same time
        for x in X:
            for f, v in six.iteritems(x):
                if isinstance(v, six.string_types):
                    f = "%s%s%s" % (f, self.separator, v)
                    v = 1
                if f in vocab:
                    indices.append(vocab[f])
                    values.append(dtype(v))
                else:
                    # Unseen features are added only while fitting;
                    # during transform they are silently ignored.
                    if fitting:
                        feature_names.append(f)
                        vocab[f] = len(vocab)
                        indices.append(vocab[f])
                        values.append(dtype(v))

            indptr.append(len(indices))

        if len(indptr) == 1:
            raise ValueError("Sample sequence X is empty.")

        indices = frombuffer_empty(indices, dtype=np.intc)
        indptr = np.frombuffer(indptr, dtype=np.intc)
        shape = (len(indptr) - 1, len(vocab))

        result_matrix = sp.csr_matrix((values, indices, indptr),
                                      shape=shape, dtype=dtype)

        # Sort everything if asked
        if fitting and self.sort:
            feature_names.sort()
            # Remap columns so they follow the sorted feature order.
            map_index = np.empty(len(feature_names), dtype=np.int32)
            for new_val, f in enumerate(feature_names):
                map_index[new_val] = vocab[f]
                vocab[f] = new_val
            result_matrix = result_matrix[:, map_index]

        if self.sparse:
            result_matrix.sort_indices()
        else:
            result_matrix = result_matrix.toarray()

        if fitting:
            self.feature_names_ = feature_names
            self.vocabulary_ = vocab

        return result_matrix

    def fit_transform(self, X, y=None):
        """Learn a list of feature name -> indices mappings and transform X.

        Like fit(X) followed by transform(X), but does not require
        materializing X in memory.

        Parameters
        ----------
        X : Mapping or iterable over Mappings
            Dict(s) or Mapping(s) from feature names (arbitrary Python
            objects) to feature values (strings or convertible to dtype).
        y : (ignored)

        Returns
        -------
        Xa : {array, sparse matrix}
            Feature vectors; always 2-d.
        """
        return self._transform(X, fitting=True)

    def inverse_transform(self, X, dict_type=dict):
        """Transform array or sparse matrix X back to feature mappings.

        X must have been produced by this DictVectorizer's transform or
        fit_transform method; it may only have passed through transformers
        that preserve the number of features and their order.

        In the case of one-hot/one-of-K coding, the constructed feature
        names and values are returned rather than the original ones.

        Parameters
        ----------
        X : {array-like, sparse matrix}, shape = [n_samples, n_features]
            Sample matrix.
        dict_type : callable, optional
            Constructor for feature mappings. Must conform to the
            collections.Mapping API.

        Returns
        -------
        D : list of dict_type objects, length = n_samples
            Feature mappings for the samples in X.
        """
        # COO matrix is not subscriptable
        X = check_array(X, accept_sparse=['csr', 'csc'])
        n_samples = X.shape[0]

        names = self.feature_names_
        dicts = [dict_type() for _ in xrange(n_samples)]

        if sp.issparse(X):
            for i, j in zip(*X.nonzero()):
                dicts[i][names[j]] = X[i, j]
        else:
            # Dense path: only record non-zero entries.
            for i, d in enumerate(dicts):
                for j, v in enumerate(X[i, :]):
                    if v != 0:
                        d[names[j]] = X[i, j]

        return dicts

    def transform(self, X, y=None):
        """Transform feature->value dicts to array or sparse matrix.

        Named features not encountered during fit or fit_transform will be
        silently ignored.

        Parameters
        ----------
        X : Mapping or iterable over Mappings, length = n_samples
            Dict(s) or Mapping(s) from feature names (arbitrary Python
            objects) to feature values (strings or convertible to dtype).
        y : (ignored)

        Returns
        -------
        Xa : {array, sparse matrix}
            Feature vectors; always 2-d.
        """
        if self.sparse:
            return self._transform(X, fitting=False)

        else:
            # Dense path: fill a zero matrix directly from the vocabulary.
            dtype = self.dtype
            vocab = self.vocabulary_
            X = _tosequence(X)
            Xa = np.zeros((len(X), len(vocab)), dtype=dtype)

            for i, x in enumerate(X):
                for f, v in six.iteritems(x):
                    if isinstance(v, six.string_types):
                        f = "%s%s%s" % (f, self.separator, v)
                        v = 1
                    try:
                        Xa[i, vocab[f]] = dtype(v)
                    except KeyError:
                        # Feature unseen at fit time: silently ignore.
                        pass

            return Xa

    def get_feature_names(self):
        """Returns a list of feature names, ordered by their indices.

        If one-of-K coding is applied to categorical features, this will
        include the constructed feature names but not the original ones.
        """
        return self.feature_names_

    def restrict(self, support, indices=False):
        """Restrict the features to those in support using feature selection.

        This function modifies the estimator in-place.

        Parameters
        ----------
        support : array-like
            Boolean mask or list of indices (as returned by the get_support
            member of feature selectors).
        indices : boolean, optional
            Whether support is a list of indices.

        Returns
        -------
        self

        Examples
        --------
        >>> from sklearn.feature_extraction import DictVectorizer
        >>> from sklearn.feature_selection import SelectKBest, chi2
        >>> v = DictVectorizer()
        >>> D = [{'foo': 1, 'bar': 2}, {'foo': 3, 'baz': 1}]
        >>> X = v.fit_transform(D)
        >>> support = SelectKBest(chi2, k=2).fit(X, [0, 1])
        >>> v.get_feature_names()
        ['bar', 'baz', 'foo']
        >>> v.restrict(support.get_support()) # doctest: +ELLIPSIS
        DictVectorizer(dtype=..., separator='=', sort=True,
                sparse=True)
        >>> v.get_feature_names()
        ['bar', 'foo']
        """
        if not indices:
            # Convert a boolean mask to the equivalent index array.
            support = np.where(support)[0]

        names = self.feature_names_
        new_vocab = {}
        for i in support:
            new_vocab[names[i]] = len(new_vocab)

        self.vocabulary_ = new_vocab
        self.feature_names_ = [f for f, i in sorted(six.iteritems(new_vocab),
                                                    key=itemgetter(1))]

        return self
| bsd-3-clause |
marcusrehm/serenata-de-amor | rosie/rosie/chamber_of_deputies/tests/test_traveled_speeds_classifier.py | 2 | 3774 | from unittest import TestCase
import numpy as np
import pandas as pd
import sklearn
from numpy.testing import assert_array_equal
from rosie.chamber_of_deputies.classifiers.traveled_speeds_classifier import TraveledSpeedsClassifier
class TestTraveledSpeedsClassifier(TestCase):
    """Behavioural tests for TraveledSpeedsClassifier, driven by a CSV
    fixture of reimbursements whose inlier/outlier labels are known.
    """

    def setUp(self):
        # recipient_id must stay a string: it may carry leading zeros.
        self.dataset = pd.read_csv('rosie/chamber_of_deputies/tests/fixtures/traveled_speeds_classifier.csv',
                                   dtype={'recipient_id': np.str})
        self.subject = TraveledSpeedsClassifier()
        self.subject.fit(self.dataset)

    def test_fit_learns_a_polynomial_for_regression(self):
        self.assertIsInstance(self.subject.polynomial, np.ndarray)

    def test_predict_doesnt_work_before_fitting_the_model(self):
        subject = TraveledSpeedsClassifier()
        with self.assertRaises(sklearn.exceptions.NotFittedError):
            subject.predict(self.dataset)

    def test_predict_returns_a_prediction_for_each_observation(self):
        prediction = self.subject.predict(self.dataset)
        self.assertEqual(len(prediction), len(self.dataset))

    def test_predict_considers_meal_reimbursements_in_days_with_more_than_8_outliers(self):
        # -1 marks an outlier; rows 0-8 of the fixture are such a day.
        prediction = self.subject.predict(self.dataset)
        assert_array_equal(np.repeat(-1, 9), prediction[:9])

    def test_predict_considers_non_meal_reibursement_an_inlier(self):
        prediction = self.subject.predict(self.dataset)
        self.assertEqual(1, prediction[14])

    def test_predict_considers_non_meal_reibursement_an_inlier_even_when_more_than_8_meal_reimbursements(self):
        prediction = self.subject.predict(self.dataset)
        self.assertEqual(1, prediction[9])

    def test_predict_considers_meal_reibursement_without_congressperson_id_an_inlier_even_when_more_than_8_meal_reimbursements(self):
        prediction = self.subject.predict(self.dataset)
        self.assertEqual(1, prediction[10])

    def test_predict_considers_meal_reibursement_without_latitude_an_inlier_even_when_more_than_8_meal_reimbursements(self):
        prediction = self.subject.predict(self.dataset)
        self.assertEqual(1, prediction[11])

    def test_predict_considers_meal_reibursement_without_longitude_an_inlier_even_when_more_than_8_meal_reimbursements(self):
        prediction = self.subject.predict(self.dataset)
        self.assertEqual(1, prediction[12])

    def test_predict_uses_learned_thresholds_from_fit_dataset(self):
        subject = TraveledSpeedsClassifier(contamination=.6)
        subject.fit(self.dataset)
        assert_array_equal(
            np.repeat(-1, 6), subject.predict(self.dataset[13:19]))

    def test_predict_limits_the_number_of_outliers_with_contamination_param(self):
        subject = TraveledSpeedsClassifier(contamination=.5)
        subject.fit(self.dataset)
        returned_contamination = \
            (subject.predict(self.dataset) == -1).sum() / len(self.dataset)
        self.assertLess(returned_contamination, .5)

    def test_predict_contamination_may_go_higher_than_expected_given_expenses_threshold(self):
        subject = TraveledSpeedsClassifier(contamination=.2)
        subject.fit(self.dataset)
        returned_contamination = \
            (subject.predict(self.dataset) == -1).sum() / len(self.dataset)
        self.assertGreater(returned_contamination, .2)

    def test_predict_validates_range_of_values_for_contamination_param(self):
        # contamination must lie strictly between 0 and 1.
        with self.assertRaises(ValueError):
            TraveledSpeedsClassifier(contamination=0)
        with self.assertRaises(ValueError):
            TraveledSpeedsClassifier(contamination=1)

    def test_is_company_coordinates_in_brazil(self):
        prediction = self.subject.predict(self.dataset)
        self.assertEqual(1, prediction[28])
| mit |
liberatorqjw/scikit-learn | sklearn/utils/arpack.py | 31 | 64776 | """
This contains a copy of the future version of
scipy.sparse.linalg.eigen.arpack.eigsh
It's an upgraded wrapper of the ARPACK library which
allows the use of shift-invert mode for symmetric matrices.
Find a few eigenvectors and eigenvalues of a matrix.
Uses ARPACK: http://www.caam.rice.edu/software/ARPACK/
"""
# Wrapper implementation notes
#
# ARPACK Entry Points
# -------------------
# The entry points to ARPACK are
# - (s,d)seupd : single and double precision symmetric matrix
# - (s,d,c,z)neupd: single,double,complex,double complex general matrix
# This wrapper puts the *neupd (general matrix) interfaces in eigs()
# and the *seupd (symmetric matrix) in eigsh().
# There is no Hermetian complex/double complex interface.
# To find eigenvalues of a Hermetian matrix you
# must use eigs() and not eigsh()
# It might be desirable to handle the Hermetian case differently
# and, for example, return real eigenvalues.
# Number of eigenvalues returned and complex eigenvalues
# ------------------------------------------------------
# The ARPACK nonsymmetric real and double interface (s,d)naupd return
# eigenvalues and eigenvectors in real (float,double) arrays.
# Since the eigenvalues and eigenvectors are, in general, complex
# ARPACK puts the real and imaginary parts in consecutive entries
# in real-valued arrays. This wrapper puts the real entries
# into complex data types and attempts to return the requested eigenvalues
# and eigenvectors.
# Solver modes
# ------------
# ARPACK and handle shifted and shift-inverse computations
# for eigenvalues by providing a shift (sigma) and a solver.
__docformat__ = "restructuredtext en"
__all__ = ['eigs', 'eigsh', 'svds', 'ArpackError', 'ArpackNoConvergence']
import warnings
from scipy.sparse.linalg.eigen.arpack import _arpack
import numpy as np
from scipy.sparse.linalg.interface import aslinearoperator, LinearOperator
from scipy.sparse import identity, isspmatrix, isspmatrix_csr
from scipy.linalg import lu_factor, lu_solve
from scipy.sparse.sputils import isdense
from scipy.sparse.linalg import gmres, splu
import scipy
from distutils.version import LooseVersion
_type_conv = {'f': 's', 'd': 'd', 'F': 'c', 'D': 'z'}
_ndigits = {'f': 5, 'd': 12, 'F': 5, 'D': 12}
DNAUPD_ERRORS = {
0: "Normal exit.",
1: "Maximum number of iterations taken. "
"All possible eigenvalues of OP has been found. IPARAM(5) "
"returns the number of wanted converged Ritz values.",
2: "No longer an informational error. Deprecated starting "
"with release 2 of ARPACK.",
3: "No shifts could be applied during a cycle of the "
"Implicitly restarted Arnoldi iteration. One possibility "
"is to increase the size of NCV relative to NEV. ",
-1: "N must be positive.",
-2: "NEV must be positive.",
-3: "NCV-NEV >= 2 and less than or equal to N.",
-4: "The maximum number of Arnoldi update iterations allowed "
"must be greater than zero.",
-5: " WHICH must be one of 'LM', 'SM', 'LR', 'SR', 'LI', 'SI'",
-6: "BMAT must be one of 'I' or 'G'.",
-7: "Length of private work array WORKL is not sufficient.",
-8: "Error return from LAPACK eigenvalue calculation;",
-9: "Starting vector is zero.",
-10: "IPARAM(7) must be 1,2,3,4.",
-11: "IPARAM(7) = 1 and BMAT = 'G' are incompatible.",
-12: "IPARAM(1) must be equal to 0 or 1.",
-13: "NEV and WHICH = 'BE' are incompatible.",
-9999: "Could not build an Arnoldi factorization. "
"IPARAM(5) returns the size of the current Arnoldi "
"factorization. The user is advised to check that "
"enough workspace and array storage has been allocated."
}
SNAUPD_ERRORS = DNAUPD_ERRORS
ZNAUPD_ERRORS = DNAUPD_ERRORS.copy()
ZNAUPD_ERRORS[-10] = "IPARAM(7) must be 1,2,3."
CNAUPD_ERRORS = ZNAUPD_ERRORS
DSAUPD_ERRORS = {
0: "Normal exit.",
1: "Maximum number of iterations taken. "
"All possible eigenvalues of OP has been found.",
2: "No longer an informational error. Deprecated starting with "
"release 2 of ARPACK.",
3: "No shifts could be applied during a cycle of the Implicitly "
"restarted Arnoldi iteration. One possibility is to increase "
"the size of NCV relative to NEV. ",
-1: "N must be positive.",
-2: "NEV must be positive.",
-3: "NCV must be greater than NEV and less than or equal to N.",
-4: "The maximum number of Arnoldi update iterations allowed "
"must be greater than zero.",
-5: "WHICH must be one of 'LM', 'SM', 'LA', 'SA' or 'BE'.",
-6: "BMAT must be one of 'I' or 'G'.",
-7: "Length of private work array WORKL is not sufficient.",
-8: "Error return from trid. eigenvalue calculation; "
"Informational error from LAPACK routine dsteqr .",
-9: "Starting vector is zero.",
-10: "IPARAM(7) must be 1,2,3,4,5.",
-11: "IPARAM(7) = 1 and BMAT = 'G' are incompatible.",
-12: "IPARAM(1) must be equal to 0 or 1.",
-13: "NEV and WHICH = 'BE' are incompatible. ",
-9999: "Could not build an Arnoldi factorization. "
"IPARAM(5) returns the size of the current Arnoldi "
"factorization. The user is advised to check that "
"enough workspace and array storage has been allocated.",
}
SSAUPD_ERRORS = DSAUPD_ERRORS
DNEUPD_ERRORS = {
0: "Normal exit.",
1: "The Schur form computed by LAPACK routine dlahqr "
"could not be reordered by LAPACK routine dtrsen. "
"Re-enter subroutine dneupd with IPARAM(5)NCV and "
"increase the size of the arrays DR and DI to have "
"dimension at least dimension NCV and allocate at least NCV "
"columns for Z. NOTE: Not necessary if Z and V share "
"the same space. Please notify the authors if this error "
"occurs.",
-1: "N must be positive.",
-2: "NEV must be positive.",
-3: "NCV-NEV >= 2 and less than or equal to N.",
-5: "WHICH must be one of 'LM', 'SM', 'LR', 'SR', 'LI', 'SI'",
-6: "BMAT must be one of 'I' or 'G'.",
-7: "Length of private work WORKL array is not sufficient.",
-8: "Error return from calculation of a real Schur form. "
"Informational error from LAPACK routine dlahqr .",
-9: "Error return from calculation of eigenvectors. "
"Informational error from LAPACK routine dtrevc.",
-10: "IPARAM(7) must be 1,2,3,4.",
-11: "IPARAM(7) = 1 and BMAT = 'G' are incompatible.",
-12: "HOWMNY = 'S' not yet implemented",
-13: "HOWMNY must be one of 'A' or 'P' if RVEC = .true.",
-14: "DNAUPD did not find any eigenvalues to sufficient "
"accuracy.",
-15: "DNEUPD got a different count of the number of converged "
"Ritz values than DNAUPD got. This indicates the user "
"probably made an error in passing data from DNAUPD to "
"DNEUPD or that the data was modified before entering "
"DNEUPD",
}
SNEUPD_ERRORS = DNEUPD_ERRORS.copy()
SNEUPD_ERRORS[1] = ("The Schur form computed by LAPACK routine slahqr "
"could not be reordered by LAPACK routine strsen . "
"Re-enter subroutine dneupd with IPARAM(5)=NCV and "
"increase the size of the arrays DR and DI to have "
"dimension at least dimension NCV and allocate at least "
"NCV columns for Z. NOTE: Not necessary if Z and V share "
"the same space. Please notify the authors if this error "
"occurs.")
SNEUPD_ERRORS[-14] = ("SNAUPD did not find any eigenvalues to sufficient "
"accuracy.")
SNEUPD_ERRORS[-15] = ("SNEUPD got a different count of the number of "
"converged Ritz values than SNAUPD got. This indicates "
"the user probably made an error in passing data from "
"SNAUPD to SNEUPD or that the data was modified before "
"entering SNEUPD")
ZNEUPD_ERRORS = {0: "Normal exit.",
1: "The Schur form computed by LAPACK routine csheqr "
"could not be reordered by LAPACK routine ztrsen. "
"Re-enter subroutine zneupd with IPARAM(5)=NCV and "
"increase the size of the array D to have "
"dimension at least dimension NCV and allocate at least "
"NCV columns for Z. NOTE: Not necessary if Z and V share "
"the same space. Please notify the authors if this error "
"occurs.",
-1: "N must be positive.",
-2: "NEV must be positive.",
-3: "NCV-NEV >= 1 and less than or equal to N.",
-5: "WHICH must be one of 'LM', 'SM', 'LR', 'SR', 'LI', 'SI'",
-6: "BMAT must be one of 'I' or 'G'.",
-7: "Length of private work WORKL array is not sufficient.",
-8: "Error return from LAPACK eigenvalue calculation. "
"This should never happened.",
-9: "Error return from calculation of eigenvectors. "
"Informational error from LAPACK routine ztrevc.",
-10: "IPARAM(7) must be 1,2,3",
-11: "IPARAM(7) = 1 and BMAT = 'G' are incompatible.",
-12: "HOWMNY = 'S' not yet implemented",
-13: "HOWMNY must be one of 'A' or 'P' if RVEC = .true.",
-14: "ZNAUPD did not find any eigenvalues to sufficient "
"accuracy.",
-15: "ZNEUPD got a different count of the number of "
"converged Ritz values than ZNAUPD got. This "
"indicates the user probably made an error in passing "
"data from ZNAUPD to ZNEUPD or that the data was "
"modified before entering ZNEUPD"}
CNEUPD_ERRORS = ZNEUPD_ERRORS.copy()
CNEUPD_ERRORS[-14] = ("CNAUPD did not find any eigenvalues to sufficient "
"accuracy.")
CNEUPD_ERRORS[-15] = ("CNEUPD got a different count of the number of "
"converged Ritz values than CNAUPD got. This indicates "
"the user probably made an error in passing data from "
"CNAUPD to CNEUPD or that the data was modified before "
"entering CNEUPD")
DSEUPD_ERRORS = {
0: "Normal exit.",
-1: "N must be positive.",
-2: "NEV must be positive.",
-3: "NCV must be greater than NEV and less than or equal to N.",
-5: "WHICH must be one of 'LM', 'SM', 'LA', 'SA' or 'BE'.",
-6: "BMAT must be one of 'I' or 'G'.",
-7: "Length of private work WORKL array is not sufficient.",
-8: ("Error return from trid. eigenvalue calculation; "
"Information error from LAPACK routine dsteqr."),
-9: "Starting vector is zero.",
-10: "IPARAM(7) must be 1,2,3,4,5.",
-11: "IPARAM(7) = 1 and BMAT = 'G' are incompatible.",
-12: "NEV and WHICH = 'BE' are incompatible.",
-14: "DSAUPD did not find any eigenvalues to sufficient accuracy.",
-15: "HOWMNY must be one of 'A' or 'S' if RVEC = .true.",
-16: "HOWMNY = 'S' not yet implemented",
-17: ("DSEUPD got a different count of the number of converged "
"Ritz values than DSAUPD got. This indicates the user "
"probably made an error in passing data from DSAUPD to "
"DSEUPD or that the data was modified before entering "
"DSEUPD.")
}
SSEUPD_ERRORS = DSEUPD_ERRORS.copy()
SSEUPD_ERRORS[-14] = ("SSAUPD did not find any eigenvalues "
"to sufficient accuracy.")
SSEUPD_ERRORS[-17] = ("SSEUPD got a different count of the number of "
"converged "
"Ritz values than SSAUPD got. This indicates the user "
"probably made an error in passing data from SSAUPD to "
"SSEUPD or that the data was modified before entering "
"SSEUPD.")
_SAUPD_ERRORS = {'d': DSAUPD_ERRORS,
's': SSAUPD_ERRORS}
_NAUPD_ERRORS = {'d': DNAUPD_ERRORS,
's': SNAUPD_ERRORS,
'z': ZNAUPD_ERRORS,
'c': CNAUPD_ERRORS}
_SEUPD_ERRORS = {'d': DSEUPD_ERRORS,
's': SSEUPD_ERRORS}
_NEUPD_ERRORS = {'d': DNEUPD_ERRORS,
's': SNEUPD_ERRORS,
'z': ZNEUPD_ERRORS,
'c': CNEUPD_ERRORS}
# accepted values of parameter WHICH in _SEUPD
_SEUPD_WHICH = ['LM', 'SM', 'LA', 'SA', 'BE']
# accepted values of parameter WHICH in _NAUPD
_NEUPD_WHICH = ['LM', 'SM', 'LR', 'SR', 'LI', 'SI']
class ArpackError(RuntimeError):
    """
    ARPACK error

    Wraps an ARPACK INFO return code, translating it to a human-readable
    message via the given error dictionary.
    """
    def __init__(self, info, infodict=_NAUPD_ERRORS):
        text = "ARPACK error %d: %s" % (info,
                                        infodict.get(info, "Unknown error"))
        RuntimeError.__init__(self, text)
class ArpackNoConvergence(ArpackError):
    """
    ARPACK iteration did not converge

    Attributes
    ----------
    eigenvalues : ndarray
        Partial result. Converged eigenvalues.
    eigenvectors : ndarray
        Partial result. Converged eigenvectors.
    """
    def __init__(self, msg, eigenvalues, eigenvectors):
        # Reuse the base formatting machinery by mapping -1 to this message.
        super(ArpackNoConvergence, self).__init__(-1, {-1: msg})
        self.eigenvalues = eigenvalues
        self.eigenvectors = eigenvectors
class _ArpackParams(object):
def __init__(self, n, k, tp, mode=1, sigma=None,
ncv=None, v0=None, maxiter=None, which="LM", tol=0):
if k <= 0:
raise ValueError("k must be positive, k=%d" % k)
if maxiter is None:
maxiter = n * 10
if maxiter <= 0:
raise ValueError("maxiter must be positive, maxiter=%d" % maxiter)
if tp not in 'fdFD':
raise ValueError("matrix type must be 'f', 'd', 'F', or 'D'")
if v0 is not None:
# ARPACK overwrites its initial resid, make a copy
self.resid = np.array(v0, copy=True)
info = 1
else:
self.resid = np.zeros(n, tp)
info = 0
if sigma is None:
#sigma not used
self.sigma = 0
else:
self.sigma = sigma
if ncv is None:
ncv = 2 * k + 1
ncv = min(ncv, n)
self.v = np.zeros((n, ncv), tp) # holds Ritz vectors
self.iparam = np.zeros(11, "int")
# set solver mode and parameters
ishfts = 1
self.mode = mode
self.iparam[0] = ishfts
self.iparam[2] = maxiter
self.iparam[3] = 1
self.iparam[6] = mode
self.n = n
self.tol = tol
self.k = k
self.maxiter = maxiter
self.ncv = ncv
self.which = which
self.tp = tp
self.info = info
self.converged = False
self.ido = 0
def _raise_no_convergence(self):
msg = "No convergence (%d iterations, %d/%d eigenvectors converged)"
k_ok = self.iparam[4]
num_iter = self.iparam[2]
try:
ev, vec = self.extract(True)
except ArpackError as err:
msg = "%s [%s]" % (msg, err)
ev = np.zeros((0,))
vec = np.zeros((self.n, 0))
k_ok = 0
raise ArpackNoConvergence(msg % (num_iter, k_ok, self.k), ev, vec)
class _SymmetricArpackParams(_ArpackParams):
    """Driver state for ARPACK's real-symmetric solvers (*saupd/*seupd).

    ``iterate`` performs one reverse-communication step; when ``converged``
    becomes True, ``extract`` recovers the eigenvalues/eigenvectors.
    """

    def __init__(self, n, k, tp, matvec, mode=1, M_matvec=None,
                 Minv_matvec=None, sigma=None,
                 ncv=None, v0=None, maxiter=None, which="LM", tol=0):
        # The following modes are supported:
        #   mode = 1:
        #     Solve the standard eigenvalue problem:
        #     A*x = lambda*x :
        #        A - symmetric
        #     Arguments should be
        #        matvec      = left multiplication by A
        #        M_matvec    = None [not used]
        #        Minv_matvec = None [not used]
        #
        #   mode = 2:
        #     Solve the general eigenvalue problem:
        #     A*x = lambda*M*x
        #        A - symmetric
        #        M - symmetric positive definite
        #     Arguments should be
        #        matvec      = left multiplication by A
        #        M_matvec    = left multiplication by M
        #        Minv_matvec = left multiplication by M^-1
        #
        #   mode = 3:
        #     Solve the general eigenvalue problem in shift-invert mode:
        #     A*x = lambda*M*x
        #        A - symmetric
        #        M - symmetric positive semi-definite
        #     Arguments should be
        #        matvec      = None [not used]
        #        M_matvec    = left multiplication by M
        #                      or None, if M is the identity
        #        Minv_matvec = left multiplication by [A-sigma*M]^-1
        #
        #   mode = 4:
        #     Solve the general eigenvalue problem in Buckling mode:
        #     A*x = lambda*AG*x
        #        A  - symmetric positive semi-definite
        #        AG - symmetric indefinite
        #     Arguments should be
        #        matvec      = left multiplication by A
        #        M_matvec    = None [not used]
        #        Minv_matvec = left multiplication by [A-sigma*AG]^-1
        #
        #   mode = 5:
        #     Solve the general eigenvalue problem in Cayley-transformed mode:
        #     A*x = lambda*M*x
        #        A - symmetric
        #        M - symmetric positive semi-definite
        #     Arguments should be
        #        matvec      = left multiplication by A
        #        M_matvec    = left multiplication by M
        #                      or None, if M is the identity
        #        Minv_matvec = left multiplication by [A-sigma*M]^-1
        if mode == 1:
            if matvec is None:
                raise ValueError("matvec must be specified for mode=1")
            if M_matvec is not None:
                raise ValueError("M_matvec cannot be specified for mode=1")
            if Minv_matvec is not None:
                raise ValueError("Minv_matvec cannot be specified for mode=1")

            self.OP = matvec
            self.B = lambda x: x
            self.bmat = 'I'
        elif mode == 2:
            if matvec is None:
                raise ValueError("matvec must be specified for mode=2")
            if M_matvec is None:
                raise ValueError("M_matvec must be specified for mode=2")
            if Minv_matvec is None:
                raise ValueError("Minv_matvec must be specified for mode=2")

            # OP = M^-1 * A; the pieces are kept separately because the
            # reverse-communication loop applies them in two steps.
            self.OP = lambda x: Minv_matvec(matvec(x))
            self.OPa = Minv_matvec
            self.OPb = matvec
            self.B = M_matvec
            self.bmat = 'G'
        elif mode == 3:
            if matvec is not None:
                raise ValueError("matvec must not be specified for mode=3")
            if Minv_matvec is None:
                raise ValueError("Minv_matvec must be specified for mode=3")

            if M_matvec is None:
                self.OP = Minv_matvec
                self.OPa = Minv_matvec
                self.B = lambda x: x
                self.bmat = 'I'
            else:
                self.OP = lambda x: Minv_matvec(M_matvec(x))
                self.OPa = Minv_matvec
                self.B = M_matvec
                self.bmat = 'G'
        elif mode == 4:
            if matvec is None:
                raise ValueError("matvec must be specified for mode=4")
            if M_matvec is not None:
                raise ValueError("M_matvec must not be specified for mode=4")
            if Minv_matvec is None:
                raise ValueError("Minv_matvec must be specified for mode=4")
            self.OPa = Minv_matvec
            self.OP = lambda x: self.OPa(matvec(x))
            self.B = matvec
            self.bmat = 'G'
        elif mode == 5:
            if matvec is None:
                raise ValueError("matvec must be specified for mode=5")
            if Minv_matvec is None:
                raise ValueError("Minv_matvec must be specified for mode=5")

            self.OPa = Minv_matvec
            self.A_matvec = matvec

            if M_matvec is None:
                # Cayley transform with M = identity.
                self.OP = lambda x: Minv_matvec(matvec(x) + sigma * x)
                self.B = lambda x: x
                self.bmat = 'I'
            else:
                self.OP = lambda x: Minv_matvec(matvec(x)
                                                + sigma * M_matvec(x))
                self.B = M_matvec
                self.bmat = 'G'
        else:
            raise ValueError("mode=%i not implemented" % mode)

        if which not in _SEUPD_WHICH:
            raise ValueError("which must be one of %s"
                             % ' '.join(_SEUPD_WHICH))
        if k >= n:
            raise ValueError("k must be less than rank(A), k=%d" % k)

        _ArpackParams.__init__(self, n, k, tp, mode, sigma,
                               ncv, v0, maxiter, which, tol)

        if self.ncv > n or self.ncv <= k:
            raise ValueError("ncv must be k<ncv<=n, ncv=%s" % self.ncv)

        # Workspace sizes as required by *saupd (see the ARPACK docs).
        self.workd = np.zeros(3 * n, self.tp)
        self.workl = np.zeros(self.ncv * (self.ncv + 8), self.tp)

        ltr = _type_conv[self.tp]
        if ltr not in ["s", "d"]:
            raise ValueError("Input matrix is not real-valued.")

        self._arpack_solver = _arpack.__dict__[ltr + 'saupd']
        self._arpack_extract = _arpack.__dict__[ltr + 'seupd']

        self.iterate_infodict = _SAUPD_ERRORS[ltr]
        self.extract_infodict = _SEUPD_ERRORS[ltr]

        self.ipntr = np.zeros(11, "int")

    def iterate(self):
        """Perform one reverse-communication step of *saupd.

        ARPACK signals via ``ido`` which operator it wants applied to the
        vector at ``ipntr[0]`` (1-based), writing the result at ``ipntr[1]``.
        """
        self.ido, self.resid, self.v, self.iparam, self.ipntr, self.info = \
            self._arpack_solver(self.ido, self.bmat, self.which, self.k,
                                self.tol, self.resid, self.v, self.iparam,
                                self.ipntr, self.workd, self.workl, self.info)

        # ipntr holds 1-based Fortran offsets into workd.
        xslice = slice(self.ipntr[0] - 1, self.ipntr[0] - 1 + self.n)
        yslice = slice(self.ipntr[1] - 1, self.ipntr[1] - 1 + self.n)
        if self.ido == -1:
            # initialization
            self.workd[yslice] = self.OP(self.workd[xslice])
        elif self.ido == 1:
            # compute y = Op*x
            if self.mode == 1:
                self.workd[yslice] = self.OP(self.workd[xslice])
            elif self.mode == 2:
                self.workd[xslice] = self.OPb(self.workd[xslice])
                self.workd[yslice] = self.OPa(self.workd[xslice])
            elif self.mode == 5:
                # Cayley mode: B*x was already computed and is at ipntr[2].
                Bxslice = slice(self.ipntr[2] - 1, self.ipntr[2] - 1 + self.n)
                Ax = self.A_matvec(self.workd[xslice])
                self.workd[yslice] = self.OPa(Ax + (self.sigma *
                                                    self.workd[Bxslice]))
            else:
                Bxslice = slice(self.ipntr[2] - 1, self.ipntr[2] - 1 + self.n)
                self.workd[yslice] = self.OPa(self.workd[Bxslice])
        elif self.ido == 2:
            self.workd[yslice] = self.B(self.workd[xslice])
        elif self.ido == 3:
            raise ValueError("ARPACK requested user shifts. Assure ISHIFT==0")
        else:
            # Any other ido means the iteration has finished.
            self.converged = True

            if self.info == 0:
                pass
            elif self.info == 1:
                self._raise_no_convergence()
            else:
                raise ArpackError(self.info, infodict=self.iterate_infodict)

    def extract(self, return_eigenvectors):
        """Post-process with *seupd, returning (d, z) or just d.

        Only the first ``iparam[4]`` (number converged) columns are valid.
        """
        rvec = return_eigenvectors
        ierr = 0
        howmny = 'A'  # return all eigenvectors
        sselect = np.zeros(self.ncv, 'int')  # unused
        d, z, ierr = self._arpack_extract(rvec, howmny, sselect, self.sigma,
                                          self.bmat, self.which, self.k,
                                          self.tol, self.resid, self.v,
                                          self.iparam[0:7], self.ipntr,
                                          self.workd[0:2 * self.n],
                                          self.workl, ierr)
        if ierr != 0:
            raise ArpackError(ierr, infodict=self.extract_infodict)
        k_ok = self.iparam[4]
        d = d[:k_ok]
        z = z[:, :k_ok]

        if return_eigenvectors:
            return d, z
        else:
            return d
class _UnsymmetricArpackParams(_ArpackParams):
    """Driver state for ARPACK's nonsymmetric solvers (*naupd/*neupd).

    Handles real and complex matrices; the real case requires extra
    post-processing in ``extract`` to rebuild complex-conjugate eigenpairs
    from the packed real storage that *neupd returns.
    """

    def __init__(self, n, k, tp, matvec, mode=1, M_matvec=None,
                 Minv_matvec=None, sigma=None,
                 ncv=None, v0=None, maxiter=None, which="LM", tol=0):
        # The following modes are supported:
        #   mode = 1:
        #     Solve the standard eigenvalue problem:
        #     A*x = lambda*x
        #        A - square matrix
        #     Arguments should be
        #        matvec      = left multiplication by A
        #        M_matvec    = None [not used]
        #        Minv_matvec = None [not used]
        #
        #   mode = 2:
        #     Solve the generalized eigenvalue problem:
        #     A*x = lambda*M*x
        #        A - square matrix
        #        M - symmetric, positive semi-definite
        #     Arguments should be
        #        matvec      = left multiplication by A
        #        M_matvec    = left multiplication by M
        #        Minv_matvec = left multiplication by M^-1
        #
        #   mode = 3,4:
        #     Solve the general eigenvalue problem in shift-invert mode:
        #     A*x = lambda*M*x
        #        A - square matrix
        #        M - symmetric, positive semi-definite
        #     Arguments should be
        #        matvec      = None [not used]
        #        M_matvec    = left multiplication by M
        #                      or None, if M is the identity
        #        Minv_matvec = left multiplication by [A-sigma*M]^-1
        #     if A is real and mode==3, use the real part of Minv_matvec
        #     if A is real and mode==4, use the imag part of Minv_matvec
        #     if A is complex and mode==3,
        #        use real and imag parts of Minv_matvec
        if mode == 1:
            if matvec is None:
                raise ValueError("matvec must be specified for mode=1")
            if M_matvec is not None:
                raise ValueError("M_matvec cannot be specified for mode=1")
            if Minv_matvec is not None:
                raise ValueError("Minv_matvec cannot be specified for mode=1")

            self.OP = matvec
            self.B = lambda x: x
            self.bmat = 'I'
        elif mode == 2:
            if matvec is None:
                raise ValueError("matvec must be specified for mode=2")
            if M_matvec is None:
                raise ValueError("M_matvec must be specified for mode=2")
            if Minv_matvec is None:
                raise ValueError("Minv_matvec must be specified for mode=2")

            # OP = M^-1 * A, applied in two steps by the iterate loop.
            self.OP = lambda x: Minv_matvec(matvec(x))
            self.OPa = Minv_matvec
            self.OPb = matvec
            self.B = M_matvec
            self.bmat = 'G'
        elif mode in (3, 4):
            if matvec is None:
                raise ValueError("matvec must be specified "
                                 "for mode in (3,4)")
            if Minv_matvec is None:
                raise ValueError("Minv_matvec must be specified "
                                 "for mode in (3,4)")

            self.matvec = matvec
            if tp in 'DF':  # complex type
                if mode == 3:
                    self.OPa = Minv_matvec
                else:
                    raise ValueError("mode=4 invalid for complex A")
            else:  # real type
                # See remark 3 in <s,d>neupd.f: a real solver handles a
                # complex shift through the real or imaginary part of OP.
                if mode == 3:
                    self.OPa = lambda x: np.real(Minv_matvec(x))
                else:
                    self.OPa = lambda x: np.imag(Minv_matvec(x))
            if M_matvec is None:
                self.B = lambda x: x
                self.bmat = 'I'
                self.OP = self.OPa
            else:
                self.B = M_matvec
                self.bmat = 'G'
                self.OP = lambda x: self.OPa(M_matvec(x))
        else:
            raise ValueError("mode=%i not implemented" % mode)

        if which not in _NEUPD_WHICH:
            raise ValueError("Parameter which must be one of %s"
                             % ' '.join(_NEUPD_WHICH))
        if k >= n - 1:
            raise ValueError("k must be less than rank(A)-1, k=%d" % k)

        _ArpackParams.__init__(self, n, k, tp, mode, sigma,
                               ncv, v0, maxiter, which, tol)

        if self.ncv > n or self.ncv <= k + 1:
            raise ValueError("ncv must be k+1<ncv<=n, ncv=%s" % self.ncv)

        # Workspace sizes as required by *naupd (see the ARPACK docs).
        self.workd = np.zeros(3 * n, self.tp)
        self.workl = np.zeros(3 * self.ncv * (self.ncv + 2), self.tp)

        ltr = _type_conv[self.tp]
        self._arpack_solver = _arpack.__dict__[ltr + 'naupd']
        self._arpack_extract = _arpack.__dict__[ltr + 'neupd']

        self.iterate_infodict = _NAUPD_ERRORS[ltr]
        self.extract_infodict = _NEUPD_ERRORS[ltr]

        self.ipntr = np.zeros(14, "int")

        if self.tp in 'FD':
            # Complex routines need an extra real workspace argument.
            self.rwork = np.zeros(self.ncv, self.tp.lower())
        else:
            self.rwork = None

    def iterate(self):
        """Perform one reverse-communication step of *naupd.

        The complex routines take an extra ``rwork`` argument, hence the
        two call signatures below.
        """
        if self.tp in 'fd':
            self.ido, self.resid, self.v, self.iparam, self.ipntr, self.info =\
                self._arpack_solver(self.ido, self.bmat, self.which, self.k,
                                    self.tol, self.resid, self.v, self.iparam,
                                    self.ipntr, self.workd, self.workl,
                                    self.info)
        else:
            self.ido, self.resid, self.v, self.iparam, self.ipntr, self.info =\
                self._arpack_solver(self.ido, self.bmat, self.which, self.k,
                                    self.tol, self.resid, self.v, self.iparam,
                                    self.ipntr, self.workd, self.workl,
                                    self.rwork, self.info)

        # ipntr holds 1-based Fortran offsets into workd.
        xslice = slice(self.ipntr[0] - 1, self.ipntr[0] - 1 + self.n)
        yslice = slice(self.ipntr[1] - 1, self.ipntr[1] - 1 + self.n)
        if self.ido == -1:
            # initialization
            self.workd[yslice] = self.OP(self.workd[xslice])
        elif self.ido == 1:
            # compute y = Op*x
            if self.mode in (1, 2):
                self.workd[yslice] = self.OP(self.workd[xslice])
            else:
                # Shift-invert: B*x was already computed and is at ipntr[2].
                Bxslice = slice(self.ipntr[2] - 1, self.ipntr[2] - 1 + self.n)
                self.workd[yslice] = self.OPa(self.workd[Bxslice])
        elif self.ido == 2:
            self.workd[yslice] = self.B(self.workd[xslice])
        elif self.ido == 3:
            raise ValueError("ARPACK requested user shifts. Assure ISHIFT==0")
        else:
            # Any other ido means the iteration has finished.
            self.converged = True

            if self.info == 0:
                pass
            elif self.info == 1:
                self._raise_no_convergence()
            else:
                raise ArpackError(self.info, infodict=self.iterate_infodict)

    def extract(self, return_eigenvectors):
        """Post-process with *neupd, returning (d, z) or just d.

        For real types, *neupd packs complex-conjugate eigenpairs as
        (real, imag) column pairs; this method rebuilds proper complex
        eigenvalues/eigenvectors and trims the result to k entries.
        """
        k, n = self.k, self.n

        ierr = 0
        howmny = 'A'  # return all eigenvectors
        sselect = np.zeros(self.ncv, 'int')  # unused
        sigmar = np.real(self.sigma)
        sigmai = np.imag(self.sigma)
        workev = np.zeros(3 * self.ncv, self.tp)

        if self.tp in 'fd':
            # Real solver: eigenvalues come back as separate (dr, di)
            # arrays of length k+1 (one extra slot for a split cc pair).
            dr = np.zeros(k + 1, self.tp)
            di = np.zeros(k + 1, self.tp)
            zr = np.zeros((n, k + 1), self.tp)
            dr, di, zr, ierr = \
                self._arpack_extract(
                    return_eigenvectors, howmny, sselect, sigmar, sigmai,
                    workev, self.bmat, self.which, k, self.tol, self.resid,
                    self.v, self.iparam, self.ipntr, self.workd, self.workl,
                    self.info)
            if ierr != 0:
                raise ArpackError(ierr, infodict=self.extract_infodict)
            nreturned = self.iparam[4]  # number of good eigenvalues returned

            # Build complex eigenvalues from real and imaginary parts
            d = dr + 1.0j * di

            # Arrange the eigenvectors: complex eigenvectors are stored as
            # real,imaginary in consecutive columns
            z = zr.astype(self.tp.upper())

            # The ARPACK nonsymmetric real and double interface (s,d)naupd
            # return eigenvalues and eigenvectors in real (float,double)
            # arrays.

            # Efficiency: this should check that return_eigenvectors == True
            #  before going through this construction.
            if sigmai == 0:
                i = 0
                while i <= k:
                    # check if complex
                    if abs(d[i].imag) != 0:
                        # this is a complex conjugate pair with eigenvalues
                        # in consecutive columns
                        if i < k:
                            z[:, i] = zr[:, i] + 1.0j * zr[:, i + 1]
                            z[:, i + 1] = z[:, i].conjugate()
                            i += 1
                        else:
                            # last eigenvalue is complex: the imaginary part of
                            # the eigenvector has not been returned
                            # this can only happen if nreturned > k, so we'll
                            # throw out this case.
                            nreturned -= 1
                    i += 1

            else:
                # real matrix, mode 3 or 4, imag(sigma) is nonzero:
                # see remark 3 in <s,d>neupd.f
                # Build complex eigenvalues from real and imaginary parts
                i = 0
                while i <= k:
                    if abs(d[i].imag) == 0:
                        # Recover the true eigenvalue via the Rayleigh
                        # quotient x^T A x (see remark 3 in <s,d>neupd.f).
                        d[i] = np.dot(zr[:, i], self.matvec(zr[:, i]))
                    else:
                        if i < k:
                            z[:, i] = zr[:, i] + 1.0j * zr[:, i + 1]
                            z[:, i + 1] = z[:, i].conjugate()
                            d[i] = ((np.dot(zr[:, i],
                                            self.matvec(zr[:, i]))
                                     + np.dot(zr[:, i + 1],
                                              self.matvec(zr[:, i + 1])))
                                    + 1j * (np.dot(zr[:, i],
                                                   self.matvec(zr[:, i + 1]))
                                            - np.dot(zr[:, i + 1],
                                                     self.matvec(zr[:, i]))))
                            d[i + 1] = d[i].conj()
                            i += 1
                        else:
                            # last eigenvalue is complex: the imaginary part of
                            # the eigenvector has not been returned
                            # this can only happen if nreturned > k, so we'll
                            # throw out this case.
                            nreturned -= 1
                    i += 1

            # Now we have k+1 possible eigenvalues and eigenvectors
            # Return the ones specified by the keyword "which"
            if nreturned <= k:
                # we got less or equal as many eigenvalues we wanted
                d = d[:nreturned]
                z = z[:, :nreturned]
            else:
                # we got one extra eigenvalue (likely a cc pair, but which?)
                # cut at approx precision for sorting
                rd = np.round(d, decimals=_ndigits[self.tp])
                if self.which in ['LR', 'SR']:
                    ind = np.argsort(rd.real)
                elif self.which in ['LI', 'SI']:
                    # for LI,SI ARPACK returns largest,smallest
                    # abs(imaginary) why?
                    ind = np.argsort(abs(rd.imag))
                else:
                    ind = np.argsort(abs(rd))
                if self.which in ['LR', 'LM', 'LI']:
                    d = d[ind[-k:]]
                    z = z[:, ind[-k:]]
                if self.which in ['SR', 'SM', 'SI']:
                    d = d[ind[:k]]
                    z = z[:, ind[:k]]

        else:
            # complex is so much simpler...
            d, z, ierr =\
                self._arpack_extract(
                    return_eigenvectors, howmny, sselect, self.sigma, workev,
                    self.bmat, self.which, k, self.tol, self.resid, self.v,
                    self.iparam, self.ipntr, self.workd, self.workl,
                    self.rwork, ierr)

            if ierr != 0:
                raise ArpackError(ierr, infodict=self.extract_infodict)

            k_ok = self.iparam[4]
            d = d[:k_ok]
            z = z[:, :k_ok]

        if return_eigenvectors:
            return d, z
        else:
            return d
def _aslinearoperator_with_dtype(m):
    """Wrap *m* as a LinearOperator guaranteed to carry a ``dtype``."""
    op = aslinearoperator(m)
    if not hasattr(op, 'dtype'):
        # Probe with a zero vector to discover the operator's result dtype.
        probe = np.zeros(op.shape[1])
        op.dtype = (op * probe).dtype
    return op
class SpLuInv(LinearOperator):
    """
    SpLuInv:
       helper class to repeatedly solve M*x=b
       using a sparse LU-decomposition of M
    """
    def __init__(self, M):
        self.M_lu = splu(M)
        LinearOperator.__init__(self, M.shape, self._matvec, dtype=M.dtype)
        self.isreal = not np.issubdtype(self.dtype, np.complexfloating)

    def _matvec(self, x):
        # splu.solve throws away the imaginary part of x when M is real,
        # so solve the real and imaginary systems separately in that case.
        if self.isreal and np.issubdtype(x.dtype, np.complexfloating):
            re = self.M_lu.solve(np.real(x))
            im = self.M_lu.solve(np.imag(x))
            return re + 1j * im
        return self.M_lu.solve(x)
class LuInv(LinearOperator):
    """
    LuInv:
       helper class to repeatedly solve M*x=b
       using a dense LU-decomposition of M
    """
    def __init__(self, M):
        # Factor once; every _matvec call is then a cheap triangular solve.
        self.M_lu = lu_factor(M)
        LinearOperator.__init__(self, M.shape, self._matvec, dtype=M.dtype)

    def _matvec(self, x):
        return lu_solve(self.M_lu, x)
class IterInv(LinearOperator):
    """
    IterInv:
       helper class to repeatedly solve M*x=b
       using an iterative method.
    """
    def __init__(self, M, ifunc=gmres, tol=0):
        if tol <= 0:
            # when tol=0, ARPACK uses machine tolerance as calculated
            # by LAPACK's _LAMCH function.  We should match this
            tol = np.finfo(M.dtype).eps
        self.M = M
        self.ifunc = ifunc
        self.tol = tol
        # Fall back to probing with a zero vector if M exposes no dtype.
        if hasattr(M, 'dtype'):
            dtype = M.dtype
        else:
            dtype = (M * np.zeros(M.shape[1])).dtype
        LinearOperator.__init__(self, M.shape, self._matvec, dtype=dtype)

    def _matvec(self, x):
        sol, info = self.ifunc(self.M, x, tol=self.tol)
        if info != 0:
            raise ValueError("Error in inverting M: function "
                             "%s did not converge (info = %i)."
                             % (self.ifunc.__name__, info))
        return sol
class IterOpInv(LinearOperator):
    """
    IterOpInv:
       helper class to repeatedly solve [A-sigma*M]*x = b
       using an iterative method
    """
    def __init__(self, A, M, sigma, ifunc=gmres, tol=0):
        if tol <= 0:
            # when tol=0, ARPACK uses machine tolerance as calculated
            # by LAPACK's _LAMCH function.  We should match this
            tol = np.finfo(A.dtype).eps
        self.A = A
        self.M = M
        self.sigma = sigma
        self.ifunc = ifunc
        self.tol = tol

        # Build the shifted operator OP = A - sigma*M (or A - sigma*I),
        # probing with a zero vector to determine its result dtype.
        probe = np.zeros(A.shape[1])
        op_matvec = self.mult_func_M_None if M is None else self.mult_func
        dtype = op_matvec(probe).dtype
        self.OP = LinearOperator(self.A.shape, op_matvec, dtype=dtype)
        LinearOperator.__init__(self, A.shape, self._matvec, dtype=dtype)

    def mult_func(self, x):
        # x -> (A - sigma*M) @ x
        return self.A.matvec(x) - self.sigma * self.M.matvec(x)

    def mult_func_M_None(self, x):
        # x -> (A - sigma*I) @ x
        return self.A.matvec(x) - self.sigma * x

    def _matvec(self, x):
        sol, info = self.ifunc(self.OP, x, tol=self.tol)
        if info != 0:
            raise ValueError("Error in inverting [A-sigma*M]: function "
                             "%s did not converge (info = %i)."
                             % (self.ifunc.__name__, info))
        return sol
def get_inv_matvec(M, symmetric=False, tol=0):
    """Return a callable computing M^-1 * x, choosing a solver by M's type."""
    if isdense(M):
        return LuInv(M).matvec
    if isspmatrix(M):
        if symmetric and isspmatrix_csr(M):
            # Transposing CSR yields CSC cheaply; valid because M is symmetric.
            M = M.T
        return SpLuInv(M).matvec
    # General linear operator: fall back to an iterative solver.
    return IterInv(M, tol=tol).matvec
def get_OPinv_matvec(A, M, sigma, symmetric=False, tol=0):
    """Return a callable computing [A - sigma*M]^-1 * x.

    Picks a dense LU, sparse LU, or iterative solver depending on the
    types of A and M; M=None is treated as the identity.
    """
    if sigma == 0:
        # Zero shift: this is just the plain inverse of A.
        return get_inv_matvec(A, symmetric=symmetric, tol=tol)

    if M is None:
        # M is the identity matrix.
        if isdense(A):
            if (np.imag(sigma) == 0
                    or np.issubdtype(A.dtype, np.complexfloating)):
                A = np.copy(A)
            else:
                # Real A with a complex shift: promote to complex first.
                A = A + 0j
            # Subtract sigma along the diagonal in place.
            A.flat[::A.shape[1] + 1] -= sigma
            return LuInv(A).matvec
        if isspmatrix(A):
            A = A - sigma * identity(A.shape[0])
            if symmetric and isspmatrix_csr(A):
                A = A.T
            return SpLuInv(A.tocsc()).matvec
        return IterOpInv(_aslinearoperator_with_dtype(A), M, sigma,
                         tol=tol).matvec

    # General M: if either operand is neither dense nor sparse, we must
    # go through an iterative solver on the combined operator.
    if ((not isdense(A) and not isspmatrix(A)) or
            (not isdense(M) and not isspmatrix(M))):
        return IterOpInv(_aslinearoperator_with_dtype(A),
                         _aslinearoperator_with_dtype(M), sigma,
                         tol=tol).matvec
    if isdense(A) or isdense(M):
        return LuInv(A - sigma * M).matvec
    OP = A - sigma * M
    if symmetric and isspmatrix_csr(OP):
        OP = OP.T
    return SpLuInv(OP.tocsc()).matvec
def _eigs(A, k=6, M=None, sigma=None, which='LM', v0=None, ncv=None,
          maxiter=None, tol=0, return_eigenvectors=True, Minv=None, OPinv=None,
          OPpart=None):
    """
    Find k eigenvalues and eigenvectors of the square matrix A.

    Solves ``A * x[i] = w[i] * x[i]``, the standard eigenvalue problem
    for w[i] eigenvalues with corresponding eigenvectors x[i].

    If M is specified, solves ``A * x[i] = w[i] * M * x[i]``, the
    generalized eigenvalue problem for w[i] eigenvalues
    with corresponding eigenvectors x[i]

    Parameters
    ----------
    A : An N x N matrix, array, sparse matrix, or LinearOperator representing
        the operation A * x, where A is a real or complex square matrix.

    k : integer
        The number of eigenvalues and eigenvectors desired.
        `k` must be smaller than N. It is not possible to compute all
        eigenvectors of a matrix.

    Returns
    -------
    w : array
        Array of k eigenvalues.

    v : array
        An array of `k` eigenvectors.
        ``v[:, i]`` is the eigenvector corresponding to the eigenvalue w[i].

    Other Parameters
    ----------------
    M : An N x N matrix, array, sparse matrix, or LinearOperator representing
        the operation M*x for the generalized eigenvalue problem
          ``A * x = w * M * x``
        M must represent a real symmetric matrix. For best results, M should
        be of the same type as A. Additionally:
         * If sigma==None, M is positive definite
         * If sigma is specified, M is positive semi-definite
        If sigma==None, eigs requires an operator to compute the solution
        of the linear equation `M * x = b`. This is done internally via a
        (sparse) LU decomposition for an explicit matrix M, or via an
        iterative solver for a general linear operator. Alternatively,
        the user can supply the matrix or operator Minv, which gives
        x = Minv * b = M^-1 * b

    sigma : real or complex
        Find eigenvalues near sigma using shift-invert mode. This requires
        an operator to compute the solution of the linear system
        `[A - sigma * M] * x = b`, where M is the identity matrix if
        unspecified. This is computed internally via a (sparse) LU
        decomposition for explicit matrices A & M, or via an iterative
        solver if either A or M is a general linear operator.
        Alternatively, the user can supply the matrix or operator OPinv,
        which gives x = OPinv * b = [A - sigma * M]^-1 * b.
        For a real matrix A, shift-invert can either be done in imaginary
        mode or real mode, specified by the parameter OPpart ('r' or 'i').
        Note that when sigma is specified, the keyword 'which' (below)
        refers to the shifted eigenvalues w'[i] where:
         * If A is real and OPpart == 'r' (default),
           w'[i] = 1/2 * [ 1/(w[i]-sigma) + 1/(w[i]-conj(sigma)) ]
         * If A is real and OPpart == 'i',
           w'[i] = 1/2i * [ 1/(w[i]-sigma) - 1/(w[i]-conj(sigma)) ]
         * If A is complex,
           w'[i] = 1/(w[i]-sigma)

    v0 : array
        Starting vector for iteration.

    ncv : integer
        The number of Lanczos vectors generated
        `ncv` must be greater than `k`; it is recommended that ``ncv > 2*k``.

    which : string ['LM' | 'SM' | 'LR' | 'SR' | 'LI' | 'SI']
        Which `k` eigenvectors and eigenvalues to find:
         - 'LM' : largest magnitude
         - 'SM' : smallest magnitude
         - 'LR' : largest real part
         - 'SR' : smallest real part
         - 'LI' : largest imaginary part
         - 'SI' : smallest imaginary part
        When sigma != None, 'which' refers to the shifted eigenvalues w'[i]
        (see discussion in 'sigma', above). ARPACK is generally better
        at finding large values than small values. If small eigenvalues are
        desired, consider using shift-invert mode for better performance.

    maxiter : integer
        Maximum number of Arnoldi update iterations allowed

    tol : float
        Relative accuracy for eigenvalues (stopping criterion)
        The default value of 0 implies machine precision.

    return_eigenvectors : boolean
        Return eigenvectors (True) in addition to eigenvalues

    Minv : N x N matrix, array, sparse matrix, or linear operator
        See notes in M, above.

    OPinv : N x N matrix, array, sparse matrix, or linear operator
        See notes in sigma, above.

    OPpart : 'r' or 'i'.
        See notes in sigma, above

    Raises
    ------
    ArpackNoConvergence
        When the requested convergence is not obtained.

        The currently converged eigenvalues and eigenvectors can be found
        as ``eigenvalues`` and ``eigenvectors`` attributes of the exception
        object.

    See Also
    --------
    eigsh : eigenvalues and eigenvectors for symmetric matrix A
    svds : singular value decomposition for a matrix A

    Examples
    --------
    Find 6 eigenvectors of the identity matrix:

    >>> from sklearn.utils.arpack import eigs
    >>> id = np.identity(13)
    >>> vals, vecs = eigs(id, k=6)
    >>> vals
    array([ 1.+0.j,  1.+0.j,  1.+0.j,  1.+0.j,  1.+0.j,  1.+0.j])
    >>> vecs.shape
    (13, 6)

    Notes
    -----
    This function is a wrapper to the ARPACK [1]_ SNEUPD, DNEUPD, CNEUPD,
    ZNEUPD, functions which use the Implicitly Restarted Arnoldi Method to
    find the eigenvalues and eigenvectors [2]_.

    References
    ----------
    .. [1] ARPACK Software, http://www.caam.rice.edu/software/ARPACK/
    .. [2] R. B. Lehoucq, D. C. Sorensen, and C. Yang,  ARPACK USERS GUIDE:
       Solution of Large Scale Eigenvalue Problems by Implicitly Restarted
       Arnoldi Methods. SIAM, Philadelphia, PA, 1998.
    """
    # --- validation ---
    if A.shape[0] != A.shape[1]:
        raise ValueError('expected square matrix (shape=%s)' % (A.shape,))
    if M is not None:
        if M.shape != A.shape:
            raise ValueError('wrong M dimensions %s, should be %s'
                             % (M.shape, A.shape))
        if np.dtype(M.dtype).char.lower() != np.dtype(A.dtype).char.lower():
            warnings.warn('M does not have the same type precision as A. '
                          'This may adversely affect ARPACK convergence')
    n = A.shape[0]

    if k <= 0 or k >= n:
        raise ValueError("k must be between 1 and rank(A)-1")

    # --- pick the ARPACK mode and build the operator callables ---
    if sigma is None:
        matvec = _aslinearoperator_with_dtype(A).matvec

        if OPinv is not None:
            raise ValueError("OPinv should not be specified "
                             "with sigma = None.")
        if OPpart is not None:
            raise ValueError("OPpart should not be specified with "
                             "sigma = None or complex A")

        if M is None:
            # standard eigenvalue problem
            mode = 1
            M_matvec = None
            Minv_matvec = None
            if Minv is not None:
                raise ValueError("Minv should not be "
                                 "specified with M = None.")
        else:
            # general eigenvalue problem
            mode = 2
            if Minv is None:
                Minv_matvec = get_inv_matvec(M, symmetric=True, tol=tol)
            else:
                Minv = _aslinearoperator_with_dtype(Minv)
                Minv_matvec = Minv.matvec
            M_matvec = _aslinearoperator_with_dtype(M).matvec
    else:
        # sigma is not None: shift-invert mode
        if np.issubdtype(A.dtype, np.complexfloating):
            # Complex A: OPpart has no meaning (only one shift-invert mode).
            if OPpart is not None:
                raise ValueError("OPpart should not be specified "
                                 "with sigma=None or complex A")
            mode = 3
        elif OPpart is None or OPpart.lower() == 'r':
            mode = 3
        elif OPpart.lower() == 'i':
            if np.imag(sigma) == 0:
                raise ValueError("OPpart cannot be 'i' if sigma is real")
            mode = 4
        else:
            raise ValueError("OPpart must be one of ('r','i')")

        matvec = _aslinearoperator_with_dtype(A).matvec
        if Minv is not None:
            raise ValueError("Minv should not be specified when sigma is")
        if OPinv is None:
            Minv_matvec = get_OPinv_matvec(A, M, sigma,
                                           symmetric=False, tol=tol)
        else:
            OPinv = _aslinearoperator_with_dtype(OPinv)
            Minv_matvec = OPinv.matvec
        if M is None:
            M_matvec = None
        else:
            M_matvec = _aslinearoperator_with_dtype(M).matvec

    params = _UnsymmetricArpackParams(n, k, A.dtype.char, matvec, mode,
                                      M_matvec, Minv_matvec, sigma,
                                      ncv, v0, maxiter, which, tol)

    # Drive the reverse-communication loop to convergence, then
    # post-process with *neupd to recover the eigenpairs.
    while not params.converged:
        params.iterate()

    return params.extract(return_eigenvectors)
def _eigsh(A, k=6, M=None, sigma=None, which='LM', v0=None, ncv=None,
           maxiter=None, tol=0, return_eigenvectors=True, Minv=None,
           OPinv=None, mode='normal'):
    """
    Find k eigenvalues and eigenvectors of the real symmetric square matrix
    or complex hermitian matrix A.

    Solves ``A * x[i] = w[i] * x[i]``, the standard eigenvalue problem for
    w[i] eigenvalues with corresponding eigenvectors x[i].

    If M is specified, solves ``A * x[i] = w[i] * M * x[i]``, the
    generalized eigenvalue problem for w[i] eigenvalues
    with corresponding eigenvectors x[i]

    Parameters
    ----------
    A : An N x N matrix, array, sparse matrix, or LinearOperator representing
        the operation A * x, where A is a real symmetric matrix
        For buckling mode (see below) A must additionally be positive-definite

    k : integer
        The number of eigenvalues and eigenvectors desired.
        `k` must be smaller than N. It is not possible to compute all
        eigenvectors of a matrix.

    Returns
    -------
    w : array
        Array of k eigenvalues

    v : array
        An array of k eigenvectors
        The v[i] is the eigenvector corresponding to the eigenvector w[i]

    Other Parameters
    ----------------
    M : An N x N matrix, array, sparse matrix, or linear operator representing
        the operation M * x for the generalized eigenvalue problem
          ``A * x = w * M * x``.
        M must represent a real, symmetric matrix. For best results, M should
        be of the same type as A. Additionally:
         * If sigma == None, M is symmetric positive definite
         * If sigma is specified, M is symmetric positive semi-definite
         * In buckling mode, M is symmetric indefinite.
        If sigma == None, eigsh requires an operator to compute the solution
        of the linear equation `M * x = b`. This is done internally via a
        (sparse) LU decomposition for an explicit matrix M, or via an
        iterative solver for a general linear operator. Alternatively,
        the user can supply the matrix or operator Minv, which gives
        x = Minv * b = M^-1 * b

    sigma : real
        Find eigenvalues near sigma using shift-invert mode. This requires
        an operator to compute the solution of the linear system
        `[A - sigma * M] x = b`, where M is the identity matrix if
        unspecified. This is computed internally via a (sparse) LU
        decomposition for explicit matrices A & M, or via an iterative
        solver if either A or M is a general linear operator.
        Alternatively, the user can supply the matrix or operator OPinv,
        which gives x = OPinv * b = [A - sigma * M]^-1 * b.
        Note that when sigma is specified, the keyword 'which' refers to
        the shifted eigenvalues w'[i] where:
         - if mode == 'normal',
           w'[i] = 1 / (w[i] - sigma)
         - if mode == 'cayley',
           w'[i] = (w[i] + sigma) / (w[i] - sigma)
         - if mode == 'buckling',
           w'[i] = w[i] / (w[i] - sigma)
        (see further discussion in 'mode' below)

    v0 : array
        Starting vector for iteration.

    ncv : integer
        The number of Lanczos vectors generated
        ncv must be greater than k and smaller than n;
        it is recommended that ncv > 2*k

    which : string ['LM' | 'SM' | 'LA' | 'SA' | 'BE']
        If A is a complex hermitian matrix, 'BE' is invalid.
        Which `k` eigenvectors and eigenvalues to find:
         - 'LM' : Largest (in magnitude) eigenvalues
         - 'SM' : Smallest (in magnitude) eigenvalues
         - 'LA' : Largest (algebraic) eigenvalues
         - 'SA' : Smallest (algebraic) eigenvalues
         - 'BE' : Half (k/2) from each end of the spectrum
                  When k is odd, return one more (k/2+1) from the high end
        When sigma != None, 'which' refers to the shifted eigenvalues w'[i]
        (see discussion in 'sigma', above).  ARPACK is generally better
        at finding large values than small values.  If small eigenvalues are
        desired, consider using shift-invert mode for better performance.

    maxiter : integer
        Maximum number of Arnoldi update iterations allowed

    tol : float
        Relative accuracy for eigenvalues (stopping criterion).
        The default value of 0 implies machine precision.

    Minv : N x N matrix, array, sparse matrix, or LinearOperator
        See notes in M, above

    OPinv : N x N matrix, array, sparse matrix, or LinearOperator
        See notes in sigma, above.

    return_eigenvectors : boolean
        Return eigenvectors (True) in addition to eigenvalues

    mode : string ['normal' | 'buckling' | 'cayley']
        Specify strategy to use for shift-invert mode.  This argument applies
        only for real-valued A and sigma != None.  For shift-invert mode,
        ARPACK internally solves the eigenvalue problem
        ``OP * x'[i] = w'[i] * B * x'[i]``
        and transforms the resulting Ritz vectors x'[i] and Ritz values w'[i]
        into the desired eigenvectors and eigenvalues of the problem
        ``A * x[i] = w[i] * M * x[i]``.
        The modes are as follows:
         - 'normal' : OP = [A - sigma * M]^-1 * M
                      B = M
                      w'[i] = 1 / (w[i] - sigma)
         - 'buckling' : OP = [A - sigma * M]^-1 * A
                        B = A
                        w'[i] = w[i] / (w[i] - sigma)
         - 'cayley' : OP = [A - sigma * M]^-1 * [A + sigma * M]
                      B = M
                      w'[i] = (w[i] + sigma) / (w[i] - sigma)
        The choice of mode will affect which eigenvalues are selected by
        the keyword 'which', and can also impact the stability of
        convergence (see [2] for a discussion)

    Raises
    ------
    ArpackNoConvergence
        When the requested convergence is not obtained.

        The currently converged eigenvalues and eigenvectors can be found
        as ``eigenvalues`` and ``eigenvectors`` attributes of the exception
        object.

    See Also
    --------
    eigs : eigenvalues and eigenvectors for a general (nonsymmetric) matrix A
    svds : singular value decomposition for a matrix A

    Notes
    -----
    This function is a wrapper to the ARPACK [1]_ SSEUPD and DSEUPD
    functions which use the Implicitly Restarted Lanczos Method to
    find the eigenvalues and eigenvectors [2]_.

    Examples
    --------
    >>> from sklearn.utils.arpack import eigsh
    >>> id = np.identity(13)
    >>> vals, vecs = eigsh(id, k=6)
    >>> vals # doctest: +SKIP
    array([ 1.+0.j,  1.+0.j,  1.+0.j,  1.+0.j,  1.+0.j,  1.+0.j])
    >>> print(vecs.shape)
    (13, 6)

    References
    ----------
    .. [1] ARPACK Software, http://www.caam.rice.edu/software/ARPACK/
    .. [2] R. B. Lehoucq, D. C. Sorensen, and C. Yang,  ARPACK USERS GUIDE:
       Solution of Large Scale Eigenvalue Problems by Implicitly Restarted
       Arnoldi Methods. SIAM, Philadelphia, PA, 1998.
    """
    # complex hermitian matrices should be solved with eigs
    if np.issubdtype(A.dtype, np.complexfloating):
        if mode != 'normal':
            raise ValueError("mode=%s cannot be used with "
                             "complex matrix A" % mode)
        if which == 'BE':
            raise ValueError("which='BE' cannot be used with complex matrix A")
        elif which == 'LA':
            which = 'LR'
        elif which == 'SA':
            which = 'SR'
        # NOTE(review): delegates to the public `eigs` wrapper, which is
        # presumably defined elsewhere in this module — confirm it exists.
        ret = eigs(A, k, M=M, sigma=sigma, which=which, v0=v0,
                   ncv=ncv, maxiter=maxiter, tol=tol,
                   return_eigenvectors=return_eigenvectors, Minv=Minv,
                   OPinv=OPinv)

        if return_eigenvectors:
            # Hermitian eigenvalues are real; discard the zero imaginary part.
            return ret[0].real, ret[1]
        else:
            return ret.real

    # --- validation ---
    if A.shape[0] != A.shape[1]:
        raise ValueError('expected square matrix (shape=%s)' % (A.shape,))
    if M is not None:
        if M.shape != A.shape:
            raise ValueError('wrong M dimensions %s, should be %s'
                             % (M.shape, A.shape))
        if np.dtype(M.dtype).char.lower() != np.dtype(A.dtype).char.lower():
            warnings.warn('M does not have the same type precision as A. '
                          'This may adversely affect ARPACK convergence')
    n = A.shape[0]

    if k <= 0 or k >= n:
        raise ValueError("k must be between 1 and rank(A)-1")

    # --- pick the ARPACK mode and build the operator callables ---
    if sigma is None:
        A = _aslinearoperator_with_dtype(A)
        matvec = A.matvec

        if OPinv is not None:
            raise ValueError("OPinv should not be specified "
                             "with sigma = None.")
        if M is None:
            # standard eigenvalue problem
            mode = 1
            M_matvec = None
            Minv_matvec = None
            if Minv is not None:
                raise ValueError("Minv should not be "
                                 "specified with M = None.")
        else:
            # general eigenvalue problem
            mode = 2
            if Minv is None:
                Minv_matvec = get_inv_matvec(M, symmetric=True, tol=tol)
            else:
                Minv = _aslinearoperator_with_dtype(Minv)
                Minv_matvec = Minv.matvec
            M_matvec = _aslinearoperator_with_dtype(M).matvec
    else:
        # sigma is not None: shift-invert mode
        if Minv is not None:
            raise ValueError("Minv should not be specified when sigma is")

        # Note: `mode` is rebound from the string keyword to the numeric
        # ARPACK mode code in each branch below.
        # normal mode
        if mode == 'normal':
            mode = 3
            matvec = None
            if OPinv is None:
                Minv_matvec = get_OPinv_matvec(A, M, sigma,
                                               symmetric=True, tol=tol)
            else:
                OPinv = _aslinearoperator_with_dtype(OPinv)
                Minv_matvec = OPinv.matvec
            if M is None:
                M_matvec = None
            else:
                M = _aslinearoperator_with_dtype(M)
                M_matvec = M.matvec

        # buckling mode
        elif mode == 'buckling':
            mode = 4
            if OPinv is None:
                Minv_matvec = get_OPinv_matvec(A, M, sigma,
                                               symmetric=True, tol=tol)
            else:
                Minv_matvec = _aslinearoperator_with_dtype(OPinv).matvec
            matvec = _aslinearoperator_with_dtype(A).matvec
            M_matvec = None

        # cayley-transform mode
        elif mode == 'cayley':
            mode = 5
            matvec = _aslinearoperator_with_dtype(A).matvec
            if OPinv is None:
                Minv_matvec = get_OPinv_matvec(A, M, sigma,
                                               symmetric=True, tol=tol)
            else:
                Minv_matvec = _aslinearoperator_with_dtype(OPinv).matvec
            if M is None:
                M_matvec = None
            else:
                M_matvec = _aslinearoperator_with_dtype(M).matvec

        # unrecognized mode
        else:
            raise ValueError("unrecognized mode '%s'" % mode)

    params = _SymmetricArpackParams(n, k, A.dtype.char, matvec, mode,
                                    M_matvec, Minv_matvec, sigma,
                                    ncv, v0, maxiter, which, tol)

    # Drive the reverse-communication loop to convergence, then
    # post-process with *seupd to recover the eigenpairs.
    while not params.converged:
        params.iterate()

    return params.extract(return_eigenvectors)
def _svds(A, k=6, ncv=None, tol=0):
    """Compute k singular values/vectors for a sparse matrix using ARPACK.

    Parameters
    ----------
    A : sparse matrix
        Array to compute the SVD on
    k : int, optional
        Number of singular values and vectors to compute.
    ncv : integer
        The number of Lanczos vectors generated
        ncv must be greater than k+1 and smaller than n;
        it is recommended that ncv > 2*k
    tol : float, optional
        Tolerance for singular values. Zero (default) means machine precision.

    Notes
    -----
    This is a naive implementation using an eigensolver on A.H * A or
    A * A.H, depending on which one is more efficient.
    """
    if not (isinstance(A, np.ndarray) or isspmatrix(A)):
        A = np.asarray(A)
    n, m = A.shape
    # Complex matrices need the general (non-symmetric) solver; real
    # matrices can use the symmetric Lanczos solver on the Gram matrix.
    if np.issubdtype(A.dtype, np.complexfloating):
        herm = lambda x: x.T.conjugate()
        eigensolver = eigs
    else:
        herm = lambda x: x.T
        eigensolver = eigsh
    # Work with the smaller of A^H A and A A^H: X is the "tall" factor so
    # that XH @ X is (min(n, m) x min(n, m)).
    if n > m:
        X = A
        XH = herm(A)
    else:
        XH = A
        X = herm(A)
    # Sparse matrices expose .dot; plain ndarrays may not (old NumPy),
    # hence the two matvec variants.
    if hasattr(XH, 'dot'):
        def matvec_XH_X(x):
            return XH.dot(X.dot(x))
    else:
        def matvec_XH_X(x):
            return np.dot(XH, np.dot(X, x))
    XH_X = LinearOperator(matvec=matvec_XH_X, dtype=X.dtype,
                          shape=(X.shape[1], X.shape[1]))
    # Ignore deprecation warnings here: dot on matrices is deprecated,
    # but this code is a backport anyhow
    with warnings.catch_warnings():
        warnings.simplefilter('ignore', DeprecationWarning)
        # tol is squared because eigenvalues of A^H A are squared
        # singular values of A.
        eigvals, eigvec = eigensolver(XH_X, k=k, tol=tol ** 2)
    # Singular values are the square roots of the Gram-matrix eigenvalues.
    # NOTE(review): assumes the returned eigenvalues are non-negative;
    # tiny negative round-off values would produce NaNs here — confirm.
    s = np.sqrt(eigvals)
    # Recover the "other" set of singular vectors from the computed one:
    # u = X v / s (or vh = (X u / s)^H in the transposed case).
    if n > m:
        v = eigvec
        if hasattr(X, 'dot'):
            u = X.dot(v) / s
        else:
            u = np.dot(X, v) / s
        vh = herm(v)
    else:
        u = eigvec
        if hasattr(X, 'dot'):
            vh = herm(X.dot(u) / s)
        else:
            vh = herm(np.dot(X, u) / s)
    return u, s, vh
# check if backport is actually needed:
if scipy.version.version >= LooseVersion('0.10'):
    # Recent SciPy already provides these solvers; re-export them directly.
    from scipy.sparse.linalg import eigs, eigsh, svds
else:
    # Older SciPy: fall back to the backported implementations above.
    eigs, eigsh, svds = _eigs, _eigsh, _svds
| bsd-3-clause |
ky822/nyu_ml_lectures | notebooks/figures/plot_interactive_tree.py | 20 | 2317 | import numpy as np
import matplotlib.pyplot as plt
from sklearn.datasets import make_blobs
from sklearn.tree import DecisionTreeClassifier
from sklearn.externals.six import StringIO # doctest: +SKIP
from sklearn.tree import export_graphviz
from scipy.misc import imread
from scipy import ndimage
import re
# Toy two-class 2-D dataset shared by the plotting helpers below.
X, y = make_blobs(centers=[[0, 0], [1, 1]], random_state=61526, n_samples=50)
def tree_image(tree, fout=None):
    """Render a fitted decision tree to a PNG and return it as an image array.

    Parameters
    ----------
    tree : fitted DecisionTreeClassifier
    fout : str, optional
        Output path for the PNG; defaults to "tmp.png" in the cwd.

    Returns a (mostly white) placeholder array when pydot is unavailable.
    """
    try:
        import pydot
    except ImportError:
        # make a hacky white plot
        x = np.ones((10, 10))
        x[0, 0] = 0
        return x
    dot_data = StringIO()
    export_graphviz(tree, out_file=dot_data)
    # Strip the gini/samples annotations from the dot source so the
    # rendered nodes stay compact.
    data = re.sub(r"gini = 0\.[0-9]+\\n", "", dot_data.getvalue())
    data = re.sub(r"samples = [0-9]+\\n", "", data)
    data = re.sub(r"\\nsamples = [0-9]+", "", data)
    # NOTE(review): pydot >= 1.2 returns a *list* of graphs from
    # graph_from_dot_data — confirm the pinned pydot version, otherwise
    # write_png below would fail.
    graph = pydot.graph_from_dot_data(data)
    if fout is None:
        fout = "tmp.png"
    graph.write_png(fout)
    # NOTE(review): scipy.misc.imread was removed in SciPy 1.2 — verify
    # the environment, or switch to matplotlib/imageio readers.
    return imread(fout)
def plot_tree(max_depth=1):
    """Plot the decision surface of a depth-limited tree next to its diagram.

    Uses the module-level blobs dataset (X, y). With max_depth == 0 only the
    raw data is shown.
    """
    fig, ax = plt.subplots(1, 2, figsize=(15, 7))
    # Mesh step for the decision-surface grid.
    h = 0.02
    x_min, x_max = X[:, 0].min() - .5, X[:, 0].max() + .5
    y_min, y_max = X[:, 1].min() - .5, X[:, 1].max() + .5
    xx, yy = np.meshgrid(np.arange(x_min, x_max, h), np.arange(y_min, y_max, h))
    if max_depth != 0:
        tree = DecisionTreeClassifier(max_depth=max_depth, random_state=1).fit(X, y)
        # Probability of class 1 over the grid, for the contour plot.
        Z = tree.predict_proba(np.c_[xx.ravel(), yy.ravel()])[:, 1]
        Z = Z.reshape(xx.shape)
        # Leaf index per grid cell; a Laplacian edge filter over it marks
        # the boundaries between leaf regions.
        faces = tree.tree_.apply(np.c_[xx.ravel(), yy.ravel()].astype(np.float32))
        faces = faces.reshape(xx.shape)
        border = ndimage.laplace(faces) != 0
        ax[0].contourf(xx, yy, Z, alpha=.4)
        ax[0].scatter(xx[border], yy[border], marker='.', s=1)
        ax[0].set_title("max_depth = %d" % max_depth)
        ax[1].imshow(tree_image(tree))
        ax[1].axis("off")
    else:
        ax[0].set_title("data set")
        ax[1].set_visible(False)
    ax[0].scatter(X[:, 0], X[:, 1], c=np.array(['b', 'r'])[y], s=60)
    ax[0].set_xlim(x_min, x_max)
    ax[0].set_ylim(y_min, y_max)
    ax[0].set_xticks(())
    ax[0].set_yticks(())
def plot_tree_interactive():
    """Return an IPython widget with a depth slider (0-8) driving plot_tree."""
    # NOTE(review): IPython.html.widgets is the pre-IPython-4 location of the
    # widgets API; newer environments use ipywidgets instead — confirm.
    from IPython.html.widgets import interactive, IntSlider
    slider = IntSlider(min=0, max=8, step=1, value=0)
    return interactive(plot_tree, max_depth=slider)
| cc0-1.0 |
anshulgupta0803/music-genre-classification | code/CNN.py | 1 | 7256 |
# coding: utf-8
# In[ ]:
from __future__ import print_function
import tensorflow as tf
import numpy as np
from matplotlib import pyplot as plt
import os
from tflearn.data_utils import shuffle
class CNN(object):
    """Two-convolution-layer network (TensorFlow 1.x graph API) for 10-way
    music-genre classification from spectrogram images.

    The constructor builds the whole graph; load_data() fills the train/test
    arrays from PNG spectrograms on disk.
    """

    def __init__(self, patch_size, num_filters_fist_layer, num_filters_second_layer,
                 size_fully_connected_layer, image_x=400, image_y=400, image_channels=4, num_classes=10):
        # Datasets are populated later by load_data().
        self.X_train = None
        self.Y_train = None
        self.X_test = None
        self.Y_test = None
        self.image_x = image_x
        self.image_y = image_y
        self.image_channels = image_channels
        image_size = self.image_x * self.image_y
        self.num_classes = num_classes
        # Graph inputs: image batch, one-hot labels, dropout keep-probability.
        self.x = tf.placeholder(tf.float32, shape=[None, image_x, image_y, image_channels])
        self.y_ = tf.placeholder(tf.float32, shape=[None, num_classes])
        self.keep_prob = tf.placeholder(tf.float32)

        def weight_variable(shape, nameVar):
            # Small truncated-normal init to break symmetry.
            initial = tf.truncated_normal(shape, stddev=0.1)
            return tf.Variable(initial, name=nameVar)

        def bias_variable(shape, nameVar):
            # Slightly positive bias to avoid "dead" ReLUs at start.
            initial = tf.constant(0.1, shape=shape)
            return tf.Variable(initial, name=nameVar)

        def conv2d(x, W):
            # Stride-1 convolution, zero-padded so spatial size is preserved.
            return tf.nn.conv2d(x, W, strides=[1, 1, 1, 1], padding='SAME')

        def max_pool_2x2(x):
            # 2x2 max-pool halves each spatial dimension.
            return tf.nn.max_pool(x, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='VALID')

        # First Layer (Convolution and Max Pool)
        self.W_conv1 = weight_variable([patch_size, patch_size, image_channels, num_filters_fist_layer], "filter_layer1")
        b_conv1 = bias_variable([num_filters_fist_layer], "bias_layer1")
        x_image = tf.reshape(self.x, [-1, image_x, image_y, image_channels])
        # Apply Convolution and Max Pool
        h_conv1 = tf.nn.relu(conv2d(x_image, self.W_conv1) + b_conv1)
        print(h_conv1.get_shape())
        h_pool1 = max_pool_2x2(h_conv1)
        print(h_pool1.get_shape())
        # Second Layer (Convolution and Max Pool)
        self.W_conv2 = weight_variable([patch_size, patch_size, num_filters_fist_layer, num_filters_second_layer], "filter_layer2")
        b_conv2 = bias_variable([num_filters_second_layer], "bias_layer2")
        # Apply Convolution and Max Pool
        h_conv2 = tf.nn.relu(conv2d(h_pool1, self.W_conv2) + b_conv2)
        print(h_conv2.get_shape())
        h_pool2 = max_pool_2x2(h_conv2)
        print(h_pool2.get_shape())
        # Fully Connected Layer; two 2x2 pools shrink each side by 4.
        W_fc1 = weight_variable([int(image_x / 4) * int(image_y / 4) * num_filters_second_layer, size_fully_connected_layer], "W_fc1")
        b_fc1 = bias_variable([size_fully_connected_layer], "b_fc1")
        h_pool2_flat = tf.reshape(h_pool2, [-1, int(image_x / 4) * int(image_y / 4) * num_filters_second_layer])
        h_fc1 = tf.nn.relu(tf.matmul(h_pool2_flat, W_fc1) + b_fc1)
        print(h_fc1.get_shape())# the shape of h_fc1 is [-1, size_fully_connected_layer]
        # Add dropout
        h_fc1_drop = tf.nn.dropout(h_fc1, self.keep_prob)
        # Add the last fully connected layer for output
        W_fc2 = weight_variable([size_fully_connected_layer, num_classes], "W_fc2")
        b_fc2 = bias_variable([num_classes], "b_fc2")
        # L2 regularisation over the output layer's weights and biases.
        l2_loss = 0.0
        l2_loss += tf.nn.l2_loss(W_fc2)
        l2_loss += tf.nn.l2_loss(b_fc2)
        self.y = tf.nn.softmax(tf.matmul(h_fc1_drop, W_fc2) + b_fc2)
        # NOTE(review): tf.log(softmax) can hit log(0); the commented-out
        # softmax_cross_entropy_with_logits variant below is numerically safer.
        self.cross_entropy = tf.reduce_mean(-tf.reduce_sum(self.y_ * tf.log(self.y), reduction_indices=[1]))
        # self.y = tf.matmul(h_fc1_drop, W_fc2) + b_fc2
        # self.cross_entropy = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels=self.y_, logits=self.y))
        # self.train = tf.train.AdamOptimizer(1e-4).minimize(self.cross_entropy)
        self.train = tf.train.GradientDescentOptimizer(1e-4).minimize(self.cross_entropy)
        # NOTE(review): the 1e-6 * l2_loss term is added to the *accuracy*
        # indicator, not to the training loss — the optimizer never sees it
        # and the reported accuracy is slightly inflated; presumably it was
        # meant to be added to cross_entropy — confirm.
        self.correct_prediction = tf.cast(tf.equal(tf.argmax(self.y, 1), tf.argmax(self.y_, 1)), tf.float32) + 1e-6 * l2_loss
        self.accuracy = tf.reduce_mean(self.correct_prediction)

    def load_data(self, dir='/home/anshul/powerSpectrograms200/'):
        """Load 800 train / 200 test spectrogram PNGs (80/20 split per genre)
        and one-hot-encode the labels.

        NOTE(review): parameter `dir` shadows the built-in of the same name.
        """
        self.X_train = np.zeros((800, self.image_x, self.image_y, self.image_channels))
        self.Y_train = np.zeros((800,), dtype=int)
        self.X_test = np.zeros((200, self.image_x, self.image_y, self.image_channels))
        self.Y_test = np.zeros((200,), dtype=int)
        genres = {'blues': 0,
                  'classical': 1,
                  'country': 2,
                  'disco': 3,
                  'hiphop': 4,
                  'jazz': 5,
                  'metal': 6,
                  'pop': 7,
                  'reggae': 8,
                  'rock': 9}
        indexTrain = 0
        indexTest = 0
        for genre in genres.keys():
            for count in range(0, 100):
                # Files are named like blues/blues.00042.au.png
                path = dir + genre + '/' + genre + '.%0.5d' % count + '.au.png'
                if os.path.isfile(path):
                    # First 80 clips per genre go to train, the rest to test.
                    if count < 80:
                        self.X_train[indexTrain] = plt.imread(path)
                        self.Y_train[indexTrain] = genres[genre]
                        indexTrain += 1
                    else:
                        self.X_test[indexTest] = plt.imread(path)
                        self.Y_test[indexTest] = genres[genre]
                        indexTest += 1
        # Convert integer labels to one-hot rows.
        Y_train_onehot = np.zeros((self.Y_train.shape[0], self.num_classes))
        Y_train_onehot[np.arange(self.Y_train.shape[0]), self.Y_train] = 1
        Y_test_onehot = np.zeros((self.Y_test.shape[0], self.num_classes))
        Y_test_onehot[np.arange(self.Y_test.shape[0]), self.Y_test] = 1
        self.Y_train = Y_train_onehot
        self.Y_test = Y_test_onehot
        # Shuffle so genre blocks are mixed across mini-batches.
        self.X_train, self.Y_train = shuffle(self.X_train, self.Y_train)
# Build the graph for 200x200 RGBA spectrograms.
cnn = CNN(image_x=200,
          image_y=200,
          image_channels=4,
          num_classes=10,
          num_filters_fist_layer=80,
          num_filters_second_layer=80,
          patch_size=5,
          size_fully_connected_layer=40)
# In[ ]:
cnn.load_data()
# In[ ]:
with tf.Session() as session:
    init = tf.global_variables_initializer()
    session.run(init)
    train_batches = 16
    test_batches = 10
    n_train_images = 800
    n_test_images = 200
    train_step_size = int(n_train_images / train_batches)
    test_step_size = int(n_test_images / test_batches)
    # One pass over the training set, one optimizer step per mini-batch.
    # NOTE(review): keep_prob is 1.0 even during training, so dropout is
    # effectively disabled — confirm whether that is intended.
    for i in range(0, n_train_images, train_step_size):
        print("Batch", i, "to", i + train_step_size - 1)
        X_train = cnn.X_train[i : i + train_step_size]
        Y_train = cnn.Y_train[i : i + train_step_size]
        feed_dict = {cnn.x : X_train, cnn.y_ : Y_train, cnn.keep_prob : 1.0}
        session.run(cnn.train, feed_dict)
        # Accuracy on the batch just trained on (not a held-out estimate).
        trainAccuracy = session.run(cnn.accuracy, feed_dict)
        print("Train Accuracy:", trainAccuracy)
    # Evaluate on the full test set in a single feed.
    feed_dict = {cnn.x : cnn.X_test, cnn.y_ : cnn.Y_test, cnn.keep_prob : 1.0}
    testAccuracy = session.run(cnn.accuracy, feed_dict)
    print("Test Accuracy: ", testAccuracy)
# In[ ]:
| apache-2.0 |
davidwaroquiers/pymatgen | pymatgen/analysis/diffraction/tests/test_tem.py | 5 | 11403 | # coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
"""
Unit tests for TEM calculator.
"""
import unittest
import numpy as np
import pandas as pd
import plotly.graph_objs as go
from pymatgen.analysis.diffraction.tem import TEMCalculator
from pymatgen.core.lattice import Lattice
from pymatgen.core.structure import Structure
from pymatgen.util.testing import PymatgenTest
__author__ = "Frank Wan, Jason Liang"
__copyright__ = "Copyright 2019, The Materials Project"
__version__ = "0.201"
__maintainer__ = "Jason Liang"
__email__ = "fwan@berkeley.edu, yhljason@berkeley.edu"
__date__ = "2/20/20"
class TEMCalculatorTest(PymatgenTest):
    """Unit tests for TEMCalculator: wavelength, reciprocal-point generation,
    spacings, structure factors, intensities and plotting helpers.

    Fix over the previous revision: the local variable ``hex`` in
    test_interplanar_angle shadowed the built-in ``hex`` and is renamed.
    """

    def test_wavelength_rel(self):
        # Tests that the relativistic wavelength formula (for 200kv electron beam) is correct
        c = TEMCalculator()
        self.assertAlmostEqual(c.wavelength_rel(), 0.0251, places=3)

    def test_generate_points(self):
        # Tests that 3d points are properly generated
        c = TEMCalculator()
        actual = c.generate_points(-1, 1)
        expected = np.array(
            [
                [-1, -1, -1],
                [-1, -1, 0],
                [-1, -1, 1],
                [0, -1, -1],
                [0, -1, 0],
                [0, -1, 1],
                [1, -1, -1],
                [1, -1, 0],
                [1, -1, 1],
                [-1, 0, -1],
                [-1, 0, 0],
                [-1, 0, 1],
                [0, 0, -1],
                [0, 0, 0],
                [0, 0, 1],
                [1, 0, -1],
                [1, 0, 0],
                [1, 0, 1],
                [-1, 1, -1],
                [-1, 1, 0],
                [-1, 1, 1],
                [0, 1, -1],
                [0, 1, 0],
                [0, 1, 1],
                [1, 1, -1],
                [1, 1, 0],
                [1, 1, 1],
            ]
        )
        self.assertArrayEqual(expected, actual)

    def test_zone_axis_filter(self):
        # Tests that the appropriate Laue-Zoned points are returned
        c = TEMCalculator()
        empty_points = np.asarray([])
        self.assertEqual(c.zone_axis_filter(empty_points), [])
        points = np.asarray([[-1, -1, -1]])
        self.assertEqual(c.zone_axis_filter(points), [])
        laue_1 = np.array([[0, 0, 1], [0, 1, 0], [1, 0, 0], [0, 0, -1]])
        self.assertEqual(c.zone_axis_filter(laue_1, 1), [(0, 0, 1)])

    def test_get_interplanar_spacings(self):
        # Tests that the appropriate interplacing spacing is returned
        c = TEMCalculator()
        point = [(3, 9, 0)]
        latt = Lattice.cubic(4.209)
        cubic = Structure(latt, ["Cs", "Cl"], [[0, 0, 0], [0.5, 0.5, 0.5]])
        tet = self.get_structure("Li10GeP2S12")
        hexa = self.get_structure("Graphite")
        ortho = self.get_structure("K2O2")
        mono = self.get_structure("Li3V2(PO4)3")
        spacings_cubic = c.get_interplanar_spacings(cubic, point)
        spacings_tet = c.get_interplanar_spacings(tet, point)
        spacings_hexa = c.get_interplanar_spacings(hexa, point)
        spacings_ortho = c.get_interplanar_spacings(ortho, point)
        spacings_mono = c.get_interplanar_spacings(mono, point)
        for p in point:
            self.assertAlmostEqual(spacings_cubic[p], 0.4436675557216236)
            self.assertAlmostEqual(spacings_tet[p], 0.9164354445646701)
            self.assertAlmostEqual(spacings_hexa[p], 0.19775826179547752)
            self.assertAlmostEqual(spacings_ortho[p], 0.5072617738916)
            self.assertAlmostEqual(spacings_mono[p], 0.84450786041677972)

    def test_bragg_angles(self):
        # Tests that the appropriate bragg angle is returned. Testing formula with values of x-ray diffraction in
        # materials project.
        c = TEMCalculator()
        latt = Lattice.cubic(4.209)
        cubic = Structure(latt, ["Cs", "Cl"], [[0, 0, 0], [0.5, 0.5, 0.5]])
        point = [(1, 1, 0)]
        spacings = c.get_interplanar_spacings(cubic, point)
        bragg_angles_val = np.arcsin(1.5406 / (2 * spacings[point[0]]))
        self.assertAlmostEqual(bragg_angles_val, 0.262, places=3)

    def test_get_s2(self):
        # Tests that the appropriate s2 factor is returned.
        c = TEMCalculator()
        latt = Lattice.cubic(4.209)
        cubic = Structure(latt, ["Cs", "Cl"], [[0, 0, 0], [0.5, 0.5, 0.5]])
        point = [(-10, 3, 0)]
        spacings = c.get_interplanar_spacings(cubic, point)
        angles = c.bragg_angles(spacings)
        s2 = c.get_s2(angles)
        for p in s2:
            self.assertAlmostEqual(s2[p], 1.5381852947115047)

    def test_x_ray_factors(self):
        c = TEMCalculator()
        latt = Lattice.cubic(4.209)
        cubic = Structure(latt, ["Cs", "Cl"], [[0, 0, 0], [0.5, 0.5, 0.5]])
        point = [(-10, 3, 0)]
        spacings = c.get_interplanar_spacings(cubic, point)
        angles = c.bragg_angles(spacings)
        x_ray = c.x_ray_factors(cubic, angles)
        self.assertAlmostEqual(x_ray["Cs"][(-10, 3, 0)], 14.42250869579648)
        self.assertAlmostEqual(x_ray["Cl"][(-10, 3, 0)], 2.7804915737999103)

    def test_electron_scattering_factors(self):
        # Test the electron atomic scattering factor, values approximate with
        # international table of crystallography volume C. Rounding error when converting hkl to sin(theta)/lambda.
        # Error increases as sin(theta)/lambda is smaller.
        c = TEMCalculator()
        latt = Lattice.cubic(4.209)
        cubic = Structure(latt, ["Cs", "Cl"], [[0, 0, 0], [0.5, 0.5, 0.5]])
        nacl = Structure.from_spacegroup("Fm-3m", Lattice.cubic(5.692), ["Na", "Cl"], [[0, 0, 0], [0.5, 0.5, 0.5]])
        point = [(2, 1, 3)]
        point_nacl = [(4, 2, 0)]
        spacings = c.get_interplanar_spacings(cubic, point)
        spacings_nacl = c.get_interplanar_spacings(nacl, point_nacl)
        angles = c.bragg_angles(spacings)
        angles_nacl = c.bragg_angles(spacings_nacl)
        elscatt = c.electron_scattering_factors(cubic, angles)
        elscatt_nacl = c.electron_scattering_factors(nacl, angles_nacl)
        self.assertAlmostEqual(elscatt["Cs"][(2, 1, 3)], 2.890, places=1)
        self.assertAlmostEqual(elscatt["Cl"][(2, 1, 3)], 1.138, places=1)
        self.assertAlmostEqual(elscatt_nacl["Na"][(4, 2, 0)], 0.852, places=1)
        self.assertAlmostEqual(elscatt_nacl["Cl"][(4, 2, 0)], 1.372, places=1)

    def test_cell_scattering_factors(self):
        # Test that fcc structure gives 0 intensity for mixed even, odd hkl.
        c = TEMCalculator()
        nacl = Structure.from_spacegroup("Fm-3m", Lattice.cubic(5.692), ["Na", "Cl"], [[0, 0, 0], [0.5, 0.5, 0.5]])
        point = [(2, 1, 0)]
        spacings = c.get_interplanar_spacings(nacl, point)
        angles = c.bragg_angles(spacings)
        cellscatt = c.cell_scattering_factors(nacl, angles)
        self.assertAlmostEqual(cellscatt[(2, 1, 0)], 0)

    def test_cell_intensity(self):
        # Test that bcc structure gives lower intensity for h + k + l != even.
        c = TEMCalculator()
        latt = Lattice.cubic(4.209)
        cubic = Structure(latt, ["Cs", "Cl"], [[0, 0, 0], [0.5, 0.5, 0.5]])
        point = [(2, 1, 0)]
        point2 = [(2, 2, 0)]
        spacings = c.get_interplanar_spacings(cubic, point)
        spacings2 = c.get_interplanar_spacings(cubic, point2)
        angles = c.bragg_angles(spacings)
        angles2 = c.bragg_angles(spacings2)
        cellint = c.cell_intensity(cubic, angles)
        cellint2 = c.cell_intensity(cubic, angles2)
        self.assertGreater(cellint2[(2, 2, 0)], cellint[(2, 1, 0)])

    def test_normalized_cell_intensity(self):
        # Test that the method correctly normalizes a value.
        c = TEMCalculator()
        latt = Lattice.cubic(4.209)
        cubic = Structure(latt, ["Cs", "Cl"], [[0, 0, 0], [0.5, 0.5, 0.5]])
        point = [(2, 0, 0)]
        spacings = c.get_interplanar_spacings(cubic, point)
        angles = c.bragg_angles(spacings)
        cellint = c.normalized_cell_intensity(cubic, angles)
        self.assertAlmostEqual(cellint[(2, 0, 0)], 1)

    def test_is_parallel(self):
        c = TEMCalculator()
        structure = self.get_structure("Si")
        self.assertTrue(c.is_parallel(structure, (1, 0, 0), (3, 0, 0)))
        self.assertFalse(c.is_parallel(structure, (1, 0, 0), (3, 0, 1)))

    def test_get_first_point(self):
        c = TEMCalculator()
        latt = Lattice.cubic(4.209)
        points = c.generate_points(-2, 2)
        cubic = Structure(latt, ["Cs", "Cl"], [[0, 0, 0], [0.5, 0.5, 0.5]])
        first_pt = c.get_first_point(cubic, points)
        self.assertTrue(4.209 in first_pt.values())

    def test_interplanar_angle(self):
        # test interplanar angles. Reference values from KW Andrews,
        # Interpretation of Electron Diffraction pp70-90.
        c = TEMCalculator()
        latt = Lattice.cubic(4.209)
        cubic = Structure(latt, ["Cs", "Cl"], [[0, 0, 0], [0.5, 0.5, 0.5]])
        phi = c.get_interplanar_angle(cubic, (0, 0, -1), (0, -1, 0))
        self.assertAlmostEqual(90, phi, places=1)
        tet = self.get_structure("Li10GeP2S12")
        phi = c.get_interplanar_angle(tet, (0, 0, 1), (1, 0, 3))
        self.assertAlmostEqual(25.796, phi, places=1)
        latt = Lattice.hexagonal(2, 4)
        # Renamed from `hex`, which shadowed the built-in of the same name.
        hexagonal = Structure(latt, ["Ab"], [[0, 0, 0]])
        phi = c.get_interplanar_angle(hexagonal, (0, 0, 1), (1, 0, 6))
        self.assertAlmostEqual(21.052, phi, places=1)

    def test_get_plot_coeffs(self):
        # Test if x * p1 + y * p2 yields p3.
        c = TEMCalculator()
        coeffs = c.get_plot_coeffs((1, 1, 0), (1, -1, 0), (2, 0, 0))
        self.assertArrayAlmostEqual(np.array([1.0, 1.0]), coeffs)

    def test_get_positions(self):
        c = TEMCalculator()
        points = c.generate_points(-2, 2)
        structure = self.get_structure("Si")
        positions = c.get_positions(structure, points)
        self.assertArrayEqual([0, 0], positions[(0, 0, 0)])
        # Test silicon diffraction data spot rough positions:
        # see https://www.doitpoms.ac.uk/tlplib/diffraction-patterns/printall.php
        self.assertArrayAlmostEqual([1, 0], positions[(-1, 0, 0)], 0)

    def test_TEM_dots(self):
        # All dependencies in TEM_dots method are tested. Only make sure each object created is
        # the class desired.
        c = TEMCalculator()
        points = c.generate_points(-2, 2)
        structure = self.get_structure("Si")
        dots = c.tem_dots(structure, points)
        # Generator expression instead of a throwaway list inside all().
        self.assertTrue(all(isinstance(x, tuple) for x in dots))

    def test_get_pattern(self):
        # All dependencies in get_pattern method are tested.
        # Only make sure result is a pd dataframe.
        c = TEMCalculator()
        structure = self.get_structure("Si")
        self.assertTrue(isinstance(c.get_pattern(structure), pd.DataFrame))

    def test_get_plot_2d(self):
        c = TEMCalculator()
        structure = self.get_structure("Si")
        self.assertTrue(isinstance(c.get_plot_2d(structure), go.Figure))

    def test_get_plot_2d_concise(self):
        c = TEMCalculator()
        structure = self.get_structure("Si")
        fig = c.get_plot_2d_concise(structure)
        width = fig.layout.width
        height = fig.layout.height
        self.assertTrue(width == 121 and height == 121)
if __name__ == "__main__":
    # Allow running this test module directly.
    unittest.main()
| mit |
HarllanAndrye/nilmtk | nilmtk/feature_detectors/cluster.py | 6 | 5343 | from __future__ import print_function, division
import numpy as np
import pandas as pd
# Fix the seed for repeatability of experiments
SEED = 42
np.random.seed(SEED)
def cluster(X, max_num_clusters=3, exact_num_clusters=None):
    '''Applies clustering on reduced data,
    i.e. data where power is greater than threshold.

    Parameters
    ----------
    X : pd.Series or single-column pd.DataFrame
    max_num_clusters : int
    exact_num_clusters : int, optional
        If given, force exactly this many clusters instead of searching.

    Returns
    -------
    centroids : ndarray of int32s
        Power in different states of an appliance, sorted
    '''
    # Find where power consumption is greater than 10
    data = _transform_data(X)
    # Find clusters
    centroids = _apply_clustering(data, max_num_clusters, exact_num_clusters)
    centroids = np.append(centroids, 0)  # add 'off' state
    # Integer watts are precise enough for appliance states.
    centroids = np.round(centroids).astype(np.int32)
    centroids = np.unique(centroids)  # np.unique also sorts
    # TODO: Merge similar clusters
    return centroids
def _transform_data(data):
'''Subsamples if needed and converts to column vector (which is what
scikit-learn requires).
Parameters
----------
data : pd.Series or single column pd.DataFrame
Returns
-------
data_above_thresh : ndarray
column vector
'''
MAX_NUMBER_OF_SAMPLES = 2000
MIN_NUMBER_OF_SAMPLES = 20
DATA_THRESHOLD = 10
data_above_thresh = data[data > DATA_THRESHOLD].dropna().values
n_samples = len(data_above_thresh)
if n_samples < MIN_NUMBER_OF_SAMPLES:
return np.zeros((MAX_NUMBER_OF_SAMPLES, 1))
elif n_samples > MAX_NUMBER_OF_SAMPLES:
# Randomly subsample (we don't want to smoothly downsample
# because that is likely to change the values)
random_indices = np.random.randint(0, n_samples, MAX_NUMBER_OF_SAMPLES)
resampled = data_above_thresh[random_indices]
return resampled.reshape(MAX_NUMBER_OF_SAMPLES, 1)
else:
return data_above_thresh.reshape(n_samples, 1)
def _apply_clustering_n_clusters(X, n_clusters):
    """Run k-means with a fixed cluster count.

    :param X: ndarray, column vector of power readings
    :param n_clusters: exact number of clusters to use
    :return: tuple (labels, cluster_centers) from the fitted KMeans model
    """
    # Imported lazily; see the note in _apply_clustering about autodoc.
    from sklearn.cluster import KMeans
    k_means = KMeans(init='k-means++', n_clusters=n_clusters)
    k_means.fit(X)
    return k_means.labels_, k_means.cluster_centers_
def _apply_clustering(X, max_num_clusters, exact_num_clusters=None):
    '''
    Parameters
    ----------
    X : ndarray
    max_num_clusters : int
    exact_num_clusters : int, optional
        Skip the model-selection loop and use exactly this many clusters.

    Returns
    -------
    centroids : list of numbers
        List of power in different states of an appliance
    '''
    # If we import sklearn at the top of the file then it makes autodoc fail
    from sklearn import metrics

    # sklearn produces lots of DepreciationWarnings with PyTables
    import warnings
    warnings.filterwarnings("ignore", category=DeprecationWarning)

    # Finds whether 2 or 3 gives better Silhouellete coefficient
    # Whichever is higher serves as the number of clusters for that
    # appliance
    num_clus = -1
    sh = -1
    k_means_labels = {}
    k_means_cluster_centers = {}
    k_means_labels_unique = {}

    # If the exact number of clusters are specified, then use that
    if exact_num_clusters is not None:
        labels, centers = _apply_clustering_n_clusters(X, exact_num_clusters)
        return centers.flatten()

    # Exact number of clusters are not specified, use the cluster validity measures
    # to find the optimal number
    for n_clusters in range(1, max_num_clusters):
        try:
            labels, centers = _apply_clustering_n_clusters(X, n_clusters)
            k_means_labels[n_clusters] = labels
            k_means_cluster_centers[n_clusters] = centers
            k_means_labels_unique[n_clusters] = np.unique(labels)
            try:
                # Higher silhouette score => better-separated clusters;
                # keep the best n_clusters seen so far.
                sh_n = metrics.silhouette_score(
                    X, k_means_labels[n_clusters], metric='euclidean')
                if sh_n > sh:
                    sh = sh_n
                    num_clus = n_clusters
            except Exception:
                # silhouette_score fails e.g. with a single cluster;
                # fall back to accepting this n_clusters.
                num_clus = n_clusters
        except Exception:
            # k-means itself failed; return the best result so far.
            # NOTE(review): this path returns the centers *unflattened*,
            # unlike the other return paths — confirm callers handle both.
            if num_clus > -1:
                return k_means_cluster_centers[num_clus]
            else:
                return np.array([0])
    return k_means_cluster_centers[num_clus].flatten()
def hart85_means_shift_cluster(pair_buffer_df, cols):
    """Cluster Hart-85 transition pairs with MeanShift.

    Parameters
    ----------
    pair_buffer_df : pd.DataFrame
        Paired on/off transitions with 'T1 Active'/'T2 Active' (and
        optionally 'T1 Reactive'/'T2 Reactive') columns.
    cols : list of tuples
        Column identifiers whose second element names the power type
        ('active' / 'reactive').

    Returns
    -------
    pd.DataFrame
        One row per cluster centre, columns taken from `cols`.
    """
    from sklearn.cluster import MeanShift
    # Creating feature vector: mean absolute magnitude of each pair's
    # two transitions, per power type.
    cluster_df = pd.DataFrame()
    power_types = [col[1] for col in cols]
    if 'active' in power_types:
        cluster_df['active'] = pd.Series(pair_buffer_df.apply(lambda row:
                                                              ((np.fabs(row['T1 Active']) + np.fabs(row['T2 Active'])) / 2), axis=1), index=pair_buffer_df.index)
    if 'reactive' in power_types:
        cluster_df['reactive'] = pd.Series(pair_buffer_df.apply(lambda row:
                                                                ((np.fabs(row['T1 Reactive']) + np.fabs(row['T2 Reactive'])) / 2), axis=1), index=pair_buffer_df.index)
    X = cluster_df.values.reshape((len(cluster_df.index), len(cols)))
    ms = MeanShift(bin_seeding=True)
    ms.fit(X)
    # NOTE(review): `labels` and `labels_unique` are computed but unused;
    # only the cluster centres are returned.
    labels = ms.labels_
    cluster_centers = ms.cluster_centers_
    labels_unique = np.unique(labels)
    return pd.DataFrame(cluster_centers, columns=cols)
| apache-2.0 |
QuLogic/iris | docs/iris/example_code/Meteorology/wind_speed.py | 11 | 2361 | """
Plotting wind direction using quiver
===========================================================
This example demonstrates using quiver to plot wind speed contours and wind
direction arrows from wind vector component input data. The vector components
are co-located in space in this case.
For the second plot, the data used for the arrows is normalised to produce
arrows with a uniform size on the plot.
"""
import matplotlib.pyplot as plt
import numpy as np
import iris
import iris.coord_categorisation
import iris.quickplot as qplt
import cartopy
import cartopy.feature as cfeat
import cartopy.crs as ccrs
def main():
    """Plot wind speed over Lake Victoria, first with raw wind vectors and
    then with vectors normalised to uniform arrow length."""
    # Load the u and v components of wind from a pp file
    infile = iris.sample_data_path('wind_speed_lake_victoria.pp')
    uwind = iris.load_cube(infile, 'x_wind')
    vwind = iris.load_cube(infile, 'y_wind')
    ulon = uwind.coord('longitude')
    vlon = vwind.coord('longitude')
    # The longitude points go from 180 to 540, so subtract 360 from them
    ulon.points = ulon.points - 360.0
    vlon.points = vlon.points - 360.0
    # Create a cube containing the wind speed (vector magnitude).
    windspeed = (uwind ** 2 + vwind ** 2) ** 0.5
    windspeed.rename('windspeed')
    x = ulon.points
    y = uwind.coord('latitude').points
    u = uwind.data
    v = vwind.data
    # Set up axes to show the lake
    lakes = cfeat.NaturalEarthFeature('physical', 'lakes', '50m',
                                      facecolor='none')
    plt.figure()
    ax = plt.axes(projection=ccrs.PlateCarree())
    ax.add_feature(lakes)
    # Get the coordinate reference system used by the data
    transform = ulon.coord_system.as_cartopy_projection()
    # Plot the wind speed as a contour plot
    qplt.contourf(windspeed, 20)
    # Add arrows to show the wind vectors
    plt.quiver(x, y, u, v, pivot='middle', transform=transform)
    plt.title("Wind speed over Lake Victoria")
    qplt.show()
    # Normalise the data for uniform arrow size
    u_norm = u / np.sqrt(u ** 2.0 + v ** 2.0)
    v_norm = v / np.sqrt(u ** 2.0 + v ** 2.0)
    plt.figure()
    ax = plt.axes(projection=ccrs.PlateCarree())
    ax.add_feature(lakes)
    qplt.contourf(windspeed, 20)
    plt.quiver(x, y, u_norm, v_norm, pivot='middle', transform=transform)
    plt.title("Wind speed over Lake Victoria")
    qplt.show()


if __name__ == '__main__':
    main()
| gpl-3.0 |
MartinSavc/scikit-learn | sklearn/utils/tests/test_class_weight.py | 140 | 11909 | import numpy as np
from sklearn.linear_model import LogisticRegression
from sklearn.datasets import make_blobs
from sklearn.utils.class_weight import compute_class_weight
from sklearn.utils.class_weight import compute_sample_weight
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_warns
def test_compute_class_weight():
    """Check 'auto' (deprecated) and 'balanced' weights on a skewed label set."""
    # Test (and demo) compute_class_weight.
    y = np.asarray([2, 2, 2, 3, 3, 4])
    classes = np.unique(y)
    cw = assert_warns(DeprecationWarning,
                      compute_class_weight, "auto", classes, y)
    assert_almost_equal(cw.sum(), classes.shape)
    # Rarer classes get larger weights.
    assert_true(cw[0] < cw[1] < cw[2])
    cw = compute_class_weight("balanced", classes, y)
    # total effect of samples is preserved
    class_counts = np.bincount(y)[2:]
    assert_almost_equal(np.dot(cw, class_counts), y.shape[0])
    assert_true(cw[0] < cw[1] < cw[2])
def test_compute_class_weight_not_present():
    """ValueError is raised when y is missing some of the declared classes."""
    # Raise error when y does not contain all class labels
    classes = np.arange(4)
    y = np.asarray([0, 0, 0, 1, 1, 2])
    assert_raises(ValueError, compute_class_weight, "auto", classes, y)
    assert_raises(ValueError, compute_class_weight, "balanced", classes, y)
def test_compute_class_weight_invariance():
    """Balanced weights make the fitted model invariant to class imbalance."""
    # Test that results with class_weight="balanced" is invariant wrt
    # class imbalance if the number of samples is identical.
    # The test uses a balanced two class dataset with 100 datapoints.
    # It creates three versions, one where class 1 is duplicated
    # resulting in 150 points of class 1 and 50 of class 0,
    # one where there are 50 points in class 1 and 150 in class 0,
    # and one where there are 100 points of each class (this one is balanced
    # again).
    # With balancing class weights, all three should give the same model.
    X, y = make_blobs(centers=2, random_state=0)
    # create dataset where class 1 is duplicated twice
    X_1 = np.vstack([X] + [X[y == 1]] * 2)
    y_1 = np.hstack([y] + [y[y == 1]] * 2)
    # create dataset where class 0 is duplicated twice
    X_0 = np.vstack([X] + [X[y == 0]] * 2)
    y_0 = np.hstack([y] + [y[y == 0]] * 2)
    # duplicate everything
    X_ = np.vstack([X] * 2)
    y_ = np.hstack([y] * 2)
    # results should be identical
    logreg1 = LogisticRegression(class_weight="balanced").fit(X_1, y_1)
    logreg0 = LogisticRegression(class_weight="balanced").fit(X_0, y_0)
    logreg = LogisticRegression(class_weight="balanced").fit(X_, y_)
    assert_array_almost_equal(logreg1.coef_, logreg0.coef_)
    assert_array_almost_equal(logreg.coef_, logreg0.coef_)
def test_compute_class_weight_auto_negative():
    """Weights are computed correctly when class labels are negative ints."""
    # Test compute_class_weight when labels are negative
    # Test with balanced class labels.
    classes = np.array([-2, -1, 0])
    y = np.asarray([-1, -1, 0, 0, -2, -2])
    cw = assert_warns(DeprecationWarning, compute_class_weight, "auto",
                      classes, y)
    assert_almost_equal(cw.sum(), classes.shape)
    assert_equal(len(cw), len(classes))
    assert_array_almost_equal(cw, np.array([1., 1., 1.]))
    cw = compute_class_weight("balanced", classes, y)
    assert_equal(len(cw), len(classes))
    assert_array_almost_equal(cw, np.array([1., 1., 1.]))
    # Test with unbalanced class labels.
    y = np.asarray([-1, 0, 0, -2, -2, -2])
    cw = assert_warns(DeprecationWarning, compute_class_weight, "auto",
                      classes, y)
    assert_almost_equal(cw.sum(), classes.shape)
    assert_equal(len(cw), len(classes))
    assert_array_almost_equal(cw, np.array([0.545, 1.636, 0.818]), decimal=3)
    cw = compute_class_weight("balanced", classes, y)
    assert_equal(len(cw), len(classes))
    # Shift labels to be non-negative so bincount can count them.
    class_counts = np.bincount(y + 2)
    assert_almost_equal(np.dot(cw, class_counts), y.shape[0])
    assert_array_almost_equal(cw, [2. / 3, 2., 1.])
def test_compute_class_weight_auto_unordered():
    """Weights follow the order of `classes` even when it is not sorted."""
    # Test compute_class_weight when classes are unordered
    classes = np.array([1, 0, 3])
    y = np.asarray([1, 0, 0, 3, 3, 3])
    cw = assert_warns(DeprecationWarning, compute_class_weight, "auto",
                      classes, y)
    assert_almost_equal(cw.sum(), classes.shape)
    assert_equal(len(cw), len(classes))
    assert_array_almost_equal(cw, np.array([1.636, 0.818, 0.545]), decimal=3)
    cw = compute_class_weight("balanced", classes, y)
    # Reorder the counts to match the (unsorted) classes array.
    class_counts = np.bincount(y)[classes]
    assert_almost_equal(np.dot(cw, class_counts), y.shape[0])
    assert_array_almost_equal(cw, [2., 1., 2. / 3])
def test_compute_sample_weight():
    """Test (and demo) compute_sample_weight."""
    # Test with balanced classes: every sample gets unit weight.
    y = np.asarray([1, 1, 1, 2, 2, 2])
    sample_weight = assert_warns(DeprecationWarning,
                                 compute_sample_weight, "auto", y)
    assert_array_almost_equal(sample_weight, [1., 1., 1., 1., 1., 1.])
    sample_weight = compute_sample_weight("balanced", y)
    assert_array_almost_equal(sample_weight, [1., 1., 1., 1., 1., 1.])

    # Test with user-defined weights: each sample gets its class's weight.
    sample_weight = compute_sample_weight({1: 2, 2: 1}, y)
    assert_array_almost_equal(sample_weight, [2., 2., 2., 1., 1., 1.])

    # Test with column vector of balanced classes.
    y = np.asarray([[1], [1], [1], [2], [2], [2]])
    sample_weight = assert_warns(DeprecationWarning,
                                 compute_sample_weight, "auto", y)
    assert_array_almost_equal(sample_weight, [1., 1., 1., 1., 1., 1.])
    sample_weight = compute_sample_weight("balanced", y)
    assert_array_almost_equal(sample_weight, [1., 1., 1., 1., 1., 1.])

    # Test with unbalanced classes. expected_auto/expected_balanced are
    # reused below for the multi-output checks.
    y = np.asarray([1, 1, 1, 2, 2, 2, 3])
    sample_weight = assert_warns(DeprecationWarning,
                                 compute_sample_weight, "auto", y)
    expected_auto = np.asarray([.6, .6, .6, .6, .6, .6, 1.8])
    assert_array_almost_equal(sample_weight, expected_auto)
    sample_weight = compute_sample_weight("balanced", y)
    expected_balanced = np.array([0.7777, 0.7777, 0.7777, 0.7777, 0.7777,
                                  0.7777, 2.3333])
    assert_array_almost_equal(sample_weight, expected_balanced, decimal=4)

    # Test with `None` weights: weighting is a no-op.
    sample_weight = compute_sample_weight(None, y)
    assert_array_almost_equal(sample_weight, [1., 1., 1., 1., 1., 1., 1.])

    # Test with multi-output of balanced classes.
    y = np.asarray([[1, 0], [1, 0], [1, 0], [2, 1], [2, 1], [2, 1]])
    sample_weight = assert_warns(DeprecationWarning,
                                 compute_sample_weight, "auto", y)
    assert_array_almost_equal(sample_weight, [1., 1., 1., 1., 1., 1.])
    sample_weight = compute_sample_weight("balanced", y)
    assert_array_almost_equal(sample_weight, [1., 1., 1., 1., 1., 1.])

    # Test with multi-output with user-defined weights: the per-output
    # weights of a sample are multiplied together.
    y = np.asarray([[1, 0], [1, 0], [1, 0], [2, 1], [2, 1], [2, 1]])
    sample_weight = compute_sample_weight([{1: 2, 2: 1}, {0: 1, 1: 2}], y)
    assert_array_almost_equal(sample_weight, [2., 2., 2., 2., 2., 2.])

    # Test with multi-output of unbalanced classes: both outputs have the
    # same imbalance, so the single-output expectations are squared.
    y = np.asarray([[1, 0], [1, 0], [1, 0], [2, 1], [2, 1], [2, 1], [3, -1]])
    sample_weight = assert_warns(DeprecationWarning,
                                 compute_sample_weight, "auto", y)
    assert_array_almost_equal(sample_weight, expected_auto ** 2)
    sample_weight = compute_sample_weight("balanced", y)
    assert_array_almost_equal(sample_weight, expected_balanced ** 2, decimal=3)
def test_compute_sample_weight_with_subsample():
    """Test compute_sample_weight with subsamples specified."""
    # Test with balanced classes and all samples present.
    y = np.asarray([1, 1, 1, 2, 2, 2])
    sample_weight = assert_warns(DeprecationWarning,
                                 compute_sample_weight, "auto", y)
    assert_array_almost_equal(sample_weight, [1., 1., 1., 1., 1., 1.])
    sample_weight = compute_sample_weight("balanced", y, range(6))
    assert_array_almost_equal(sample_weight, [1., 1., 1., 1., 1., 1.])

    # Test with column vector of balanced classes and all samples present.
    y = np.asarray([[1], [1], [1], [2], [2], [2]])
    sample_weight = assert_warns(DeprecationWarning,
                                 compute_sample_weight, "auto", y)
    assert_array_almost_equal(sample_weight, [1., 1., 1., 1., 1., 1.])
    sample_weight = compute_sample_weight("balanced", y, range(6))
    assert_array_almost_equal(sample_weight, [1., 1., 1., 1., 1., 1.])

    # Test with a subsample: class frequencies are computed on the first
    # four samples only, so class 2 (rarer there) gets a larger weight.
    y = np.asarray([1, 1, 1, 2, 2, 2])
    sample_weight = assert_warns(DeprecationWarning,
                                 compute_sample_weight, "auto", y, range(4))
    assert_array_almost_equal(sample_weight, [.5, .5, .5, 1.5, 1.5, 1.5])
    sample_weight = compute_sample_weight("balanced", y, range(4))
    assert_array_almost_equal(sample_weight, [2. / 3, 2. / 3,
                                              2. / 3, 2., 2., 2.])

    # Test with a bootstrap subsample (indices may repeat).
    y = np.asarray([1, 1, 1, 2, 2, 2])
    sample_weight = assert_warns(DeprecationWarning, compute_sample_weight,
                                 "auto", y, [0, 1, 1, 2, 2, 3])
    expected_auto = np.asarray([1 / 3., 1 / 3., 1 / 3., 5 / 3., 5 / 3., 5 / 3.])
    assert_array_almost_equal(sample_weight, expected_auto)
    sample_weight = compute_sample_weight("balanced", y, [0, 1, 1, 2, 2, 3])
    expected_balanced = np.asarray([0.6, 0.6, 0.6, 3., 3., 3.])
    assert_array_almost_equal(sample_weight, expected_balanced)

    # Test with a bootstrap subsample for multi-output: per-output weights
    # are multiplied, so the single-output expectations are squared.
    y = np.asarray([[1, 0], [1, 0], [1, 0], [2, 1], [2, 1], [2, 1]])
    sample_weight = assert_warns(DeprecationWarning, compute_sample_weight,
                                 "auto", y, [0, 1, 1, 2, 2, 3])
    assert_array_almost_equal(sample_weight, expected_auto ** 2)
    sample_weight = compute_sample_weight("balanced", y, [0, 1, 1, 2, 2, 3])
    assert_array_almost_equal(sample_weight, expected_balanced ** 2)

    # Test with a missing class: a class absent from the subsample gets
    # weight 0.
    y = np.asarray([1, 1, 1, 2, 2, 2, 3])
    sample_weight = assert_warns(DeprecationWarning, compute_sample_weight,
                                 "auto", y, range(6))
    assert_array_almost_equal(sample_weight, [1., 1., 1., 1., 1., 1., 0.])
    sample_weight = compute_sample_weight("balanced", y, range(6))
    assert_array_almost_equal(sample_weight, [1., 1., 1., 1., 1., 1., 0.])

    # Test with a missing class for multi-output.
    y = np.asarray([[1, 0], [1, 0], [1, 0], [2, 1], [2, 1], [2, 1], [2, 2]])
    sample_weight = assert_warns(DeprecationWarning, compute_sample_weight,
                                 "auto", y, range(6))
    assert_array_almost_equal(sample_weight, [1., 1., 1., 1., 1., 1., 0.])
    sample_weight = compute_sample_weight("balanced", y, range(6))
    assert_array_almost_equal(sample_weight, [1., 1., 1., 1., 1., 1., 0.])
def test_compute_sample_weight_errors():
    """compute_sample_weight must reject invalid presets and weight specs."""
    y_single = np.asarray([1, 1, 1, 2, 2, 2])
    y_multi = np.asarray([[1, 0], [1, 0], [1, 0], [2, 1], [2, 1], [2, 1]])

    # An unknown preset string is rejected for every target/indices combo.
    for target in (y_single, y_multi):
        assert_raises(ValueError, compute_sample_weight, "ni", target)
        assert_raises(ValueError, compute_sample_weight, "ni", target,
                      range(4))

    # Explicit class-weight dicts cannot be combined with a subsample.
    assert_raises(ValueError,
                  compute_sample_weight, {1: 2, 2: 1}, y_single, range(4))

    # A single dict is not valid for multi-output targets ...
    assert_raises(ValueError, compute_sample_weight, {1: 2, 2: 1}, y_multi)
    # ... and a list of dicts must match the number of outputs.
    assert_raises(ValueError, compute_sample_weight, [{1: 2, 2: 1}], y_multi)
| bsd-3-clause |
jesseengel/magenta | magenta/models/rl_tuner/rl_tuner.py | 2 | 81040 | # Copyright 2019 The Magenta Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Defines the main RL Tuner class.
RL Tuner is a Deep Q Network (DQN) with augmented reward to create melodies
by using reinforcement learning to fine-tune a trained Note RNN according
to some music theory rewards.
Also implements two alternatives to Q learning: Psi and G learning. The
algorithm can be switched using the 'algorithm' hyperparameter.
For more information, please consult the README.md file in this directory.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import os
import random
import urllib
from magenta.models.rl_tuner import note_rnn_loader
from magenta.models.rl_tuner import rl_tuner_eval_metrics
from magenta.models.rl_tuner import rl_tuner_ops
from magenta.music import melodies_lib as mlib
from magenta.music import midi_io
import matplotlib.pyplot as plt
import numpy as np
import scipy.special
from six.moves import range # pylint: disable=redefined-builtin
from six.moves import reload_module # pylint: disable=redefined-builtin
from six.moves import urllib # pylint: disable=redefined-builtin
import tensorflow as tf
# Note values of special actions.
NOTE_OFF = 0  # action/observation index meaning "release the current note"
NO_EVENT = 1  # action/observation index meaning "no new event; hold"

# Training data sequences are limited to this length, so the padding queue pads
# to this length.
TRAIN_SEQUENCE_LENGTH = 192
def reload_files():
    """Re-imports the project dependency modules.

    Needed inside IPython/Jupyter notebooks, where edits to a module are not
    picked up without an explicit reload.
    """
    for module in (note_rnn_loader, rl_tuner_ops, rl_tuner_eval_metrics):
        reload_module(module)
class RLTuner(object):
"""Implements a recurrent DQN designed to produce melody sequences."""
def __init__(self, output_dir,
             # Hyperparameters
             dqn_hparams=None,
             reward_mode='music_theory_all',
             reward_scaler=1.0,
             exploration_mode='egreedy',
             priming_mode='random_note',
             stochastic_observations=False,
             algorithm='q',
             # Trained Note RNN to load and tune
             note_rnn_checkpoint_dir=None,
             note_rnn_checkpoint_file=None,
             note_rnn_type='default',
             note_rnn_hparams=None,
             # Other music related settings.
             num_notes_in_melody=32,
             input_size=rl_tuner_ops.NUM_CLASSES,
             num_actions=rl_tuner_ops.NUM_CLASSES,
             midi_primer=None,
             # Logistics.
             save_name='rl_tuner.ckpt',
             output_every_nth=1000,
             training_file_list=None,
             summary_writer=None,
             initialize_immediately=True):
    """Initializes the MelodyQNetwork class.

    Args:
      output_dir: Where the model will save its compositions (midi files).
      dqn_hparams: A HParams object containing the hyperparameters of
        the DQN algorithm, including minibatch size, exploration probability,
        etc.
      reward_mode: Controls which reward function can be applied. There are
        several, including 'scale', which teaches the model to play a scale,
        and of course 'music_theory_all', which is a music-theory-based reward
        function composed of other functions.
      reward_scaler: Controls the emphasis placed on the music theory rewards.
        This value is the inverse of 'c' in the academic paper.
      exploration_mode: can be 'egreedy' which is an epsilon greedy policy, or
        it can be 'boltzmann', in which the model will sample from its output
        distribution to choose the next action.
      priming_mode: Each time the model begins a new composition, it is primed
        with either a random note ('random_note'), a random MIDI file from the
        training data ('random_midi'), or a particular MIDI file
        ('single_midi').
      stochastic_observations: If False, the note that the model chooses to
        play next (the argmax of its softmax probabilities) deterministically
        becomes the next note it will observe. If True, the next observation
        will be sampled from the model's softmax output.
      algorithm: can be 'default', 'psi', 'g' or 'pure_rl', for different
        learning algorithms
      note_rnn_checkpoint_dir: The directory from which the internal
        NoteRNNLoader will load its checkpointed LSTM.
      note_rnn_checkpoint_file: A checkpoint file to use in case one cannot be
        found in the note_rnn_checkpoint_dir.
      note_rnn_type: If 'default', will use the basic LSTM described in the
        research paper. If 'basic_rnn', will assume the checkpoint is from a
        Magenta basic_rnn model.
      note_rnn_hparams: A HParams object which defines the hyper parameters
        used to train the MelodyRNN model that will be loaded from a
        checkpoint.
      num_notes_in_melody: The length of a composition of the model
      input_size: the size of the one-hot vector encoding a note that is input
        to the model.
      num_actions: The size of the one-hot vector encoding a note that is
        output by the model.
      midi_primer: A midi file that can be used to prime the model if
        priming_mode is set to 'single_midi'.
      save_name: Name the model will use to save checkpoints.
      output_every_nth: How many training steps before the model will print
        an output saying the cumulative reward, and save a checkpoint.
      training_file_list: A list of paths to tfrecord files containing melody
        training data. This is necessary to use the 'random_midi' priming
        mode.
      summary_writer: A tf.summary.FileWriter used to log metrics.
      initialize_immediately: if True, the class will instantiate its
        component MelodyRNN networks and build the graph in the constructor.
    """
    # Make graph. All TF ops created below (constants, optimizer, and later
    # the networks) must live in this dedicated graph.
    self.graph = tf.Graph()

    with self.graph.as_default():
        # Memorize arguments.
        self.input_size = input_size
        self.num_actions = num_actions
        self.output_every_nth = output_every_nth
        self.output_dir = output_dir
        self.save_path = os.path.join(output_dir, save_name)
        self.reward_scaler = reward_scaler
        self.reward_mode = reward_mode
        self.exploration_mode = exploration_mode
        self.num_notes_in_melody = num_notes_in_melody
        self.stochastic_observations = stochastic_observations
        self.algorithm = algorithm
        self.priming_mode = priming_mode
        self.midi_primer = midi_primer
        self.training_file_list = training_file_list
        self.note_rnn_checkpoint_dir = note_rnn_checkpoint_dir
        self.note_rnn_checkpoint_file = note_rnn_checkpoint_file
        self.note_rnn_hparams = note_rnn_hparams
        self.note_rnn_type = note_rnn_type

        if priming_mode == 'single_midi' and midi_primer is None:
            tf.logging.fatal('A midi primer file is required when using'
                             'the single_midi priming mode.')

        # No checkpoint directory given: download the pre-trained Note RNN
        # checkpoint into the current working directory.
        if note_rnn_checkpoint_dir is None or not note_rnn_checkpoint_dir:
            print('Retrieving checkpoint of Note RNN from Magenta download server.')
            urllib.request.urlretrieve(
                'http://download.magenta.tensorflow.org/models/'
                'rl_tuner_note_rnn.ckpt', 'note_rnn.ckpt')
            self.note_rnn_checkpoint_dir = os.getcwd()
            self.note_rnn_checkpoint_file = os.path.join(os.getcwd(),
                                                         'note_rnn.ckpt')

        # Fall back to default hyperparameters matching the checkpoint type.
        if self.note_rnn_hparams is None:
            if self.note_rnn_type == 'basic_rnn':
                self.note_rnn_hparams = rl_tuner_ops.basic_rnn_hparams()
            else:
                self.note_rnn_hparams = rl_tuner_ops.default_hparams()

        # These algorithms incorporate the Note RNN probabilities elsewhere,
        # so the environment reward is restricted to music theory only.
        if self.algorithm == 'g' or self.algorithm == 'pure_rl':
            self.reward_mode = 'music_theory_only'

        if dqn_hparams is None:
            self.dqn_hparams = rl_tuner_ops.default_dqn_hparams()
        else:
            self.dqn_hparams = dqn_hparams
        self.discount_rate = tf.constant(self.dqn_hparams.discount_rate)
        self.target_network_update_rate = tf.constant(
            self.dqn_hparams.target_network_update_rate)

        self.optimizer = tf.train.AdamOptimizer()

        # DQN state.
        self.actions_executed_so_far = 0
        # Experience replay buffer; old transitions fall off the left end.
        self.experience = collections.deque(
            maxlen=self.dqn_hparams.max_experience)
        self.iteration = 0
        self.summary_writer = summary_writer
        self.num_times_store_called = 0
        self.num_times_train_called = 0

        # Stored reward metrics.
        self.reward_last_n = 0
        self.rewards_batched = []
        self.music_theory_reward_last_n = 0
        self.music_theory_rewards_batched = []
        self.note_rnn_reward_last_n = 0
        self.note_rnn_rewards_batched = []
        self.eval_avg_reward = []
        self.eval_avg_music_theory_reward = []
        self.eval_avg_note_rnn_reward = []
        self.target_val_list = []

        # Variables to keep track of characteristics of the current
        # composition.
        # TODO(natashajaques): Implement composition as a class to obtain
        # data encapsulation so that you can't accidentally change the leap
        # direction.
        self.beat = 0
        self.composition = []
        self.composition_direction = 0
        self.leapt_from = None  # stores the note at which composition leapt
        self.steps_since_last_leap = 0

        if not os.path.exists(self.output_dir):
            os.makedirs(self.output_dir)

        if initialize_immediately:
            self.initialize_internal_models_graph_session()
def initialize_internal_models_graph_session(self,
                                             restore_from_checkpoint=True):
    """Initializes internal RNN models, builds the graph, starts the session.

    Adds the graphs of the internal RNN models to this graph, adds the DQN ops
    to the graph, and starts a new Saver and session. By having a separate
    function for this rather than doing it in the constructor, it allows a
    model inheriting from this class to define its q_network differently.

    Args:
      restore_from_checkpoint: If True, the weights for the 'q_network',
        'target_q_network', and 'reward_rnn' will be loaded from a checkpoint.
        If false, these models will be initialized with random weights. Useful
        for checking what pure RL (with no influence from training data)
        sounds like.
    """
    with self.graph.as_default():
        # Add internal networks to the graph. The q_network is the policy
        # being trained.
        tf.logging.info('Initializing q network')
        self.q_network = note_rnn_loader.NoteRNNLoader(
            self.graph, 'q_network',
            self.note_rnn_checkpoint_dir,
            midi_primer=self.midi_primer,
            training_file_list=self.training_file_list,
            checkpoint_file=self.note_rnn_checkpoint_file,
            hparams=self.note_rnn_hparams,
            note_rnn_type=self.note_rnn_type)

        # The target network provides the (slowly updated) value estimates
        # used in the DQN targets.
        tf.logging.info('Initializing target q network')
        self.target_q_network = note_rnn_loader.NoteRNNLoader(
            self.graph,
            'target_q_network',
            self.note_rnn_checkpoint_dir,
            midi_primer=self.midi_primer,
            training_file_list=self.training_file_list,
            checkpoint_file=self.note_rnn_checkpoint_file,
            hparams=self.note_rnn_hparams,
            note_rnn_type=self.note_rnn_type)

        # The reward RNN keeps the original Note RNN weights and scores how
        # probable each action is under the trained melody model.
        tf.logging.info('Initializing reward network')
        self.reward_rnn = note_rnn_loader.NoteRNNLoader(
            self.graph, 'reward_rnn',
            self.note_rnn_checkpoint_dir,
            midi_primer=self.midi_primer,
            training_file_list=self.training_file_list,
            checkpoint_file=self.note_rnn_checkpoint_file,
            hparams=self.note_rnn_hparams,
            note_rnn_type=self.note_rnn_type)

        tf.logging.info('Q network cell: %s', self.q_network.cell)

        # Add rest of variables to graph.
        tf.logging.info('Adding RL graph variables')
        self.build_graph()

        # Prepare saver and session.
        self.saver = tf.train.Saver()
        self.session = tf.Session(graph=self.graph)
        self.session.run(tf.global_variables_initializer())

        # Initialize internal networks.
        if restore_from_checkpoint:
            self.q_network.initialize_and_restore(self.session)
            self.target_q_network.initialize_and_restore(self.session)
            self.reward_rnn.initialize_and_restore(self.session)

            # Double check that the model was initialized from checkpoint
            # properly. After restoring, q_network and reward_rnn should hold
            # identical weights, so their first variables must match exactly.
            reward_vars = self.reward_rnn.variables()
            q_vars = self.q_network.variables()

            reward1 = self.session.run(reward_vars[0])
            q1 = self.session.run(q_vars[0])

            if np.sum((q1 - reward1)**2) == 0.0:
                # TODO(natashamjaques): Remove print statement once tf.logging
                # outputs to Jupyter notebooks (once the following issue is
                # resolved: https://github.com/tensorflow/tensorflow/issues/3047)
                print('\nSuccessfully initialized internal nets from checkpoint!')
                tf.logging.info('\nSuccessfully initialized internal nets from '
                                'checkpoint!')
            else:
                tf.logging.fatal('Error! The model was not initialized from '
                                 'checkpoint properly')
        else:
            self.q_network.initialize_new(self.session)
            self.target_q_network.initialize_new(self.session)
            self.reward_rnn.initialize_new(self.session)

    # Priming from training data needs the melody batch to be run through
    # the network first (requires the session created above).
    if self.priming_mode == 'random_midi':
        tf.logging.info('Getting priming melodies')
        self.get_priming_melodies()
def get_priming_melodies(self):
    """Runs a batch of training data through MelodyRNN model.

    If the priming mode is 'random_midi', priming the q-network requires a
    random training melody. Therefore this function runs a batch of data from
    the training directory through the internal model, and the resulting
    internal states of the LSTM are stored in a list. The next note in each
    training melody is also stored in a corresponding list called
    'priming_notes'. Therefore, to prime the model with a random melody, it is
    only necessary to select a random index from 0 to batch_size-1 and use the
    hidden states and note at that index as input to the model.
    """
    (next_note_softmax,
     self.priming_states, lengths) = self.q_network.run_training_batch()

    # Get the next note that was predicted for each priming melody to be used
    # in priming.
    self.priming_notes = [0] * len(lengths)
    # Iterate with enumerate instead of indexing `lengths` by position.
    for i, length in enumerate(lengths):
        # Each melody has TRAIN_SEQUENCE_LENGTH outputs, but the last real
        # (non-padding) note of melody i lives at offset length - 1 within
        # its TRAIN_SEQUENCE_LENGTH-sized slice of the batch output.
        start_i = i * TRAIN_SEQUENCE_LENGTH
        end_i = start_i + length - 1
        end_softmax = next_note_softmax[end_i, :]
        self.priming_notes[i] = np.argmax(end_softmax)

    tf.logging.info('Stored priming notes: %s', self.priming_notes)
def prime_internal_model(self, model):
    """Prime an internal model such as the q_network based on priming mode.

    Args:
      model: The internal model that should be primed.

    Returns:
      The first observation to feed into the model.
    """
    model.state_value = model.get_zero_state()

    if self.priming_mode == 'random_midi':
        # Restore an LSTM state captured from a random training melody and
        # return the note that followed it (see get_priming_melodies).
        priming_idx = np.random.randint(0, len(self.priming_states))
        model.state_value = np.reshape(
            self.priming_states[priming_idx, :],
            (1, model.cell.state_size))
        priming_note = self.priming_notes[priming_idx]
        next_obs = np.array(
            rl_tuner_ops.make_onehot([priming_note],
                                     self.num_actions)).flatten()
        tf.logging.debug(
            'Feeding priming state for midi file %s and corresponding note %s',
            priming_idx, priming_note)
    elif self.priming_mode == 'single_midi':
        # The model primes itself from the configured midi_primer file.
        model.prime_model()
        next_obs = model.priming_note
    elif self.priming_mode == 'random_note':
        next_obs = self.get_random_note()
    else:
        # Unknown mode: warn and fall back to a random note.
        tf.logging.warn('Error! Invalid priming mode. Priming with random note')
        next_obs = self.get_random_note()

    return next_obs
def get_random_note(self):
    """Sample a note uniformly at random.

    Returns:
      A one-hot encoding (length num_actions) of the sampled note, as a flat
      numpy array.
    """
    # np.random.randint excludes the high endpoint, so the upper bound must
    # be num_actions (not num_actions - 1) for the last action to ever be
    # sampled; otherwise the distribution is not uniform over all actions.
    note_idx = np.random.randint(0, self.num_actions)
    return np.array(rl_tuner_ops.make_onehot([note_idx],
                                             self.num_actions)).flatten()
def reset_composition(self):
    """Discards the in-progress composition and rewinds the beat counter.

    Also clears the statistics used by the reward functions to track whether
    the melody is currently in the middle of a melodic leap.
    """
    self.beat = 0
    self.composition = []
    # Leap-tracking state consumed by the music-theory reward functions.
    self.composition_direction = 0
    self.leapt_from = None
    self.steps_since_last_leap = 0
def build_graph(self):
    """Builds the reinforcement learning tensorflow graph."""

    tf.logging.info('Adding reward computation portion of the graph')
    with tf.name_scope('reward_computation'):
        # Scores from the fixed reward RNN (the pre-trained Note RNN).
        self.reward_scores = tf.identity(self.reward_rnn(),
                                         name='reward_scores')

    tf.logging.info('Adding taking action portion of graph')
    with tf.name_scope('taking_action'):
        # Output of the q network gives the value of taking each action
        # (playing each note).
        self.action_scores = tf.identity(self.q_network(),
                                         name='action_scores')
        tf.summary.histogram(
            'action_scores', self.action_scores)

        # The action values for the G algorithm are computed differently:
        # they add the reward RNN's scores to the q network's.
        if self.algorithm == 'g':
            self.g_action_scores = self.action_scores + self.reward_scores

            # Compute predicted action, which is the argmax of the action
            # scores. (`dimension` is the deprecated TF1 spelling of `axis`.)
            self.action_softmax = tf.nn.softmax(self.g_action_scores,
                                                name='action_softmax')
            self.predicted_actions = tf.one_hot(
                tf.argmax(self.g_action_scores,
                          dimension=1,
                          name='predicted_actions'),
                self.num_actions)
        else:
            # Compute predicted action, which is the argmax of the action
            # scores.
            self.action_softmax = tf.nn.softmax(self.action_scores,
                                                name='action_softmax')
            self.predicted_actions = tf.one_hot(
                tf.argmax(self.action_scores,
                          dimension=1,
                          name='predicted_actions'),
                self.num_actions)

    tf.logging.info('Add estimating future rewards portion of graph')
    with tf.name_scope('estimating_future_rewards'):
        # The target q network is used to estimate the value of the best
        # action at the state resulting from the current action. stop_gradient
        # keeps backprop from flowing into the target network.
        self.next_action_scores = tf.stop_gradient(self.target_q_network())
        tf.summary.histogram(
            'target_action_scores', self.next_action_scores)

        # Rewards are observed from the environment and are fed in later.
        self.rewards = tf.placeholder(tf.float32, (None,), name='rewards')

        # Each algorithm is attempting to model future rewards with a
        # different function.
        if self.algorithm == 'psi':
            # Psi learning: soft-max (log-sum-exp) over next action values.
            self.target_vals = tf.reduce_logsumexp(self.next_action_scores,
                                                   reduction_indices=[1,])
        elif self.algorithm == 'g':
            # G learning: log-sum-exp over reward-adjusted, normalized
            # action values.
            self.g_normalizer = tf.reduce_logsumexp(self.reward_scores,
                                                    reduction_indices=[1,])
            self.g_normalizer = tf.reshape(self.g_normalizer, [-1, 1])
            self.g_normalizer = tf.tile(self.g_normalizer,
                                        [1, self.num_actions])
            self.g_action_scores = tf.subtract(
                (self.next_action_scores + self.reward_scores),
                self.g_normalizer)
            self.target_vals = tf.reduce_logsumexp(self.g_action_scores,
                                                   reduction_indices=[1,])
        else:
            # Use default based on Q learning: hard max over next actions.
            self.target_vals = tf.reduce_max(self.next_action_scores,
                                             reduction_indices=[1,])

        # Total rewards are the observed rewards plus discounted estimated
        # future rewards.
        self.future_rewards = self.rewards + self.discount_rate * self.target_vals

    tf.logging.info('Adding q value prediction portion of graph')
    with tf.name_scope('q_value_prediction'):
        # Action mask will be a one-hot encoding of the action the network
        # actually took.
        self.action_mask = tf.placeholder(tf.float32,
                                          (None, self.num_actions),
                                          name='action_mask')
        self.masked_action_scores = tf.reduce_sum(self.action_scores *
                                                  self.action_mask,
                                                  reduction_indices=[1,])

        temp_diff = self.masked_action_scores - self.future_rewards

        # Prediction error is the mean squared error between the reward the
        # network actually received for a given action, and what it expected
        # to receive.
        self.prediction_error = tf.reduce_mean(tf.square(temp_diff))

        # Compute gradients.
        self.params = tf.trainable_variables()
        self.gradients = self.optimizer.compute_gradients(self.prediction_error)

        # Clip gradients (norm 5) to keep the updates stable.
        for i, (grad, var) in enumerate(self.gradients):
            if grad is not None:
                self.gradients[i] = (tf.clip_by_norm(grad, 5), var)

        # Log per-variable (and per-gradient) histograms for TensorBoard.
        for grad, var in self.gradients:
            tf.summary.histogram(var.name, var)
            if grad is not None:
                tf.summary.histogram(var.name + '/gradients', grad)

        # Backprop.
        self.train_op = self.optimizer.apply_gradients(self.gradients)

    tf.logging.info('Adding target network update portion of graph')
    with tf.name_scope('target_network_update'):
        # Updates the target_q_network to be similar to the q_network based
        # on the target_network_update_rate.
        self.target_network_update = []
        for v_source, v_target in zip(self.q_network.variables(),
                                      self.target_q_network.variables()):
            # Equivalent to target = (1-alpha) * target + alpha * source
            update_op = v_target.assign_sub(self.target_network_update_rate *
                                            (v_target - v_source))
            self.target_network_update.append(update_op)
        self.target_network_update = tf.group(*self.target_network_update)

    tf.summary.scalar(
        'prediction_error', self.prediction_error)

    self.summarize = tf.summary.merge_all()
    self.no_op1 = tf.no_op()
def train(self, num_steps=10000, exploration_period=5000, enable_random=True):
    """Main training function that allows model to act, collects reward, trains.

    Iterates a number of times, getting the model to act each time, saving the
    experience, and performing backprop.

    Args:
      num_steps: The number of training steps to execute.
      exploration_period: The number of steps over which the probability of
        exploring (taking a random action) is annealed from 1.0 to the
        model's random_action_probability.
      enable_random: If False, the model will not be able to act randomly /
        explore.
    """
    tf.logging.info('Evaluating initial model...')
    self.evaluate_model()

    self.actions_executed_so_far = 0

    if self.stochastic_observations:
        tf.logging.info('Using stochastic environment')

    # Boltzmann exploration and stochastic environments both sample the next
    # observation from the model's softmax instead of taking the argmax.
    sample_next_obs = False
    if self.exploration_mode == 'boltzmann' or self.stochastic_observations:
        sample_next_obs = True

    self.reset_composition()
    last_observation = self.prime_internal_models()

    for i in range(num_steps):
        # Experiencing observation, state, action, reward, new observation,
        # new state tuples, and storing them.
        state = np.array(self.q_network.state_value).flatten()

        action, new_observation, reward_scores = self.action(
            last_observation, exploration_period, enable_random=enable_random,
            sample_next_obs=sample_next_obs)

        new_state = np.array(self.q_network.state_value).flatten()
        new_reward_state = np.array(self.reward_rnn.state_value).flatten()

        reward = self.collect_reward(last_observation, new_observation,
                                     reward_scores)

        self.store(last_observation, state, action, reward, new_observation,
                   new_state, new_reward_state)

        # Used to keep track of how the reward is changing over time.
        self.reward_last_n += reward

        # Used to keep track of the current musical composition and beat for
        # the reward functions.
        self.composition.append(np.argmax(new_observation))
        self.beat += 1

        # Every output_every_nth steps: evaluate, checkpoint, and log the
        # rewards accumulated since the last report.
        if i > 0 and i % self.output_every_nth == 0:
            tf.logging.info('Evaluating model...')
            self.evaluate_model()
            self.save_model(self.algorithm)

            if self.algorithm == 'g':
                # For G learning, total reward is the sum of both components.
                self.rewards_batched.append(
                    self.music_theory_reward_last_n +
                    self.note_rnn_reward_last_n)
            else:
                self.rewards_batched.append(self.reward_last_n)

            self.music_theory_rewards_batched.append(
                self.music_theory_reward_last_n)
            self.note_rnn_rewards_batched.append(self.note_rnn_reward_last_n)

            # Save a checkpoint.
            save_step = len(self.rewards_batched)*self.output_every_nth
            self.saver.save(self.session, self.save_path,
                            global_step=save_step)

            r = self.reward_last_n
            tf.logging.info('Training iteration %s', i)
            tf.logging.info('\tReward for last %s steps: %s',
                            self.output_every_nth, r)
            tf.logging.info('\t\tMusic theory reward: %s',
                            self.music_theory_reward_last_n)
            tf.logging.info('\t\tNote RNN reward: %s',
                            self.note_rnn_reward_last_n)

            # TODO(natashamjaques): Remove print statement once tf.logging
            # outputs to Jupyter notebooks (once the following issue is
            # resolved: https://github.com/tensorflow/tensorflow/issues/3047)
            print('Training iteration', i)
            print('\tReward for last', self.output_every_nth, 'steps:', r)
            print('\t\tMusic theory reward:', self.music_theory_reward_last_n)
            print('\t\tNote RNN reward:', self.note_rnn_reward_last_n)

            if self.exploration_mode == 'egreedy':
                exploration_p = rl_tuner_ops.linear_annealing(
                    self.actions_executed_so_far, exploration_period, 1.0,
                    self.dqn_hparams.random_action_probability)
                tf.logging.info('\tExploration probability is %s',
                                exploration_p)

            # Reset the per-interval reward accumulators.
            self.reward_last_n = 0
            self.music_theory_reward_last_n = 0
            self.note_rnn_reward_last_n = 0

        # Backprop.
        self.training_step()

        # Update current state as last state.
        last_observation = new_observation

        # Reset the state after each composition is complete.
        if self.beat % self.num_notes_in_melody == 0:
            tf.logging.debug('\nResetting composition!\n')
            self.reset_composition()
            last_observation = self.prime_internal_models()
def action(self, observation, exploration_period=0, enable_random=True,
           sample_next_obs=False):
    """Given an observation, runs the q_network to choose the current action.

    Does not backprop.

    Args:
      observation: A one-hot encoding of a single observation (note).
      exploration_period: The total length of the period the network will
        spend exploring, as set in the train function.
      enable_random: If False, the network cannot act randomly.
      sample_next_obs: If True, the next observation will be sampled from
        the softmax probabilities produced by the model, and passed back
        along with the action. If False, only the action is passed back.

    Returns:
      The action chosen, the reward_scores returned by the reward_rnn, and the
      next observation. If sample_next_obs is False, the next observation is
      equal to the action.
    """
    assert len(observation.shape) == 1, 'Single observation only'

    self.actions_executed_so_far += 1

    if self.exploration_mode == 'egreedy':
        # Compute the exploration probability (annealed from 1.0 down to
        # random_action_probability over exploration_period steps).
        exploration_p = rl_tuner_ops.linear_annealing(
            self.actions_executed_so_far, exploration_period, 1.0,
            self.dqn_hparams.random_action_probability)
    elif self.exploration_mode == 'boltzmann':
        # Boltzmann exploration never acts uniformly at random; instead the
        # next observation is sampled from the softmax below.
        enable_random = False
        sample_next_obs = True
    # NOTE(review): for any other exploration_mode with enable_random=True,
    # exploration_p below would be unbound (NameError) — callers appear to
    # only use 'egreedy' or 'boltzmann'; confirm.

    # Run the observation through the q_network.
    input_batch = np.reshape(observation,
                             (self.q_network.batch_size, 1, self.input_size))
    lengths = np.full(self.q_network.batch_size, 1, dtype=int)

    # One session.run advances both the q_network and the reward_rnn so that
    # their recurrent states stay in sync with the composition.
    (action, action_softmax, self.q_network.state_value,
     reward_scores, self.reward_rnn.state_value) = self.session.run(
         [self.predicted_actions, self.action_softmax,
          self.q_network.state_tensor, self.reward_scores,
          self.reward_rnn.state_tensor],
         {self.q_network.melody_sequence: input_batch,
          self.q_network.initial_state: self.q_network.state_value,
          self.q_network.lengths: lengths,
          self.reward_rnn.melody_sequence: input_batch,
          self.reward_rnn.initial_state: self.reward_rnn.state_value,
          self.reward_rnn.lengths: lengths})

    # Collapse the batch dimension down to flat vectors of length
    # num_actions.
    reward_scores = np.reshape(reward_scores, (self.num_actions))
    action_softmax = np.reshape(action_softmax, (self.num_actions))
    action = np.reshape(action, (self.num_actions))

    if enable_random and random.random() < exploration_p:
        # Epsilon-greedy: act (and observe) a uniformly random note.
        note = self.get_random_note()
        return note, note, reward_scores
    else:
        if not sample_next_obs:
            # Deterministic environment: the chosen action becomes the next
            # observation.
            return action, action, reward_scores
        else:
            # Sample the next observation from the softmax distribution.
            obs_note = rl_tuner_ops.sample_softmax(action_softmax)
            next_obs = np.array(
                rl_tuner_ops.make_onehot([obs_note],
                                         self.num_actions)).flatten()
            return action, next_obs, reward_scores
def store(self, observation, state, action, reward, newobservation, newstate,
          new_reward_state):
  """Stores an experience in the model's experience replay buffer.

  One experience consists of an initial observation and internal LSTM state,
  which led to the execution of an action, the receipt of a reward, and
  finally a new observation and a new LSTM internal state.

  Args:
    observation: A one hot encoding of an observed note.
    state: The internal state of the q_network MelodyRNN LSTM model.
    action: A one hot encoding of action taken by network.
    reward: Reward received for taking the action.
    newobservation: The next observation that resulted from the action.
      Unless stochastic_observations is True, the action and new
      observation will be the same.
    newstate: The internal state of the q_network MelodyRNN that is
      observed after taking the action.
    new_reward_state: The internal state of the reward_rnn network that is
      observed after taking the action.
  """
  # Only every store_every_nth call is persisted; call 0 always stores
  # because the counter is checked before it is incremented.
  should_store = (
      self.num_times_store_called % self.dqn_hparams.store_every_nth == 0)
  if should_store:
    transition = (observation, state, action, reward,
                  newobservation, newstate, new_reward_state)
    self.experience.append(transition)
  self.num_times_store_called += 1
def training_step(self):
  """Backpropagate prediction error from a randomly sampled experience batch.

  A minibatch of experiences is randomly sampled from the model's experience
  replay buffer and used to update the weights of the q_network and
  target_q_network.
  """
  # Only actually train on every train_every_nth invocation.
  if self.num_times_train_called % self.dqn_hparams.train_every_nth == 0:
    # Wait until the replay buffer can fill one minibatch.
    if len(self.experience) < self.dqn_hparams.minibatch_size:
      return

    # Sample experience.
    samples = random.sample(range(len(self.experience)),
                            self.dqn_hparams.minibatch_size)
    samples = [self.experience[i] for i in samples]

    # Batch states.
    states = np.empty((len(samples), self.q_network.cell.state_size))
    new_states = np.empty((len(samples),
                           self.target_q_network.cell.state_size))
    reward_new_states = np.empty((len(samples),
                                  self.reward_rnn.cell.state_size))
    observations = np.empty((len(samples), self.input_size))
    new_observations = np.empty((len(samples), self.input_size))
    action_mask = np.zeros((len(samples), self.num_actions))
    rewards = np.empty((len(samples),))
    # Every stored transition is a single-step sequence, hence length 1.
    lengths = np.full(len(samples), 1, dtype=int)

    # Unpack each sampled (obs, state, action, reward, new_obs, new_state,
    # reward_state) tuple into the batched arrays above.
    for i, (o, s, a, r, new_o, new_s, reward_s) in enumerate(samples):
      observations[i, :] = o
      new_observations[i, :] = new_o
      states[i, :] = s
      new_states[i, :] = new_s
      action_mask[i, :] = a
      rewards[i] = r
      reward_new_states[i, :] = reward_s

    # RNN placeholders expect (batch, time, depth) with time == 1.
    observations = np.reshape(observations,
                              (len(samples), 1, self.input_size))
    new_observations = np.reshape(new_observations,
                                  (len(samples), 1, self.input_size))

    # Summaries are computed only every 100 iterations, and only when a
    # summary writer was configured.
    calc_summaries = self.iteration % 100 == 0
    calc_summaries = calc_summaries and self.summary_writer is not None

    if self.algorithm == 'g':
      # The 'g' algorithm's training target also depends on the reward RNN,
      # so its placeholders must be fed as well.
      _, _, target_vals, summary_str = self.session.run([
          self.prediction_error,
          self.train_op,
          self.target_vals,
          self.summarize if calc_summaries else self.no_op1,
      ], {
          self.reward_rnn.melody_sequence: new_observations,
          self.reward_rnn.initial_state: reward_new_states,
          self.reward_rnn.lengths: lengths,
          self.q_network.melody_sequence: observations,
          self.q_network.initial_state: states,
          self.q_network.lengths: lengths,
          self.target_q_network.melody_sequence: new_observations,
          self.target_q_network.initial_state: new_states,
          self.target_q_network.lengths: lengths,
          self.action_mask: action_mask,
          self.rewards: rewards,
      })
    else:
      _, _, target_vals, summary_str = self.session.run([
          self.prediction_error,
          self.train_op,
          self.target_vals,
          self.summarize if calc_summaries else self.no_op1,
      ], {
          self.q_network.melody_sequence: observations,
          self.q_network.initial_state: states,
          self.q_network.lengths: lengths,
          self.target_q_network.melody_sequence: new_observations,
          self.target_q_network.initial_state: new_states,
          self.target_q_network.lengths: lengths,
          self.action_mask: action_mask,
          self.rewards: rewards,
      })

    # Periodically record the mean target value for later inspection.
    total_logs = (self.iteration * self.dqn_hparams.train_every_nth)
    if total_logs % self.output_every_nth == 0:
      self.target_val_list.append(np.mean(target_vals))

    # Move the target network towards the online q_network.
    self.session.run(self.target_network_update)

    if calc_summaries:
      self.summary_writer.add_summary(summary_str, self.iteration)

    self.iteration += 1
  self.num_times_train_called += 1
def evaluate_model(self, num_trials=100, sample_next_obs=True):
  """Used to evaluate the rewards the model receives without exploring.

  Generates num_trials compositions and computes the note_rnn and music
  theory rewards. Uses no exploration so rewards directly relate to the
  model's policy. Stores result in internal variables.

  Args:
    num_trials: The number of compositions to use for evaluation.
    sample_next_obs: If True, the next note the model plays will be
      sampled from its output distribution. If False, the model will
      deterministically choose the note with maximum value.
  """
  note_rnn_rewards = [0] * num_trials
  music_theory_rewards = [0] * num_trials
  total_rewards = [0] * num_trials

  for t in range(num_trials):
    last_observation = self.prime_internal_models()
    self.reset_composition()

    for _ in range(self.num_notes_in_melody):
      # Non-exploring action; reward_scores come back from the reward RNN.
      _, new_observation, reward_scores = self.action(
          last_observation,
          0,
          enable_random=False,
          sample_next_obs=sample_next_obs)

      note_rnn_reward = self.reward_from_reward_rnn_scores(new_observation,
                                                           reward_scores)
      music_theory_reward = self.reward_music_theory(new_observation)
      adjusted_mt_reward = self.reward_scaler * music_theory_reward
      total_reward = note_rnn_reward + adjusted_mt_reward

      # NOTE(review): these per-trial entries are overwritten on every beat,
      # so only the final note's rewards survive for each trial (they are
      # not summed over the melody) -- confirm this is intended.
      note_rnn_rewards[t] = note_rnn_reward
      music_theory_rewards[t] = music_theory_reward * self.reward_scaler
      total_rewards[t] = total_reward

      self.composition.append(np.argmax(new_observation))
      self.beat += 1
      last_observation = new_observation

  # Averages across trials are appended to running evaluation histories.
  self.eval_avg_reward.append(np.mean(total_rewards))
  self.eval_avg_note_rnn_reward.append(np.mean(note_rnn_rewards))
  self.eval_avg_music_theory_reward.append(np.mean(music_theory_rewards))
def collect_reward(self, obs, action, reward_scores):
  """Calls whatever reward function is indicated in the reward_mode field.

  New reward functions can be written and called from here. Note that the
  reward functions can make use of the musical composition that has been
  played so far, which is stored in self.composition. Some reward functions
  are made up of many smaller functions, such as those related to music
  theory.

  Args:
    obs: A one-hot encoding of the observed note.
    action: A one-hot encoding of the chosen action.
    reward_scores: The value for each note output by the reward_rnn.
  Returns:
    Float reward value.
  """
  # Gets and saves log p(a|s) as output by reward_rnn.
  note_rnn_reward = self.reward_from_reward_rnn_scores(action, reward_scores)
  self.note_rnn_reward_last_n += note_rnn_reward

  if self.reward_mode == 'scale':
    # Makes the model play a scale (defaults to c major).
    reward = self.reward_scale(obs, action)
  elif self.reward_mode == 'key':
    # Makes the model play within a key.
    reward = self.reward_key_distribute_prob(action)
  elif self.reward_mode == 'key_and_tonic':
    # Makes the model play within a key, while starting and ending on the
    # tonic note.
    reward = self.reward_key(action)
    reward += self.reward_tonic(action)
  elif self.reward_mode == 'non_repeating':
    # The model can play any composition it wants, but receives a large
    # negative reward for playing the same note repeatedly.
    reward = self.reward_non_repeating(action)
  elif self.reward_mode == 'music_theory_random':
    # The model receives reward for playing in key, playing tonic notes,
    # and not playing repeated notes. However the rewards it receives are
    # uniformly distributed over all notes that do not violate these rules.
    reward = self.reward_key(action)
    reward += self.reward_tonic(action)
    reward += self.reward_penalize_repeating(action)
  elif self.reward_mode == 'music_theory_basic':
    # As above, the model receives reward for playing in key, tonic notes
    # at the appropriate times, and not playing repeated notes. However, the
    # rewards it receives are based on the note probabilities learned from
    # data in the original model.
    reward = self.reward_key(action)
    reward += self.reward_tonic(action)
    reward += self.reward_penalize_repeating(action)

    # NOTE(review): this branch returns early, so unlike 'music_theory_all'
    # it never updates self.music_theory_reward_last_n -- verify intent.
    return reward * self.reward_scaler + note_rnn_reward
  elif self.reward_mode == 'music_theory_basic_plus_variety':
    # Uses the same reward function as above, but adds a penalty for
    # compositions with a high autocorrelation (aka those that don't have
    # sufficient variety).
    reward = self.reward_key(action)
    reward += self.reward_tonic(action)
    reward += self.reward_penalize_repeating(action)
    reward += self.reward_penalize_autocorrelation(action)

    # NOTE(review): early return skips the music_theory_reward_last_n
    # bookkeeping below, same as 'music_theory_basic' -- verify intent.
    return reward * self.reward_scaler + note_rnn_reward
  elif self.reward_mode == 'preferred_intervals':
    reward = self.reward_preferred_intervals(action)
  elif self.reward_mode == 'music_theory_all':
    tf.logging.debug('Note RNN reward: %s', note_rnn_reward)

    reward = self.reward_music_theory(action)

    tf.logging.debug('Total music theory reward: %s',
                     self.reward_scaler * reward)
    tf.logging.debug('Total note rnn reward: %s', note_rnn_reward)

    self.music_theory_reward_last_n += reward * self.reward_scaler
    # This branch combines both reward sources; others below return only the
    # scaled mode-specific reward.
    return reward * self.reward_scaler + note_rnn_reward
  elif self.reward_mode == 'music_theory_only':
    reward = self.reward_music_theory(action)
  else:
    # NOTE(review): if reward_mode is unrecognized, `reward` is never bound
    # and the statements below raise NameError (tf.logging.fatal presumably
    # does not stop execution here -- confirm).
    tf.logging.fatal('ERROR! Not a valid reward mode. Cannot compute reward')

  self.music_theory_reward_last_n += reward * self.reward_scaler
  return reward * self.reward_scaler
def reward_from_reward_rnn_scores(self, action, reward_scores):
  """Rewards based on probabilities learned from data by trained RNN.

  Computes the reward_network's learned softmax probabilities. When used as
  rewards, allows the model to maintain information it learned from data.

  Args:
    action: A one-hot encoding of the chosen action.
    reward_scores: The value for each note output by the reward_rnn.
  Returns:
    Float reward value.
  """
  chosen_note = np.argmax(action)
  # Log-softmax: the chosen note's score minus the log-sum-exp normalizer.
  return reward_scores[chosen_note] - scipy.special.logsumexp(reward_scores)
def get_reward_rnn_scores(self, observation, state):
  """Get note scores from the reward_rnn to use as a reward based on data.

  Runs the reward_rnn on an observation and initial state. Useful for
  maintaining the probabilities of the original LSTM model while training with
  reinforcement learning.

  Args:
    observation: One-hot encoding of the observed note.
    state: Vector representing the internal state of the target_q_network
      LSTM.
  Returns:
    Action scores produced by reward_rnn.
  """
  state = np.atleast_2d(state)

  # Feed a single-step (time == 1) sequence through the reward RNN.
  input_batch = np.reshape(observation, (self.reward_rnn.batch_size, 1,
                                         self.num_actions))
  lengths = np.full(self.reward_rnn.batch_size, 1, dtype=int)

  # The trailing comma unpacks the first (and presumably only) batch row of
  # the fetched scores -- assumes batch_size is 1 here, TODO confirm.
  rewards, = self.session.run(
      self.reward_scores,
      {self.reward_rnn.melody_sequence: input_batch,
       self.reward_rnn.initial_state: state,
       self.reward_rnn.lengths: lengths})
  return rewards
def reward_music_theory(self, action):
  """Computes cumulative reward for all music theory functions.

  Args:
    action: A one-hot encoding of the chosen action.
  Returns:
    Float reward value.
  """
  reward = self.reward_key(action)
  tf.logging.debug('Key: %s', reward)

  # Each remaining component is added in turn; a debug line is emitted only
  # when the component actually changed the running total. The final three
  # components are based on Gauldin's book, "A Practical Approach to
  # Eighteenth Century Counterpoint".
  components = (
      (self.reward_tonic, 'Tonic: %s'),
      (self.reward_penalize_repeating, 'Penalize repeating: %s'),
      (self.reward_penalize_autocorrelation, 'Penalize autocorr: %s'),
      (self.reward_motif, 'Reward motif: %s'),
      (self.reward_repeated_motif, 'Reward repeated motif: %s'),
      (self.reward_preferred_intervals, 'Reward preferred_intervals: %s'),
      (self.reward_leap_up_back, 'Reward leap up back: %s'),
      (self.reward_high_low_unique, 'Reward high low unique: %s'),
  )
  for component_fn, debug_msg in components:
    previous_total = reward
    reward += component_fn(action)
    if reward != previous_total:
      tf.logging.debug(debug_msg, reward)

  return reward
def random_reward_shift_to_mean(self, reward):
  """Modifies reward by a small random value s to pull it towards the mean.

  If reward is above the assumed mean of 0.5, s is subtracted; if reward is
  below it, s is added. The random value s is either 0 or 0.1 (the original
  docstring claimed a 0-0.2 range, which does not match the code). This
  function is helpful to ensure that the model does not become too certain
  about playing a particular note.

  Args:
    reward: A reward value that has already been computed by another reward
      function.
  Returns:
    Original float reward value modified by scaler.
  """
  # randint(0, 2) yields 0 or 1, so s is exactly 0.0 or 0.1.
  s = np.random.randint(0, 2) * .1
  if reward > .5:
    reward -= s
  else:
    reward += s
  return reward
def reward_scale(self, obs, action, scale=None):
  """Reward function that trains the model to play a scale.

  Gives rewards for increasing notes, notes within the desired scale, and two
  consecutive notes from the scale.

  Args:
    obs: A one-hot encoding of the observed note.
    action: A one-hot encoding of the chosen action.
    scale: The scale the model should learn. Defaults to C Major if not
      provided.
  Returns:
    Float reward value.
  """
  if scale is None:
    scale = rl_tuner_ops.C_MAJOR_SCALE

  obs_note = np.argmax(obs)
  action_note = np.argmax(action)
  reward = 0

  if action_note == 1:
    reward += .1
  # Small bonus for moving up by one or two pitches.
  if obs_note < action_note < obs_note + 3:
    reward += .05

  if action_note in scale:
    reward += .01
    if obs_note in scale:
      obs_pos = scale.index(obs_note)
      action_pos = scale.index(action_note)
      # Big bonus for stepping to the next scale degree (wrapping from the
      # top of the scale back to the bottom counts too).
      wrapped = obs_pos == len(scale) - 1 and action_pos == 0
      if wrapped or action_pos == obs_pos + 1:
        reward += .8

  return reward
def reward_key_distribute_prob(self, action, key=None):
  """Reward function that rewards the model for playing within a given key.

  Any note within the key is given equal reward, which can cause the model to
  learn random sounding compositions.

  Args:
    action: One-hot encoding of the chosen action.
    key: The numeric values of notes belonging to this key. Defaults to C
      Major if not provided.
  Returns:
    Float reward value.
  """
  if key is None:
    key = rl_tuner_ops.C_MAJOR_KEY

  # Uniform reward of 1/|key| for any in-key note, zero otherwise.
  if np.argmax(action) in key:
    return 1.0 / len(key)
  return 0
def reward_key(self, action, penalty_amount=-1.0, key=None):
  """Applies a penalty for playing notes not in a specific key.

  Args:
    action: One-hot encoding of the chosen action.
    penalty_amount: The amount the model will be penalized if it plays
      a note outside the key.
    key: The numeric values of notes belonging to this key. Defaults to
      C-major if not provided.
  Returns:
    Float reward value.
  """
  if key is None:
    key = rl_tuner_ops.C_MAJOR_KEY

  # Out-of-key notes earn the penalty; in-key notes are neutral.
  return penalty_amount if np.argmax(action) not in key else 0
def reward_tonic(self, action, tonic_note=rl_tuner_ops.C_MAJOR_TONIC,
reward_amount=3.0):
"""Rewards for playing the tonic note at the right times.
Rewards for playing the tonic as the first note of the first bar, and the
first note of the final bar.
Args:
action: One-hot encoding of the chosen action.
tonic_note: The tonic/1st note of the desired key.
reward_amount: The amount the model will be awarded if it plays the
tonic note at the right time.
Returns:
Float reward value.
"""
action_note = np.argmax(action)
first_note_of_final_bar = self.num_notes_in_melody - 4
if self.beat == 0 or self.beat == first_note_of_final_bar:
if action_note == tonic_note:
return reward_amount
elif self.beat == first_note_of_final_bar + 1:
if action_note == NO_EVENT:
return reward_amount
elif self.beat > first_note_of_final_bar + 1:
if action_note in (NO_EVENT, NOTE_OFF):
return reward_amount
return 0.0
def reward_non_repeating(self, action):
  """Rewards the model for not playing the same note over and over.

  Penalizes the model for playing the same note repeatedly, although more
  repetitions are allowed if it occasionally holds the note or rests in
  between. Reward is uniform when there is no penalty.

  Args:
    action: One-hot encoding of the chosen action.
  Returns:
    Float reward value.
  """
  penalty = self.reward_penalize_repeating(action)
  if penalty >= 0:
    return .1
  # Bug fix: the original implementation fell off the end here and
  # implicitly returned None when a penalty applied, which breaks callers
  # that do arithmetic on the result. Pass the penalty through instead.
  return penalty
def detect_repeating_notes(self, action_note):
  """Detects whether the note played is repeating previous notes excessively.

  Args:
    action_note: An integer representing the note just played.
  Returns:
    True if the note just played is excessively repeated, False otherwise.
  """
  num_repeated = 0
  contains_held_notes = False
  contains_breaks = False

  # Note that the current action has not yet been added to the composition.
  # Walk backwards, counting trailing occurrences of action_note while
  # allowing held notes (NO_EVENT) and rests (NOTE_OFF) in between.
  for i in range(len(self.composition)-1, -1, -1):
    if self.composition[i] == action_note:
      num_repeated += 1
    elif self.composition[i] == NOTE_OFF:
      contains_breaks = True
    elif self.composition[i] == NO_EVENT:
      contains_held_notes = True
    else:
      break

  # Repeating a rest more than once is always excessive.
  if action_note == NOTE_OFF and num_repeated > 1:
    return True
  # More repetitions are tolerated when rests or held notes intervene.
  # (The original code also had a `num_repeated > 8` else-branch, but the
  # two conditions below are exhaustive so it was unreachable; removed.)
  if contains_held_notes or contains_breaks:
    return num_repeated > 6
  return num_repeated > 4
def reward_penalize_repeating(self,
                              action,
                              penalty_amount=-100.0):
  """Sets the previous reward to 0 if the same is played repeatedly.

  Allows more repeated notes if there are held notes or rests in between. If
  no penalty is applied will return the previous reward.

  Args:
    action: One-hot encoding of the chosen action.
    penalty_amount: The amount the model will be penalized if it plays
      repeating notes.
  Returns:
    Previous reward or 'penalty_amount'.
  """
  chosen_note = np.argmax(action)
  # Delegate the repetition check; only the penalty policy lives here.
  return penalty_amount if self.detect_repeating_notes(chosen_note) else 0.0
def reward_penalize_autocorrelation(self,
                                    action,
                                    penalty_weight=3.0):
  """Reduces the previous reward if the composition is highly autocorrelated.

  Penalizes the model for creating a composition that is highly correlated
  with itself at lags of 1, 2, and 3 beats previous. This is meant to
  encourage variety in compositions.

  Args:
    action: One-hot encoding of the chosen action.
    penalty_weight: The default weight which will be multiplied by the sum
      of the autocorrelation coefficients, and subtracted from prev_reward.
  Returns:
    Float reward value.
  """
  composition = self.composition + [np.argmax(action)]
  total_penalty = 0
  # Only coefficients with magnitude above 0.15 contribute to the penalty;
  # NaN coefficients (e.g. constant compositions) are ignored.
  for lag in (1, 2, 3):
    coeff = rl_tuner_ops.autocorrelate(composition, lag=lag)
    if not np.isnan(coeff) and np.abs(coeff) > 0.15:
      total_penalty += np.abs(coeff) * penalty_weight
  return -total_penalty
def detect_last_motif(self, composition=None, bar_length=8):
  """Detects if a motif was just played and if so, returns it.

  A motif should contain at least three distinct notes that are not note_on
  or note_off, and occur within the course of one bar.

  Args:
    composition: The composition in which the function will look for a
      recent motif. Defaults to the model's composition.
    bar_length: The number of notes in one bar.
  Returns:
    None if there is no motif, otherwise the motif in the same format as the
    composition.
  """
  if composition is None:
    composition = self.composition

  if len(composition) < bar_length:
    return None, 0

  last_bar = composition[-bar_length:]
  # Count distinct real pitches in the bar (ignoring holds and rests).
  distinct_pitches = {n for n in last_bar if n not in (NO_EVENT, NOTE_OFF)}
  num_unique_notes = len(distinct_pitches)

  if num_unique_notes >= 3:
    return last_bar, num_unique_notes
  return None, num_unique_notes
def reward_motif(self, action, reward_amount=3.0):
  """Rewards the model for playing any motif.

  Motif must have at least three distinct notes in the course of one bar.
  There is a bonus for playing more complex motifs; that is, ones that involve
  a greater number of notes.

  Args:
    action: One-hot encoding of the chosen action.
    reward_amount: The amount that will be returned if the last note belongs
      to a motif.
  Returns:
    Float reward value.
  """
  candidate = self.composition + [np.argmax(action)]
  motif, num_notes_in_motif = self.detect_last_motif(composition=candidate)
  if motif is None:
    return 0.0
  # 0.3 extra per distinct note beyond the three-note minimum.
  complexity_bonus = max((num_notes_in_motif - 3)*.3, 0)
  return reward_amount + complexity_bonus
def detect_repeated_motif(self, action, bar_length=8):
  """Detects whether the last motif played repeats an earlier motif played.

  Args:
    action: One-hot encoding of the chosen action.
    bar_length: The number of beats in one bar. This determines how many beats
      the model has in which to play the motif.
  Returns:
    True if the note just played belongs to a motif that is repeated. False
    otherwise.
  """
  candidate = self.composition + [np.argmax(action)]
  if len(candidate) < bar_length:
    return False, None

  motif, _ = self.detect_last_motif(
      composition=candidate, bar_length=bar_length)
  if motif is None:
    return False, None

  # Everything before the current bar is eligible to contain the repeat.
  prev_composition = self.composition[:-(bar_length-1)]
  motif_list = list(motif)
  span = len(motif_list)

  # Slide a window over the earlier composition looking for an exact match.
  for start in range(len(prev_composition) - span + 1):
    if list(prev_composition[start:start + span]) == motif_list:
      return True, motif
  return False, None
def reward_repeated_motif(self,
                          action,
                          bar_length=8,
                          reward_amount=4.0):
  """Adds a big bonus to previous reward if the model plays a repeated motif.

  Checks if the model has just played a motif that repeats an earlier motif in
  the composition.

  There is also a bonus for repeating more complex motifs.

  Args:
    action: One-hot encoding of the chosen action.
    bar_length: The number of notes in one bar.
    reward_amount: The amount that will be added to the reward if the last
      note belongs to a repeated motif.
  Returns:
    Float reward value.
  """
  is_repeated, motif = self.detect_repeated_motif(action, bar_length)
  if not is_repeated:
    return 0.0
  # One extra point per distinct real pitch beyond the three-note minimum.
  pitched = [note for note in motif if note not in (NO_EVENT, NOTE_OFF)]
  complexity_bonus = max(len(set(pitched)) - 3, 0)
  return reward_amount + complexity_bonus
def detect_sequential_interval(self, action, key=None):
  """Finds the melodic interval between the action and the last note played.

  Uses constants to represent special intervals like rests.

  Args:
    action: One-hot encoding of the chosen action
    key: The numeric values of notes belonging to this key. Defaults to
      C-major if not provided.
  Returns:
    An integer value representing the interval, or a constant value for
    special intervals.
  """
  if not self.composition:
    return 0, None, None

  prev_note = self.composition[-1]
  action_note = np.argmax(action)

  c_major = False
  # Bug fix: tonic_notes / fifth_notes were previously bound only inside the
  # `key is None` branch, so passing an explicit key raised NameError as soon
  # as the action was a hold or rest. With an explicit key there are no
  # special tonic/fifth pitches.
  tonic_notes = []
  fifth_notes = []
  if key is None:
    key = rl_tuner_ops.C_MAJOR_KEY
    c_notes = [2, 14, 26]
    g_notes = [9, 21, 33]
    e_notes = [6, 18, 30]
    c_major = True
    tonic_notes = [2, 14, 26]
    fifth_notes = [9, 21, 33]

  # get rid of non-notes in prev_note: walk backwards to the most recent
  # real pitch in the composition.
  prev_note_index = len(self.composition) - 1
  while prev_note in (NO_EVENT, NOTE_OFF) and prev_note_index >= 0:
    prev_note = self.composition[prev_note_index]
    prev_note_index -= 1
  if prev_note in (NOTE_OFF, NO_EVENT):
    # No real pitch has been played yet, so there is no interval to measure.
    tf.logging.debug('Action_note: %s, prev_note: %s', action_note, prev_note)
    return 0, action_note, prev_note

  tf.logging.debug('Action_note: %s, prev_note: %s', action_note, prev_note)

  # get rid of non-notes in action_note: holds and rests map to special
  # interval constants, with variants for following a tonic or fifth.
  if action_note == NO_EVENT:
    if prev_note in tonic_notes or prev_note in fifth_notes:
      return (rl_tuner_ops.HOLD_INTERVAL_AFTER_THIRD_OR_FIFTH,
              action_note, prev_note)
    else:
      return rl_tuner_ops.HOLD_INTERVAL, action_note, prev_note
  elif action_note == NOTE_OFF:
    if prev_note in tonic_notes or prev_note in fifth_notes:
      return (rl_tuner_ops.REST_INTERVAL_AFTER_THIRD_OR_FIFTH,
              action_note, prev_note)
    else:
      return rl_tuner_ops.REST_INTERVAL, action_note, prev_note

  interval = abs(action_note - prev_note)

  # In C major, fifths/thirds rooted on selected scale pitches get their own
  # in-key constants.
  if c_major and interval == rl_tuner_ops.FIFTH and (
      prev_note in c_notes or prev_note in g_notes):
    return rl_tuner_ops.IN_KEY_FIFTH, action_note, prev_note
  if c_major and interval == rl_tuner_ops.THIRD and (
      prev_note in c_notes or prev_note in e_notes):
    return rl_tuner_ops.IN_KEY_THIRD, action_note, prev_note

  return interval, action_note, prev_note
def reward_preferred_intervals(self, action, scaler=5.0, key=None):
  """Dispenses reward based on the melodic interval just played.

  Args:
    action: One-hot encoding of the chosen action.
    scaler: This value will be multiplied by all rewards in this function.
    key: The numeric values of notes belonging to this key. Defaults to
      C-major if not provided.
  Returns:
    Float reward value.
  """
  interval, _, _ = self.detect_sequential_interval(action, key)
  # Bug fix: the original passed `interval` without a %s placeholder, which
  # makes the logging call fail to format its argument.
  tf.logging.debug('Interval: %s', interval)

  if interval == 0:  # either no interval or involving uninteresting rests
    tf.logging.debug('No interval or uninteresting.')
    return 0.0

  reward = 0.0

  # Later checks overwrite earlier ones, so the last matching rule wins.
  # rests can be good
  if interval == rl_tuner_ops.REST_INTERVAL:
    reward = 0.05
    tf.logging.debug('Rest interval.')
  if interval == rl_tuner_ops.HOLD_INTERVAL:
    reward = 0.075
  if interval == rl_tuner_ops.REST_INTERVAL_AFTER_THIRD_OR_FIFTH:
    reward = 0.15
    tf.logging.debug('Rest interval after 1st or 5th.')
  if interval == rl_tuner_ops.HOLD_INTERVAL_AFTER_THIRD_OR_FIFTH:
    reward = 0.3

  # large leaps and awkward intervals bad
  if interval == rl_tuner_ops.SEVENTH:
    reward = -0.3
    tf.logging.debug('7th')
  if interval > rl_tuner_ops.OCTAVE:
    reward = -1.0
    tf.logging.debug('More than octave.')

  # common major intervals are good
  if interval == rl_tuner_ops.IN_KEY_FIFTH:
    reward = 0.1
    tf.logging.debug('In key 5th')
  if interval == rl_tuner_ops.IN_KEY_THIRD:
    reward = 0.15
    tf.logging.debug('In key 3rd')

  # smaller steps are generally preferred
  if interval == rl_tuner_ops.THIRD:
    reward = 0.09
    tf.logging.debug('3rd')
  if interval == rl_tuner_ops.SECOND:
    reward = 0.08
    tf.logging.debug('2nd')
  if interval == rl_tuner_ops.FOURTH:
    reward = 0.07
    tf.logging.debug('4th')

  # larger leaps not as good, especially if not in key
  if interval == rl_tuner_ops.SIXTH:
    reward = 0.05
    tf.logging.debug('6th')
  if interval == rl_tuner_ops.FIFTH:
    reward = 0.02
    tf.logging.debug('5th')

  # Bug fix: same missing-placeholder problem as above.
  tf.logging.debug('Interval reward: %s', reward * scaler)
  return reward * scaler
def detect_high_unique(self, composition):
  """Checks a composition to see if the highest note within it is repeated.

  (The original docstring incorrectly said "lowest note"; the code checks
  the maximum.)

  Args:
    composition: A list of integers representing the notes in the piece.
  Returns:
    True if the highest note occurs exactly once, False otherwise.
  """
  max_note = max(composition)
  return list(composition).count(max_note) == 1
def detect_low_unique(self, composition):
  """Checks a composition to see if the lowest note within it is repeated.

  Args:
    composition: A list of integers representing the notes in the piece.
  Returns:
    True if the lowest note was unique, False otherwise.
  """
  # Holds and rests do not count as pitches when finding the minimum.
  pitched = [note for note in composition
             if note not in (NO_EVENT, NOTE_OFF)]
  if not pitched:
    return False
  return list(composition).count(min(pitched)) == 1
def reward_high_low_unique(self, action, reward_amount=3.0):
  """Evaluates if highest and lowest notes in composition occurred once.

  Args:
    action: One-hot encoding of the chosen action.
    reward_amount: Amount of reward that will be given for the highest note
      being unique, and again for the lowest note being unique.
  Returns:
    Float reward value.
  """
  # Only dispensed when the action completes the melody.
  if len(self.composition) + 1 != self.num_notes_in_melody:
    return 0.0

  full_composition = np.append(np.array(self.composition), np.argmax(action))

  reward = 0.0
  if self.detect_high_unique(full_composition):
    reward += reward_amount
  if self.detect_low_unique(full_composition):
    reward += reward_amount
  return reward
def detect_leap_up_back(self, action, steps_between_leaps=6):
  """Detects when the composition takes a musical leap, and if it is resolved.

  When the composition jumps up or down by an interval of a fifth or more,
  it is a 'leap'. The model then remembers that is has a 'leap direction'. The
  function detects if it then takes another leap in the same direction, if it
  leaps back, or if it gradually resolves the leap.

  Args:
    action: One-hot encoding of the chosen action.
    steps_between_leaps: Leaping back immediately does not constitute a
      satisfactory resolution of a leap. Therefore the composition must wait
      'steps_between_leaps' beats before leaping back.
  Returns:
    0 if there is no leap, 'LEAP_RESOLVED' if an existing leap has been
    resolved, 'LEAP_DOUBLED' if 2 leaps in the same direction were made.
  """
  if not self.composition:
    return 0

  outcome = 0

  interval, action_note, prev_note = self.detect_sequential_interval(action)

  # Holds and rests neither start nor resolve a leap; they just advance the
  # step counter used for the resolution timing rule.
  if action_note in (NOTE_OFF, NO_EVENT):
    self.steps_since_last_leap += 1
    tf.logging.debug('Rest, adding to steps since last leap. It is'
                     'now: %s', self.steps_since_last_leap)
    return 0

  # detect if leap
  if interval >= rl_tuner_ops.FIFTH or interval == rl_tuner_ops.IN_KEY_FIFTH:
    if action_note > prev_note:
      leap_direction = rl_tuner_ops.ASCENDING
      tf.logging.debug('Detected an ascending leap')
    else:
      leap_direction = rl_tuner_ops.DESCENDING
      tf.logging.debug('Detected a descending leap')

    # there was already an unresolved leap
    if self.composition_direction != 0:
      if self.composition_direction != leap_direction:
        # Opposite-direction leap: resolved, but the bonus constant is only
        # returned if enough steps elapsed since the original leap.
        tf.logging.debug('Detected a resolved leap')
        tf.logging.debug('Num steps since last leap: %s',
                         self.steps_since_last_leap)
        if self.steps_since_last_leap > steps_between_leaps:
          outcome = rl_tuner_ops.LEAP_RESOLVED
          tf.logging.debug('Sufficient steps before leap resolved, '
                           'awarding bonus')
        self.composition_direction = 0
        self.leapt_from = None
      else:
        # Same-direction leap on top of an unresolved one.
        tf.logging.debug('Detected a double leap')
        outcome = rl_tuner_ops.LEAP_DOUBLED

    # the composition had no previous leaps
    else:
      tf.logging.debug('There was no previous leap direction')
      self.composition_direction = leap_direction
      self.leapt_from = prev_note

    self.steps_since_last_leap = 0

  # there is no leap
  else:
    self.steps_since_last_leap += 1
    tf.logging.debug('No leap, adding to steps since last leap. '
                     'It is now: %s', self.steps_since_last_leap)

    # If there was a leap before, check if composition has gradually returned
    # This could be changed by requiring you to only go a 5th back in the
    # opposite direction of the leap.
    if (self.composition_direction == rl_tuner_ops.ASCENDING and
        action_note <= self.leapt_from) or (
            self.composition_direction == rl_tuner_ops.DESCENDING and
            action_note >= self.leapt_from):
      tf.logging.debug('detected a gradually resolved leap')
      outcome = rl_tuner_ops.LEAP_RESOLVED
      self.composition_direction = 0
      self.leapt_from = None

  return outcome
def reward_leap_up_back(self, action, resolving_leap_bonus=5.0,
                        leaping_twice_punishment=-5.0):
  """Applies punishment and reward based on the principle leap up leap back.

  Large interval jumps (more than a fifth) should be followed by moving back
  in the same direction.

  Args:
    action: One-hot encoding of the chosen action.
    resolving_leap_bonus: Amount of reward dispensed for resolving a previous
      leap.
    leaping_twice_punishment: Amount of reward received for leaping twice in
      the same direction.
  Returns:
    Float reward value.
  """
  outcome = self.detect_leap_up_back(action)
  if outcome == rl_tuner_ops.LEAP_RESOLVED:
    tf.logging.debug('Leap resolved, awarding %s', resolving_leap_bonus)
    return resolving_leap_bonus
  if outcome == rl_tuner_ops.LEAP_DOUBLED:
    tf.logging.debug('Leap doubled, awarding %s', leaping_twice_punishment)
    return leaping_twice_punishment
  return 0.0
def reward_interval_diversity(self):
  """Placeholder for a reward encouraging a mix of interval sizes."""
  # TODO(natashajaques): music theory book also suggests having a mix of steps
  # that are both incremental and larger. Want to write a function that
  # rewards this. Could have some kind of interval_stats stored by
  # reward_preferred_intervals function.
  pass
def generate_music_sequence(self, title='rltuner_sample',
                            visualize_probs=False, prob_image_name=None,
                            length=None, most_probable=False):
  """Generates a music sequence with the current model, and saves it to MIDI.

  The resulting MIDI file is saved to the model's output_dir directory. The
  sequence is generated by sampling from the output probabilities at each
  timestep, and feeding the resulting note back in as input to the model.

  Args:
    title: The name that will be used to save the output MIDI file.
    visualize_probs: If True, the function will plot the softmax
      probabilities of the model for each note that occur throughout the
      sequence. Useful for debugging.
    prob_image_name: The name of a file in which to save the softmax
      probability image. If None, the image will simply be displayed.
    length: The length of the sequence to be generated. Defaults to the
      num_notes_in_melody parameter of the model.
    most_probable: If True, instead of sampling each note in the sequence,
      the model will always choose the argmax, most probable note.
  """
  if length is None:
    length = self.num_notes_in_melody

  self.reset_composition()
  next_obs = self.prime_internal_models()
  tf.logging.info('Priming with note %s', np.argmax(next_obs))

  # Each step feeds a single timestep per batch element.
  lengths = np.full(self.q_network.batch_size, 1, dtype=int)

  if visualize_probs:
    # One column of softmax probabilities per generated timestep.
    prob_image = np.zeros((self.input_size, length))

  generated_seq = [0] * length
  for i in range(length):
    input_batch = np.reshape(next_obs, (self.q_network.batch_size, 1,
                                        self.num_actions))
    # The 'g' algorithm also carries recurrent state for the reward RNN,
    # so both state tensors must be fed in and read back each step.
    if self.algorithm == 'g':
      (softmax, self.q_network.state_value,
       self.reward_rnn.state_value) = self.session.run(
           [self.action_softmax, self.q_network.state_tensor,
            self.reward_rnn.state_tensor],
           {self.q_network.melody_sequence: input_batch,
            self.q_network.initial_state: self.q_network.state_value,
            self.q_network.lengths: lengths,
            self.reward_rnn.melody_sequence: input_batch,
            self.reward_rnn.initial_state: self.reward_rnn.state_value,
            self.reward_rnn.lengths: lengths})
    else:
      softmax, self.q_network.state_value = self.session.run(
          [self.action_softmax, self.q_network.state_tensor],
          {self.q_network.melody_sequence: input_batch,
           self.q_network.initial_state: self.q_network.state_value,
           self.q_network.lengths: lengths})
    softmax = np.reshape(softmax, (self.num_actions))

    if visualize_probs:
      prob_image[:, i] = softmax  # np.log(1.0 + softmax)

    if most_probable:
      sample = np.argmax(softmax)
    else:
      sample = rl_tuner_ops.sample_softmax(softmax)
    generated_seq[i] = sample
    # Feed the chosen note back in as a one-hot observation for the next
    # timestep.
    next_obs = np.array(rl_tuner_ops.make_onehot([sample],
                                                 self.num_actions)).flatten()

  tf.logging.info('Generated sequence: %s', generated_seq)
  # TODO(natashamjaques): Remove print statement once tf.logging outputs
  # to Jupyter notebooks (once the following issue is resolved:
  # https://github.com/tensorflow/tensorflow/issues/3047)
  print('Generated sequence:', generated_seq)

  # Decode model action indices back into melody-library note values.
  melody = mlib.Melody(rl_tuner_ops.decoder(generated_seq,
                                            self.q_network.transpose_amount))

  sequence = melody.to_sequence(qpm=rl_tuner_ops.DEFAULT_QPM)
  filename = rl_tuner_ops.get_next_file_name(self.output_dir, title, 'mid')
  midi_io.sequence_proto_to_midi_file(sequence, filename)

  tf.logging.info('Wrote a melody to %s', self.output_dir)

  if visualize_probs:
    tf.logging.info('Visualizing note selection probabilities:')
    plt.figure()
    plt.imshow(prob_image, interpolation='none', cmap='Reds')
    plt.ylabel('Note probability')
    plt.xlabel('Time (beat)')
    plt.gca().invert_yaxis()
    if prob_image_name is not None:
      plt.savefig(self.output_dir + '/' + prob_image_name)
    else:
      plt.show()
def evaluate_music_theory_metrics(self, num_compositions=10000, key=None,
                                  tonic_note=rl_tuner_ops.C_MAJOR_TONIC):
  """Computes statistics about music theory rule adherence.

  Args:
    num_compositions: How many compositions should be randomly generated
      for computing the statistics.
    key: The numeric values of notes belonging to this key. Defaults to C
      Major if not provided.
    tonic_note: The tonic/1st note of the desired key.

  Returns:
    A dictionary containing the statistics.
  """
  return rl_tuner_eval_metrics.compute_composition_stats(
      self,
      num_compositions=num_compositions,
      composition_length=self.num_notes_in_melody,
      key=key,
      tonic_note=tonic_note)
def save_model(self, name, directory=None):
  """Saves a checkpoint of the model and a .npz file with stored rewards.

  Args:
    name: String name to use for the checkpoint and rewards files.
    directory: Path to directory where the data will be saved. Defaults to
      self.output_dir if None is provided.
  """
  target_dir = directory if directory is not None else self.output_dir
  # The global step encodes how many training epochs have elapsed.
  step = len(self.rewards_batched) * self.output_every_nth
  self.saver.save(self.session, os.path.join(target_dir, name),
                  global_step=step)
  self.save_stored_rewards(name)
def save_stored_rewards(self, file_name):
  """Saves the model's stored rewards over time in a .npz file.

  Args:
    file_name: Name of the file that will be saved.
  """
  # Suffix the file with the number of training epochs completed so far.
  epochs_trained = len(self.rewards_batched) * self.output_every_nth
  out_path = os.path.join(self.output_dir,
                          file_name + '-' + str(epochs_trained))
  np.savez(out_path,
           train_rewards=self.rewards_batched,
           train_music_theory_rewards=self.music_theory_rewards_batched,
           train_note_rnn_rewards=self.note_rnn_rewards_batched,
           eval_rewards=self.eval_avg_reward,
           eval_music_theory_rewards=self.eval_avg_music_theory_reward,
           eval_note_rnn_rewards=self.eval_avg_note_rnn_reward,
           target_val_list=self.target_val_list)
def save_model_and_figs(self, name, directory=None):
  """Saves the model checkpoint, .npz file, and reward plots.

  Args:
    name: Name of the model that will be used on the images,
      checkpoint, and .npz files.
    directory: Path to directory where files will be saved.
      If None defaults to self.output_dir.
  """
  self.save_model(name, directory=directory)
  # Emit all three reward figures with a consistent naming scheme.
  for prefix, plot_fn in (('TrainRewards-', self.plot_rewards),
                          ('EvaluationRewards-', self.plot_evaluation),
                          ('TargetVals-', self.plot_target_vals)):
    plot_fn(image_name=prefix + name + '.eps', directory=directory)
def plot_rewards(self, image_name=None, directory=None):
  """Plots the cumulative rewards received as the model was trained.

  If image_name is None, should be used in jupyter notebook. If
  called outside of jupyter, execution of the program will halt and
  a pop-up with the graph will appear. Execution will not continue
  until the pop-up is closed.

  Args:
    image_name: Name to use when saving the plot to a file. If not
      provided, image will be shown immediately.
    directory: Path to directory where figure should be saved. If
      None, defaults to self.output_dir.
  """
  out_dir = self.output_dir if directory is None else directory
  batch = self.output_every_nth
  # One x tick per recorded batch of training epochs.
  xs = [batch * i for i in np.arange(len(self.rewards_batched))]

  plt.figure()
  for series in (self.rewards_batched,
                 self.music_theory_rewards_batched,
                 self.note_rnn_rewards_batched):
    plt.plot(xs, series)
  plt.xlabel('Training epoch')
  plt.ylabel('Cumulative reward for last ' + str(batch) + ' steps')
  plt.legend(['Total', 'Music theory', 'Note RNN'], loc='best')

  if image_name is None:
    plt.show()
  else:
    plt.savefig(out_dir + '/' + image_name)
def plot_evaluation(self, image_name=None, directory=None, start_at_epoch=0):
  """Plots the rewards received as the model was evaluated during training.

  If image_name is None, should be used in jupyter notebook. If
  called outside of jupyter, execution of the program will halt and
  a pop-up with the graph will appear. Execution will not continue
  until the pop-up is closed.

  Args:
    image_name: Name to use when saving the plot to a file. If not
      provided, image will be shown immediately.
    directory: Path to directory where figure should be saved. If
      None, defaults to self.output_dir.
    start_at_epoch: Training epoch where the plot should begin.
  """
  if directory is None:
    directory = self.output_dir
  reward_batch = self.output_every_nth
  x = [reward_batch * i for i in np.arange(len(self.eval_avg_reward))]
  # Bug fix: use floor division. Under Python 3, '/' yields a float and a
  # float slice index raises TypeError.
  start_index = start_at_epoch // self.output_every_nth
  plt.figure()
  plt.plot(x[start_index:], self.eval_avg_reward[start_index:])
  plt.plot(x[start_index:], self.eval_avg_music_theory_reward[start_index:])
  plt.plot(x[start_index:], self.eval_avg_note_rnn_reward[start_index:])
  plt.xlabel('Training epoch')
  plt.ylabel('Average reward')
  plt.legend(['Total', 'Music theory', 'Note RNN'], loc='best')
  if image_name is not None:
    plt.savefig(directory + '/' + image_name)
  else:
    plt.show()
def plot_target_vals(self, image_name=None, directory=None):
  """Plots the target values used to train the model over time.

  If image_name is None, should be used in jupyter notebook. If
  called outside of jupyter, execution of the program will halt and
  a pop-up with the graph will appear. Execution will not continue
  until the pop-up is closed.

  Args:
    image_name: Name to use when saving the plot to a file. If not
      provided, image will be shown immediately.
    directory: Path to directory where figure should be saved. If
      None, defaults to self.output_dir.
  """
  out_dir = self.output_dir if directory is None else directory
  step = self.output_every_nth
  xs = [step * i for i in np.arange(len(self.target_val_list))]

  plt.figure()
  plt.plot(xs, self.target_val_list)
  plt.xlabel('Training epoch')
  plt.ylabel('Target value')

  if image_name is None:
    plt.show()
  else:
    plt.savefig(out_dir + '/' + image_name)
def prime_internal_models(self):
  """Primes all three internal models based on self.priming_mode.

  Returns:
    A one-hot encoding of the note output by the q_network to be used as
    the initial observation.
  """
  # The target network and reward RNN are primed for their side effects;
  # only the q_network's output is used as the first observation.
  for model in (self.target_q_network, self.reward_rnn):
    self.prime_internal_model(model)
  return self.prime_internal_model(self.q_network)
def restore_from_directory(self, directory=None, checkpoint_name=None,
                           reward_file_name=None):
  """Restores this model from a saved checkpoint.

  Args:
    directory: Path to directory where checkpoint is located. If
      None, defaults to self.output_dir.
    checkpoint_name: The name of the checkpoint within the
      directory.
    reward_file_name: The name of the .npz file where the stored
      rewards are saved. If None, will not attempt to load stored
      rewards.
  """
  if directory is None:
    directory = self.output_dir

  if checkpoint_name is not None:
    checkpoint_file = os.path.join(directory, checkpoint_name)
  else:
    # No explicit name given: fall back to the newest checkpoint in the
    # directory (may be None if nothing is there).
    tf.logging.info('Directory %s.', directory)
    checkpoint_file = tf.train.latest_checkpoint(directory)

  if checkpoint_file is None:
    # Nothing to restore from; leave the model's variables untouched.
    tf.logging.fatal('Error! Cannot locate checkpoint in the directory')
    return
  # TODO(natashamjaques): Remove print statement once tf.logging outputs
  # to Jupyter notebooks (once the following issue is resolved:
  # https://github.com/tensorflow/tensorflow/issues/3047)
  print('Attempting to restore from checkpoint', checkpoint_file)
  tf.logging.info('Attempting to restore from checkpoint %s', checkpoint_file)

  self.saver.restore(self.session, checkpoint_file)

  if reward_file_name is not None:
    npz_file_name = os.path.join(directory, reward_file_name)
    # TODO(natashamjaques): Remove print statement once tf.logging outputs
    # to Jupyter notebooks (once the following issue is resolved:
    # https://github.com/tensorflow/tensorflow/issues/3047)
    print('Attempting to load saved reward values from file', npz_file_name)
    tf.logging.info('Attempting to load saved reward values from file %s',
                    npz_file_name)
    npz_file = np.load(npz_file_name)

    # Restore the full training/eval reward history so that plotting and
    # epoch counting continue from where the saved run left off.
    self.rewards_batched = npz_file['train_rewards']
    self.music_theory_rewards_batched = npz_file['train_music_theory_rewards']
    self.note_rnn_rewards_batched = npz_file['train_note_rnn_rewards']
    self.eval_avg_reward = npz_file['eval_rewards']
    self.eval_avg_music_theory_reward = npz_file['eval_music_theory_rewards']
    self.eval_avg_note_rnn_reward = npz_file['eval_note_rnn_rewards']
    self.target_val_list = npz_file['target_val_list']
| apache-2.0 |
runt18/nupic | external/linux32/lib/python2.6/site-packages/matplotlib/projections/__init__.py | 1 | 2188 | from geo import AitoffAxes, HammerAxes, LambertAxes
from polar import PolarAxes
from matplotlib import axes
class ProjectionRegistry(object):
    """
    Manages the set of projections available to the system.
    """
    def __init__(self):
        # Maps projection name -> projection class.
        self._all_projection_types = {}

    def register(self, *projections):
        """
        Register a new set of projection(s).
        """
        for proj in projections:
            self._all_projection_types[proj.name] = proj

    def get_projection_class(self, name):
        """
        Get a projection class from its *name*.
        """
        return self._all_projection_types[name]

    def get_projection_names(self):
        """
        Get a list of the names of all projections currently
        registered.
        """
        return sorted(self._all_projection_types)
# Single module-level registry instance, pre-populated with the built-in
# projections; extended at runtime via register_projection().
projection_registry = ProjectionRegistry()
projection_registry.register(
    axes.Axes,
    PolarAxes,
    AitoffAxes,
    HammerAxes,
    LambertAxes)
def register_projection(cls):
    """Register the projection class *cls* with the global registry."""
    projection_registry.register(cls)
def get_projection_class(projection=None):
    """
    Get a projection class from its name.

    If *projection* is None, a standard rectilinear projection is
    returned.
    """
    key = 'rectilinear' if projection is None else projection
    try:
        return projection_registry.get_projection_class(key)
    except KeyError:
        raise ValueError("Unknown projection '{0!s}'".format(key))
def projection_factory(projection, figure, rect, **kwargs):
    """
    Get a new projection instance.

    *projection* is a projection name.

    *figure* is a figure to add the axes to.

    *rect* is a :class:`~matplotlib.transforms.Bbox` object specifying
    the location of the axes within the figure.

    Any other kwargs are passed along to the specific projection
    constructor being used.
    """
    cls = get_projection_class(projection)
    return cls(figure, rect, **kwargs)
def get_projection_names():
    """
    Get a list of acceptable projection names.
    """
    names = projection_registry.get_projection_names()
    return names
| agpl-3.0 |
anve8004/trading-with-python | historicDataDownloader/historicDataDownloader.py | 77 | 4526 | '''
Created on 4 aug. 2012
Copyright: Jev Kuznetsov
License: BSD
a module for downloading historic data from IB
'''
import ib
import pandas
from ib.ext.Contract import Contract
from ib.opt import ibConnection, message
from time import sleep
import tradingWithPython.lib.logger as logger
from pandas import DataFrame, Index
import datetime as dt
from timeKeeper import TimeKeeper
import time
timeFormat = "%Y%m%d %H:%M:%S"
class DataHandler(object):
    ''' handles incoming messages '''

    def __init__(self, tws):
        # Subscribe to HistoricalData messages on the given TWS connection.
        self._log = logger.getLogger('DH')
        tws.register(self.msgHandler, message.HistoricalData)
        self.reset()

    def reset(self):
        ''' clear any bars collected so far and mark data as not ready '''
        self._log.debug('Resetting data')
        self.dataReady = False
        self._timestamp = []
        self._data = {'open': [], 'high': [], 'low': [], 'close': [],
                      'volume': [], 'count': [], 'WAP': []}

    def msgHandler(self, msg):
        ''' accumulate one HistoricalData message, or flag completion '''
        #print '[msg]', msg
        # A date starting with 'finished' marks the end of the batch.
        if msg.date[:8] == 'finished':
            self._log.debug('Data recieved')
            self.dataReady = True
            return

        self._timestamp.append(dt.datetime.strptime(msg.date, timeFormat))
        for field in self._data.keys():
            self._data[field].append(getattr(msg, field))

    @property
    def data(self):
        ''' return downloaded data as a DataFrame '''
        return DataFrame(data=self._data, index=Index(self._timestamp))
class Downloader(object):
    ''' downloads historic data from IB via ibpy, pacing requests to stay
    within the API's historical-data request limits '''

    def __init__(self,debug=False):
        self._log = logger.getLogger('DLD')
        self._log.debug('Initializing data dwonloader. Pandas version={0}, ibpy version:{1}'.format(pandas.__version__,ib.version))

        self.tws = ibConnection()
        self._dataHandler = DataHandler(self.tws)

        if debug:
            # echo every incoming message except the (very chatty)
            # HistoricalData stream, which DataHandler already consumes
            self.tws.registerAll(self._debugHandler)
            self.tws.unregister(self._debugHandler,message.HistoricalData)

        self._log.debug('Connecting to tws')
        self.tws.connect()

        self._timeKeeper = TimeKeeper() # keep track of past requests
        self._reqId = 1 # current request id

    def _debugHandler(self,msg):
        # print every TWS message (debug mode only)
        print '[debug]', msg

    def requestData(self,contract,endDateTime,durationStr='1800 S',barSizeSetting='1 secs',whatToShow='TRADES',useRTH=1,formatDate=1):
        ''' request one batch of historical bars and block until received;
        returns the bars as a pandas DataFrame (see DataHandler.data) '''
        self._log.debug('Requesting data for %s end time %s.' % (contract.m_symbol,endDateTime))

        # Throttle: stall while more than 59 requests were made in the
        # last 600 s (IB's historical-data pacing limit).
        while self._timeKeeper.nrRequests(timeSpan=600) > 59:
            print 'Too many requests done. Waiting... '
            time.sleep(1)

        self._timeKeeper.addRequest()
        self._dataHandler.reset()
        self.tws.reqHistoricalData(self._reqId,contract,endDateTime,durationStr,barSizeSetting,whatToShow,useRTH,formatDate)
        self._reqId+=1

        #wait for data
        startTime = time.time()
        timeout = 3
        # NOTE(review): polls in 2 s steps against a 3 s timeout, so the
        # effective wait overshoots the deadline by up to one sleep
        # interval — confirm whether a longer timeout was intended.
        while not self._dataHandler.dataReady and (time.time()-startTime < timeout):
            sleep(2)

        if not self._dataHandler.dataReady:
            self._log.error('Data timeout')

        print self._dataHandler.data

        return self._dataHandler.data

    def getIntradayData(self,contract, dateTuple ):
        ''' get full day data on 1-s interval
            date: a tuple of (yyyy,mm,dd)
        '''
        # 16:00-22:00 local time window, fetched in 30-minute chunks that
        # are concatenated into one DataFrame.
        openTime = dt.datetime(*dateTuple)+dt.timedelta(hours=16)
        closeTime = dt.datetime(*dateTuple)+dt.timedelta(hours=22)

        timeRange = pandas.date_range(openTime,closeTime,freq='30min')

        datasets = []
        for t in timeRange:
            datasets.append(self.requestData(contract,t.strftime(timeFormat)))

        return pandas.concat(datasets)

    def disconnect(self):
        # close the TWS socket connection
        self.tws.disconnect()
if __name__=='__main__':
    # Manual smoke test: download one full trading day of 1-second SPY
    # bars for 2012-08-06 and write them to test.csv.
    dl = Downloader(debug=True)
    c = Contract()
    c.m_symbol = 'SPY'
    c.m_secType = 'STK'
    c.m_exchange = 'SMART'
    c.m_currency = 'USD'
    df = dl.getIntradayData(c, (2012,8,6))
    df.to_csv('test.csv')

#    df = dl.requestData(c, '20120803 22:00:00')
#    df.to_csv('test1.csv')
#    df = dl.requestData(c, '20120803 21:30:00')
#    df.to_csv('test2.csv')

    dl.disconnect()
    print 'Done.'
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.