repo_name stringlengths 9 55 | path stringlengths 7 120 | copies stringlengths 1 3 | size stringlengths 4 6 | content stringlengths 1.02k 169k | license stringclasses 12 values |
|---|---|---|---|---|---|
mikeireland/chronostar | projects/scocen/cmd_age_sequence_AG_with_lithium.py | 1 | 8855 | """
Plot CMDs for CUT components and show that components with higher kinematic
age show less overluminosity in comparison to others.
"""
import numpy as np
from astropy.table import Table
import matplotlib.pyplot as plt
import matplotlib.ticker as ticker
from matplotlib.gridspec import GridSpec
from mpl_toolkits.axes_grid1.inset_locator import inset_axes
plt.ion()
# Pretty plots
from fig_settings import *
############################################
# Some things are the same for all the plotting scripts and we put
# this into a single library to avoid confusion.
import scocenlib as lib
data_filename = lib.data_filename
comps_filename = lib.comps_filename
compnames = lib.compnames
colors = lib.colors
############################################
# Components (by letter ID) to overplot in the CMD / lithium panels.
comps_to_plot = ['A', 'G'] # AGE SEQUENCE IS REAL!!! ['C', 'A', 'U', 'G', 'F', 'T']
############################################
# Minimal probability required for membership
pmin_membership = 0.9
############################################
# CMD limits
xlim = [-0.5, 5]
# y-axis deliberately inverted (bright, i.e. small magnitude, at the top)
ylim = [16, -4]
############################################
# Read data
# Read data.  The try/except is an interactive-session caching trick: if
# tab0/comps0 already exist (from a previous run with plt.ion()), reuse
# them instead of re-reading the FITS files.
try:
    tab = tab0
    comps = comps0
except:
    # This table should include EW(Li) column!
    tab0 = Table.read(data_filename)
    # Absolute G magnitude from the distance modulus.
    # NOTE(review): the comment says micro arcsec but the 1e-3 factor implies
    # parallax in milliarcsec -- confirm against the input catalogue.
    Gmag = tab0['phot_g_mean_mag'] - 5 * np.log10(1.0 / (tab0['parallax'] * 1e-3) / 10) # tab['parallax'] in micro arcsec
    tab0['Gmag'] = Gmag
    # ADD LITHIUM INFO: merge EW(Li) from GALAH and the 2.3m follow-up.
    galah = Table.read('/Users/marusa/galah/lithium/ewli/GALAH_EW_Lithium_Zerjal_20181025.fits')
    lithium_galah = galah[['source_id', 'EW(Li)']]
    results_2m3 = Table.read('/Users/marusa/observing/23m/paper/data/2m3_results.fits')
    lithium_2m3 = results_2m3[['source_id', 'EW(Li)']]
    from astropy.table import vstack, unique, join
    lithium = vstack([lithium_galah, lithium_2m3])
    lithium = unique(lithium, keys=['source_id'], keep='last') # Some are repeated!
    tab0 = join(tab0, lithium, keys='source_id', join_type='left')
    comps0 = Table.read(comps_filename)
tab = tab0
comps = comps0
# Map from component letter ID to its table row for quick lookup.
compsd = dict(zip(comps['comp_ID'], comps))
# TAKE ONLY GOOD COMPS, AND SORT THEM BY THEIR AGE
comps_good = []
for l in comps_to_plot:
    for c in comps:
        comp_ID = c['comp_ID']
        if comp_ID==l:
            comps_good.append(c)
            break
def isochrone(ax, plot_young_iso=True):
    """Overplot Padova isochrones on a CMD axis.

    Always draws the 1 Gyr, [M/H]=0 isochrone; optionally also the
    4 Myr and 15 Myr ones.

    Parameters
    ----------
    ax : matplotlib axis to draw on
    plot_young_iso : bool
        Also plot the young (4 and 15 Myr) isochrones.
    """

    def _plot_iso(filename, last, label, ls='-'):
        # Load one Padova table and plot absolute G vs BP-RP.
        # Rows [2:last] skip the header-ish first rows and the giants;
        # columns -8/-7/-6 are Gmag/BP/RP in the Evans Gaia DR2 tables.
        iso = np.loadtxt(filename, comments='#')
        first = 2
        Gmag = iso[first:last, -8]
        BP = iso[first:last, -7]
        RP = iso[first:last, -6]
        ax.plot(BP - RP, Gmag, c='k', lw=0.8, ls=ls, label=label)

    # 1 Gyr (giants start earlier, hence the smaller row cut-off)
    _plot_iso('data/padova_gaiadr2_evans_1gyr_met0.dat', 80, '1 Gyr, [M/H]=0')
    # Young isochrones
    if plot_young_iso:
        _plot_iso('data/padova_gaiadr2_evans_4myr_met0.dat', 150,
                  '4 Myr, [M/H]=0', ls='--')
        _plot_iso('data/padova_gaiadr2_evans_15myr_met0.dat', 150,
                  '15 Myr, [M/H]=0', ls=':')
def spectral_types(ax, xlim, ylim, y=-2.4, ymin=0.9):
    """Mark spectral-type boundaries on a CMD axis and set its limits.

    Draws short vertical lines at the BP-RP colour where each spectral
    type starts (Mamajek's table) and labels the types, then applies the
    supplied x/y limits.  Returns the axis.
    """
    line_width = 0.5
    # Vertical tick at the start colour of each type (A0 ... M0).
    for boundary in (-0.037, 0.369, 0.767, 0.979, 1.848):
        ax.axvline(x=boundary, ymin=ymin, linewidth=line_width, color='k')

    # Type labels, nudged away from their boundary colour for readability.
    annotations = [
        ('A', -0.037 + 0.15),
        ('F', 0.369 + 0.15),
        ('G', 0.767 + 0.05),
        ('K', 0.979 + 0.4),
        ('M0', 1.848 + 0.1),
        ('M5', 3.35 - 0.05),
        ('M9', 4.86 - 0.1),
    ]
    for label, x in annotations:
        ax.annotate(label, xy=(x, y), xytext=(x, y), color='k', size=12)

    ax.set_xlim(xlim[0], xlim[1])
    ax.set_ylim(ylim[0], ylim[1])
    return ax
# Figure layout: big CMD panel (bottom 2/3), inset zoom, lithium panel (top 1/3).
fig = plt.figure(figsize=(figsize[1], figsize[0]*1.3))
gs = GridSpec(3, 1) # fig=fig
ax = fig.add_subplot(gs[1:, :])
# Inset axis placed in the upper-right of the main CMD for the zoom-in.
ax2 = inset_axes(ax, width="100%", height="100%", loc=1, bbox_to_anchor=(0.7, 0.5, .3, .4), bbox_transform=ax.transAxes)
ax3 = fig.add_subplot(gs[0, :])
# Scatter members of each selected component on all three panels.
for c2 in comps_to_plot:
    c = compsd[c2]
    comp_ID = c['comp_ID']
    membname = 'membership%s'%comp_ID
    age=c['age']
    # Keep only stars with membership probability above the threshold.
    mask=tab[membname]>pmin_membership
    t=tab[mask]
    ax.scatter(t['bp_rp'], t['Gmag'], s=1, c=colors[comp_ID], label=r'%s %.1f $\pm$ %.1f Myr'%(comp_ID, age, c['Crossing_time']))
    # AG
    #~ if comp_ID=='G':
    ax2.scatter(t['bp_rp'], t['Gmag'], s=20, c=colors[comp_ID], label=r'%s %.1f $\pm$ %.1f Myr'%(comp_ID, age, c['Crossing_time']))
    # Lithium
    ax3.scatter(t['bp_rp'], t['EW(Li)'], s=10, c=colors[comp_ID], label='')
isochrone(ax, plot_young_iso=False)
isochrone(ax2, plot_young_iso=False)
### Make plots pretty
# INSET
# CUT
#~ ax2=spectral_types(ax2, xlim, ylim, y = 3)
xlim2 = [0.7, 1.8]
ylim2 = [8, 2]
ax2.set_xlim(xlim2[0], xlim2[1])
ax2.set_ylim(ylim2[0], ylim2[1])
#~ ax2.indicate_inset_zoom(axins)
# Region to zoom in: draw a grey rectangle on the main CMD matching the inset.
lw=0.5
ls='-'
c='grey'
ax.plot(xlim2, [ylim2[0], ylim2[0]], linewidth=lw, color=c, linestyle=ls)
ax.plot(xlim2, [ylim2[1], ylim2[1]], linewidth=lw, color=c, linestyle=ls)
ax.plot([xlim2[0], xlim2[0]], ylim2, linewidth=lw, color=c, linestyle=ls)
ax.plot([xlim2[1], xlim2[1]], ylim2, linewidth=lw, color=c, linestyle=ls)
# Lines connecting the subplot (hand-tuned endpoints in data coordinates)
ax.plot([xlim2[1], 4.95], [ylim2[0], 6.4], linewidth=lw, color=c, linestyle=ls)
ax.plot([xlim2[0], 3.31], [ylim2[1], -1.6], linewidth=lw, color=c, linestyle=ls)
#~ ax.axvline(x=xlim2[1], ymin=ylim2[0], ymax=ylim2[1], linewidth=lw, color=c)
#~ ax.axhline(y=ylim2[0], xmin=xlim2[0]/(xlim[1]-xlim[0]), xmax=xlim2[1]/(xlim[1]-xlim[0]), linewidth=lw, color=c)
#~ ax.axhline(y=ylim2[1], xmin=xlim2[0], xmax=xlim2[1], linewidth=lw, color=c)
# AG
#~ #ax2=spectral_types(ax2, xlim, ylim, y = 9)
#~ ax2.set_xlim(2.5, 4)
#~ ax2.set_ylim(14, 7)
# Inset cosmetics: ticks/labels to right/top, then hidden entirely.
ax2.yaxis.set_label_position("right")
ax2.yaxis.tick_right()
ax2.xaxis.set_label_position("top")
ax2.xaxis.tick_top()
#~ ax2.tick_params(direction='out', length=6, width=0.5)
ax2.tick_params(direction='out', length=0, width=0.5)
plt.setp(ax2.get_yticklabels(), visible=False)
plt.setp(ax2.get_xticklabels(), visible=False)
# LEGEND
handles, labels = ax.get_legend_handles_labels()
#~ labels = [labels[1], labels[2], labels[3], labels[4], labels[0]]
#~ handles = [handles[1], handles[2], handles[3], handles[4], handles[0]]
legend=ax.legend(handles, labels, markerscale=5, frameon=False, loc='center right', bbox_to_anchor=(0.23, 0.23), title='Kinematic ages', prop={'size': 8})
plt.setp(legend.get_title(),fontsize=10)
#~ legend.legendHandles[3]._sizes = [40]
#~ ax3.legend(loc='center left', frameon=False)
# Lithium axis
#~ ax3=spectral_types(ax3, xlim, [-0.2, 1], y = 0.65)
#~ ax3=spectral_types(ax3, xlim, [-0.2, 1], y = 0.8, ymin=0.8)
ax3.set_ylabel(r'EW(Li) [\AA]')
ax3.set_xlabel('Bp-Rp')
ax3.xaxis.set_label_position("top")
ax3.xaxis.tick_top()
ax3.tick_params(direction='out', length=6, width=0.5)
ax3.set_xlim(xlim[0], xlim[1])
ax3.set_ylim(-0.2, 0.8)
#~ ax3.set_ylim(-0.2, 1)
# Suppress the lowest y tick label so it doesn't clash with the CMD panel.
t = ax3.get_yticks()
ytick_labels = ['%.1f'%x if x>-0.1 else '' for x in t]
ax3.set_yticklabels(ytick_labels)
ax=spectral_types(ax, xlim, ylim)
#~ ax.set_xlim(xlim[0], xlim[1])
#~ ax.set_ylim(ylim[0], ylim[1])
ax.set_xlabel('Bp-Rp')
ax.set_ylabel(r'$M\mathrm{_G}$')
ax3.xaxis.set_minor_locator(ticker.MultipleLocator(0.5))
ax.xaxis.set_minor_locator(ticker.MultipleLocator(0.5))
ax.yaxis.set_major_locator(ticker.MultipleLocator(4))
ax.yaxis.set_minor_locator(ticker.MultipleLocator(2))
# Hide CMD y tick labels brighter than -3 (they would overlap the annotations).
t = ax.get_yticks()
ytick_labels = [int(x) if x>-3 else '' for x in t]
ax.set_yticklabels(ytick_labels)
fig.subplots_adjust(bottom=0.1, top=0.9)
fig.subplots_adjust(hspace=0, wspace=0)
plt.savefig('cmd_li_AG_90percent_membership.pdf')
plt.show()
| mit |
meteorcloudy/tensorflow | tensorflow/examples/learn/text_classification_character_cnn.py | 33 | 5463 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Example of using convolutional networks over characters for DBpedia dataset.
This model is similar to one described in this paper:
"Character-level Convolutional Networks for Text Classification"
http://arxiv.org/abs/1509.01626
and is somewhat alternative to the Lua code from here:
https://github.com/zhangxiangxiao/Crepe
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import sys
import numpy as np
import pandas
import tensorflow as tf
# Parsed command-line flags, set in the __main__ block.
FLAGS = None

# Model hyper-parameters.
MAX_DOCUMENT_LENGTH = 100      # documents are truncated/padded to this many bytes
N_FILTERS = 10                 # filters per convolutional layer
FILTER_SHAPE1 = [20, 256]      # layer 1 kernel: 20 chars x 256 one-hot byte values
FILTER_SHAPE2 = [20, N_FILTERS]
POOLING_WINDOW = 4
POOLING_STRIDE = 2
MAX_LABEL = 15                 # number of DBpedia classes
CHARS_FEATURE = 'chars'  # Name of the input character feature.
def char_cnn_model(features, labels, mode):
  """Character level convolutional neural network model to predict classes.

  Estimator model_fn: builds a two-layer character CNN and returns the
  EstimatorSpec appropriate for PREDICT, TRAIN or EVAL mode.
  """
  # One-hot encode each byte (0-255) and add a channel dimension for conv2d.
  features_onehot = tf.one_hot(features[CHARS_FEATURE], 256)
  input_layer = tf.reshape(
      features_onehot, [-1, MAX_DOCUMENT_LENGTH, 256, 1])
  with tf.variable_scope('CNN_Layer1'):
    # Apply Convolution filtering on input sequence.
    conv1 = tf.layers.conv2d(
        input_layer,
        filters=N_FILTERS,
        kernel_size=FILTER_SHAPE1,
        padding='VALID',
        # Add a ReLU for non linearity.
        activation=tf.nn.relu)
    # Max pooling across output of Convolution+Relu.
    pool1 = tf.layers.max_pooling2d(
        conv1,
        pool_size=POOLING_WINDOW,
        strides=POOLING_STRIDE,
        padding='SAME')
    # Transpose matrix so that n_filters from convolution becomes width.
    pool1 = tf.transpose(pool1, [0, 1, 3, 2])
  with tf.variable_scope('CNN_Layer2'):
    # Second level of convolution filtering.
    conv2 = tf.layers.conv2d(
        pool1,
        filters=N_FILTERS,
        kernel_size=FILTER_SHAPE2,
        padding='VALID')
    # Max across each filter to get useful features for classification.
    # NOTE(review): `squeeze_dims` is the deprecated alias of `axis` in TF 1.x.
    pool2 = tf.squeeze(tf.reduce_max(conv2, 1), squeeze_dims=[1])
  # Apply regular WX + B and classification.
  logits = tf.layers.dense(pool2, MAX_LABEL, activation=None)
  predicted_classes = tf.argmax(logits, 1)
  # PREDICT mode: return class ids and per-class probabilities only.
  if mode == tf.estimator.ModeKeys.PREDICT:
    return tf.estimator.EstimatorSpec(
        mode=mode,
        predictions={
            'class': predicted_classes,
            'prob': tf.nn.softmax(logits)
        })
  loss = tf.losses.sparse_softmax_cross_entropy(labels=labels, logits=logits)
  # TRAIN mode: minimise cross-entropy with Adam.
  if mode == tf.estimator.ModeKeys.TRAIN:
    optimizer = tf.train.AdamOptimizer(learning_rate=0.01)
    train_op = optimizer.minimize(loss, global_step=tf.train.get_global_step())
    return tf.estimator.EstimatorSpec(mode, loss=loss, train_op=train_op)
  # EVAL mode: report accuracy.
  eval_metric_ops = {
      'accuracy': tf.metrics.accuracy(
          labels=labels, predictions=predicted_classes)
  }
  return tf.estimator.EstimatorSpec(
      mode=mode, loss=loss, eval_metric_ops=eval_metric_ops)
def main(unused_argv):
  """Load DBpedia, train the character CNN, and report test accuracy."""
  tf.logging.set_verbosity(tf.logging.INFO)

  # Prepare training and testing data.  Column 1 of the DBpedia data holds
  # the document text; `target` holds the integer class label.
  dbpedia = tf.contrib.learn.datasets.load_dataset(
      'dbpedia', test_with_fake_data=FLAGS.test_with_fake_data, size='large')
  x_train = pandas.DataFrame(dbpedia.train.data)[1]
  y_train = pandas.Series(dbpedia.train.target)
  x_test = pandas.DataFrame(dbpedia.test.data)[1]
  y_test = pandas.Series(dbpedia.test.target)

  # Process vocabulary: encode each document as a fixed-length byte sequence.
  char_processor = tf.contrib.learn.preprocessing.ByteProcessor(
      MAX_DOCUMENT_LENGTH)
  x_train = np.array(list(char_processor.fit_transform(x_train)))
  x_test = np.array(list(char_processor.transform(x_test)))
  x_train = x_train.reshape([-1, MAX_DOCUMENT_LENGTH, 1, 1])
  x_test = x_test.reshape([-1, MAX_DOCUMENT_LENGTH, 1, 1])

  # Build model
  classifier = tf.estimator.Estimator(model_fn=char_cnn_model)

  # Train.
  train_input_fn = tf.estimator.inputs.numpy_input_fn(
      x={CHARS_FEATURE: x_train},
      y=y_train,
      batch_size=128,
      num_epochs=None,
      shuffle=True)
  classifier.train(input_fn=train_input_fn, steps=100)

  # Predict.
  test_input_fn = tf.estimator.inputs.numpy_input_fn(
      x={CHARS_FEATURE: x_test},
      y=y_test,
      num_epochs=1,
      shuffle=False)
  predictions = classifier.predict(input_fn=test_input_fn)
  y_predicted = np.array(list(p['class'] for p in predictions))
  y_predicted = y_predicted.reshape(np.array(y_test).shape)

  # Score with tensorflow.
  scores = classifier.evaluate(input_fn=test_input_fn)
  print('Accuracy: {0:f}'.format(scores['accuracy']))
if __name__ == '__main__':
  # Parse known flags; everything else is forwarded to tf.app.run/main.
  parser = argparse.ArgumentParser()
  parser.add_argument(
      '--test_with_fake_data',
      default=False,
      help='Test the example code with fake data.',
      action='store_true')
  FLAGS, unparsed = parser.parse_known_args()
  tf.app.run(main=main, argv=[sys.argv[0]] + unparsed)
| apache-2.0 |
saleiro/SemEval2017-Task5 | Code/sentiment_analysis.py | 1 | 7352 | from sklearn.feature_extraction.text import CountVectorizer
from sklearn.feature_extraction import DictVectorizer
import numpy as np
from sklearn.metrics import mean_absolute_error
from sklearn.metrics.pairwise import cosine_similarity
import gensim
import argparse
from scipy.sparse import hstack
from sklearn.ensemble import ExtraTreesRegressor
import operator
from sklearn.svm import SVR
from sklearn.neural_network import MLPRegressor
from itertools import izip
class MeanEmbeddingVectorizer(object):
    """Vectorizer representing each token sequence as the mean word vector.

    Tokens missing from the ``word2vec`` mapping are ignored; a sequence
    with no known tokens maps to a zero vector of the embedding dimension.
    """

    def __init__(self, word2vec):
        self.word2vec = word2vec
        # If a text is empty we should return a vector of zeros
        # with the same dimensionality as all the other vectors.
        # Bug fix: the original used word2vec.itervalues().next(), which is
        # Python-2-only; next(iter(...)) works on both Python 2 and 3.
        self.dim = len(next(iter(word2vec.values())))

    def fit(self, X):
        """No-op (scikit-learn API compatibility); returns self."""
        return self

    def transform(self, X):
        """Return one mean-embedding row per token sequence in X."""
        return np.array([
            np.mean([self.word2vec[w] for w in words if w in self.word2vec]
                    or [np.zeros(self.dim)], axis=0)
            for words in X
        ])
def get_parser():
    """Build the command-line argument parser for the regression script."""
    parser = argparse.ArgumentParser(
        description="Sentiment Polarity and Intensity Regression")
    # All three arguments are mandatory; -test and -w2vec accept one or more
    # values and are therefore parsed into lists.
    parser.add_argument('-train', type=str, required=True, help='train file')
    parser.add_argument('-test', type=str, required=True, nargs='+',
                        help='test file')
    parser.add_argument('-w2vec', type=str, required=True, nargs='+',
                        help='w2vec file')
    return parser
def loadl(f):
    """Read a one-entry-per-line lexicon file; return lowercased entries."""
    with open(f, "r") as fid:
        return [line.lower().strip('\n') for line in fid]
def loadmpqa(f):
    """Parse an MPQA polarity lexicon (``word<TAB>polarity`` per line).

    Returns three lists of lowercased words: positive ('1'),
    negative ('-1') and neutral ('0').  Other polarity codes are skipped.
    """
    pos, neg, neu = [], [], []
    buckets = {'1': pos, '-1': neg, '0': neu}
    with open(f, "r") as fid:
        for line in fid:
            fields = line.lower().strip('\n').split('\t')
            bucket = buckets.get(fields[1])
            if bucket is not None:
                bucket.append(fields[0])
    return pos, neg, neu
def loadmpqas(f):
    """Parse an MPQA subjectivity lexicon (``word<TAB>code`` per line).

    Returns two lists of lowercased words: subjective ('1') and
    neutral ('0').  Lines with any other code are ignored.
    """
    subjective, neutral = [], []
    with open(f, "r") as fid:
        for line in fid:
            fields = line.lower().strip('\n').split('\t')
            if fields[1] == '1':
                subjective.append(fields[0])
            elif fields[1] == '0':
                neutral.append(fields[0])
    return subjective, neutral
def getLexicon(msgs_train, msgs_test):
    """Build lexicon-based feature matrices for train and test messages.

    Each message gets presence flags for five Loughran-McDonald categories
    plus a length-normalised positive/negative score from the L-M positive
    and negative lists.  Returns (X_train_lex, X_test_lex) dense arrays with
    aligned columns (the DictVectorizer is fitted on training features only).

    Bug fix vs. the original: the test loop used to append the feature dict
    left over from the last *training* message, so every test row carried
    identical (and wrong) features.  Features are now computed per message.
    """
    print('getLexicon')
    fol = 'lexicals/'
    a = loadl(fol + 'loughran_constraining.tsv')
    b = loadl(fol + 'loughran_letigious.tsv')
    c = loadl(fol + 'loughran_negative.tsv')
    d = loadl(fol + 'loughran_positive.tsv')
    e = loadl(fol + 'loughran_uncertainty.tsv')
    t = loadl(fol + 'loughran_modal.tsv')    # loaded but currently unused
    q = loadl(fol + 'loughran_harvard.tsv')  # loaded but currently unused
    p_pos, p_neg, p_neu = loadmpqa(fol + 'mpqa_polarity')     # currently unused
    s_pos, s_neu = loadmpqas(fol + 'mpqa_subjectivity')       # currently unused
    # Build the lookup sets once instead of once per message.
    set_a, set_b, set_c, set_d, set_e = set(a), set(b), set(c), set(d), set(e)

    def _features(msg):
        # Feature dict for a single message.
        m = set(msg.split())
        c_s = set_c.intersection(m)  # L-M negative words present
        d_s = set_d.intersection(m)  # L-M positive words present
        lg_score = 0.0
        if len(c_s) > 0 or len(d_s) > 0:
            # NOTE(review): operator precedence kept from the original, i.e.
            # len(d_s) - len(c_s)/len(m); (pos - neg)/len may have been intended.
            lg_score = float(len(d_s)) - float(len(c_s)) / float(len(m))
        return {'lg_score': lg_score,
                'laug_a': 1 if set_a.intersection(m) else 0,
                'laug_b': 1 if set_b.intersection(m) else 0,
                'laug_c': 1 if c_s else 0,
                'laug_d': 1 if d_s else 0,
                'laug_e': 1 if set_e.intersection(m) else 0}

    tlist = [_features(msg) for msg in msgs_train]
    flist = [_features(msg) for msg in msgs_test]
    vec = DictVectorizer(sparse=False)
    vec.fit(tlist)
    X_train_lex = vec.transform(tlist)
    X_test_lex = vec.transform(flist)
    return X_train_lex, X_test_lex
def train_predict(labels_train, msgs_train, labels_test, msgs_test, cashtags_train, cashtags_test, embeddings):
    """Train an ExtraTrees regressor on bag-of-words + mean-embedding +
    lexicon features and report MAE and cosine similarity on the test set.

    Returns (mae, cos).  cashtags_train/cashtags_test are accepted but unused.
    """
    print "loading features..."
    Y_train = np.array([float(x) for x in labels_train])
    Y_test = np.array([float(x) for x in labels_test])
    # Feature containers (bow = bag of words, boe = bag of embeddings).
    X_train_bow = []
    X_train_boe = []
    X_test_bow = []
    X_test_boe = []
    X_train = []
    X_test = []
    X_train_bos = []
    X_test_bos = []
    print 'train: ', len(Y_train), 'test:', len(Y_test)
    # Unigram counts, vocabulary fitted on the training messages only.
    vec = CountVectorizer(ngram_range=(1,1))
    vec.fit(msgs_train)
    X_train_bow = vec.transform(msgs_train).toarray()
    X_test_bow = vec.transform(msgs_test).toarray()
    # Mean word2vec embedding per message (pre-1.0 gensim attribute names).
    # NOTE(review): msgs are raw strings, so MeanEmbeddingVectorizer iterates
    # characters rather than tokens here -- verify this is intended.
    model = gensim.models.Word2Vec.load(embeddings)
    w2v = dict(zip(model.index2word, model.syn0))
    vec = MeanEmbeddingVectorizer(w2v)
    vec.fit(msgs_train)
    X_train_boe = vec.transform(msgs_train)#.toarray()
    X_test_boe = vec.transform(msgs_test)#.toarray()
    print 'len x_Train_bow, X_test_bow', X_train_bow.shape , X_test_bow.shape
    print 'len x_Train_boe, X_test_boe', X_train_boe.shape , X_test_boe.shape
    X_train_lex, X_test_lex = getLexicon(msgs_train, msgs_test)
    # Concatenate the three feature families column-wise.
    X_train = np.concatenate((X_train_bow,X_train_boe, X_train_lex), axis=1)
    X_test = np.concatenate((X_test_bow,X_test_boe, X_test_lex), axis=1)
    print 'len x_Train_, X_test_', X_train.shape , X_test.shape
    clf = ExtraTreesRegressor(n_estimators=200, n_jobs=-1)
    #clf = MLPRegressor(hidden_layer_sizes=(5,) ,verbose=True)
    #clf = SVR(kernel='linear',C=1.0, epsilon=0.2)
    print "fiting.. extra trees."
    print Y_train
    clf.fit(X_train, Y_train)
    print "testing..."
    y_hat = clf.predict(X_test)
    res_dict = {}
    mae = mean_absolute_error(Y_test, y_hat)
    # NOTE(review): modern scikit-learn requires 2-D inputs for
    # cosine_similarity; this relied on an older version accepting 1-D arrays.
    cos = cosine_similarity(Y_test, y_hat)
    print 'MAE: ', mae, '\tCOSINE:', cos
    return mae, cos
def readfile(path):
    """Read a tab-separated annotated-message file.

    Each line looks like ``<id>_$CASHTAG<TAB>?<TAB>message<TAB>label``.
    Returns three parallel lists (labels, msgs, cashtags), all lowercased;
    cashtags have the ``$`` prefix stripped.
    """
    labels, msgs, cashtags = [], [], []
    with open(path, "r") as fid:
        for line in fid:
            fields = line.strip('\n').lower().split("\t")
            labels.append(fields[3])
            msgs.append(fields[2])
            cashtags.append(fields[0].split('_')[1].replace('$', ''))
    return labels, msgs, cashtags
def main():
    """Parse CLI arguments, load the data sets and run train/predict."""
    args = get_parser().parse_args()
    # -test and -w2vec are nargs='+' lists; only the first entry is used.
    labels_train, msgs_train, cashtags_train = readfile(args.train)
    labels_test, msgs_test, cashtags_test = readfile(args.test[0])
    train_predict(labels_train, msgs_train, labels_test, msgs_test,
                  cashtags_train, cashtags_test, args.w2vec[0])


if __name__ == "__main__":
    main()
| mit |
ywcui1990/nupic | external/linux32/lib/python2.6/site-packages/matplotlib/backends/backend_qt4agg.py | 70 | 4985 | """
Render to qt from agg
"""
from __future__ import division
import os, sys
import matplotlib
from matplotlib.figure import Figure
from backend_agg import FigureCanvasAgg
from backend_qt4 import QtCore, QtGui, FigureManagerQT, FigureCanvasQT,\
show, draw_if_interactive, backend_version, \
NavigationToolbar2QT
# Module-wide switch for verbose tracing of the Qt4/Agg backend calls.
DEBUG = False
def new_figure_manager( num, *args, **kwargs ):
    """
    Create a new figure manager instance

    Builds the Figure (or the class passed via the FigureClass kwarg),
    wraps it in a Qt4/Agg canvas and returns the Qt figure manager.
    """
    if DEBUG: print 'backend_qtagg.new_figure_manager'
    # FigureClass may be overridden by the caller (e.g. pylab subclasses).
    FigureClass = kwargs.pop('FigureClass', Figure)
    thisFig = FigureClass( *args, **kwargs )
    canvas = FigureCanvasQTAgg( thisFig )
    return FigureManagerQT( canvas, num )
class NavigationToolbar2QTAgg(NavigationToolbar2QT):
    """Qt4 navigation toolbar that produces Agg-backed canvases."""
    def _get_canvas(self, fig):
        # Used by the toolbar when it needs a fresh canvas for a figure.
        return FigureCanvasQTAgg(fig)
class FigureManagerQTAgg(FigureManagerQT):
    """Figure manager for the Qt4/Agg backend."""

    def _get_toolbar(self, canvas, parent):
        # must be inited after the window, drawingArea and figure
        # attrs are set
        # Bug fix: `toolbar` was previously unbound when
        # rcParams['toolbar'] == 'classic', causing a NameError at return.
        toolbar = None
        if matplotlib.rcParams['toolbar'] == 'classic':
            print("Classic toolbar is not supported")
        elif matplotlib.rcParams['toolbar'] == 'toolbar2':
            toolbar = NavigationToolbar2QTAgg(canvas, parent)
        return toolbar
class FigureCanvasQTAgg( FigureCanvasQT, FigureCanvasAgg ):
    """
    The canvas the figure renders into. Calls the draw and print fig
    methods, creates the renderers, etc...

    Public attribute

      figure - A Figure instance

    Rendering is done offscreen by Agg; paintEvent copies the rendered
    buffer into the Qt widget.  self.replot is tri-state: True means a
    full redraw is needed, False means reuse the buffer, and a bbox means
    blit only that region.
    """

    def __init__( self, figure ):
        if DEBUG: print 'FigureCanvasQtAgg: ', figure
        FigureCanvasQT.__init__( self, figure )
        FigureCanvasAgg.__init__( self, figure )
        self.drawRect = False   # whether to overlay the zoom rectangle
        self.rect = []          # zoom rectangle [x, y, w, h]
        self.replot = True      # full redraw requested
        self.setAttribute(QtCore.Qt.WA_OpaquePaintEvent)

    def resizeEvent( self, e ):
        # Delegate resizing to the Qt base class (which triggers a redraw).
        FigureCanvasQT.resizeEvent( self, e )

    def drawRectangle( self, rect ):
        # Remember the rubber-band rectangle and schedule a repaint.
        self.rect = rect
        self.drawRect = True
        self.repaint( )

    def paintEvent( self, e ):
        """
        Draw to the Agg backend and then copy the image to the qt.drawable.
        In Qt, all drawing should be done inside of here when a widget is
        shown onscreen.
        """
        #FigureCanvasQT.paintEvent( self, e )
        if DEBUG: print 'FigureCanvasQtAgg.paintEvent: ', self, \
           self.get_width_height()
        # only replot data when needed
        if type(self.replot) is bool: # might be a bbox for blitting
            if self.replot:
                FigureCanvasAgg.draw(self)
            # matplotlib is in rgba byte order. QImage wants to put the bytes
            # into argb format and is in a 4 byte unsigned int. Little endian
            # system is LSB first and expects the bytes in reverse order
            # (bgra).
            if QtCore.QSysInfo.ByteOrder == QtCore.QSysInfo.LittleEndian:
                stringBuffer = self.renderer._renderer.tostring_bgra()
            else:
                stringBuffer = self.renderer._renderer.tostring_argb()
            qImage = QtGui.QImage(stringBuffer, self.renderer.width,
                                  self.renderer.height,
                                  QtGui.QImage.Format_ARGB32)
            p = QtGui.QPainter(self)
            p.drawPixmap(QtCore.QPoint(0, 0), QtGui.QPixmap.fromImage(qImage))
            # draw the zoom rectangle to the QPainter
            if self.drawRect:
                p.setPen( QtGui.QPen( QtCore.Qt.black, 1, QtCore.Qt.DotLine ) )
                p.drawRect( self.rect[0], self.rect[1], self.rect[2], self.rect[3] )
            p.end()
            # we are blitting here
        else:
            # self.replot holds the bbox of the region to blit.
            bbox = self.replot
            l, b, r, t = bbox.extents
            w = int(r) - int(l)
            h = int(t) - int(b)
            t = int(b) + h
            reg = self.copy_from_bbox(bbox)
            stringBuffer = reg.to_string_argb()
            qImage = QtGui.QImage(stringBuffer, w, h, QtGui.QImage.Format_ARGB32)
            pixmap = QtGui.QPixmap.fromImage(qImage)
            p = QtGui.QPainter( self )
            # Qt y axis points down; flip the vertical origin accordingly.
            p.drawPixmap(QtCore.QPoint(l, self.renderer.height-t), pixmap)
            p.end()
        self.replot = False
        self.drawRect = False

    def draw( self ):
        """
        Draw the figure when xwindows is ready for the update
        """
        if DEBUG: print "FigureCanvasQtAgg.draw", self
        self.replot = True
        FigureCanvasAgg.draw(self)
        self.update()
        # Added following line to improve realtime pan/zoom on windows:
        QtGui.qApp.processEvents()

    def blit(self, bbox=None):
        """
        Blit the region in bbox
        """
        # Store the bbox so paintEvent takes the blit branch.
        self.replot = bbox
        l, b, w, h = bbox.bounds
        t = b + h
        self.update(l, self.renderer.height-t, w, h)

    def print_figure(self, *args, **kwargs):
        # Save via Agg, then redraw so the onscreen widget stays current.
        FigureCanvasAgg.print_figure(self, *args, **kwargs)
        self.draw()
| agpl-3.0 |
dr-leo/pandaSDMX | pandasdmx/source/__init__.py | 1 | 6250 | from pydantic import HttpUrl
from enum import Enum
from importlib import import_module, resources
import json
from typing import Any, Dict, Union, Optional
from pandasdmx.model import DataStructureDefinition
from pandasdmx.util import BaseModel, Resource, validator
#: Registry of all known data sources, keyed by source ID; populated by
#: :func:`add_source` / :func:`load_package_sources`.
sources: Dict[str, "Source"] = {}

#: Kinds of content a data source can return.
DataContentType = Enum("DataContentType", "XML JSON")
class Source(BaseModel):
    """SDMX-IM RESTDatasource.

    This class describes the location and features supported by an SDMX data
    source. Subclasses may override the hooks in order to handle specific
    features of different REST web services:

    .. autosummary::
       handle_response
       finish_message
       modify_request_args
    """

    #: ID of the data source
    id: str
    #: Base URL for queries
    url: Optional[HttpUrl]
    #: Human-readable name of the data source
    name: str
    #: documentation URL of the data source
    documentation: Optional[HttpUrl]
    #: Extra HTTP headers to send with every request to this source.
    headers: Dict[str, Any] = {}
    # resource-specific URLs for end-point. Overrides `url` param
    resource_urls: Dict[str, HttpUrl] = {}
    #: Version string used when a query does not specify one.
    default_version: str = "latest"
    #: :class:`.DataContentType` indicating the type of data returned by the
    #: source.
    data_content_type: DataContentType = DataContentType.XML
    #: Mapping from :class:`~sdmx.Resource` to :class:`bool` indicating support
    #: for SDMX REST API features. Two additional keys are valid:
    #:
    #: - ``'preview'=True`` if the source supports ``?detail=serieskeysonly``.
    #:   See :meth:`.preview_data`.
    #: - ``'structure-specific data'=True`` if the source can return structure-
    #:   specific data messages.
    supports: Dict[Union[str, Resource], bool] = {Resource.data: True}

    @classmethod
    def from_dict(cls, info):
        """Construct a Source (or subclass) from a dict of field values."""
        return cls(**info)

    def __init__(self, **kwargs):
        super().__init__(**kwargs)

        # Set default supported features: XML sources are assumed to support
        # every feature unless explicitly disabled; JSON sources the opposite.
        for feature in list(Resource) + ["preview", "structure-specific data"]:
            self.supports.setdefault(
                feature, self.data_content_type == DataContentType.XML
            )

    # Hooks
    def handle_response(self, response, content):
        """Handle response content of unknown type.

        This hook is called by :meth:`.Request.get` *only* when
        the `content` cannot be parsed as XML or JSON.

        See :meth:`.estat.Source.handle_response` and
        :meth:`.sgr.Source.handle_response` for example implementations.
        """
        return response, content

    def finish_message(self, message, request, **kwargs):
        """Postprocess retrieved message.

        This hook is called by :meth:`.Request.get` after a :class:`.Message`
        object has been successfully parsed from the query response.

        See :meth:`.estat.Source.finish_message` for an example implementation.
        """
        return message

    def modify_request_args(self, kwargs):
        """Modify arguments used to build query URL.

        This hook is called by :meth:`.Request.get` to modify the keyword
        arguments before the query URL is built.

        The default implementation handles requests for 'structure-specific
        data' by adding an HTTP 'Accepts:' header when a 'dsd' is supplied as
        one of the `kwargs`.

        See :meth:`.sgr.Source.modify_request_args` for an example override.

        Returns
        -------
        None
        """
        if self.data_content_type is DataContentType.XML:
            dsd = kwargs.get("dsd", None)
            if isinstance(dsd, DataStructureDefinition):
                kwargs.setdefault("headers", {})
                kwargs["headers"].setdefault(
                    "Accept",
                    "application/vnd.sdmx.structurespecificdata+xml;" "version=2.1",
                )

    @validator("id")
    def _validate_id(cls, value):
        # Subclasses may pin their ID via a private `_id` attribute; the
        # supplied value must then match it.
        assert getattr(cls, "_id", value) == value
        return value

    @validator("data_content_type", pre=True)
    def _validate_dct(cls, value):
        # Accept either a DataContentType member or its name ("XML"/"JSON").
        if isinstance(value, DataContentType):
            return value
        else:
            return DataContentType[value]
class _NoSource(Source):
    """Sentinel Source used when a request is not tied to any data source."""
    def __init__(self):
        super().__init__(id="", url=None, name="", documentation=None)


#: Singleton placeholder meaning "no data source".
NoSource = _NoSource()
def add_source(info, id=None, override=False, **kwargs):
    """Add a new data source.

    The *info* expected is in JSON format:

    .. code-block:: json

        {
          "id": "ESTAT",
          "documentation": "http://data.un.org/Host.aspx?Content=API",
          "url": "http://ec.europa.eu/eurostat/SDMX/diss-web/rest",
          "name": "Eurostat",
          "supported": {"codelist": false, "preview": true}
        }

    …with unspecified values using the defaults; see
    :class:`Source`.

    Parameters
    ----------
    info : dict-like
        String containing JSON information about a data source.
    id : str
        Identifier for the new datasource. If :obj:`None` (default), then
        `info['id']` is used.
    override : bool
        If :obj:`True`, replace any existing data source with *id*.
        Otherwise, raise :class:`ValueError`.
    **kwargs
        Optional callbacks for *handle_response* and *finish_message* hooks.
    """
    if isinstance(info, str):
        info = json.loads(info)

    id = info["id"] if id is None else id

    info.update(kwargs)

    # Bug fix: the original raised unconditionally (the `override` flag was
    # never consulted) and passed `id` as a second positional argument to
    # ValueError, so it was never interpolated into the message.
    if id in sources and not override:
        raise ValueError(f"Data source {id!r} already defined; use override=True")

    # Maybe import a subclass that defines a hook
    SourceClass = Source
    try:
        mod = import_module("." + id.lower(), "pandasdmx.source")
    except ImportError:
        pass
    else:
        SourceClass = mod.Source

    sources[id] = SourceClass.from_dict(info)
def list_sources():
    """Return a sorted list of valid source IDs.

    These can be used to create :class:`Request` instances.
    """
    # Iterating a dict yields its keys; sorted() materialises and orders them.
    return sorted(sources)
def load_package_sources():
    """Discover all sources listed in ``sources.json``.

    Reads the JSON file bundled with the package and registers each entry
    via :func:`add_source`.
    """
    with resources.open_binary("pandasdmx", "sources.json") as f:
        for info in json.load(f):
            add_source(info)


# Populate the registry once at import time.
load_package_sources()
| apache-2.0 |
Kate-Willett/HadISDH_Build | MakeAreaAvgTS.py | 1 | 40798 | # PYTHON 3
#
# Author: Kate Willett
# Created: 4 March 2019
# Last update: 15 April 2019
# Location: /data/local/hadkw/HADCRUH2/UPDATE2014/PROGS/PYTHON/
# GitHub: https://github.com/Kate-Willett/PYTHON
# -----------------------
# CODE PURPOSE AND OUTPUT
# -----------------------
# This code reads in monthly mean gridded (5by5) netCDF files and produces area average time series
# in netCDF and ASCII
#
# Note that the mdi (-1e30) is different between IDL (float?) and Python (double?) and at the moment
# I have netCDF files created in both IDL and Python. So - first thing is to reset all missing values to
# the Python mdi used here.
# Actually now I make netCDF files with -999. as the missing data!
#
# This code was originally IDL written by Kate Willett make_area_avg_ts.pro and used
# globalmean.pro to do the area averaging which was written in IDL by Tim Osborn
#
# <references to related published material, e.g. that describes data set>
#
# -----------------------
# LIST OF MODULES
# -----------------------
## Modules
#from datetime import datetime
#import numpy as np
#from matplotlib.dates import date2num,num2date
#import sys, os
#from scipy.optimize import curve_fit,fsolve,leastsq
#from scipy import pi,sqrt,exp
#from scipy.special import erf
#import scipy.stats
#from math import sqrt,pi,radians,sin,cos,acos
#import struct
#from netCDF4 import Dataset
#from netCDF4 import stringtoarr # for putting strings in as netCDF variables
#import pdb
#
## Kates:
#import TestLeap
#from ReadNetCDF import GetGrid4
#from ReadNetCDF import GetGrid4Slice
#from GetNiceTimes import MakeDaysSince
#
# -----------------------
# DATA
# -----------------------
# HadISDH-land:
# /data/local/hadkw/HADCRUH2/UPDATE2016/STATISTICS/GRIDS/
# HadISDH.landq.3.0.0.2016p_FLATgridIDPHA5by5_anoms7605_JAN2017_cf.nc
# HadISDH-marine
# /data/local/hadkw/HADCRUH2/UPDATE2016/STATISTICS/GRIDS/
# HadISDH.marineq.1.0.0.2016p_OBSclim2BClocal_anoms8110_JAN2017_cf.nc
# HadISDH.marineq.1.0.0.2016p_OBSclim2BClocalship_anoms8110_JAN2017_cf.nc
# HadISDH-blend:
# /data/local/hadkw/HADCRUH2/UPDATE2016/STATISTICS/GRIDS/
# HadISDH.blendq.1.0.0.2016p_FULL_anoms8110_JAN2017_cf.nc
# HadISDH.blendq.1.0.0.2016p_FULLship_anoms8110_JAN2017_cf.nc
# Other:
#
# -----------------------
# HOW TO RUN THE CODE
# -----------------------
# Make sure all of the EDITABLES are correct
# module load scitools/default-current
# python MakeAreaAvgTS.py
#
# NOT ANYMORE: if you want era5 data masked to HadISDH then set MaskIt = True internally
# if you want different years or regions then reset internally
#> module load scitools/default-current
#> python MakeGridTrends --var <var> --typee <type> --year1 <yyyy> --year2 <yyyy>
#
## Which variable?
# var = 'dpd' #'dpd','td','t','tw','e','q','rh'
#
## Which homog type?
# typee = 'LAND', 'RAW','OTHER', 'BLEND', 'BLENDSHIP', 'MARINE','MARINESHIP', 'ERA5','EAR5MASK','ERA5LAND','ERA5MARINE','ERA5LANDMASK','ERA5MARINEMASK'
#
# year1 and year2 are start and end year of trends
#
# -----------------------
# OUTPUT
# -----------------------
# /data/local/hadkw/HADCRUH2/UPDATE2016/STATISTICS/TIMESERIES/
# HadISDH.landq.3.0.0.2016p_FLATgridIDPHA5by5_anoms7605_JAN2017_areaTS_19732016.nc
# HadISDH.blendq.1.0.0.2016p_FULL_anoms8110_JAN2017_areaTS_19732016.nc
# HadISDH.blendq.1.0.0.2016p_FULLship_anoms8110_JAN2017_areaTS_19732016.nc
# HadISDH.marineq.1.0.0.2016p_OBSclim2BClocal_anoms8110_JAN2017_areaTS_19732016.nc
# HadISDH.marineq.1.0.0.2016p_OBSclim2BClocalship_anoms8110_JAN2017_areaTS_19732016.nc
#
# -----------------------
# VERSION/RELEASE NOTES
# -----------------------
#
# Version 2 (5 November 2020)
# ---------
#
# Enhancements
# Now runs from command line
# Now works with ERA5 anoms and masks if desired.
#
# Changes
#
# Bug fixes
#
#
# Version 1 (15 April 2019)
# ---------
#
# Enhancements
#
# Changes
#
# Bug fixes
#
# -----------------------
# OTHER INFORMATION
# -----------------------
# Based on original IDL code by Kate Willett make_area_avg_ts.pro
############################################################################
# Modules
from datetime import datetime
import numpy as np
from matplotlib.dates import date2num,num2date
import sys, os, getopt
from scipy.optimize import curve_fit,fsolve,leastsq
from scipy import pi,sqrt,exp
from scipy.special import erf
import scipy.stats
from math import sqrt,pi,radians,sin,cos,acos
import struct
from netCDF4 import Dataset
from netCDF4 import stringtoarr # for putting strings in as netCDF variables
import pdb
# Kates:
import TestLeap
from ReadNetCDF import GetGrid4
from ReadNetCDF import GetGrid4Slice
from GetNiceTimes import MakeDaysSince
## set up variables
## EDITABLES set up directories and filenames
#mdi = -1e+30
#
## *** CHOOSE CANDIDATE set up values
#styr = 1973 # 1850, 1973, 1950, 1880, 1979
#edyr = 2019 #
#climst = 1981 # 1976 or 1981
#climed = 2010 # 2005 or 2010
#
## *** CHOOSE READ IN DATE ***
#thenmon = 'JAN'
#thenyear = '2020'
#
## *** CHOOSE PRINT OUT DATE ***
#nowmon = 'JAN'
#nowyear = '2020'
#
## *** CHOOSE PARAMETER ***
#param = 'e' #'dpd','td','t','tw','e','q','rh','w','evap'
#
## *** CHOOSE TYPE OF DATA ***
#homogtype = 'MARINEship' #'PHA','ID','DPD', 'RAW', 'OTHER', 'BLEND','BLENDship','MARINE','MARINEship','ERA-Interim', 'ERA5'
#
## *** CHOOSE VERSION IF HadISDH ***
#version = '1.0.0.2019f' # 3.0.0.3016p 1.0.0.2016p
##version = '4.2.0.2019f' # 3.0.0.3016p 1.0.0.2016p
#
## *** CHOOSE WORKING DIRECTORY ***
#workingdir = 'UPDATE'+str(edyr)
#
## *** CHOOSE WHETHER TO MASK WITH HadISDH IF NOT HadISDH ***
## IF mask=True then you will need version, thenmon, thenyear to be correct
#mask = False # default = 'False', if 'True' then mask to HadISDH equivalent
## MASKFILE (HadISDH set up values)
#mstyr = 1973 # 1850, 1973, 1950, 1880
#medyr = 2019 # 2013, 2011
#mclimst = 1981 # could be 1976 or 1981
#mclimed = 2010 # could be 2005 or 2010#
#
## *** CHOOSE WHETHER TO SUB-SELECT A DOMAIN IF NOT HADISDH ***
#domain = 'marine' # 'land','marine','blend'#
#
## *** CHOOSE WHETHER TO WORK WITH ANOMALIES OR ACTUALS - COULD ADD RENORMALISATION IF DESIRED ***
#isanom = True # 'false' for actual values, 'true' for anomalies
#
## *** Might add a renormalisation section later ***
## renorm = 'false'
#
## Set up area average masks
#MaskDict = dict([('G',[-70.,70.]),
# ('NH',[20.,70.]),
# ('T',[-20.,20.]),
# ('SH',[-70.,-20.])])
#
##*******************************************************
#CLMlab = str(climst)[2:4]+str(climed)[2:4]
#climchoice = 'anoms'+CLMlab # 'anoms8110'
#
#MCLMlab = str(mclimst)[2:4]+str(mclimed)[2:4]
#mclimchoice = 'anoms'+MCLMlab # 'anoms8110'
#
#print('Year choice: ',styr,edyr, climst, climed)
#
#if ((mask == True) & (mclimchoice != climchoice)):
# print('Oy - your climatology periods are different between your candidate and mask!')
# print( 'Type c for continue or fix it!')
# pdb.set_trace()#
#
## Latitude and longitude gridbox width
#latlg = 5. #5., 4.
#lonlg = 5. #5., 4.
#
#indir = '/data/users/hadkw/WORKING_HADISDH/'+workingdir
#
#ParamDict = dict([('q',['q','q2m','g/kg']),
# ('rh',['RH','rh2m','%rh']),
# ('t',['T','t2m','deg C']),
# ('td',['Td','td2m','deg C']),
# ('tw',['Tw','tw2m','deg C']),
# ('e',['e','e2m','hPa']),
# ('dpd',['DPD','dpd2m','deg C']),
# ('evap',['q','evap','cm w.e.'])])#
#
## Dictionary for looking up variable standard (not actually always standard!!!) names for netCDF output of variables
#StandardNameDict = dict([('q','specific_humidity'),
# ('rh','relative_humidity'),
# ('e','vapour_pressure'),
# ('tw','wetbulb_temperature'),
# ('t','drybulb_temperature'),
# ('td','dewpoint_temperature'),
# ('dpd','dewpoint depression'),
# ('evap','evaporation')])#
#
## Dictionary for looking up variable long names for netCDF output of variables
#LongNameDict = dict([('q','specific_humidity'),
# ('rh','2m relative humidity '),
# ('e','2m vapour_pressure '),
# ('tw','2m wetbulb_temperature '),
# ('t','2m drybulb_temperature '),
# ('td','2m dewpoint_temperature '),
# ('dpd','2m dewpoint depression '),
# ('evap','evaporation from 1by1 ')])
#
#unitees = ParamDict[param][2]
#varname = param#
#
#if (homogtype == 'ERA-Interim') | (homogtype == 'ERA5'):
# infile = indir+'/OTHERDATA/'+ParamDict[param][1]+'_5by5_monthly_anoms1981-2010_'+homogtype+'_data_1979'+str(edyr)+'.nc'
# outfile = indir+'/STATISTICS/TIMESERIES/'+ParamDict[param][1]+'_5by5_monthly_anoms1981-2010_'+homogtype+'_areaTS_1979'+str(edyr)
# # reset varname for ERA
# varname = ParamDict[param][1]
#elif (homogtype == 'MARINE'):
# infile = indir+'/STATISTICS/GRIDS/HadISDH.marine'+ParamDict[param][0]+'.'+version+'_BClocal5by5both_anoms8110_'+thenmon+thenyear+'_cf.nc'
# outfile = indir+'/STATISTICS/TIMESERIES/HadISDH.marine'+ParamDict[param][0]+'.'+version+'_BClocal5by5both_anoms8110_'+nowmon+nowyear+'_areaTS_'+str(styr)+str(edyr)
#elif (homogtype == 'MARINEship'):
# infile = indir+'/STATISTICS/GRIDS/HadISDH.marine'+ParamDict[param][0]+'.'+version+'_BClocalSHIP5by5both_anoms8110_'+thenmon+thenyear+'_cf.nc'
# outfile = indir+'/STATISTICS/TIMESERIES/HadISDH.marine'+ParamDict[param][0]+'.'+version+'_BClocalSHIP5by5both_anoms8110_'+nowmon+nowyear+'_areaTS_'+str(styr)+str(edyr)
#elif (homogtype == 'ID'):
# infile = indir+'/STATISTICS/GRIDS/HadISDH.land'+ParamDict[param][0]+'.'+version+'_FLATgridIDPHA5by5_anoms8110_'+thenmon+thenyear+'_cf.nc'
# outfile = indir+'/STATISTICS/TIMESERIES/HadISDH.land'+ParamDict[param][0]+'.'+version+'_FLATgridIDPHA5by5_anoms8110_'+nowmon+nowyear+'_areaTS_'+str(styr)+str(edyr)
#elif (homogtype == 'PHA'):
# infile = indir+'/STATISTICS/GRIDS/HadISDH.land'+ParamDict[param][0]+'.'+version+'_FLATgridPHA5by5_anoms8110_'+thenmon+thenyear+'_cf.nc'
# outfile = indir+'/STATISTICS/TIMESERIES/HadISDH.land'+ParamDict[param][0]+'.'+version+'_FLATgridPHA5by5_anoms8110_'+nowmon+nowyear+'_areaTS_'+str(styr)+str(edyr)
#elif (homogtype == 'DPD'):
# infile = indir+'/STATISTICS/GRIDS/HadISDH.land'+ParamDict[param][0]+'.'+version+'_FLATgridPHADPD5by5_anoms8110_'+thenmon+thenyear+'_cf.nc'
# outfile = indir+'/STATISTICS/TIMESERIES/HadISDH.land'+ParamDict[param][0]+'.'+version+'_FLATgridPHADPD5by5_anoms8110_'+nowmon+nowyear+'_areaTS_'+str(styr)+str(edyr)
#elif (homogtype == 'RAW'):
# infile = indir+'/STATISTICS/GRIDS/HadISDH.land'+ParamDict[param][0]+'.'+version+'_FLATgridRAW5by5_anoms8110_'+thenmon+thenyear+'_cf.nc'
# outfile = indir+'/STATISTICS/TIMESERIES/HadISDH.land'+ParamDict[param][0]+'.'+version+'_FLATgridRAW5by5_anoms8110_'+nowmon+nowyear+'_areaTS_'+str(styr)+str(edyr)
#elif (homogtype == 'BLEND'):
# infile = indir+'/STATISTICS/GRIDS/HadISDH.blend'+ParamDict[param][0]+'.'+version+'_BClocal5by5both_anoms8110_'+thenmon+thenyear+'_cf.nc'
# outfile = indir+'/STATISTICS/TIMESERIES/HadISDH.blend'+ParamDict[param][0]+'.'+version+'_BClocal5by5both_anoms8110_'+nowmon+nowyear+'_areaTS_'+str(styr)+str(edyr)
#elif (homogtype == 'BLENDship'):
# infile = indir+'/STATISTICS/GRIDS/HadISDH.blend'+ParamDict[param][0]+'.'+version+'_BClocalSHIP5by5both_anoms8110_'+thenmon+thenyear+'_cf.nc'
# outfile = indir+'/STATISTICS/TIMESERIES/HadISDH.blend'+ParamDict[param][0]+'.'+version+'_BClocalSHIP5by5both_anoms8110_'+nowmon+nowyear+'_areaTS_'+str(styr)+str(edyr)#
#
#if (domain == 'land'):
# maskfile = indir+'/STATISTICS/GRIDS/HadISDH.land'+ParamDict[param][0]+'.'+version+'_FLATgridIDPHA5by5_'+mclimchoice+'_'+thenmon+thenyear+'_cf.nc'
#if (domain == 'marine'):
# maskfile = indir+'/STATISTICS/GRIDS/HadISDH.marine'+ParamDict[param][0]+'.'+version+'_BClocalSHIP5by5both_'+mclimchoice+'_'+thenmon+thenyear+'_cf.nc'
#if (domain == 'blend'):
# maskfile = indir+'/STATISTICS/GRIDS/HadISDH.blend'+ParamDict[param][0]+'.'+version+'_FLATgridIDPHA5by5_'+mclimchoice+'_'+thenmon+thenyear+'_cf.nc'#
#
#inlandcover = '/data/users/hadkw/WORKING_HADISDH/'+workingdir+'/OTHERDATA/HadCRUT.4.3.0.0.land_fraction.nc'#
#
#if (mask == True):
# outfile = outfile+'_MASK'#
#
#if (isanom == False):
# outfile = outfile+'_ABS'
#
#
## Time and dimension variables
#nyrs = (edyr+1)-styr
#nmons = nyrs*12
#stlt = -90+(latlg/2.)
#stln = -180+(lonlg/2.)
#nlats = int(180/latlg)
#nlons = int(360/lonlg)#
#
#mnyrs = (medyr+1)-mstyr
#mnmons = mnyrs*12#
#
## Get pointers for pulling out the matching time slice of masked data
#mskstpt = (styr - mstyr) * 12
#mskedpt = mnmons
#
#lats = (np.arange(nlats)*latlg) + stlt
#lons = (np.arange(nlons)*lonlg) + stln
mdi = -1e30 # missing data indicator
#
############################################################################
# SUBROUTINES #
############################################################################
# AreaMean
def AreaMean(DataField, TheLats, MaskField=None, Cover=None, TheMDI=-1e30):
    '''
    Compute cosine(latitude)-weighted spatial area averages, one per time step.
    Based on original IDL code by Tim Osborn (globalmean.pro).

    Missing values (== TheMDI) are excluded from both the weighted sum and the
    weight total, so each average is taken over the available boxes only.
    DOES NOT ASSUME THE MASK IS IDENTICAL FOR EACH TIME STEP.

    INPUTS:
    DataField - (time, lat, lon) np.array, or a single (lat, lon) field;
                may contain missing data (TheMDI)
    TheLats[:] - np.array of gridbox-centre latitudes in degrees
    Optional:
    MaskField - (time, lat, lon) or (lat, lon) np.array; boxes equal to TheMDI
                in the mask are excluded. If not supplied, all boxes are used.
    Cover - if supplied (any array-like), the per-time-step count of
            contributing boxes is also returned
    TheMDI - missing data indicator (default -1e30, matching module-level mdi)

    OUTPUTS:
    DataTS[:] - np.array time series of area averages (a scalar for 2D input),
                set to TheMDI for time steps with no contributing boxes
    (DataTS, Cover) if Cover was supplied.

    RAISES:
    ValueError - if DataField is not 2D/3D or MaskField has a mismatched shape
                 (previously these error paths dropped into pdb.set_trace()).
    '''
    fullsize = np.shape(DataField)
    if len(fullsize) not in (2, 3):
        raise ValueError('DataField must be 2D or 3D')

    # Promote a single 2D field to 3D with one time step: all indexing below
    # assumes (time, lat, lon). (Bug fix: the old code indexed a 2D field with
    # three subscripts, which raised IndexError.)
    if len(fullsize) == 2:
        DataField = np.reshape(DataField, (1,) + tuple(fullsize))
    Ntims, Nlats, Nlons = np.shape(DataField)

    # Validate / normalise the mask
    masksize = np.shape(MaskField)
    if len(masksize) > 0:
        if len(masksize) == 3:
            if masksize != (Ntims, Nlats, Nlons):
                raise ValueError('Mask is wrong size')
        elif masksize != (Nlats, Nlons):
            raise ValueError('Mask is wrong size')
        if len(masksize) == 2:
            # A single 2D mask applies to every time step
            MaskField = np.reshape(MaskField, (1, Nlats, Nlons))
    else:
        # No mask supplied: average over all boxes. (Bug fix: the old code
        # built this with np.empty and then compared *uninitialised memory*
        # against the missing data indicator.)
        MaskField = np.ones((1, Nlats, Nlons), dtype=float)
    Ntimsmask = np.shape(MaskField)[0]

    sumval = np.zeros(Ntims, dtype=float)   # weighted sums per time step
    sumarea = np.zeros(Ntims, dtype=float)  # weight totals per time step

    # If Cover was supplied then count the contributing boxes per time step
    CoverTest = np.shape(Cover)
    if len(CoverTest) > 0:
        Cover = np.zeros(Ntims, dtype=float)

    for ln in range(Nlons):
        for lt in range(Nlats):
            temp_data = np.copy(DataField[:, lt, ln])
            if Ntims == Ntimsmask:
                # Time-varying mask: blank out data wherever the mask is missing
                mask_cover = np.where(MaskField[:, lt, ln] == TheMDI)
                if len(mask_cover[0]) > 0:
                    temp_data[mask_cover] = TheMDI
            else:
                # Static mask: skip this box entirely if it is masked out
                if MaskField[0, lt, ln] == TheMDI:
                    continue
            kl = np.where(temp_data != TheMDI)
            # (Bug fix: must test len(kl[0]) - len(kl) is the number of
            # dimensions returned by np.where, which is always 1 here.)
            if len(kl[0]) > 0:
                weight = cos(radians(TheLats[lt]))
                sumval[kl] = sumval[kl] + temp_data[kl] * weight
                sumarea[kl] = sumarea[kl] + weight
                if len(CoverTest) > 0:
                    Cover[kl] = Cover[kl] + 1.

    # Normalise by the accumulated weights; flag empty time steps as missing
    gots = np.where(sumarea > 0)
    if len(gots[0]) > 0:
        sumval[gots] = sumval[gots] / sumarea[gots]
    misses = np.where(sumarea == 0)
    if len(misses[0]) > 0:
        sumval[misses] = TheMDI

    if Ntims == 1:
        # A single field in gives a scalar out
        sumval = sumval[0]

    if len(CoverTest) > 0:
        return sumval, Cover
    return sumval
############################################################################
# WriteNetCDF
def WriteNetCDF(Filename,TheGArray,TheNHArray,TheTArray,TheSHArray,TheTimes,TheStYr, TheEdYr, TheClimStart, TheClimEnd, TheName, TheStandardName, TheLongName, TheUnit, TheRegions):
    '''
    Write the four regional area-average time series to a NetCDF 4 file.

    INPUTS:
    Filename - string file name (".nc" is appended)
    TheGArray[:] - time array of global average values
    TheNHArray[:] - time array of northern hemisphere average values
    TheTArray[:] - time array of tropical average values
    TheSHArray[:] - time array of southern hemisphere average values
    TheTimes[:] - times in days since TheStYr, Jan 1st
    TheStYr - integer start year, assumes Jan start
    TheEdYr - integer end year, assumes Dec end
    TheClimStart - integer start of climatology period, Jan start
    TheClimEnd - integer end of climatology period, Dec end
    TheName - string short name of the variable, e.g. q2m
    TheStandardName - string standard name of the variable (currently unused)
    TheLongName - string long name of the variable
    TheUnit - string unit of the variable
    TheRegions - dictionary mapping G, NH, T and SH to [lower lat, upper lat]
    OUTPUTS:
    None
    '''
    # Each series is written as its own compressed f4 variable; data beyond
    # 4 significant figures is not preserved (least_significant_digit=4).
    ncfw = Dataset(Filename+'.nc','w',format='NETCDF4_CLASSIC')
    ncfw.createDimension('time',len(TheTimes))

    # Time coordinate variable and its attributes
    timevar = ncfw.createVariable('time','f4',('time',))
    timevar.standard_name = 'time'
    timevar.long_name = 'time'
    timevar.units = 'days since '+str(TheStYr)+'-1-1 00:00:00'
    timevar.start_year = str(TheStYr)
    timevar.end_year = str(TheEdYr)
    timevar[:] = TheTimes

    # One data variable per region, created in the original order.
    # (Distinct local names per variable - the old code reused MyVarT for
    # both the time variable and the tropics variable.)
    region_specs = (('glob', 'global average anomalies', 'G', TheGArray),
                    ('nhem', 'northern hemisphere average anomalies', 'NH', TheNHArray),
                    ('trop', 'tropical average anomalies', 'T', TheTArray),
                    ('shem', 'southern hemisphere average anomalies', 'SH', TheSHArray))
    for prefix, phrase, regkey, series in region_specs:
        datavar = ncfw.createVariable(prefix+'_'+TheName+'_anoms','f4',('time',),fill_value = mdi,zlib=True,least_significant_digit=4)
        datavar.long_name = TheLongName+' '+phrase+' '+'%5.1f' % (TheRegions[regkey][0])+' to '+'%5.1f' % (TheRegions[regkey][1])
        datavar.units = TheUnit
        datavar.valid_min = np.min(series)
        datavar.valid_max = np.max(series)
        datavar[:] = series[:]

    ncfw.close()
    return
############################################################################
# WriteText
def WriteText(Filename,TheGArray,TheNHArray,TheTArray,TheSHArray,TheTimes,TheStYr,TheEdYr):
    '''
    Append the regional series to two ASCII files: one monthly, one annual.

    Writes <Filename>_monthly.dat with one row per month and
    <Filename>_annual.dat with one row per complete calendar year.
    An annual value requires at least 11 non-missing months; otherwise it
    is set to the missing data indicator.
    '''
    # Minimum number of non-missing months needed for an annual mean
    MinThresh = 11

    # NOTE: files are opened in append mode, so reruns add to existing files.
    with open(Filename+'_annual.dat', "a") as ann, open(Filename+'_monthly.dat', "a") as mon:
        # Column headers
        ann.write("DATE GLOBAL N_HEMI TROPICS S_HEMI\n")
        mon.write(" DATE GLOBAL N_HEMI TROPICS S_HEMI\n")

        yy = 0
        mm = 0
        for tt in range(len(TheTimes)):
            # Monthly row: YYYYMM followed by the four regional values
            monthlabel = '%02i' % (mm+1)
            mon.write('{:4d}{:2s} {:6.2f} {:6.2f} {:6.2f} {:6.2f}\n'.format(yy+TheStYr,monthlabel,TheGArray[tt],TheNHArray[tt],TheTArray[tt],TheSHArray[tt]))
            mm = mm+1
            if (mm == 12):
                # Year complete: compute the annual mean of each region over
                # the last 12 months (single loop replaces four copied blocks)
                annvals = []
                for series in (TheGArray, TheNHArray, TheTArray, TheSHArray):
                    window = series[tt-11:tt+1]
                    present = np.where(window > mdi)
                    if (len(present[0]) >= MinThresh):
                        annvals.append(np.mean(window[present]))
                    else:
                        annvals.append(mdi)
                ann.write('{:4d} {:6.2f} {:6.2f} {:6.2f} {:6.2f}\n'.format(yy+TheStYr,annvals[0],annvals[1],annvals[2],annvals[3]))
                yy = yy+1
                mm = 0

    return
############################################################################
# MAIN #
############################################################################
def main(argv):
    """Read a gridded (5by5) monthly netCDF file and write area-average series.

    Parses command-line options, reads the chosen HadISDH or ERA5 grid,
    optionally masks ERA5 to HadISDH coverage, computes cosine-weighted area
    averages for the globe, N hemisphere, tropics and S hemisphere, and writes
    them out as netCDF plus annual/monthly ASCII files.

    argv: list of command-line arguments (sys.argv[1:]), e.g.
          --var q --typee LAND --year1 1973 --year2 2018
    """
    # INPUT PARAMETERS AS STRINGS!!!! (defaults used when options are absent)
    var = 'q' # 'q','rh','e','td','tw','t','dpd'
    typee = 'LAND' # 'LAND','RAW','OTHER', 'BLEND', 'BLENDSHIP', 'MARINE', 'MARINESHIP' # domain does not need to be set correctly!!!
    # can also be 'ERA5' 'ERA5LAND','ERA5MARINE' 'ERA5MARINEMASK' 'ERA5LANDMASK'
    year1 = '1973' # Start year of trend
    year2 = '2018' # End year of trend

    try:
        opts, args = getopt.getopt(argv, "hi:",
	                           ["var=","typee=","year1=","year2="])
    except getopt.GetoptError:
        print('Usage (as strings) MakeGridTrends.py --var <q> --typee <IDPHA> --year1 <1973> --year2 <2018>')
        sys.exit(2)

    # Copy option values over the defaults.
    # NOTE(review): the try/except blocks below can never fire - getopt
    # always delivers arg as a str - they are kept from the original.
    for opt, arg in opts:
        if opt == "--var":
            try:
                var = arg
            except:
                sys.exit("Failed: var not a string")
        elif opt == "--typee":
            try:
                typee = arg
            except:
                sys.exit("Failed: typee not a string")
        elif opt == "--year1":
            try:
                year1 = arg
            except:
                sys.exit("Failed: year1 not an integer")
        elif opt == "--year2":
            try:
                year2 = arg
            except:
                sys.exit("Failed: year2 not an integer")

    assert year1 != -999 and year2 != -999, "Year not specified."

    print(var,typee,year1, year2)

    #****************** LONGER LIFE EDITABLES****************
    # TWEAK ME!!!!
    # Which start/end year of the complete dataset?
    styr = 1973 # 1973
    edyr = 2019
    # *** CHOOSE WHETHER TO WORK WITH ANOMALIES OR ACTUALS - COULD ADD RENORMALISATION IF DESIRED ***
    isanom = True # 'false' for actual values, 'true' for anomalies
    # Which climatology period to work with?
    climST = str(1981) #1976 or 1981
    climED = str(2010) #2005 or 2010
    climBIT = 'anoms'+climST[2:4]+climED[2:4]
    # Which working file dates?
    # NOTE(review): nowmon/nowyear appear unused since the filename scheme
    # dropped the nowmonnowyear element (see comments further down).
    nowmon = 'JAN'
    nowyear = '2020'
    thenmon = 'JAN'
    thenyear = '2020'
    # What domain? (inferred from typee; also fixes the dataset version)
    if (typee == 'MARINE') | (typee == 'MARINESHIP') | (typee == 'ERA5MARINE') | (typee == 'ERA5MARINEMASK'):
        domain = 'marine'
        version = '1.0.0.2019f'
    elif (typee == 'BLEND') | (typee == 'BLENDSHIP') | (typee == 'ERA5') | (typee == 'ERA5MASK'):
        domain = 'blend'
        version = '1.0.0.2019f'
    else:
        domain = 'land'
        version = '4.2.0.2019f'
    # Set up the trend years
    sttrd = int(year1)
    edtrd = int(year2)
    # Latitude and Longitude gridbox width and variable names
    latlg = 5.
    lonlg = 5.
    #latlg = 1.
    #lonlg = 1.
    LatInfo = ['latitude']
    LonInfo = ['longitude']
    # Set up area average masks (region name -> [lower lat, upper lat])
    MaskDict = dict([('G',[-70.,70.]),
	             ('NH',[20.,70.]),
	             ('T',[-20.,20.]),
	             ('SH',[-70.,-20.])])
    # Time and dimension variables
    # nyrs = (edyr+1)-styr
    # nmons = nyrs*12
    nyrs = (edtrd+1)-sttrd
    nmons = nyrs*12
    stlt = -90+(latlg/2.)
    stln = -180+(lonlg/2.)
    nlats = int(180/latlg)
    nlons = int(360/lonlg)
    # Gridbox-centre latitudes and longitudes
    lats = (np.arange(nlats)*latlg) + stlt
    lons = (np.arange(nlons)*lonlg) + stln
    WORKINGDIR = '/data/users/hadkw/WORKING_HADISDH/UPDATE20'+str(edyr)[2:4]
    indir = WORKINGDIR+'/STATISTICS/GRIDS/'
    outdir = WORKINGDIR+'/STATISTICS/TIMESERIES/'
    # If we're working with ERA5 then set INDIR to OTHERDATA
    if (typee.find('ERA5') >= 0):
        indir = WORKINGDIR+'/OTHERDATA/'
        indirH = WORKINGDIR+'/STATISTICS/GRIDS/'
    # END OF EDITABLES**********************************************************

    # Dictionaries for filename and other things
    # param -> [filename element, ERA variable stem, unit string]
    ParamDict = dict([('q',['q','q2m','g/kg']),
	              ('rh',['RH','rh2m','%rh']),
	              ('t',['T','t2m','deg C']),
	              ('td',['Td','td2m','deg C']),
	              ('tw',['Tw','tw2m','deg C']),
	              ('e',['e','e2m','hPa']),
	              ('dpd',['DPD','dpd2m','deg C']),
	              ('evap',['q','evap','cm w.e.'])])
    # Dictionary for looking up variable standard (not actually always standard!!!) names for netCDF output of variables
    NameDict = dict([('q',['specific_humidity',' decadal trend in specific humidity anomaly ('+climST+' to '+climED+' base period)']),
	             ('rh',['relative_humidity',' decadal trend in relative humidity anomaly ('+climST+' to '+climED+' base period)']),
	             ('e',['vapour_pressure',' decadal trend in vapour pressure anomaly ('+climST+' to '+climED+' base period)']),
	             ('tw',['wetbulb_temperature',' decadal trend in wetbulb temperature anomaly ('+climST+' to '+climED+' base period)']),
	             ('t',['drybulb_temperature',' decadal trend in dry bulb temperature anomaly ('+climST+' to '+climED+' base period)']),
	             ('td',['dewpoint_temperature',' decadal trend in dew point temperature anomaly ('+climST+' to '+climED+' base period)']),
	             ('dpd',['dewpoint depression',' decadal trend in dew point depression anomaly ('+climST+' to '+climED+' base period)']),
	             ('evap',['evaporation',' decadal trend in evaporation anomaly ('+climST+' to '+climED+' base period)'])])
    # unitees = ParamDict[param][2]
    # varname = param
    unitees = ParamDict[var][2]
    varname = var

    # Work out the gridded-file name element (fileblurb) for the chosen domain
    if domain == 'land':
        DatTyp = 'IDPHA'
        if (var == 'dpd'):
            DatTyp = 'PHA'
        if (var == 'td'):
            DatTyp = 'PHADPD'
        fileblurb = 'FLATgrid'+DatTyp+'5by5'
    elif domain == 'marine':
        if (typee == 'MARINE'):
            fileblurb = 'BClocal5by5both'
        elif (typee == 'MARINESHIP') | (typee == 'ERA5MARINEMASK') | (typee == 'ERA5MARINE'):
            fileblurb = 'BClocalSHIP5by5both'
    elif domain == 'blend':
        DatTyp = 'IDPHA'
        if (var == 'dpd'):
            DatTyp = 'PHA'
        if (var == 'td'):
            DatTyp = 'PHADPD'
        if (typee == 'BLEND'):
            fileblurb = 'FLATgrid'+DatTyp+'BClocalboth5by5'
        elif (typee == 'BLENDSHIP') | (typee == 'ERA5MASK') | (typee == 'ERA5'):
            fileblurb = 'FLATgrid'+DatTyp+'BClocalSHIPboth5by5'

    # NOTE(review): inlandcover, lons, varname and outfileH are set up but not
    # used further down - kept from the original.
    inlandcover = WORKINGDIR+'/OTHERDATA/HadCRUT.4.3.0.0.land_fraction.nc'
    # infile = 'HadISDH.'+domain+ParamDict[var][0]+'.'+version+'_'+fileblurb+'_'+climBIT+'_cf'
    infile = 'HadISDH.'+domain+ParamDict[var][0]+'.'+version+'_'+fileblurb+'_'+climBIT+'_'+thenmon+thenyear+'_cf'
    outfile = 'HadISDH.'+domain+ParamDict[var][0]+'.'+version+'_'+fileblurb+'_'+climBIT+'_areaTS_'+str(sttrd)+str(edtrd) #70S-70N
    if (typee.find('ERA5') >= 0):
        infile = var+'2m_monthly_5by5_ERA5_1979'+str(edyr)
        outfile = var+'2m_monthly_5by5_ERA5_'+climBIT+'_areaTS_'+str(sttrd)+str(edtrd) #70S-70N
        # HadISDH equivalents used when masking ERA5 to HadISDH coverage
        # infileH = 'HadISDH.'+domain+ParamDict[var][0]+'.'+version+'_'+fileblurb+'_'+climBIT+'_cf'
        infileH = 'HadISDH.'+domain+ParamDict[var][0]+'.'+version+'_'+fileblurb+'_'+climBIT+'_'+thenmon+thenyear+'_cf'
        outfileH = 'HadISDH.'+domain+ParamDict[var][0]+'.'+version+'_'+fileblurb+'_'+climBIT+'_areaTS_'+str(sttrd)+str(edtrd) #70S-70N
    # Removed the nowmonnowyear thenmonthenyear bits
    # infile = 'HadISDH.'+domain+ParamDict[var][0]+'.'+version+'_'+fileblurb+'_'+climBIT+'_'+thenmon+thenyear+'_cf'
    # outfile = 'HadISDH.'+domain+ParamDict[var][0]+'.'+version+'_'+fileblurb+'_'+climBIT+'_'+nowmon+nowyear+'_areaTS_'+str(sttrd)+str(edtrd) #70S-70N

    if (isanom == False):
        outfile = outfile+'_ABS'

    # Get Data: pick the netCDF variable names to read for this source/domain
    if (typee.find('ERA') >= 0):
        # ERA records start in 1979; remember the HadISDH start for alignment
        styrh = np.copy(styr)
        styr = 1979
        if (isanom == True):
            if (domain == 'land'):
                ReadInfo = [var+'2m_anoms_land','time']
                outfile = outfile+'_land'
            if (domain == 'marine'):
                ReadInfo = [var+'2m_anoms_ocean','time']
                outfile = outfile+'_marine'
            if (domain == 'blend'):
                ReadInfo = [var+'2m_anoms','time']
            ReadInfoH = [var+'_anoms','time']
        else:
            ReadInfo = [var+'2m','time']
            ReadInfoH = [var+'_abs','time']
    else:
        if (isanom == True):
            ReadInfo = [var+'_anoms','time']
        else:
            ReadInfo = [var+'_abs','time']

    ## read in files
    #LatInfo = ['latitude']
    #LonInfo = ['longitude']
    #
    #if (isanom == True):
    #    if (homogtype == 'ERA-Interim') | (homogtype == 'ERA5'):
    #        if (domain == 'land'):
    #            ReadInfo = [varname+'_anoms_land','time']
    #            outfile = outfile+'_land'
    #        if (domain == 'marine'):
    #            ReadInfo = [varname+'_anoms_ocean','time']
    #            outfile = outfile+'_marine'
    #    else:
    #        ReadInfo = [varname+'_anoms','time']
    #else:
    #    if (homogtype == 'ERA-Interim') | (homogtype == 'ERA5'):
    #        if (domain == 'land'):
    #            ReadInfo = [varname+'_land','time']
    #            outfile = outfile+'_land'
    #        if (domain == 'marine'):
    #            ReadInfo = [varname+'_ocean','time']
    #            outfile = outfile+'_land'
    #    else:
    #        ReadInfo = [varname+'_abs','time']

    print('Reading in the data for :',typee)
    #print('Reading in the data for :',homogtype)
    # TmpVals,Latitudes,Longitudes = GetGrid4(infile,ReadInfo,LatInfo,LonInfo)
    TmpVals,Latitudes,Longitudes = GetGrid4(indir+infile+'.nc',ReadInfo,LatInfo,LonInfo)

    # Separate out data and times
    TheData = TmpVals[0]
    Times = TmpVals[1]
    TmpVals = []

    # Check the mdis = IDL output netCDF differs from Python output
    bads = np.where(TheData < -10000)
    if (len(bads[0]) > 0):
        TheData[bads] = mdi

    # If we're masking ERA then read in HadISDH and blank ERA where HadISDH is missing
    if (typee.find('MASK') >= 0):
        print('Masking ERA5')
        outfile = outfile+'_mask'
        TmpValsH,LatitudesH,LongitudesH = GetGrid4(indirH+infileH+'.nc',ReadInfoH,LatInfo,LonInfo)

        # Separate out data and times
        TheDataH = TmpValsH[0]
        TimesH = TmpValsH[1]
        TmpValsH = []

        # Check the mdis = IDL output netCDF differs from Python output
        bads = np.where(TheDataH < -10000)
        if (len(bads[0]) > 0):
            TheDataH[bads] = mdi

        # Make HadISDH start in the same years
        TheDataH = TheDataH[(styr-styrh)*12:((edyr-styrh) + 1)*12,:,:]

        # Now mask the ERA data with HadISDH missing data
        TheData[np.where(TheDataH == mdi)] = mdi

    ## Now if we're masking then read in the mask for the time slice of ERA-Interim
    #if (mask == True):
    #
    #    SliceInfo = dict([('TimeSlice',[mskstpt,mskedpt]),
    #                      ('LatSlice',[0,nlats]),
    #                      ('LonSlice',[0,nlons])])
    #
    #    if (isanom == True):
    #        ReadInfo = [param+'_anoms']
    #    else:
    #        ReadInfo = [param+'_abs']
    #
    # NOTE(review): this print is a leftover from the commented-out mask code
    # above and fires for every run, not just masked ones.
    print('Reading in the mask data for :',typee)
    # print('Reading in the mask data for :',homogtype)
    # TmpVals,Latitudes,Longitudes = GetGrid4Slice(maskfile,ReadInfo,SliceInfo,LatInfo,LonInfo)
    #
    #    # Separate out data and times
    #    MSKTheData = TmpVals
    ##    MSKTimes = TmpVals[1]
    #    TmpVals = []
    #
    #    # Check the mdis = IDL output netCDF differs from Python output
    #    bads = np.where(MSKTheData < -10000)
    #    if (len(bads[0]) > 0):
    #        MSKTheData[bads] = mdi
    #
    #    # mask out points in candidate that do not have data in the mask
    #    bads = np.where(MSKTheData <= mdi)
    ##    pdb.set_trace()
    #    if (len(bads[0]) > 0):
    #        TheData[bads] = mdi

    ## # make anomalies from the monthlies if you want to be precise about anomalising with same coverage as HadISDH
    ## (old IDL code retained for reference)
    ## newq_values=make_array(nlons,nlats,nmons,/float,value=mdi)
    ## FOR ltt=0,nlats-1 DO BEGIN
    ## FOR lnn=0,nlons-1 DO BEGIN
    ## subarr=REFORM(q_values(lnn,ltt,*),12,nyrs)
    ## FOR mm=0,11 DO BEGIN
    ## gots=WHERE(subarr(mm,*) NE mdi,count)
    ## climsub=subarr(mm,mclimst-styr:mclimst-styr)
    ## gotsC=WHERE(climsub NE mdi,countC)
    ## IF (countC GE 15) THEN subarr(mm,gots)=subarr(mm,gots)-MEAN(climsub(gotsC)) ELSE subarr(mm,*)=mdi
    ## ENDFOR
    ## newq_values(lnn,ltt,*)=REFORM(subarr,nmons)
    ## ENDFOR
    ## ENDFOR
    ## #stop
    ## q_values=newq_values

    # make spatial area masks - set anything greater than 70 deg lat to mdi
    global_mask = np.zeros((nlats,nlons),dtype = float)
    global_mask.fill(1)
    nhem_mask = np.copy(global_mask)
    shem_mask = np.copy(global_mask)
    trop_mask = np.copy(global_mask)
    for deg in range(nlats):
        if (lats[deg] < MaskDict['G'][0]) | (lats[deg] > MaskDict['G'][1]):
            global_mask[deg,:] = mdi
        if (lats[deg] < MaskDict['NH'][0]) | (lats[deg] > MaskDict['NH'][1]):
            nhem_mask[deg,:] = mdi
        if (lats[deg] < MaskDict['T'][0]) | (lats[deg] > MaskDict['T'][1]):
            trop_mask[deg,:] = mdi
        if (lats[deg] < MaskDict['SH'][0]) | (lats[deg] > MaskDict['SH'][1]):
            shem_mask[deg,:] = mdi

    # Repeat each 2D region mask over all months to match the data shape
    global_mask_3d = np.repeat(global_mask[np.newaxis,:,:],nmons, axis = 0)
    nhem_mask_3d = np.repeat(nhem_mask[np.newaxis,:,:],nmons, axis = 0)
    shem_mask_3d = np.repeat(shem_mask[np.newaxis,:,:],nmons, axis = 0)
    trop_mask_3d = np.repeat(trop_mask[np.newaxis,:,:],nmons, axis = 0)

    ##CoverTS = np.empty(nmons,dtype = float)
    ##CoverTS.fill(mdi)
    ##glob_avg_ts,CoverTS = AreaMean(TheData,lats,global_mask_3d,CoverTS)
    # Cosine-weighted area averages for each region
    glob_avg_ts = AreaMean(TheData,lats,global_mask_3d)
    print(len(glob_avg_ts),np.max(glob_avg_ts),np.min(glob_avg_ts))
    #pdb.set_trace()
    nhem_avg_ts = AreaMean(TheData,lats,nhem_mask_3d)
    print(len(nhem_avg_ts),np.max(nhem_avg_ts),np.min(nhem_avg_ts))
    trop_avg_ts = AreaMean(TheData,lats,trop_mask_3d)
    print(len(trop_avg_ts),np.max(trop_avg_ts),np.min(trop_avg_ts))
    shem_avg_ts = AreaMean(TheData,lats,shem_mask_3d)
    print(len(shem_avg_ts),np.max(shem_avg_ts),np.min(shem_avg_ts))

    # save to file as netCDF and .dat
    # WriteNetCDF(outfile,glob_avg_ts,nhem_avg_ts,trop_avg_ts,shem_avg_ts,Times,styr, edyr, climst, climed, ParamDict[param][0], StandardNameDict[param], LongNameDict[param], unitees, MaskDict)
    WriteNetCDF(outdir+outfile,glob_avg_ts,nhem_avg_ts,trop_avg_ts,shem_avg_ts,Times,styr, edyr, climST, climED, ParamDict[var][0], NameDict[var][0], NameDict[var][1], unitees, MaskDict)
    # WriteText(outfile,glob_avg_ts,nhem_avg_ts,trop_avg_ts,shem_avg_ts,Times,styr, edyr)
    WriteText(outdir+outfile,glob_avg_ts,nhem_avg_ts,trop_avg_ts,shem_avg_ts,Times,styr, edyr)

    # Note if any of the series have missing data because at these large scales they should not
    # (drops into the debugger so the run can be inspected)
    if (len(np.where(glob_avg_ts <= mdi)[0]) > 0):
        print('Missing months for Global average: ',len(np.where(glob_avg_ts <= mdi)[0]))
        pdb.set_trace()
    if (len(np.where(nhem_avg_ts <= mdi)[0]) > 0):
        print('Missing months for NHemi average: ',len(np.where(nhem_avg_ts <= mdi)[0]))
        pdb.set_trace()
    if (len(np.where(trop_avg_ts <= mdi)[0]) > 0):
        print('Missing months for Tropics average: ',len(np.where(trop_avg_ts <= mdi)[0]))
        pdb.set_trace()
    if (len(np.where(shem_avg_ts <= mdi)[0]) > 0):
        print('Missing months for Shemi average: ',len(np.where(shem_avg_ts <= mdi)[0]))
        pdb.set_trace()

    print('And we are done!')
# Script entry point: forward the command-line arguments (minus the program
# name) to main().
if __name__ == '__main__':
    main(sys.argv[1:])
| cc0-1.0 |
evgchz/scikit-learn | sklearn/datasets/species_distributions.py | 19 | 7870 | """
=============================
Species distribution dataset
=============================
This dataset represents the geographic distribution of species.
The dataset is provided by Phillips et. al. (2006).
The two species are:
- `"Bradypus variegatus"
<http://www.iucnredlist.org/apps/redlist/details/3038/0>`_ ,
the Brown-throated Sloth.
- `"Microryzomys minutus"
<http://www.iucnredlist.org/apps/redlist/details/13408/0>`_ ,
also known as the Forest Small Rice Rat, a rodent that lives in Peru,
Colombia, Ecuador, Peru, and Venezuela.
References:
* `"Maximum entropy modeling of species geographic distributions"
<http://www.cs.princeton.edu/~schapire/papers/ecolmod.pdf>`_
S. J. Phillips, R. P. Anderson, R. E. Schapire - Ecological Modelling,
190:231-259, 2006.
Notes:
* See examples/applications/plot_species_distribution_modeling.py
for an example of using this dataset
"""
# Authors: Peter Prettenhofer <peter.prettenhofer@gmail.com>
# Jake Vanderplas <vanderplas@astro.washington.edu>
#
# License: BSD 3 clause
from io import BytesIO
from os import makedirs
from os.path import join
from os.path import exists
try:
# Python 2
from urllib2 import urlopen
PY2 = True
except ImportError:
# Python 3
from urllib.request import urlopen
PY2 = False
import numpy as np
from sklearn.datasets.base import get_data_home, Bunch
from sklearn.externals import joblib
# Remote locations of the raw dataset archives and the local cache file name.
# NOTE: URLs must be joined with plain string concatenation, not os.path.join,
# which would produce backslash-separated "URLs" on Windows.
DIRECTORY_URL = "http://www.cs.princeton.edu/~schapire/maxent/datasets/"

SAMPLES_URL = DIRECTORY_URL + "samples.zip"
COVERAGES_URL = DIRECTORY_URL + "coverages.zip"

DATA_ARCHIVE_NAME = "species_coverage.pkz"
def _load_coverage(F, header_length=6, dtype=np.int16):
"""Load a coverage file from an open file object.
This will return a numpy array of the given dtype
"""
header = [F.readline() for i in range(header_length)]
make_tuple = lambda t: (t.split()[0], float(t.split()[1]))
header = dict([make_tuple(line) for line in header])
M = np.loadtxt(F, dtype=dtype)
nodata = header[b'NODATA_value']
if nodata != -9999:
print(nodata)
M[nodata] = -9999
return M
def _load_csv(F):
    """Load csv file.

    Parameters
    ----------
    F : file object
        CSV file open in byte mode.

    Returns
    -------
    rec : np.ndarray
        record array representing the data
    """
    header_line = F.readline()
    if not PY2:
        # Numpy recarray wants Python 3 str, but reading in byte mode
        # yielded bytes -- decode the header before splitting.
        header_line = header_line.decode('ascii')
    column_names = header_line.strip().split(',')

    records = np.loadtxt(F, skiprows=0, delimiter=',', dtype='a22,f4,f4')
    records.dtype.names = column_names
    return records
def construct_grids(batch):
    """Construct the map grid from the batch object

    Parameters
    ----------
    batch : Batch object
        The object returned by :func:`fetch_species_distributions`

    Returns
    -------
    (xgrid, ygrid) : 1-D arrays
        The grid corresponding to the values in batch.coverages
    """
    # Cell centers start one grid step in from the lower-left corner.
    x_start = batch.x_left_lower_corner + batch.grid_size
    y_start = batch.y_left_lower_corner + batch.grid_size
    x_stop = x_start + batch.Nx * batch.grid_size
    y_stop = y_start + batch.Ny * batch.grid_size

    # One coordinate per grid column / row.
    xgrid = np.arange(x_start, x_stop, batch.grid_size)
    ygrid = np.arange(y_start, y_stop, batch.grid_size)

    return (xgrid, ygrid)
def fetch_species_distributions(data_home=None,
                                download_if_missing=True):
    """Loader for species distribution dataset from Phillips et. al. (2006)

    Parameters
    ----------
    data_home : optional, default: None
        Specify another download and cache folder for the datasets. By default
        all scikit learn data is stored in '~/scikit_learn_data' subfolders.

    download_if_missing: optional, True by default
        If False, raise a IOError if the data is not locally available
        instead of trying to download the data from the source site.

    Returns
    --------
    The data is returned as a Bunch object with the following attributes:

    coverages : array, shape = [14, 1592, 1212]
        These represent the 14 features measured at each point of the map grid.
        The latitude/longitude values for the grid are discussed below.
        Missing data is represented by the value -9999.

    train : record array, shape = (1623,)
        The training points for the data.  Each point has three fields:

        - train['species'] is the species name
        - train['dd long'] is the longitude, in degrees
        - train['dd lat'] is the latitude, in degrees

    test : record array, shape = (619,)
        The test points for the data.  Same format as the training data.

    Nx, Ny : integers
        The number of longitudes (x) and latitudes (y) in the grid

    x_left_lower_corner, y_left_lower_corner : floats
        The (x,y) position of the lower-left corner, in degrees

    grid_size : float
        The spacing between points of the grid, in degrees

    Notes
    ------

    This dataset represents the geographic distribution of species.
    The dataset is provided by Phillips et. al. (2006).

    The two species are:

    - `"Bradypus variegatus"
      <http://www.iucnredlist.org/apps/redlist/details/3038/0>`_ ,
      the Brown-throated Sloth.

    - `"Microryzomys minutus"
      <http://www.iucnredlist.org/apps/redlist/details/13408/0>`_ ,
      also known as the Forest Small Rice Rat, a rodent that lives in Peru,
      Colombia, Ecuador, Peru, and Venezuela.

    References
    ----------

    * `"Maximum entropy modeling of species geographic distributions"
      <http://www.cs.princeton.edu/~schapire/papers/ecolmod.pdf>`_
      S. J. Phillips, R. P. Anderson, R. E. Schapire - Ecological Modelling,
      190:231-259, 2006.

    Notes
    -----

    * See examples/applications/plot_species_distribution_modeling.py
      for an example of using this dataset with scikit-learn

    NOTE(review): despite being documented above, ``download_if_missing``
    is never consulted below -- the archives are always downloaded when the
    local cache is absent.  Confirm against the upstream implementation.
    """
    data_home = get_data_home(data_home)
    if not exists(data_home):
        makedirs(data_home)

    # Define parameters for the data files.  These should not be changed
    # unless the data model changes.  They will be saved in the npz file
    # with the downloaded data.
    extra_params = dict(x_left_lower_corner=-94.8,
                        Nx=1212,
                        y_left_lower_corner=-56.05,
                        Ny=1592,
                        grid_size=0.05)
    dtype = np.int16

    if not exists(join(data_home, DATA_ARCHIVE_NAME)):
        # No cached pickle: download both zip archives and parse them.
        print('Downloading species data from %s to %s' % (SAMPLES_URL,
                                                          data_home))
        X = np.load(BytesIO(urlopen(SAMPLES_URL).read()))

        # The samples archive contains one CSV per split, identified by name.
        for f in X.files:
            fhandle = BytesIO(X[f])
            if 'train' in f:
                train = _load_csv(fhandle)
            if 'test' in f:
                test = _load_csv(fhandle)

        print('Downloading coverage data from %s to %s' % (COVERAGES_URL,
                                                           data_home))
        X = np.load(BytesIO(urlopen(COVERAGES_URL).read()))

        # Each file in the coverages archive is one feature grid.
        coverages = []
        for f in X.files:
            fhandle = BytesIO(X[f])
            print(' - converting', f)
            coverages.append(_load_coverage(fhandle))
        coverages = np.asarray(coverages, dtype=dtype)

        bunch = Bunch(coverages=coverages,
                      test=test,
                      train=train,
                      **extra_params)
        # Cache the parsed dataset so subsequent calls skip the download.
        joblib.dump(bunch, join(data_home, DATA_ARCHIVE_NAME), compress=9)
    else:
        bunch = joblib.load(join(data_home, DATA_ARCHIVE_NAME))

    return bunch
| bsd-3-clause |
pavlovml/tensorflow | tensorflow/python/client/notebook.py | 5 | 3918 | """Notebook front-end to TensorFlow.
When you run this binary, you'll see something like below, which indicates
the serving URL of the notebook:
The IPython Notebook is running at: http://127.0.0.1:8888/
Press "Shift+Enter" to execute a cell
Press "Enter" on a cell to go into edit mode.
Press "Escape" to go back into command mode and use arrow keys to navigate.
Press "a" in command mode to insert cell above or "b" to insert cell below.
Your root notebooks directory is FLAGS.notebook_dir
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import socket
import sys
# pylint: disable=g-import-not-at-top
# Official recommended way of turning on fast protocol buffers as of 10/21/14
os.environ["PROTOCOL_BUFFERS_PYTHON_IMPLEMENTATION"] = "cpp"
os.environ["PROTOCOL_BUFFERS_PYTHON_IMPLEMENTATION_VERSION"] = "2"

from tensorflow.python.platform import app
from tensorflow.python.platform import flags

FLAGS = flags.FLAGS

# Command-line flags controlling the notebook server.
flags.DEFINE_string(
    "password", None,
    "Password to require. If set, the server will allow public access."
    " Only used if notebook config file does not exist.")

flags.DEFINE_string("notebook_dir", "experimental/brain/notebooks",
                    "root location where to store notebooks")

# Snapshot of argv taken at import time; main() restores it after app.run()
# has consumed flags (see main below).
ORIG_ARGV = sys.argv
# Main notebook process calls itself with argv[1]="kernel" to start kernel
# subprocesses.
IS_KERNEL = len(sys.argv) > 1 and sys.argv[1] == "kernel"
def main(unused_argv):
  """Run either the notebook front-end or a kernel subprocess.

  Dispatches on the module-level IS_KERNEL flag: the user-facing process
  starts an IPython NotebookApp; the self-spawned kernel subprocess starts
  an IPKernelApp with inline matplotlib enabled.
  """
  # Restore the full argv saved at import time; app.run() consumed flags.
  sys.argv = ORIG_ARGV

  if not IS_KERNEL:
    # Drop all flags.
    sys.argv = [sys.argv[0]]
    # NOTE(sadovsky): For some reason, putting this import at the top level
    # breaks inline plotting.  It's probably a bug in the stone-age version
    # of matplotlib.
    from IPython.html.notebookapp import NotebookApp  # pylint: disable=g-import-not-at-top
    notebookapp = NotebookApp.instance()
    notebookapp.open_browser = True

    # password functionality adopted from quality/ranklab/main/tools/notebook.py
    # add options to run with "password"
    if FLAGS.password:
      from IPython.lib import passwd  # pylint: disable=g-import-not-at-top
      # Binding to 0.0.0.0 makes the server publicly reachable, so a
      # password is required in this branch.
      notebookapp.ip = "0.0.0.0"
      notebookapp.password = passwd(FLAGS.password)
    else:
      print ("\nNo password specified; Notebook server will only be available"
             " on the local machine.\n")
    notebookapp.initialize(argv=["--notebook-dir", FLAGS.notebook_dir])

    if notebookapp.ip == "0.0.0.0":
      proto = "https" if notebookapp.certfile else "http"
      url = "%s://%s:%d%s" % (proto, socket.gethostname(), notebookapp.port,
                              notebookapp.base_project_url)
      print("\nNotebook server will be publicly available at: %s\n" % url)

    notebookapp.start()
    return

  # Kernel subprocess path.
  # Drop the --flagfile flag so that notebook doesn't complain about an
  # "unrecognized alias" when parsing sys.argv.
  sys.argv = ([sys.argv[0]] +
              [z for z in sys.argv[1:] if not z.startswith("--flagfile")])
  from IPython.kernel.zmq.kernelapp import IPKernelApp  # pylint: disable=g-import-not-at-top
  kernelapp = IPKernelApp.instance()
  kernelapp.initialize()

  # Enable inline plotting. Equivalent to running "%matplotlib inline".
  ipshell = kernelapp.shell
  ipshell.enable_matplotlib("inline")

  kernelapp.start()
if __name__ == "__main__":
  # When the user starts the main notebook process, we don't touch sys.argv.
  # When the main process launches kernel subprocesses, it writes all flags
  # to a tmpfile and sets --flagfile to that tmpfile, so for kernel
  # subprocesses here we drop all flags *except* --flagfile, then call
  # app.run(), and then (in main) restore all flags before starting the
  # kernel app.
  if IS_KERNEL:
    # Drop everything except --flagfile.
    sys.argv = ([sys.argv[0]] +
                [x for x in sys.argv[1:] if x.startswith("--flagfile")])
  app.run()
| apache-2.0 |
nal-epfl/line-sigcomm14 | plotting-scripts-new/plot-edge-seq-cong-prob.py | 1 | 9288 | #!/usr/bin/env python2
# -*- coding: utf-8 -*-
# Install dependencies:
# sudo pip install matplotlib
import colorsys
import copy
import getopt
import json
from nicePlot import nicePlot
from line import *
import math
import numpy
import os
import pprint
import re
import subprocess
import sys
## Params
# Defaults; each may be overridden by the command-line options parsed below.
inputFile = 'edge-seq-cong-prob.txt'
outputDir = 'edge-seq-cong-prob'
natural = True
removeRedundant = False

try:
    opts, args = getopt.getopt(sys.argv[1:], '', ['in=', 'out=', 'natural', 'removeRedundant'])
except getopt.GetoptError as err:
    # print help information and exit:
    print str(err)  # will print something like "option -a not recognized"
    sys.exit(2)
for opt, arg in opts:
    if opt == '--in':
        inputFile = arg
    elif opt == '--out':
        outputDir = arg
    elif opt == '--natural':
        # NOTE(review): 'natural' already defaults to True and nothing sets
        # it to False, so this flag is currently a no-op -- confirm intent.
        natural = True
    elif opt == '--removeRedundant':
        removeRedundant = True
    else:
        assert False, "Unhandled option: " + str(opt)
print 'Arguments:'
print 'Input file:', inputFile
print 'Output dir:', outputDir
## End of params
# Parse the measurement file into the structured object used below
# (readSeqData comes from the project's `line` module).
data = readSeqData(inputFile, removeRedundant, natural)
## End of data reading

## Plot
try:
    os.makedirs(outputDir)
except OSError as exception:
    # Directory already exists -- that is fine.
    pass

# Cleanup target dir
print 'Cleaning up target directory (%s)...' % outputDir
args = ['bash', '-c', '''cd '%s' && ls -1 | grep -E '.*.(pdf|png|json|eps)$' | while read -r f ; do rm -fv "$f" ; done || echo "Could not change dir to %s"''' % (outputDir, outputDir) ]
print(args)
subprocess.call(args)
# [left, bottom, width, height]
figureBorder = [0.10, 0.20, 0.80, 0.70]

# Scan every sequence/class to find the largest probability so both figures
# ("truth" and "inferred") share a common y-axis scale.
probMax = 2.0
for e in range(len(data.linkSeqLinks)):
    print data.linkSeqLinks[e]
    for c in data.linkSeqClassValues[e].keys():
        for prob in data.linkSeqClassValues[e][c]:
            probMax = max(probMax, prob)
        for prob in data.linkSeqClassTruth[e][c]:
            probMax = max(probMax, prob)
# Truncate to 2 decimals, then pad the axis limit.
# NOTE(review): probMax2 <= probMax always holds after truncation, so the
# condition below is always true and 0.5 is always added -- confirm whether
# `probMax - probMax2 < 0.4` was intended.
probMax2 = int(probMax * 100) / 100.0
if probMax2 - probMax < 0.4:
    probMax2 = probMax2 + 0.5
probMax = probMax2

# One box-plot figure per data source; each link sequence gets a group of
# boxes (one per traffic class) separated by dashed vertical lines.
for graph in ["truth", "inferred"]:
    plot = {}
    #plot['title'] = '{0} link sequence: {1}'.format(linkSeqTypes[e].capitalize(), ' '.join([str(x) for x in linkSeqLinks[e]]))
    plot['xLabel'] = 'Link sequence'
    plot['yLabel'] = 'Probability of congestion\n({}) [\\%]'.format(graph)
    plot['fontScale'] = 1.0
    plot['grid'] = ''
    plot['xmin'] = 0.5
    plot['xmax'] = 0.5
    plot['ymin'] = -0.5
    plot['ymax'] = probMax
    plot['minorXTicks'] = 0
    plot['majorXTicks'] = 0
    plot['noLegend'] = 1
    #
    plot['data'] = []
    plot['xTicks'] = []
    plot['xTickLabels'] = []
    plot['xTickRotation'] = 30
    plot['xTicks'].append(0)
    plot['xTickLabels'].append('')
    plot['majorXTicks'] = plot['majorXTicks'] + 1
    majorIndex = 1
    for e in data.sortedSequences:
        #plot['xTickLabels'].append(', '.join([str(link) + ('' if link not in data.nonNeutralLinks else '*') for link in data.linkSeqLinks[e]]))
        # Non-neutral links are highlighted in dark red in the tick label.
        plot['xTickLabels'].append(', '.join(['\\textcolor[rgb]{' + ('0,0,0' if link not in data.nonNeutralLinks else '0.7,0,0') + '}{' + str(link) + '}' for link in data.linkSeqLinks[e]]))
        plot['majorXTicks'] = plot['majorXTicks'] + 1
        plot['xmax'] = plot['xmax'] + 1
        box = {}
        box['type'] = 'boxplot'
        box['x'] = []
        box['positions'] = []
        box['w'] = 0.25
        color = [0.618, 0.75, 0.95] if data.linkSeqTypes[e] == 'neutral' else [0.0, 0.75, 0.95]
        box['boxColor'] = color
        box['medianColor'] = color
        box['whiskerColor'] = color
        box['capColor'] = color
        box['outlierColor'] = color
        box['outlierMarker'] = '+'
        box['lineWidth'] = 1.0
        # Boxes of one sequence are laid out side by side around majorIndex.
        minorOffset = - box['w'] / 2.0
        for c in naturalSorted(data.linkSeqClassValues[e].keys()):
            # Select the series matching the figure being drawn; only
            # "truth" and "inferred" are reachable from the loop above,
            # the other branches support additional graph kinds.
            if graph == "inferred":
                box['x'].append(data.linkSeqClassValues[e][c])
            elif graph == "truth":
                box['x'].append(data.linkSeqClassTruth[e][c])
            elif graph == "delta":
                box['x'].append([data.linkSeqClassValues[e][c][i] - data.linkSeqClassTruth[e][c][i] for i in range(len(data.linkSeqClassValues[e][c]))])
            elif graph == "errorPath1":
                box['x'].append(data.linkSeqClassErrorPath1[e][c])
            elif graph == "errorPath2":
                box['x'].append(data.linkSeqClassErrorPath2[e][c])
            elif graph == "errorExternal":
                box['x'].append(data.linkSeqClassErrorExternal[e][c])
            elif graph == "errorInternal":
                box['x'].append(data.linkSeqClassErrorInternal[e][c])
            elif graph == "errorInternal1":
                box['x'].append(data.linkSeqClassErrorInternal1[e][c])
            elif graph == "errorInternal2":
                box['x'].append(data.linkSeqClassErrorInternal2[e][c])
            else:
                raise Exception("oops")
            box['positions'].append(majorIndex + minorOffset)
            minorOffset = minorOffset + box['w']
        plot['xTicks'].append(majorIndex)
        # Dashed vertical separator to the left of this sequence's group.
        plot['data'].append({
            "type": "line",
            "y": [min(-10.0, plot['ymin']), max(110.0, plot['ymax'])],
            "x": [majorIndex - 0.5, majorIndex - 0.5],
            "pattern": ":",
            "label": "",
            "color": [0.0, 0.0, 0.7],
            "lineWidth": 1.0
        })
        plot['data'].append(box)
        majorIndex = majorIndex + 1
    plot['majorXTicks'] = plot['majorXTicks'] + 1
    # Horizontal dotted grid lines; spacing adapts to the y-range.
    for y in range(0 if not graph.startswith("error") and graph != "delta" else -100, 101, 25 if graph.startswith("error") else 1 if plot['ymax'] < 8 else 2 if plot['ymax'] < 16 else 5 if plot['ymax'] < 40 else 10):
        plot['data'].insert(0, {
            "type": "line",
            "x": [plot['xmin']-10, plot['xmax']+10],
            "y": [y, y],
            "pattern": ":",
            "label": "",
            "color": [0.0, 0.0, 0.7],
            "lineWidth": 1.0
        })
    # Python 2 integer division: figure width grows by 1 per 2 sequences.
    plot['w'] = 2 + len(data.sortedSequences) / 2
    plot['h'] = 3
    if graph.startswith("error"):
        plot['ymin'] = -110
        plot['ymax'] = 110
    elif graph == "delta":
        plot['ymin'] = -probMax
        plot['ymax'] = probMax
    plot['fileName'] = outputDir + '/' + 'link-seq-cong-prob-all-' + graph + '.png'
    plot['fileName'] = plot['fileName'].replace('\\', ' ')
    plot['fileName'] = plot['fileName'].replace('%', ' ')
    #plot['dpi'] = 50
    # Persist the plot spec next to the rendered image, then render it.
    with open(plot['fileName'] + '.json', 'wb') as f:
        json.dump(plot, f, sort_keys=True, indent=4, separators=(',', ': '))
    nicePlot(plot)
    print plot['fileName']
# Companion histograms: pool the probabilities of all sequences into two
# populations (neutral vs non-neutral) and plot their frequency distribution.
for graph in ["truth", "inferred"]:
    plot = {}
    #plot['title'] = '{0} link sequence: {1}'.format(data.linkSeqTypes[e].capitalize(), ' '.join([str(x) for x in data.linkSeqLinks[e]]))
    plot['xLabel'] = 'Probability of congestion\n({}) [\\%]'.format(graph)
    plot['yLabel'] = 'Frequency'
    plot['fontScale'] = 1.0
    plot['grid'] = ''
    plot['noLegend'] = 1
    #
    plot['data'] = []
    # x[0] collects neutral sequences, x[1] non-neutral ones.
    histogram = {
        "type": "histogram",
        "x": [[], []],
        "colors": [[0.618, 0.75, 0.95], [0.0, 0.75, 0.95]]
    }
    # Bin width adapts to the shared probMax computed for the box plots.
    if graph == "inferred" or graph == "truth" or graph == "truthSinglePath":
        if probMax < 4:
            histogram["bins"] = [0.2 * x for x in range(int(math.ceil(probMax * 5)))]
        elif probMax < 10:
            histogram["bins"] = [0.5 * x for x in range(int(math.ceil(probMax * 2)))]
        elif probMax < 20:
            histogram["bins"] = [1.0 * x for x in range(int(math.ceil(probMax)))]
        elif probMax < 40:
            histogram["bins"] = [2.0 * x for x in range(int(math.ceil(probMax * 0.5)))]
        else:
            histogram["bins"] = [5.0 * x for x in range(int(math.ceil(probMax * 0.2)))]
    elif graph == "delta" or graph.startswith("error"):
        histogram["nBins"] = 20
    for e in data.sortedSequences:
        index = 0 if data.linkSeqTypes[e] == 'neutral' else 1
        for c in naturalSorted(data.linkSeqClassValues[e].keys()):
            # Same per-graph series selection as in the box-plot loop above.
            if graph == "inferred":
                for prob in data.linkSeqClassValues[e][c]:
                    histogram['x'][index].append(prob)
            elif graph == "truth":
                for prob in data.linkSeqClassTruth[e][c]:
                    histogram['x'][index].append(prob)
            elif graph == "delta":
                for prob in [data.linkSeqClassValues[e][c][i] - data.linkSeqClassTruth[e][c][i] for i in range(len(data.linkSeqClassValues[e][c]))]:
                    histogram['x'][index].append(prob)
            elif graph == "errorPath1":
                for prob in data.linkSeqClassErrorPath1[e][c]:
                    histogram['x'][index].append(prob)
            elif graph == "errorPath2":
                for prob in data.linkSeqClassErrorPath2[e][c]:
                    histogram['x'][index].append(prob)
            elif graph == "errorExternal":
                for prob in data.linkSeqClassErrorExternal[e][c]:
                    histogram['x'][index].append(prob)
            elif graph == "errorInternal":
                for prob in data.linkSeqClassErrorInternal[e][c]:
                    histogram['x'][index].append(prob)
            elif graph == "errorInternal1":
                for prob in data.linkSeqClassErrorInternal1[e][c]:
                    histogram['x'][index].append(prob)
            elif graph == "errorInternal2":
                for prob in data.linkSeqClassErrorInternal2[e][c]:
                    histogram['x'][index].append(prob)
            else:
                raise Exception("oops")
    plot['data'].append(histogram)
    plot['w'] = 14
    plot['h'] = 3
    plot['fileName'] = outputDir + '/' + 'link-seq-cong-prob-all-' + graph + '-histogram.png'
    plot['fileName'] = plot['fileName'].replace('\\', ' ')
    plot['fileName'] = plot['fileName'].replace('%', ' ')
    #plot['dpi'] = 50
    # Persist the plot spec next to the rendered image, then render it.
    with open(plot['fileName'] + '.json', 'wb') as f:
        json.dump(plot, f, sort_keys=True, indent=4, separators=(',', ': '))
    nicePlot(plot)
    print plot['fileName']
| gpl-2.0 |
devs1991/test_edx_docmode | venv/lib/python2.7/site-packages/scipy/stats/tests/test_morestats.py | 4 | 34938 | # Author: Travis Oliphant, 2002
#
# Further enhancements and tests added by numerous SciPy developers.
#
from __future__ import division, print_function, absolute_import
import warnings
import numpy as np
from numpy.random import RandomState
from numpy.testing import (TestCase, run_module_suite, assert_array_equal,
assert_almost_equal, assert_array_less, assert_array_almost_equal,
assert_raises, assert_, assert_allclose, assert_equal, dec, assert_warns)
from scipy import stats
# Matplotlib is not a scipy dependency but is optionally used in probplot, so
# check if it's available.  Catch Exception rather than a bare ``except:`` so
# that SystemExit/KeyboardInterrupt are not swallowed; Exception (instead of
# just ImportError) still tolerates backends that fail at import time.
try:
    import matplotlib.pyplot as plt
    have_matplotlib = True
except Exception:
    have_matplotlib = False
# Ten shared sample groups used as fixtures by the variance-homogeneity test
# suites below (bartlett, levene, fligner).
g1 = [1.006, 0.996, 0.998, 1.000, 0.992, 0.993, 1.002, 0.999, 0.994, 1.000]
g2 = [0.998, 1.006, 1.000, 1.002, 0.997, 0.998, 0.996, 1.000, 1.006, 0.988]
g3 = [0.991, 0.987, 0.997, 0.999, 0.995, 0.994, 1.000, 0.999, 0.996, 0.996]
g4 = [1.005, 1.002, 0.994, 1.000, 0.995, 0.994, 0.998, 0.996, 1.002, 0.996]
g5 = [0.998, 0.998, 0.982, 0.990, 1.002, 0.984, 0.996, 0.993, 0.980, 0.996]
g6 = [1.009, 1.013, 1.009, 0.997, 0.988, 1.002, 0.995, 0.998, 0.981, 0.996]
g7 = [0.990, 1.004, 0.996, 1.001, 0.998, 1.000, 1.018, 1.010, 0.996, 1.002]
g8 = [0.998, 1.000, 1.006, 1.000, 1.002, 0.996, 0.998, 0.996, 1.002, 1.006]
g9 = [1.002, 0.998, 0.996, 0.995, 0.996, 1.004, 1.004, 0.998, 0.999, 0.991]
g10 = [0.991, 0.995, 0.984, 0.994, 0.997, 0.997, 0.991, 0.998, 1.004, 0.997]
class TestShapiro(TestCase):
    """Tests for scipy.stats.shapiro (Shapiro-Wilk normality test)."""

    def test_basic(self):
        # Two 20-point samples with known reference W statistics / p-values.
        sample_a = [0.11, 7.87, 4.61, 10.14, 7.95, 3.14, 0.46,
                    4.43, 0.21, 4.75, 0.71, 1.52, 3.24,
                    0.93, 0.42, 4.97, 9.53, 4.55, 0.47, 6.66]
        statistic, pvalue = stats.shapiro(sample_a)
        assert_almost_equal(statistic, 0.90047299861907959, 6)
        assert_almost_equal(pvalue, 0.042089745402336121, 6)

        sample_b = [1.36, 1.14, 2.92, 2.55, 1.46, 1.06, 5.27, -1.11,
                    3.48, 1.10, 0.88, -0.51, 1.46, 0.52, 6.20, 1.69,
                    0.08, 3.67, 2.81, 3.49]
        statistic, pvalue = stats.shapiro(sample_b)
        assert_almost_equal(statistic, 0.9590270, 6)
        assert_almost_equal(pvalue, 0.52460, 3)

    def test_bad_arg(self):
        # Fewer than 3 observations must be rejected.
        assert_raises(ValueError, stats.shapiro, [1])
class TestAnderson(TestCase):
    """Tests for scipy.stats.anderson (Anderson-Darling GOF test)."""

    def test_normal(self):
        # Fixed seed keeps the samples (and hence the statistics) reproducible.
        rs = RandomState(1234567890)
        x1 = rs.standard_exponential(size=50)
        x2 = rs.standard_normal(size=50)
        # Exponential data must exceed all but the last critical value...
        A, crit, sig = stats.anderson(x1)
        assert_array_less(crit[:-1], A)
        # ...while genuinely normal data stays below the strictest thresholds.
        A, crit, sig = stats.anderson(x2)
        assert_array_less(A, crit[-2:])

    def test_expon(self):
        rs = RandomState(1234567890)
        x1 = rs.standard_exponential(size=50)
        x2 = rs.standard_normal(size=50)
        A, crit, sig = stats.anderson(x1, 'expon')
        assert_array_less(A, crit[-2:])
        # Fitting an exponential to normal data (which includes negatives)
        # triggers floating-point warnings inside anderson.  Use the
        # np.errstate context manager instead of the original manual
        # np.seterr save/restore with try/finally -- equivalent behavior,
        # but the error state is restored even on unexpected exceptions.
        with np.errstate(all='ignore'):
            A, crit, sig = stats.anderson(x2, 'expon')
        assert_(A > crit[-1])

    def test_bad_arg(self):
        # Unknown distribution names must be rejected.
        assert_raises(ValueError, stats.anderson, [1], dist='plate_of_shrimp')
class TestAndersonKSamp(TestCase):
    """Tests for scipy.stats.anderson_ksamp (k-sample Anderson-Darling).

    Reference statistics come from Scholz & Stephens (1987); each example is
    run both without (``midrank=False``) and with (``midrank=True``) the
    midrank correction.
    """

    def test_example1a(self):
        # Example data from Scholz & Stephens (1987), originally
        # published in Lehmann (1995, Nonparametrics, Statistical
        # Methods Based on Ranks, p. 309)
        # Pass a mixture of lists and arrays
        t1 = [38.7, 41.5, 43.8, 44.5, 45.5, 46.0, 47.7, 58.0]
        t2 = np.array([39.2, 39.3, 39.7, 41.4, 41.8, 42.9, 43.3, 45.8])
        t3 = np.array([34.0, 35.0, 39.0, 40.0, 43.0, 43.0, 44.0, 45.0])
        t4 = np.array([34.0, 34.8, 34.8, 35.4, 37.2, 37.8, 41.2, 42.8])
        # The approximate p-value warning is expected here...
        assert_warns(UserWarning, stats.anderson_ksamp, (t1, t2, t3, t4),
                     midrank=False)
        # ...and silenced when re-running to check the numbers.
        with warnings.catch_warnings():
            warnings.filterwarnings('ignore', message='approximate p-value')
            Tk, tm, p = stats.anderson_ksamp((t1, t2, t3, t4), midrank=False)

            assert_almost_equal(Tk, 4.449, 3)
            assert_array_almost_equal([0.4985, 1.3237, 1.9158, 2.4930, 3.2459],
                                      tm, 4)
            assert_almost_equal(p, 0.0021, 4)

    def test_example1b(self):
        # Example data from Scholz & Stephens (1987), originally
        # published in Lehmann (1995, Nonparametrics, Statistical
        # Methods Based on Ranks, p. 309)
        # Pass arrays
        t1 = np.array([38.7, 41.5, 43.8, 44.5, 45.5, 46.0, 47.7, 58.0])
        t2 = np.array([39.2, 39.3, 39.7, 41.4, 41.8, 42.9, 43.3, 45.8])
        t3 = np.array([34.0, 35.0, 39.0, 40.0, 43.0, 43.0, 44.0, 45.0])
        t4 = np.array([34.0, 34.8, 34.8, 35.4, 37.2, 37.8, 41.2, 42.8])
        with warnings.catch_warnings():
            warnings.filterwarnings('ignore', message='approximate p-value')
            Tk, tm, p = stats.anderson_ksamp((t1, t2, t3, t4), midrank=True)

            assert_almost_equal(Tk, 4.480, 3)
            assert_array_almost_equal([0.4985, 1.3237, 1.9158, 2.4930, 3.2459],
                                      tm, 4)
            assert_almost_equal(p, 0.0020, 4)

    def test_example2a(self):
        # Example data taken from an earlier technical report of
        # Scholz and Stephens
        # Pass lists instead of arrays
        t1 = [194, 15, 41, 29, 33, 181]
        t2 = [413, 14, 58, 37, 100, 65, 9, 169, 447, 184, 36, 201, 118]
        t3 = [34, 31, 18, 18, 67, 57, 62, 7, 22, 34]
        t4 = [90, 10, 60, 186, 61, 49, 14, 24, 56, 20, 79, 84, 44, 59, 29,
              118, 25, 156, 310, 76, 26, 44, 23, 62]
        t5 = [130, 208, 70, 101, 208]
        t6 = [74, 57, 48, 29, 502, 12, 70, 21, 29, 386, 59, 27]
        t7 = [55, 320, 56, 104, 220, 239, 47, 246, 176, 182, 33]
        t8 = [23, 261, 87, 7, 120, 14, 62, 47, 225, 71, 246, 21, 42, 20, 5,
              12, 120, 11, 3, 14, 71, 11, 14, 11, 16, 90, 1, 16, 52, 95]
        t9 = [97, 51, 11, 4, 141, 18, 142, 68, 77, 80, 1, 16, 106, 206, 82,
              54, 31, 216, 46, 111, 39, 63, 18, 191, 18, 163, 24]
        t10 = [50, 44, 102, 72, 22, 39, 3, 15, 197, 188, 79, 88, 46, 5, 5, 36,
               22, 139, 210, 97, 30, 23, 13, 14]
        t11 = [359, 9, 12, 270, 603, 3, 104, 2, 438]
        t12 = [50, 254, 5, 283, 35, 12]
        t13 = [487, 18, 100, 7, 98, 5, 85, 91, 43, 230, 3, 130]
        t14 = [102, 209, 14, 57, 54, 32, 67, 59, 134, 152, 27, 14, 230, 66,
               61, 34]
        with warnings.catch_warnings():
            warnings.filterwarnings('ignore', message='approximate p-value')
            Tk, tm, p = stats.anderson_ksamp((t1, t2, t3, t4, t5, t6, t7, t8,
                                              t9, t10, t11, t12, t13, t14),
                                             midrank=False)

            assert_almost_equal(Tk, 3.288, 3)
            assert_array_almost_equal([0.5990, 1.3269, 1.8052, 2.2486, 2.8009],
                                      tm, 4)
            assert_almost_equal(p, 0.0041, 4)

    def test_example2b(self):
        # Example data taken from an earlier technical report of
        # Scholz and Stephens
        t1 = [194, 15, 41, 29, 33, 181]
        t2 = [413, 14, 58, 37, 100, 65, 9, 169, 447, 184, 36, 201, 118]
        t3 = [34, 31, 18, 18, 67, 57, 62, 7, 22, 34]
        t4 = [90, 10, 60, 186, 61, 49, 14, 24, 56, 20, 79, 84, 44, 59, 29,
              118, 25, 156, 310, 76, 26, 44, 23, 62]
        t5 = [130, 208, 70, 101, 208]
        t6 = [74, 57, 48, 29, 502, 12, 70, 21, 29, 386, 59, 27]
        t7 = [55, 320, 56, 104, 220, 239, 47, 246, 176, 182, 33]
        t8 = [23, 261, 87, 7, 120, 14, 62, 47, 225, 71, 246, 21, 42, 20, 5,
              12, 120, 11, 3, 14, 71, 11, 14, 11, 16, 90, 1, 16, 52, 95]
        t9 = [97, 51, 11, 4, 141, 18, 142, 68, 77, 80, 1, 16, 106, 206, 82,
              54, 31, 216, 46, 111, 39, 63, 18, 191, 18, 163, 24]
        t10 = [50, 44, 102, 72, 22, 39, 3, 15, 197, 188, 79, 88, 46, 5, 5, 36,
               22, 139, 210, 97, 30, 23, 13, 14]
        t11 = [359, 9, 12, 270, 603, 3, 104, 2, 438]
        t12 = [50, 254, 5, 283, 35, 12]
        t13 = [487, 18, 100, 7, 98, 5, 85, 91, 43, 230, 3, 130]
        t14 = [102, 209, 14, 57, 54, 32, 67, 59, 134, 152, 27, 14, 230, 66,
               61, 34]
        with warnings.catch_warnings():
            warnings.filterwarnings('ignore', message='approximate p-value')
            Tk, tm, p = stats.anderson_ksamp((t1, t2, t3, t4, t5, t6, t7, t8,
                                              t9, t10, t11, t12, t13, t14),
                                             midrank=True)

            assert_almost_equal(Tk, 3.294, 3)
            assert_array_almost_equal([0.5990, 1.3269, 1.8052, 2.2486, 2.8009],
                                      tm, 4)
            assert_almost_equal(p, 0.0041, 4)

    def test_not_enough_samples(self):
        # A single sample is not a k-sample problem.
        assert_raises(ValueError, stats.anderson_ksamp, np.ones(5))

    def test_no_distinct_observations(self):
        # All observations identical across samples is rejected.
        assert_raises(ValueError, stats.anderson_ksamp,
                      (np.ones(5), np.ones(5)))

    def test_empty_sample(self):
        assert_raises(ValueError, stats.anderson_ksamp, (np.ones(5), []))
class TestAnsari(TestCase):
    """Tests for scipy.stats.ansari (Ansari-Bradley two-sample scale test)."""

    def test_small(self):
        first = [1, 2, 3, 3, 4]
        second = [3, 2, 6, 1, 6, 1, 4, 1]
        statistic, pvalue = stats.ansari(first, second)
        assert_almost_equal(statistic, 23.5, 11)
        assert_almost_equal(pvalue, 0.13499256881897437, 11)

    def test_approx(self):
        ramsay = np.array((111, 107, 100, 99, 102, 106, 109, 108, 104, 99,
                           101, 96, 97, 102, 107, 113, 116, 113, 110, 98))
        parekh = np.array((107, 108, 106, 98, 105, 103, 110, 105, 104,
                           100, 96, 108, 103, 104, 114, 114, 113, 108, 106, 99))
        # Ties force the normal approximation; silence the warning about it.
        with warnings.catch_warnings():
            warnings.filterwarnings('ignore',
                                    message="Ties preclude use of exact statistic.")
            statistic, pvalue = stats.ansari(ramsay, parekh)
        assert_almost_equal(statistic, 185.5, 11)
        assert_almost_equal(pvalue, 0.18145819972867083, 11)

    def test_exact(self):
        statistic, pvalue = stats.ansari([1, 2, 3, 4], [15, 5, 20, 8, 10, 12])
        assert_almost_equal(statistic, 10.0, 11)
        assert_almost_equal(pvalue, 0.533333333333333333, 7)

    def test_bad_arg(self):
        # Empty input on either side must be rejected.
        assert_raises(ValueError, stats.ansari, [], [1])
        assert_raises(ValueError, stats.ansari, [1], [])
class TestBartlett(TestCase):
def test_data(self):
args = [g1, g2, g3, g4, g5, g6, g7, g8, g9, g10]
T, pval = stats.bartlett(*args)
assert_almost_equal(T,20.78587342806484,7)
assert_almost_equal(pval,0.0136358632781,7)
def test_bad_arg(self):
# Too few args raises ValueError.
assert_raises(ValueError, stats.bartlett, [1])
class TestLevene(TestCase):
def test_data(self):
args = [g1, g2, g3, g4, g5, g6, g7, g8, g9, g10]
W, pval = stats.levene(*args)
assert_almost_equal(W,1.7059176930008939,7)
assert_almost_equal(pval,0.0990829755522,7)
def test_trimmed1(self):
# Test that center='trimmed' gives the same result as center='mean'
# when proportiontocut=0.
W1, pval1 = stats.levene(g1, g2, g3, center='mean')
W2, pval2 = stats.levene(g1, g2, g3, center='trimmed', proportiontocut=0.0)
assert_almost_equal(W1, W2)
assert_almost_equal(pval1, pval2)
def test_trimmed2(self):
x = [1.2, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 100.0]
y = [0.0, 3.0, 3.5, 4.0, 4.5, 5.0, 5.5, 200.0]
np.random.seed(1234)
x2 = np.random.permutation(x)
# Use center='trimmed'
W0, pval0 = stats.levene(x, y, center='trimmed', proportiontocut=0.125)
W1, pval1 = stats.levene(x2, y, center='trimmed', proportiontocut=0.125)
# Trim the data here, and use center='mean'
W2, pval2 = stats.levene(x[1:-1], y[1:-1], center='mean')
# Result should be the same.
assert_almost_equal(W0, W2)
assert_almost_equal(W1, W2)
assert_almost_equal(pval1, pval2)
def test_equal_mean_median(self):
x = np.linspace(-1,1,21)
np.random.seed(1234)
x2 = np.random.permutation(x)
y = x**3
W1, pval1 = stats.levene(x, y, center='mean')
W2, pval2 = stats.levene(x2, y, center='median')
assert_almost_equal(W1, W2)
assert_almost_equal(pval1, pval2)
def test_bad_keyword(self):
x = np.linspace(-1,1,21)
assert_raises(TypeError, stats.levene, x, x, portiontocut=0.1)
def test_bad_center_value(self):
x = np.linspace(-1,1,21)
assert_raises(ValueError, stats.levene, x, x, center='trim')
def test_too_few_args(self):
assert_raises(ValueError, stats.levene, [1])
class TestBinomP(TestCase):
    """Tests for scipy.stats.binom_test (exact two-sided binomial test).

    NOTE(review): ``stats.binom_test`` was later deprecated (SciPy 1.10) and
    removed (SciPy 1.12) in favour of ``stats.binomtest`` -- confirm the
    SciPy version this checkout pins before relying on this suite.
    """

    def test_data(self):
        pval = stats.binom_test(100, 250)
        assert_almost_equal(pval, 0.0018833009350757682, 11)
        pval = stats.binom_test(201, 405)
        assert_almost_equal(pval, 0.92085205962670713, 11)
        # A two-element x gives (successes, failures) directly.
        pval = stats.binom_test([682, 243], p=3.0/4)
        assert_almost_equal(pval, 0.38249155957481695, 11)

    def test_bad_len_x(self):
        # Length of x must be 1 or 2.
        assert_raises(ValueError, stats.binom_test, [1, 2, 3])

    def test_bad_n(self):
        # len(x) is 1, but n is invalid.
        # Missing n
        assert_raises(ValueError, stats.binom_test, [100])
        # n less than x[0]
        assert_raises(ValueError, stats.binom_test, [100], n=50)

    def test_bad_p(self):
        # Probabilities outside [0, 1] must be rejected.
        assert_raises(ValueError, stats.binom_test, [50, 50], p=2.0)
class TestFindRepeats(TestCase):
    """Tests for scipy.stats.find_repeats."""

    def test_basic(self):
        values = [1, 2, 3, 4, 1, 2, 3, 4, 1, 2, 5]
        repeated, counts = stats.find_repeats(values)
        assert_array_equal(repeated, [1, 2, 3, 4])
        assert_array_equal(counts, [3, 3, 2, 2])

    def test_empty_result(self):
        # Check that empty arrays are returned when there are no repeats.
        no_repeats = [10, 20, 50, 30, 40]
        repeated, counts = stats.find_repeats(no_repeats)
        assert_array_equal(repeated, [])
        assert_array_equal(counts, [])
class TestFligner(TestCase):
    """Tests for stats.fligner (Fligner-Killeen test for equal variances)."""

    def test_data(self):
        # numbers from R: fligner.test in package stats
        x1 = np.arange(5)
        assert_array_almost_equal(stats.fligner(x1,x1**2),
                                  (3.2282229927203536, 0.072379187848207877), 11)

    def test_trimmed1(self):
        # Test that center='trimmed' gives the same result as center='mean'
        # when proportiontocut=0.
        # NOTE(review): g1, g2, g3 are presumably module-level samples
        # defined earlier in this file — confirm.
        Xsq1, pval1 = stats.fligner(g1, g2, g3, center='mean')
        Xsq2, pval2 = stats.fligner(g1, g2, g3, center='trimmed', proportiontocut=0.0)
        assert_almost_equal(Xsq1, Xsq2)
        assert_almost_equal(pval1, pval2)

    def test_trimmed2(self):
        x = [1.2, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 100.0]
        y = [0.0, 3.0, 3.5, 4.0, 4.5, 5.0, 5.5, 200.0]
        # Use center='trimmed'; 0.125 removes one value from each tail.
        Xsq1, pval1 = stats.fligner(x, y, center='trimmed', proportiontocut=0.125)
        # Trim the data here, and use center='mean'
        Xsq2, pval2 = stats.fligner(x[1:-1], y[1:-1], center='mean')
        # Result should be the same.
        assert_almost_equal(Xsq1, Xsq2)
        assert_almost_equal(pval1, pval2)

    # The following test looks reasonable at first, but fligner() uses the
    # function stats.rankdata(), and in one of the cases in this test,
    # there are ties, while in the other (because of normal rounding
    # errors) there are not.  This difference leads to differences in the
    # third significant digit of W.
    #
    #def test_equal_mean_median(self):
    #    x = np.linspace(-1,1,21)
    #    y = x**3
    #    W1, pval1 = stats.fligner(x, y, center='mean')
    #    W2, pval2 = stats.fligner(x, y, center='median')
    #    assert_almost_equal(W1, W2)
    #    assert_almost_equal(pval1, pval2)

    def test_bad_keyword(self):
        # Unknown keyword argument -> TypeError.
        x = np.linspace(-1,1,21)
        assert_raises(TypeError, stats.fligner, x, x, portiontocut=0.1)

    def test_bad_center_value(self):
        # center must be 'mean', 'median' or 'trimmed'.
        x = np.linspace(-1,1,21)
        assert_raises(ValueError, stats.fligner, x, x, center='trim')

    def test_bad_num_args(self):
        # Too few args raises ValueError.
        assert_raises(ValueError, stats.fligner, [1])
class TestMood(TestCase):
    """Tests for stats.mood (Mood's test for equal scale parameters)."""

    def test_mood(self):
        # numbers from R: mood.test in package stats
        x1 = np.arange(5)
        assert_array_almost_equal(stats.mood(x1, x1**2),
                                  (-1.3830857299399906, 0.16663858066771478), 11)

    def test_mood_order_of_args(self):
        # z should change sign when the order of arguments changes, pvalue
        # should not change
        np.random.seed(1234)
        x1 = np.random.randn(10, 1)
        x2 = np.random.randn(15, 1)
        z1, p1 = stats.mood(x1, x2)
        z2, p2 = stats.mood(x2, x1)
        assert_array_almost_equal([z1, p1], [-z2, p2])

    def test_mood_with_axis_none(self):
        # Test with axis = None, compare with results from R
        x1 = [-0.626453810742332, 0.183643324222082, -0.835628612410047,
              1.59528080213779, 0.329507771815361, -0.820468384118015,
              0.487429052428485, 0.738324705129217, 0.575781351653492,
              -0.305388387156356, 1.51178116845085, 0.389843236411431,
              -0.621240580541804, -2.2146998871775, 1.12493091814311,
              -0.0449336090152309, -0.0161902630989461, 0.943836210685299,
              0.821221195098089, 0.593901321217509]
        x2 = [-0.896914546624981, 0.184849184646742, 1.58784533120882,
              -1.13037567424629, -0.0802517565509893, 0.132420284381094,
              0.707954729271733, -0.23969802417184, 1.98447393665293,
              -0.138787012119665, 0.417650750792556, 0.981752777463662,
              -0.392695355503813, -1.03966897694891, 1.78222896030858,
              -2.31106908460517, 0.878604580921265, 0.035806718015226,
              1.01282869212708, 0.432265154539617, 2.09081920524915,
              -1.19992581964387, 1.58963820029007, 1.95465164222325,
              0.00493777682814261, -2.45170638784613, 0.477237302613617,
              -0.596558168631403, 0.792203270299649, 0.289636710177348]
        x1 = np.array(x1)
        x2 = np.array(x2)
        # Reshape to 2-D; axis=None must flatten before testing.
        x1.shape = (10, 2)
        x2.shape = (15, 2)
        assert_array_almost_equal(stats.mood(x1, x2, axis=None),
                                  [-1.31716607555, 0.18778296257])

    def test_mood_2d(self):
        # Test if the results of mood test in 2-D case are consistent with the
        # R result for the same inputs.  Numbers from R mood.test().
        ny = 5
        np.random.seed(1234)
        x1 = np.random.randn(10, ny)
        x2 = np.random.randn(15, ny)
        z_vectest, pval_vectest = stats.mood(x1, x2)
        for j in range(ny):
            # Each column must match the 1-D computation on that column.
            assert_array_almost_equal([z_vectest[j], pval_vectest[j]],
                                      stats.mood(x1[:, j], x2[:, j]))
        # inverse order of dimensions
        x1 = x1.transpose()
        x2 = x2.transpose()
        z_vectest, pval_vectest = stats.mood(x1, x2, axis=1)
        for i in range(ny):
            # check axis handling is self consistent
            assert_array_almost_equal([z_vectest[i], pval_vectest[i]],
                                      stats.mood(x1[i, :], x2[i, :]))

    def test_mood_3d(self):
        shape = (10, 5, 6)
        np.random.seed(1234)
        x1 = np.random.randn(*shape)
        x2 = np.random.randn(*shape)
        for axis in range(3):
            z_vectest, pval_vectest = stats.mood(x1, x2, axis=axis)
            # Tests that result for 3-D arrays is equal to that for the
            # same calculation on a set of 1-D arrays taken from the
            # 3-D array
            axes_idx = ([1, 2], [0, 2], [0, 1])  # the two axes != axis
            for i in range(shape[axes_idx[axis][0]]):
                for j in range(shape[axes_idx[axis][1]]):
                    if axis == 0:
                        slice1 = x1[:, i, j]
                        slice2 = x2[:, i, j]
                    elif axis == 1:
                        slice1 = x1[i, :, j]
                        slice2 = x2[i, :, j]
                    else:
                        slice1 = x1[i, j, :]
                        slice2 = x2[i, j, :]
                    assert_array_almost_equal([z_vectest[i, j],
                                               pval_vectest[i, j]],
                                              stats.mood(slice1, slice2))

    def test_mood_bad_arg(self):
        # Raise ValueError when the sum of the lengths of the args is less than 3
        assert_raises(ValueError, stats.mood, [1], [])
class TestProbplot(TestCase):
    """Tests for stats.probplot (probability plot / quantile-quantile data)."""

    def test_basic(self):
        np.random.seed(12345)
        x = stats.norm.rvs(size=20)
        # osm: theoretical quantiles; osr: ordered sample values.
        osm, osr = stats.probplot(x, fit=False)
        osm_expected = [-1.8241636, -1.38768012, -1.11829229, -0.91222575,
                        -0.73908135, -0.5857176, -0.44506467, -0.31273668,
                        -0.18568928, -0.06158146, 0.06158146, 0.18568928,
                        0.31273668, 0.44506467, 0.5857176, 0.73908135,
                        0.91222575, 1.11829229, 1.38768012, 1.8241636]
        assert_allclose(osr, np.sort(x))
        assert_allclose(osm, osm_expected)
        # With fit=True a (slope, intercept, r) least-squares fit is added.
        res, res_fit = stats.probplot(x, fit=True)
        res_fit_expected = [1.05361841, 0.31297795, 0.98741609]
        assert_allclose(res_fit, res_fit_expected)

    def test_sparams_keyword(self):
        np.random.seed(123456)
        x = stats.norm.rvs(size=100)
        # Check that None, () and 0 (loc=0, for normal distribution) all work
        # and give the same results
        osm1, osr1 = stats.probplot(x, sparams=None, fit=False)
        osm2, osr2 = stats.probplot(x, sparams=0, fit=False)
        osm3, osr3 = stats.probplot(x, sparams=(), fit=False)
        assert_allclose(osm1, osm2)
        assert_allclose(osm1, osm3)
        assert_allclose(osr1, osr2)
        assert_allclose(osr1, osr3)
        # Check giving (loc, scale) params for normal distribution
        osm, osr = stats.probplot(x, sparams=(), fit=False)

    def test_dist_keyword(self):
        np.random.seed(12345)
        x = stats.norm.rvs(size=20)
        # dist may be given by name or as a distribution object.
        osm1, osr1 = stats.probplot(x, fit=False, dist='t', sparams=(3,))
        osm2, osr2 = stats.probplot(x, fit=False, dist=stats.t, sparams=(3,))
        assert_allclose(osm1, osm2)
        assert_allclose(osr1, osr2)
        assert_raises(ValueError, stats.probplot, x, dist='wrong-dist-name')
        assert_raises(AttributeError, stats.probplot, x, dist=[])

        class custom_dist(object):
            """Some class that looks just enough like a distribution."""
            def ppf(self, q):
                return stats.norm.ppf(q, loc=2)

        # A duck-typed object exposing ppf() is an acceptable dist.
        osm1, osr1 = stats.probplot(x, sparams=(2,), fit=False)
        osm2, osr2 = stats.probplot(x, dist=custom_dist(), fit=False)
        assert_allclose(osm1, osm2)
        assert_allclose(osr1, osr2)

    @dec.skipif(not have_matplotlib)
    def test_plot_kwarg(self):
        np.random.seed(7654321)
        fig = plt.figure()
        fig.add_subplot(111)
        x = stats.t.rvs(3, size=100)
        res1, fitres1 = stats.probplot(x, plot=plt)
        plt.close()
        res2, fitres2 = stats.probplot(x, plot=None)
        res3 = stats.probplot(x, fit=False, plot=plt)
        plt.close()
        res4 = stats.probplot(x, fit=False, plot=None)
        # Check that results are consistent between combinations of `fit` and
        # `plot` keywords.
        assert_(len(res1) == len(res2) == len(res3) == len(res4) == 2)
        assert_allclose(res1, res2)
        assert_allclose(res1, res3)
        assert_allclose(res1, res4)
        assert_allclose(fitres1, fitres2)
        # Check that a Matplotlib Axes object is accepted
        fig = plt.figure()
        ax = fig.add_subplot(111)
        stats.probplot(x, fit=False, plot=ax)
        plt.close()

    def test_probplot_bad_args(self):
        # Raise ValueError when given an invalid distribution.
        assert_raises(ValueError, stats.probplot, [1], dist="plate_of_shrimp")
def test_wilcoxon_bad_arg():
    """wilcoxon must reject mismatched sample lengths and an unknown
    zero_method."""
    assert_raises(ValueError, stats.wilcoxon, [1], [1, 2])
    assert_raises(ValueError, stats.wilcoxon, [1, 2], [1, 2], "dummy")
def test_mvsdist_bad_arg():
    """mvsdist needs at least two data points."""
    single_point = [1]
    assert_raises(ValueError, stats.mvsdist, single_point)
def test_kstat_bad_arg():
    # Raise ValueError if n is outside 1..4.
    # (The original comment read "n > 4 or n > 1" — a typo for
    # "n > 4 or n < 1".)
    data = [1]
    n = 10
    assert_raises(ValueError, stats.kstat, data, n=n)
def test_kstatvar_bad_arg():
    # Raise ValueError if n is not 1 or 2.
    data = [1]
    n = 10
    assert_raises(ValueError, stats.kstatvar, data, n=n)
def test_ppcc_max_bad_arg():
    """An unknown distribution name must raise ValueError."""
    sample = [1]
    assert_raises(ValueError, stats.ppcc_max, sample, dist="plate_of_shrimp")
class TestBoxcox_llf(TestCase):
    """Tests for stats.boxcox_llf (Box-Cox log-likelihood function)."""

    def test_basic(self):
        np.random.seed(54321)
        x = stats.norm.rvs(size=10000, loc=10)
        lmbda = 1
        llf = stats.boxcox_llf(lmbda, x)
        # For lmbda=1 the transform is affine, so the llf reduces to a
        # closed form in the sample standard deviation.
        llf_expected = -x.size / 2. * np.log(np.sum(x.std()**2))
        assert_allclose(llf, llf_expected)

    def test_array_like(self):
        # A plain list must give the same result as an ndarray.
        np.random.seed(54321)
        x = stats.norm.rvs(size=100, loc=10)
        lmbda = 1
        llf = stats.boxcox_llf(lmbda, x)
        llf2 = stats.boxcox_llf(lmbda, list(x))
        assert_allclose(llf, llf2, rtol=1e-12)

    def test_2d_input(self):
        # Note: boxcox_llf() was already working with 2-D input (sort of), so
        # keep it like that.  boxcox() doesn't work with 2-D input though, due
        # to brent() returning a scalar.
        np.random.seed(54321)
        x = stats.norm.rvs(size=100, loc=10)
        lmbda = 1
        llf = stats.boxcox_llf(lmbda, x)
        llf2 = stats.boxcox_llf(lmbda, np.vstack([x, x]).T)
        assert_allclose([llf, llf], llf2, rtol=1e-12)

    def test_empty(self):
        # Empty input yields NaN rather than raising.
        assert_(np.isnan(stats.boxcox_llf(1, [])))
class TestBoxcox(TestCase):
    """Tests for stats.boxcox (Box-Cox power transform)."""

    def test_fixed_lmbda(self):
        np.random.seed(12345)
        x = stats.loggamma.rvs(5, size=50) + 5
        # Check the three closed-form cases of the transform.
        xt = stats.boxcox(x, lmbda=1)
        assert_allclose(xt, x - 1)
        xt = stats.boxcox(x, lmbda=-1)
        assert_allclose(xt, 1 - 1/x)
        xt = stats.boxcox(x, lmbda=0)
        assert_allclose(xt, np.log(x))
        # Also test that array_like input works
        xt = stats.boxcox(list(x), lmbda=0)
        assert_allclose(xt, np.log(x))

    def test_lmbda_None(self):
        np.random.seed(1234567)
        # Start from normal rv's, do inverse transform to check that
        # optimization function gets close to the right answer.
        np.random.seed(1245)
        lmbda = 2.5
        x = stats.norm.rvs(loc=10, size=50000)
        x_inv = (x * lmbda + 1)**(-lmbda)
        xt, maxlog = stats.boxcox(x_inv)
        assert_almost_equal(maxlog, -1 / lmbda, decimal=2)

    def test_alpha(self):
        np.random.seed(1234)
        x = stats.loggamma.rvs(5, size=50) + 5
        # Some regular values for alpha, on a small sample size
        _, _, interval = stats.boxcox(x, alpha=0.75)
        assert_allclose(interval, [4.004485780226041, 5.138756355035744])
        _, _, interval = stats.boxcox(x, alpha=0.05)
        assert_allclose(interval, [1.2138178554857557, 8.209033272375663])
        # Try some extreme values, see we don't hit the N=500 limit
        x = stats.loggamma.rvs(7, size=500) + 15
        _, _, interval = stats.boxcox(x, alpha=0.001)
        assert_allclose(interval, [0.3988867, 11.40553131])
        _, _, interval = stats.boxcox(x, alpha=0.999)
        assert_allclose(interval, [5.83316246, 5.83735292])

    def test_boxcox_bad_arg(self):
        # Raise ValueError if any data value is negative.
        x = np.array([-1])
        assert_raises(ValueError, stats.boxcox, x)

    def test_empty(self):
        # Empty input yields an empty (0,)-shaped result.
        assert_(stats.boxcox([]).shape == (0,))
class TestBoxcoxNormmax(TestCase):
    """Tests for stats.boxcox_normmax (optimal Box-Cox lambda)."""

    def setUp(self):
        np.random.seed(12345)
        self.x = stats.loggamma.rvs(5, size=50) + 5

    def test_pearsonr(self):
        # 'pearsonr' is the default optimization method.
        maxlog = stats.boxcox_normmax(self.x)
        assert_allclose(maxlog, 1.804465, rtol=1e-6)

    def test_mle(self):
        maxlog = stats.boxcox_normmax(self.x, method='mle')
        assert_allclose(maxlog, 1.758101, rtol=1e-6)
        # Check that boxcox() uses 'mle'
        _, maxlog_boxcox = stats.boxcox(self.x)
        assert_allclose(maxlog_boxcox, maxlog)

    def test_all(self):
        # method='all' returns both the pearsonr and mle optima.
        maxlog_all = stats.boxcox_normmax(self.x, method='all')
        assert_allclose(maxlog_all, [1.804465, 1.758101], rtol=1e-6)
class TestBoxcoxNormplot(TestCase):
    """Tests for stats.boxcox_normplot."""

    def setUp(self):
        np.random.seed(7654321)
        self.x = stats.loggamma.rvs(5, size=500) + 5

    def test_basic(self):
        N = 5
        # Returns N lambdas over [la, lb] and the matching ppcc values.
        lmbdas, ppcc = stats.boxcox_normplot(self.x, -10, 10, N=N)
        ppcc_expected = [0.57783375, 0.83610988, 0.97524311, 0.99756057,
                         0.95843297]
        assert_allclose(lmbdas, np.linspace(-10, 10, num=N))
        assert_allclose(ppcc, ppcc_expected)

    @dec.skipif(not have_matplotlib)
    def test_plot_kwarg(self):
        # Check with the matplotlib.pyplot module
        fig = plt.figure()
        fig.add_subplot(111)
        stats.boxcox_normplot(self.x, -20, 20, plot=plt)
        plt.close()
        # Check that a Matplotlib Axes object is accepted.
        # (Bug fix: the original called fig.add_subplot(111) twice on the
        # already-closed figure; create a fresh figure with one axes.)
        fig = plt.figure()
        ax = fig.add_subplot(111)
        stats.boxcox_normplot(self.x, -20, 20, plot=ax)
        plt.close()

    def test_invalid_inputs(self):
        # `lb` has to be larger than `la`
        assert_raises(ValueError, stats.boxcox_normplot, self.x, 1, 0)
        # `x` can not contain negative values
        assert_raises(ValueError, stats.boxcox_normplot, [-1, 1], 0, 1)

    def test_empty(self):
        assert_(stats.boxcox_normplot([], 0, 1).size == 0)
class TestCircFuncs(TestCase):
    """Tests for the circular statistics functions circmean/circvar/circstd."""

    def test_circfuncs(self):
        # Angles clustered around 0/360 degrees.
        x = np.array([355,5,2,359,10,350])
        M = stats.circmean(x, high=360)
        Mval = 0.167690146
        assert_allclose(M, Mval, rtol=1e-7)
        V = stats.circvar(x, high=360)
        Vval = 42.51955609
        assert_allclose(V, Vval, rtol=1e-7)
        S = stats.circstd(x, high=360)
        Sval = 6.520702116
        assert_allclose(S, Sval, rtol=1e-7)

    def test_circfuncs_small(self):
        # For tightly clustered angles the circular statistics approach
        # the ordinary (linear) mean/var/std.
        x = np.array([20,21,22,18,19,20.5,19.2])
        M1 = x.mean()
        M2 = stats.circmean(x, high=360)
        assert_allclose(M2, M1, rtol=1e-5)
        V1 = x.var()
        V2 = stats.circvar(x, high=360)
        assert_allclose(V2, V1, rtol=1e-4)
        S1 = x.std()
        S2 = stats.circstd(x, high=360)
        assert_allclose(S2, S1, rtol=1e-4)

    def test_circmean_axis(self):
        # axis=None (flattened), axis=1 (rows) and axis=0 (columns) must
        # agree with per-slice 1-D computations.
        x = np.array([[355,5,2,359,10,350],
                      [351,7,4,352,9,349],
                      [357,9,8,358,4,356]])
        M1 = stats.circmean(x, high=360)
        M2 = stats.circmean(x.ravel(), high=360)
        assert_allclose(M1, M2, rtol=1e-14)
        M1 = stats.circmean(x, high=360, axis=1)
        M2 = [stats.circmean(x[i], high=360) for i in range(x.shape[0])]
        assert_allclose(M1, M2, rtol=1e-14)
        M1 = stats.circmean(x, high=360, axis=0)
        M2 = [stats.circmean(x[:,i], high=360) for i in range(x.shape[1])]
        assert_allclose(M1, M2, rtol=1e-14)

    def test_circvar_axis(self):
        x = np.array([[355,5,2,359,10,350],
                      [351,7,4,352,9,349],
                      [357,9,8,358,4,356]])
        V1 = stats.circvar(x, high=360)
        V2 = stats.circvar(x.ravel(), high=360)
        assert_allclose(V1, V2, rtol=1e-11)
        V1 = stats.circvar(x, high=360, axis=1)
        V2 = [stats.circvar(x[i], high=360) for i in range(x.shape[0])]
        assert_allclose(V1, V2, rtol=1e-11)
        V1 = stats.circvar(x, high=360, axis=0)
        V2 = [stats.circvar(x[:,i], high=360) for i in range(x.shape[1])]
        assert_allclose(V1, V2, rtol=1e-11)

    def test_circstd_axis(self):
        x = np.array([[355,5,2,359,10,350],
                      [351,7,4,352,9,349],
                      [357,9,8,358,4,356]])
        S1 = stats.circstd(x, high=360)
        S2 = stats.circstd(x.ravel(), high=360)
        assert_allclose(S1, S2, rtol=1e-11)
        S1 = stats.circstd(x, high=360, axis=1)
        S2 = [stats.circstd(x[i], high=360) for i in range(x.shape[0])]
        assert_allclose(S1, S2, rtol=1e-11)
        S1 = stats.circstd(x, high=360, axis=0)
        S2 = [stats.circstd(x[:,i], high=360) for i in range(x.shape[1])]
        assert_allclose(S1, S2, rtol=1e-11)

    def test_circfuncs_array_like(self):
        # Plain lists must be accepted.
        x = [355,5,2,359,10,350]
        assert_allclose(stats.circmean(x, high=360), 0.167690146, rtol=1e-7)
        assert_allclose(stats.circvar(x, high=360), 42.51955609, rtol=1e-7)
        assert_allclose(stats.circstd(x, high=360), 6.520702116, rtol=1e-7)

    def test_empty(self):
        # Empty input yields NaN for all three functions.
        assert_(np.isnan(stats.circmean([])))
        assert_(np.isnan(stats.circstd([])))
        assert_(np.isnan(stats.circvar([])))
def test_accuracy_wilcoxon():
    """Check wilcoxon statistics/p-values for each zero_method and for the
    continuity-correction option against externally computed values."""
    # Build a sample from a frequency table over the values -4..4.
    freq = [1, 4, 16, 15, 8, 4, 5, 1, 2]
    nums = range(-4, 5)
    x = np.concatenate([[u] * v for u, v in zip(nums, freq)])
    y = np.zeros(x.size)

    # The three zero-difference handling methods.
    T, p = stats.wilcoxon(x, y, "pratt")
    assert_allclose(T, 423)
    assert_allclose(p, 0.00197547303533107)

    T, p = stats.wilcoxon(x, y, "zsplit")
    assert_allclose(T, 441)
    assert_allclose(p, 0.0032145343172473055)

    T, p = stats.wilcoxon(x, y, "wilcox")
    assert_allclose(T, 327)
    assert_allclose(p, 0.00641346115861)

    # Test the 'correction' option, using values computed in R with:
    # > wilcox.test(x, y, paired=TRUE, exact=FALSE, correct={FALSE,TRUE})
    x = np.array([120, 114, 181, 188, 180, 146, 121, 191, 132, 113, 127, 112])
    y = np.array([133, 143, 119, 189, 112, 199, 198, 113, 115, 121, 142, 187])
    T, p = stats.wilcoxon(x, y, correction=False)
    assert_equal(T, 34)
    assert_allclose(p, 0.6948866, rtol=1e-6)
    T, p = stats.wilcoxon(x, y, correction=True)
    assert_equal(T, 34)
    assert_allclose(p, 0.7240817, rtol=1e-6)
def test_wilcoxon_tie():
    # Regression test for gh-2391: all-tied one-sample input.
    # Corresponding R code is:
    #   > result = wilcox.test(rep(0.1, 10), exact=FALSE, correct=FALSE)
    #   > result$p.value
    #   [1] 0.001565402
    #   > result = wilcox.test(rep(0.1, 10), exact=FALSE, correct=TRUE)
    #   > result$p.value
    #   [1] 0.001904195
    stat, p = stats.wilcoxon([0.1] * 10)
    expected_p = 0.001565402
    assert_equal(stat, 0)
    assert_allclose(p, expected_p, rtol=1e-6)

    # Same data with the continuity correction enabled.
    stat, p = stats.wilcoxon([0.1] * 10, correction=True)
    expected_p = 0.001904195
    assert_equal(stat, 0)
    assert_allclose(p, expected_p, rtol=1e-6)
# Allow running this test module directly (numpy.testing test runner).
if __name__ == "__main__":
    run_module_suite()
| agpl-3.0 |
wjlei1990/spaceweight | setup.py | 1 | 1886 | import sys
from setuptools import setup, find_packages
from setuptools.command.test import test as TestCommand
class PyTest(TestCommand):
    """setuptools 'test' command that delegates to py.test.

    Usage: ``python setup.py test -a "<py.test arguments>"``.
    """

    user_options = [('pytest-args=', 'a', "Arguments to pass to py.test")]

    def initialize_options(self):
        TestCommand.initialize_options(self)
        self.pytest_args = []

    def run_tests(self):
        # Imported here so plain 'setup.py install' works without pytest.
        import pytest
        import shlex

        # Bug fix: when given on the command line, pytest_args arrives as a
        # single string, but pytest.main() expects a list of arguments.
        args = self.pytest_args
        if isinstance(args, str):
            args = shlex.split(args)
        errno = pytest.main(args)
        sys.exit(errno)
# Package metadata / build configuration for the spaceweight package.
setup(
    name="spaceweight",
    version="0.0.1",
    # NOTE(review): license= says LGPLv3 but the classifier below says
    # GPLv3 — confirm which one actually applies.
    license='GNU Lesser General Public License, version 3 (LGPLv3)',
    description="Python tools for geographical spatial weighting",
    author="Wenjie Lei",
    author_email="lei@princeton.edu",
    url="https://github.com/wjlei1990/spaceweight",
    # Sources live under src/ (the "src layout").
    packages=find_packages("src"),
    package_dir={"": "src"},
    tests_require=['pytest'],
    cmdclass={'test': PyTest},
    zip_safe=False,
    classifiers=[
        # complete classifier list:
        # http://pypi.python.org/pypi?%3Aaction=list_classifiers
        "Development Status :: 4 - Beta",
        "Intended Audience :: Developers",
        "Intended Audience :: Science/Research",
        "Operating System :: Unix",
        "Operating System :: POSIX",
        "Operating System :: Microsoft :: Windows",
        "Programming Language :: Python",
        "Programming Language :: Python :: 2.7",
        "Programming Language :: Python :: Implementation :: CPython",
        "Topic :: Scientific/Engineering",
        "Topic :: Scientific/Engineering :: Physics",
        "License :: OSI Approved :: GNU General Public License v3 (GPLv3)"
    ],
    keywords=[
        "geographic", "space", "weighting strategy"
    ],
    install_requires=[
        "numpy", "obspy>=1.0.0", "flake8>=3.0", "pytest", "nose",
        "future>=0.14.1", "scipy", "matplotlib"
    ],
    extras_require={
        "docs": ["sphinx"]
    }
)
| gpl-3.0 |
shakeh/bridge | res-plotting/Residual_Plotting.py | 2 | 2481 | # Residual_Plotting.py
# A script that takes in an Output_for_plotting_[PULSAR].tim file created by Average_Epochs.py, and an output directory
# as a result, it creates a plot of pulsar residuals, stored in the output directory
# sample input:
# python Residual_Plotting.py /Users/fkeri/Desktop/Output_for_plotting_B1855+09_NANOGrav_9yv0.tim /Users/fkeri/Desktop/
# we can see that it takes in 2 line arguments: [INPUT FILE], [OUTPUT DIRECTORY]
# the output file will have the name of the pulsar: "1713+0747.png"
from matplotlib.ticker import ScalarFormatter, FormatStrFormatter
import matplotlib.pyplot as plt
import sys
import numpy
import jdcal
def mjd2year(X):
    """Convert a Modified Julian Date to an approximate decimal year.

    jdcal.jd2jcal returns (year, month, day, day-fraction); the fractional
    year is approximated with 30-day months over a 365-day year.
    X + 13: offset applied by the original author — presumably bridging the
    Julian/Gregorian calendar difference; confirm before reuse.
    """
    y, m, d = jdcal.jd2jcal(2400000.5, X + 13)[:3]
    return float(y) + ((m - 1) * 30 + d) / 365.0
# ---- Load the residual file -------------------------------------------
# sys.argv[1]: path to the Output_for_plotting_*.tim file.
fawes=open(sys.argv[1], "r")
psrname = fawes.readline()
lines_after_7=fawes.readlines()[7: ] #Skip the text lines
#Create a list from data; each row is tab-separated.
#Per the usage below: col 0 = MJD, col 1 = residual (s),
#col 2 = frequency (MHz), col 3 = residual error.
L=[]
#print lines_after_7[0:2]
for i in range(0, len(lines_after_7)):
    L.append(str(lines_after_7[i]).split('\t'))
    #print float(L[i][0])
#print L[0]
#print float(L[0][0])
#Create x and y lists, one slot per frequency band.
X=[[] for i in range(5)]
er=[[] for i in range(5)]
Y=[[] for i in range (5)]
clrs=['green', 'red', 'blue', 'orange', 'yellow']
# NOTE(review): 601 MHz falls in both the second and third band, so a
# point at exactly 601 MHz would be plotted twice — confirm intent.
freq=[[100, 360], [361, 601], [601, 999], [1000, 2100], [2101, 3000]]
band_p=[]
bandf=['100-360 MHz','361-601 MHz','601-999 MHz', '1000-2100 MHz', '2101-3000 MHz']
bandf_p=[]
#Get X, Y and Er lists for each frequency band.
for j in range(5):
    for i in range(0, len(lines_after_7)):
        #print L[i][0]
        if float(L[i][2])>=freq[j][0] and float(L[i][2])<=freq[j][1]:
            X[j].append(float( mjd2year( float( L[i][0] ) ) ) )
            # Residuals converted from seconds to microseconds.
            Y[j].append(float(L[i][1])*10**6)
            er[j].append(float(L[i][3]))
fawes.close()
#Plotting of the residuals for the 5 bandwidths (skip empty bands).
for i in range (5):
    if X[i]:
        band_i,=plt.plot(X[i], Y[i], marker='o', color=clrs[i], linestyle='none')
        plt.errorbar(X[i], Y[i], yerr=er[i], ls='none', color=clrs[i])
        band_p.append(band_i)
        bandf_p.append(bandf[i])
# Plain year labels on the x axis (no offset notation).
x_formatter = ScalarFormatter(useOffset=False)
ax = plt.gca()
ax.get_xaxis().get_major_formatter().set_useOffset(False)
# sys.argv[2]: output directory; psrname keeps its trailing newline,
# hence the [:len(psrname)-1] slices.
save_path = sys.argv[2]
plt.title( "Pulsar "+psrname[:len(psrname)-1] )
plt.legend(band_p, bandf_p)
plt.axhline(0, color='blue', linestyle='--')
plt.xlabel("TOAs(years)", fontsize=14, color="black")
plt.ylabel("Residuals ($\mu$s)", fontsize=14, color="black")
plt.savefig( save_path+psrname[:len(psrname)-1]+".png" )
#plt.clf()
srjoglekar246/sympy | examples/intermediate/sample.py | 3 | 3446 | """
Utility functions for plotting sympy functions.
See examples\mplot2d.py and examples\mplot3d.py for usable 2d and 3d
graphing functions using matplotlib.
"""
from numpy import repeat, arange, empty, ndarray, array
from sympy import Symbol, Basic, Rational, I, sympify
from sympy.core.sympify import SympifyError
def sample2d(f, x_args):
    """
    Samples a 2d function f over specified intervals and returns two
    arrays (X, Y) suitable for plotting with matlab (matplotlib)
    syntax.  See examples\mplot2d.py.

    f is a function of one variable, such as x**2.
    x_args is an interval given in the form (var, min, max, n)

    Raises ValueError if f cannot be sympified or x_args is malformed.
    """
    try:
        f = sympify(f)
    except SympifyError:
        # Bug fix: SympifyError was referenced without being imported.
        raise ValueError("f could not be interpreted as a SymPy function")
    try:
        x, x_min, x_max, x_n = x_args
    except (TypeError, ValueError):
        # Bug fix: a malformed x_args fails to unpack with TypeError or
        # ValueError; the original caught AttributeError, which tuple
        # unpacking never raises.
        raise ValueError("x_args must be a tuple of the form (var, min, max, n)")

    x_l = float(x_max - x_min)
    x_d = x_l/float(x_n)
    X = arange(float(x_min), float(x_max)+x_d, x_d)

    Y = empty(len(X))
    for i in range(len(X)):
        try:
            Y[i] = float(f.subs(x, X[i]))
        except TypeError:
            # Bug fix: assigning None into a float ndarray raises
            # TypeError; mark non-numeric samples with NaN instead.
            Y[i] = float('nan')
    return X, Y
def sample3d(f, x_args, y_args):
    """
    Samples a 3d function f over specified intervals and returns three
    2d arrays (X, Y, Z) suitable for plotting with matlab (matplotlib)
    syntax.  See examples\mplot3d.py.

    f is a function of two variables, such as x**2 + y**2.
    x_args and y_args are intervals given in the form (var, min, max, n)

    Raises ValueError if f cannot be sympified or either interval is
    malformed.
    """
    x, x_min, x_max, x_n = None, None, None, None
    y, y_min, y_max, y_n = None, None, None, None
    try:
        f = sympify(f)
    except SympifyError:
        # Bug fix: SympifyError was referenced without being imported.
        raise ValueError("f could not be interpreted as a SymPy function")
    try:
        x, x_min, x_max, x_n = x_args
        y, y_min, y_max, y_n = y_args
    except (TypeError, ValueError):
        # Bug fix: a malformed interval fails to unpack with TypeError or
        # ValueError, never AttributeError (which the original caught).
        raise ValueError("x_args and y_args must be tuples of the form (var, min, max, intervals)")

    x_l = float(x_max - x_min)
    x_d = x_l/float(x_n)
    x_a = arange(float(x_min), float(x_max)+x_d, x_d)

    y_l = float(y_max - y_min)
    y_d = y_l/float(y_n)
    y_a = arange(float(y_min), float(y_max)+y_d, y_d)

    def meshgrid(x, y):
        """
        Taken from matplotlib.mlab.meshgrid.
        """
        x = array(x)
        y = array(y)
        numRows, numCols = len(y), len(x)
        x.shape = 1, numCols
        X = repeat(x, numRows, 0)

        y.shape = numRows, 1
        Y = repeat(y, numCols, 1)
        return X, Y

    X, Y = meshgrid(x_a, y_a)

    Z = ndarray((len(X), len(X[0])))
    for j in range(len(X)):
        for k in range(len(X[0])):
            try:
                Z[j][k] = float( f.subs(x, X[j][k]).subs(y, Y[j][k]) )
            except (TypeError, NotImplementedError):
                # Non-numeric sample; keep the original 0.0 placeholder.
                Z[j][k] = 0
    return X, Y, Z
def sample(f, *var_args):
    """
    Samples a 2d or 3d function over specified intervals and returns
    a dataset suitable for plotting with matlab (matplotlib) syntax.
    Wrapper for sample2d and sample3d.

    f is a function of one or two variables, such as x**2.
    var_args are intervals for each variable given in the form (var, min, max, n)
    """
    dims = len(var_args)
    if dims == 1:
        return sample2d(f, var_args[0])
    if dims == 2:
        return sample3d(f, var_args[0], var_args[1])
    raise ValueError("Only 2d and 3d sampling are supported at this time.")
| bsd-3-clause |
tomasreimers/tensorflow-emscripten | tensorflow/contrib/learn/python/learn/estimators/estimator_input_test.py | 18 | 13185 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for Estimator input."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
import sys
import tempfile
# pylint: disable=g-bad-todo
# TODO(#6568): Remove this hack that makes dlopen() not crash.
# pylint: enable=g-bad-todo
# pylint: disable=g-import-not-at-top
if hasattr(sys, 'getdlopenflags') and hasattr(sys, 'setdlopenflags'):
import ctypes
sys.setdlopenflags(sys.getdlopenflags() | ctypes.RTLD_GLOBAL)
import numpy as np
from tensorflow.contrib.framework.python.ops import variables
from tensorflow.contrib.layers.python.layers import optimizers
from tensorflow.contrib.learn.python.learn import metric_spec
from tensorflow.contrib.learn.python.learn import models
from tensorflow.contrib.learn.python.learn.datasets import base
from tensorflow.contrib.learn.python.learn.estimators import _sklearn
from tensorflow.contrib.learn.python.learn.estimators import estimator
from tensorflow.contrib.learn.python.learn.estimators import model_fn
from tensorflow.contrib.metrics.python.ops import metric_ops
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import data_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.platform import test
from tensorflow.python.training import input as input_lib
from tensorflow.python.training import queue_runner_impl
# Feature dimensionality of the toy datasets used throughout these tests.
_BOSTON_INPUT_DIM = 13
_IRIS_INPUT_DIM = 4
def boston_input_fn(num_epochs=None):
    """Input fn returning Boston housing (features, labels) tensors.

    Args:
        num_epochs: forwarded to limit_epochs; None means unlimited.
    """
    boston = base.load_boston()
    features = input_lib.limit_epochs(
        array_ops.reshape(
            constant_op.constant(boston.data), [-1, _BOSTON_INPUT_DIM]),
        num_epochs=num_epochs)
    # Labels reshaped to a column vector for regression.
    labels = array_ops.reshape(constant_op.constant(boston.target), [-1, 1])
    return features, labels
def boston_input_fn_with_queue(num_epochs=None):
    """Same as boston_input_fn, but also registers a trivial queue runner
    so the estimator's queue-handling code paths are exercised."""
    features, labels = boston_input_fn(num_epochs=num_epochs)

    # Create a minimal queue runner.
    fake_queue = data_flow_ops.FIFOQueue(30, dtypes.int32)
    queue_runner = queue_runner_impl.QueueRunner(fake_queue,
                                                 [constant_op.constant(0)])
    queue_runner_impl.add_queue_runner(queue_runner)

    return features, labels
def iris_input_fn():
    """Input fn returning iris (features, labels) tensors; labels are the
    flat class indices."""
    iris = base.load_iris()
    features = array_ops.reshape(
        constant_op.constant(iris.data), [-1, _IRIS_INPUT_DIM])
    labels = array_ops.reshape(constant_op.constant(iris.target), [-1])
    return features, labels
def iris_input_fn_labels_dict():
    """Like iris_input_fn, but wraps the labels in a {'labels': ...} dict
    to exercise dict-valued label handling."""
    iris = base.load_iris()
    features = array_ops.reshape(
        constant_op.constant(iris.data), [-1, _IRIS_INPUT_DIM])
    labels = {
        'labels': array_ops.reshape(constant_op.constant(iris.target), [-1])
    }
    return features, labels
def boston_eval_fn():
    """Eval input fn: the Boston dataset concatenated with itself, so each
    example appears twice."""
    boston = base.load_boston()
    n_examples = len(boston.target)
    features = array_ops.reshape(
        constant_op.constant(boston.data), [n_examples, _BOSTON_INPUT_DIM])
    labels = array_ops.reshape(
        constant_op.constant(boston.target), [n_examples, 1])
    # Duplicate along the batch dimension.
    return array_ops.concat([features, features], 0), array_ops.concat(
        [labels, labels], 0)
def extract(data, key):
    """Return data[key] when data is a dict (key must be present);
    otherwise return data unchanged."""
    if not isinstance(data, dict):
        return data
    assert key in data
    return data[key]
def linear_model_params_fn(features, labels, mode, params):
    """Model fn for zero-initialized linear regression; the learning rate
    is read from params['learning_rate'].

    Returns a (prediction, loss, train_op) tuple.
    """
    # Unwrap dict-valued inputs produced by the dictionary-input tests.
    features = extract(features, 'input')
    labels = extract(labels, 'labels')

    assert mode in (model_fn.ModeKeys.TRAIN, model_fn.ModeKeys.EVAL,
                    model_fn.ModeKeys.INFER)
    prediction, loss = (models.linear_regression_zero_init(features, labels))
    train_op = optimizers.optimize_loss(
        loss,
        variables.get_global_step(),
        optimizer='Adagrad',
        learning_rate=params['learning_rate'])
    return prediction, loss, train_op
def linear_model_fn(features, labels, mode):
    """Model fn for zero-initialized linear regression with a fixed 0.1
    Adagrad learning rate.  Returns (prediction, loss, train_op)."""
    features = extract(features, 'input')
    labels = extract(labels, 'labels')
    assert mode in (model_fn.ModeKeys.TRAIN, model_fn.ModeKeys.EVAL,
                    model_fn.ModeKeys.INFER)
    # NOTE(review): redundant with extract() above unless features is a
    # dict under a key other than 'input' — then the single value is
    # unpacked here.
    if isinstance(features, dict):
        (_, features), = features.items()
    prediction, loss = (models.linear_regression_zero_init(features, labels))
    train_op = optimizers.optimize_loss(
        loss, variables.get_global_step(), optimizer='Adagrad', learning_rate=0.1)
    return prediction, loss, train_op
def linear_model_fn_with_model_fn_ops(features, labels, mode):
    """Same as linear_model_fn, but returns `ModelFnOps`."""
    assert mode in (model_fn.ModeKeys.TRAIN, model_fn.ModeKeys.EVAL,
                    model_fn.ModeKeys.INFER)
    prediction, loss = (models.linear_regression_zero_init(features, labels))
    train_op = optimizers.optimize_loss(
        loss, variables.get_global_step(), optimizer='Adagrad', learning_rate=0.1)
    # Wrap the tuple in the structured ModelFnOps return type.
    return model_fn.ModelFnOps(
        mode=mode, predictions=prediction, loss=loss, train_op=train_op)
def logistic_model_no_mode_fn(features, labels):
    """Model fn (without a mode argument) for 3-class zero-initialized
    logistic regression; predictions are a dict with 'class' and 'prob'."""
    features = extract(features, 'input')
    labels = extract(labels, 'labels')
    # One-hot encode the 3 iris classes.
    labels = array_ops.one_hot(labels, 3, 1, 0)
    prediction, loss = (models.logistic_regression_zero_init(features, labels))
    train_op = optimizers.optimize_loss(
        loss, variables.get_global_step(), optimizer='Adagrad', learning_rate=0.1)
    return {
        'class': math_ops.argmax(prediction, 1),
        'prob': prediction
    }, loss, train_op
# Sample vocabulary-file payloads; unused in this excerpt — presumably
# consumed by tests elsewhere in the file.
VOCAB_FILE_CONTENT = 'emerson\nlake\npalmer\n'
EXTRA_FILE_CONTENT = 'kermit\npiggy\nralph\n'
class EstimatorInputTest(test.TestCase):
def testContinueTrainingDictionaryInput(self):
    """Fit with dict-valued x/y, then restore the checkpoint from the same
    model_dir in a second Estimator and verify evaluate/predict agree."""
    boston = base.load_boston()
    output_dir = tempfile.mkdtemp()
    est = estimator.Estimator(model_fn=linear_model_fn, model_dir=output_dir)
    boston_input = {'input': boston.data}
    float64_target = {'labels': boston.target.astype(np.float64)}
    est.fit(x=boston_input, y=float64_target, steps=50)
    scores = est.evaluate(
        x=boston_input,
        y=float64_target,
        metrics={'MSE': metric_ops.streaming_mean_squared_error})
    del est
    # Create another estimator object with the same output dir.
    est2 = estimator.Estimator(model_fn=linear_model_fn, model_dir=output_dir)

    # Check we can evaluate and predict.
    scores2 = est2.evaluate(
        x=boston_input,
        y=float64_target,
        metrics={'MSE': metric_ops.streaming_mean_squared_error})
    self.assertAllClose(scores2['MSE'], scores['MSE'])
    predictions = np.array(list(est2.predict(x=boston_input)))
    other_score = _sklearn.mean_squared_error(predictions,
                                              float64_target['labels'])
    self.assertAllClose(other_score, scores['MSE'])
def testBostonAll(self):
    """Fit/score/predict round trip on Boston via the SKCompat wrapper."""
    boston = base.load_boston()
    est = estimator.SKCompat(estimator.Estimator(model_fn=linear_model_fn))
    float64_labels = boston.target.astype(np.float64)
    est.fit(x=boston.data, y=float64_labels, steps=100)
    scores = est.score(
        x=boston.data,
        y=float64_labels,
        metrics={'MSE': metric_ops.streaming_mean_squared_error})
    predictions = np.array(list(est.predict(x=boston.data)))
    # The streaming MSE metric must match sklearn's MSE on predictions.
    other_score = _sklearn.mean_squared_error(predictions, boston.target)
    self.assertAllClose(scores['MSE'], other_score)
    self.assertTrue('global_step' in scores)
    self.assertEqual(100, scores['global_step'])
def testBostonAllDictionaryInput(self):
boston = base.load_boston()
est = estimator.Estimator(model_fn=linear_model_fn)
boston_input = {'input': boston.data}
float64_target = {'labels': boston.target.astype(np.float64)}
est.fit(x=boston_input, y=float64_target, steps=100)
scores = est.evaluate(
x=boston_input,
y=float64_target,
metrics={'MSE': metric_ops.streaming_mean_squared_error})
predictions = np.array(list(est.predict(x=boston_input)))
other_score = _sklearn.mean_squared_error(predictions, boston.target)
self.assertAllClose(other_score, scores['MSE'])
self.assertTrue('global_step' in scores)
self.assertEqual(scores['global_step'], 100)
def testIrisAll(self):
iris = base.load_iris()
est = estimator.SKCompat(
estimator.Estimator(model_fn=logistic_model_no_mode_fn))
est.fit(iris.data, iris.target, steps=100)
scores = est.score(
x=iris.data,
y=iris.target,
metrics={('accuracy', 'class'): metric_ops.streaming_accuracy})
predictions = est.predict(x=iris.data)
predictions_class = est.predict(x=iris.data, outputs=['class'])['class']
self.assertEqual(predictions['prob'].shape[0], iris.target.shape[0])
self.assertAllClose(predictions['class'], predictions_class)
self.assertAllClose(
predictions['class'], np.argmax(
predictions['prob'], axis=1))
other_score = _sklearn.accuracy_score(iris.target, predictions['class'])
self.assertAllClose(scores['accuracy'], other_score)
self.assertTrue('global_step' in scores)
self.assertEqual(100, scores['global_step'])
def testIrisAllDictionaryInput(self):
iris = base.load_iris()
est = estimator.Estimator(model_fn=logistic_model_no_mode_fn)
iris_data = {'input': iris.data}
iris_target = {'labels': iris.target}
est.fit(iris_data, iris_target, steps=100)
scores = est.evaluate(
x=iris_data,
y=iris_target,
metrics={('accuracy', 'class'): metric_ops.streaming_accuracy})
predictions = list(est.predict(x=iris_data))
predictions_class = list(est.predict(x=iris_data, outputs=['class']))
self.assertEqual(len(predictions), iris.target.shape[0])
classes_batch = np.array([p['class'] for p in predictions])
self.assertAllClose(classes_batch,
np.array([p['class'] for p in predictions_class]))
self.assertAllClose(
classes_batch,
np.argmax(
np.array([p['prob'] for p in predictions]), axis=1))
other_score = _sklearn.accuracy_score(iris.target, classes_batch)
self.assertAllClose(other_score, scores['accuracy'])
self.assertTrue('global_step' in scores)
self.assertEqual(scores['global_step'], 100)
def testIrisInputFn(self):
iris = base.load_iris()
est = estimator.Estimator(model_fn=logistic_model_no_mode_fn)
est.fit(input_fn=iris_input_fn, steps=100)
_ = est.evaluate(input_fn=iris_input_fn, steps=1)
predictions = list(est.predict(x=iris.data))
self.assertEqual(len(predictions), iris.target.shape[0])
def testIrisInputFnLabelsDict(self):
iris = base.load_iris()
est = estimator.Estimator(model_fn=logistic_model_no_mode_fn)
est.fit(input_fn=iris_input_fn_labels_dict, steps=100)
_ = est.evaluate(
input_fn=iris_input_fn_labels_dict,
steps=1,
metrics={
'accuracy':
metric_spec.MetricSpec(
metric_fn=metric_ops.streaming_accuracy,
prediction_key='class',
label_key='labels')
})
predictions = list(est.predict(x=iris.data))
self.assertEqual(len(predictions), iris.target.shape[0])
def testTrainInputFn(self):
est = estimator.Estimator(model_fn=linear_model_fn)
est.fit(input_fn=boston_input_fn, steps=1)
_ = est.evaluate(input_fn=boston_eval_fn, steps=1)
def testPredictInputFn(self):
est = estimator.Estimator(model_fn=linear_model_fn)
boston = base.load_boston()
est.fit(input_fn=boston_input_fn, steps=1)
input_fn = functools.partial(boston_input_fn, num_epochs=1)
output = list(est.predict(input_fn=input_fn))
self.assertEqual(len(output), boston.target.shape[0])
def testPredictInputFnWithQueue(self):
est = estimator.Estimator(model_fn=linear_model_fn)
boston = base.load_boston()
est.fit(input_fn=boston_input_fn, steps=1)
input_fn = functools.partial(boston_input_fn_with_queue, num_epochs=2)
output = list(est.predict(input_fn=input_fn))
self.assertEqual(len(output), boston.target.shape[0] * 2)
def testPredictConstInputFn(self):
est = estimator.Estimator(model_fn=linear_model_fn)
boston = base.load_boston()
est.fit(input_fn=boston_input_fn, steps=1)
def input_fn():
features = array_ops.reshape(
constant_op.constant(boston.data), [-1, _BOSTON_INPUT_DIM])
labels = array_ops.reshape(constant_op.constant(boston.target), [-1, 1])
return features, labels
output = list(est.predict(input_fn=input_fn))
self.assertEqual(len(output), boston.target.shape[0])
if __name__ == '__main__':
  # Run every TestCase defined in this module.
  test.main()
| apache-2.0 |
markovg/nest-simulator | pynest/nest/voltage_trace.py | 18 | 7823 | # -*- coding: utf-8 -*-
#
# voltage_trace.py
#
# This file is part of NEST.
#
# Copyright (C) 2004 The NEST Initiative
#
# NEST is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# NEST is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with NEST. If not, see <http://www.gnu.org/licenses/>.
"""
Functions to plot voltage traces.
"""
import nest
import numpy
import pylab
def from_file(fname, title=None, grayscale=False):
    """Plot voltage trace from file.

    Parameters
    ----------
    fname : str or list
        Filename or list of filenames to load from
    title : str, optional
        Plot title
    grayscale : bool, optional
        Plot in grayscale

    Returns
    -------
    The pylab plot handle(s), one entry per recorded neuron (or a single
    handle when the file holds only one column).

    Raises
    ------
    ValueError
        If the loaded data has more than three columns.
    """
    if nest.is_iterable(fname):
        # Concatenate the rows of all given files into one array.
        data = None
        for f in fname:
            if data is None:
                data = numpy.loadtxt(f)
            else:
                data = numpy.concatenate((data, numpy.loadtxt(f)))
    else:
        data = numpy.loadtxt(fname)

    if grayscale:
        line_style = "k"
    else:
        line_style = ""

    if len(data.shape) == 1:
        # One column: a single potential trace, no gids or times recorded.
        print("INFO: only found 1 column in the file. \
Assuming that only one neuron was recorded.")
        plotid = pylab.plot(data, line_style)
        pylab.xlabel("Time (steps of length interval)")

    elif data.shape[1] == 2:
        # Two columns: (gid, potential); group potentials by gid.
        print("INFO: found 2 columns in the file. Assuming \
them to be gid, pot.")

        plotid = []
        data_dict = {}
        for d in data:
            if not d[0] in data_dict:
                data_dict[d[0]] = [d[1]]
            else:
                data_dict[d[0]].append(d[1])

        for d in data_dict:
            plotid.append(
                pylab.plot(data_dict[d], line_style, label="Neuron %i" % d)
            )

        pylab.xlabel("Time (steps of length interval)")
        pylab.legend()

    elif data.shape[1] == 3:
        # Three columns: (gid, time, potential); the recording times of the
        # first gid serve as the common time axis for all neurons.
        plotid = []
        data_dict = {}
        g = data[0][0]
        t = []
        for d in data:
            if not d[0] in data_dict:
                data_dict[d[0]] = [d[2]]
            else:
                data_dict[d[0]].append(d[2])
            if d[0] == g:
                t.append(d[1])

        for d in data_dict:
            plotid.append(
                pylab.plot(t, data_dict[d], line_style, label="Neuron %i" % d)
            )

        pylab.xlabel("Time (ms)")
        pylab.legend()

    else:
        # BUG FIX: the previous message used "%i" % data.shape, which itself
        # raised a TypeError because data.shape is a tuple, masking the
        # intended ValueError. Format the whole shape tuple instead.
        raise ValueError("Inappropriate data shape %s!" % (data.shape,))

    if not title:
        title = "Membrane potential from file '%s'" % fname

    pylab.title(title)
    pylab.ylabel("Membrane potential (mV)")
    pylab.draw()

    return plotid
def from_device(detec, neurons=None, title=None, grayscale=False,
                timeunit="ms"):
    """Plot the membrane potential of a set of neurons recorded by
    the given Voltmeter or Multimeter.

    Parameters
    ----------
    detec : list
        Global id of Voltmeter or Multimeter in a list, e.g. [1]
    neurons : list, optional
        Indices of of neurons to plot
    title : str, optional
        Plot title
    grayscale : bool, optional
        Plot in grayscale
    timeunit : str, optional
        Unit of time

    Raises
    ------
    nest.NESTError
        If `detec` is not a single voltmeter/multimeter recording V_m, or
        no events were recorded, or the device records neither to file nor
        to memory.
    """
    # --- input validation: exactly one device, and it must record V_m ---
    if len(detec) > 1:
        raise nest.NESTError("Please provide a single voltmeter.")
    if not nest.GetStatus(detec)[0]['model'] in ('voltmeter', 'multimeter'):
        raise nest.NESTError("Please provide a voltmeter or a \
multimeter measuring V_m.")
    elif nest.GetStatus(detec)[0]['model'] == 'multimeter':
        if "V_m" not in nest.GetStatus(detec, "record_from")[0]:
            raise nest.NESTError("Please provide a multimeter \
measuring V_m.")
        elif (not nest.GetStatus(detec, "to_memory")[0] and
              len(nest.GetStatus(detec, "record_from")[0]) > 1):
            raise nest.NESTError("Please provide a multimeter measuring \
only V_m or record to memory!")

    if nest.GetStatus(detec, "to_memory")[0]:
        # Times are rescaled only when recorded in ms and seconds were
        # requested; any other requested unit falls back to "ms".
        timefactor = 1.0
        if not nest.GetStatus(detec)[0]['time_in_steps']:
            if timeunit == "s":
                timefactor = 1000.0
            else:
                timeunit = "ms"

        times, voltages = _from_memory(detec)

        if not len(times):
            raise nest.NESTError("No events recorded! Make sure that \
withtime and withgid are set to True.")

        if neurons is None:
            neurons = voltages.keys()

        plotids = []
        for neuron in neurons:
            time_values = numpy.array(times[neuron]) / timefactor

            if grayscale:
                line_style = "k"
            else:
                line_style = ""

            try:
                plotids.append(
                    pylab.plot(time_values, voltages[neuron],
                               line_style, label="Neuron %i" % neuron)
                )
            except KeyError:
                # neuron id not among the recorded senders; skip it
                print("INFO: Wrong ID: {0}".format(neuron))

        if not title:
            title = "Membrane potential"
        pylab.title(title)

        pylab.ylabel("Membrane potential (mV)")

        if nest.GetStatus(detec)[0]['time_in_steps']:
            pylab.xlabel("Steps")
        else:
            pylab.xlabel("Time (%s)" % timeunit)

        pylab.legend(loc="best")
        pylab.draw()

        return plotids

    elif nest.GetStatus(detec, "to_file")[0]:
        # Device recorded to file: delegate to the file-based plotter.
        fname = nest.GetStatus(detec, "filenames")[0]
        return from_file(fname, title, grayscale)
    else:
        raise nest.NESTError("Provided devices neither records to file, \
nor to memory.")
def _from_memory(detec):
    """Get voltage traces from memory.

    Parameters
    ----------
    detec : list
        Global id of Voltmeter or Multimeter

    Returns
    -------
    (t, v) : pair of dicts keyed by sender gid, holding the recording
        times and membrane potentials respectively.
    """
    import array

    ev = nest.GetStatus(detec, 'events')[0]
    potentials = ev['V_m']
    senders = ev['senders']

    v = {}
    t = {}

    # Nothing recorded: return empty dicts instead of dividing by zero in
    # the time-reconstruction branch below. The caller treats this as the
    # "no events recorded" error case.
    if len(senders) == 0:
        return t, v

    if 'times' in ev:
        times = ev['times']
        for s, currentsender in enumerate(senders):
            if currentsender not in v:
                v[currentsender] = array.array('f')
                t[currentsender] = array.array('f')

            v[currentsender].append(float(potentials[s]))
            t[currentsender].append(float(times[s]))
    else:
        # reconstruct the time vector, if not stored explicitly
        detec_status = nest.GetStatus(detec)[0]
        origin = detec_status['origin']
        start = detec_status['start']
        interval = detec_status['interval']
        senders_uniq = numpy.unique(senders)
        # BUG FIX: use floor division so num_intvls stays an int under
        # Python 3 ('/' would yield a float and break range() below). The
        # counts divide exactly: one sample per sender per interval.
        num_intvls = len(senders) // len(senders_uniq)
        times_s = origin + start + interval + \
            interval * numpy.array(range(num_intvls))

        for s, currentsender in enumerate(senders):
            if currentsender not in v:
                v[currentsender] = array.array('f')
                t[currentsender] = times_s
            v[currentsender].append(float(potentials[s]))

    return t, v
def show():
    """Call pylab.show() to show all figures and enter the GUI main loop.

    Python will block until all figure windows are closed again.
    You should call this function only once at the end of a script.

    See also: http://matplotlib.sourceforge.net/faq/howto_faq.html#use-show
    """
    # Thin convenience wrapper so callers need not import pylab themselves.
    pylab.show()
| gpl-2.0 |
dalejung/trtools | trtools/core/select.py | 1 | 2914 | """
Collections of tools to quickly select rows/items
"""
import collections
import warnings
import numpy as np
from pandas import Panel, DataFrame, MultiIndex, Series, Timestamp
from pandas.core.indexing import _IXIndexer
from trtools.monkey import patch, patch_prop
@patch(DataFrame, 'cols')
def _cols(self, *args):
    # Monkey-patched onto DataFrame as .cols: cross-section of the named
    # columns, e.g. df.cols('a', 'b') -> df.xs(['a', 'b'], axis=1).
    return self.xs(list(args), axis=1)
@patch([DataFrame, Series])
def selectone(self, func):
    """
    Like .select(func) but yields only the first matching row,
    or None when nothing matches.
    """
    matches = self.select(func)
    if len(matches) == 0:
        return None
    return self.ix[matches.index[0]]
@patch(Series, 'show')
def show(self, val):
    """
    show(val)

    Return all rows matching a value.

    val can be a plain value (equality test) or a callable predicate
    applied element-wise.
    """
    # BUG FIX: collections.Callable was a deprecated alias removed in
    # Python 3.10; the builtin callable() performs the same __call__
    # duck-check and works on Python 2 and 3.
    if callable(val):
        func = np.vectorize(val)
        bools = func(self)
    else:
        bools = self == val
    return self[bools]
@patch_prop([Panel, DataFrame, Series], 'rx')
def rx(self):
    """
    For grabbing row-wise which means the axis that a DatetimeIndex would
    normally be found
    """
    # Deprecated alias kept for backwards compatibility: forwards to the
    # positional indexer after warning.
    warnings.warn(".rx is deprecated in favor of .iloc")
    return self.iloc
@patch([DataFrame, Series])
def pluck(df, target, buffer=2):
    """
    Return the window of rows surrounding ``target``: ``buffer`` rows on
    each side, clipped to the frame's bounds.

    target : int position or index label
    buffer : int, default 2
        Number of neighbouring rows on each side.
    """
    if not isinstance(target, int):
        try:
            target = df.index.get_loc(target)
        except (KeyError, TypeError):
            # BUG FIX: was a bare `except:`, which also swallowed
            # SystemExit/KeyboardInterrupt. get_loc raises KeyError for a
            # missing label (TypeError for unhashable ones); re-raise with
            # the friendlier message as before.
            raise Exception("%s not in index" % target)

    lower = max(0, target - buffer)
    higher = min(len(df), target + buffer + 1)
    return df.ix[lower:higher]
def time_pluck(df, target, buffer=2, index=None):
    """
    Instead of a pos-int pluck, select all rows whose timestamp falls
    within ``buffer`` of ``target`` (a datetime-span window).

    Parameters
    ----------
    df : DataFrame or Series
    target : datetime-like
        Center of the window.
    buffer : int or offset alias, default 2
        Window half-width; a plain int is interpreted as a day count.
    index : array-like of datetimes, optional
        Index to filter on; defaults to ``df.index``.
    """
    # BUG FIX: the docstring used to sit *after* this import, making it a
    # dead string expression rather than the function's docstring.
    from pandas.tseries.frequencies import to_offset

    if isinstance(buffer, int):
        # bare ints are treated as days
        buffer = "{0}D".format(buffer)

    offset = to_offset(buffer)
    start = Timestamp(target) - offset
    end = Timestamp(target) + offset

    if index is None:
        index = df.index
    mask = (index >= start) & (index <= end)
    # .loc with a boolean mask is equivalent to the old .ix here and keeps
    # working on pandas >= 1.0, where .ix was removed.
    return df.loc[mask]
@patch([DataFrame], 'subset')
def subset(self, start, max=10):
    """
    Select a window of data.

    start : int
        Starting row position; negative values count from the end, as in
        normal Python indexing.
    max : int, default 10
        Maximum number of rows in the window.
    """
    if start < 0:
        # translate a negative offset into an absolute position
        start = len(self) + start
    end = start + max
    return self.ix[start:end]
@patch([DataFrame], 'peek')
def peek(self, num_rows=5, max_col=None):
    """
    Quick look at the top-left corner of the frame: at most ``num_rows``
    rows by ``max_col`` columns (default 5 columns).
    """
    # need to access the pandas column logic instead of hardcoded five
    col_limit = max_col or 5
    total_rows, total_cols = self.shape
    return self.iloc[:min(num_rows, total_rows), :min(col_limit, total_cols)]
@patch([DataFrame], 'barf')
def barf(self, num_rows=5, max_col=None):
    """
    Keep on running into issues where in notebook, I want to just show everything.

    Renders the frame as notebook HTML with display.max_columns bumped so
    nothing is truncated. (num_rows/max_col are currently unused.)
    """
    from IPython.core.display import HTML
    import pandas.core.config as config
    config.set_option("display.max_columns", 1000)
    try:
        # to_html may raise; always restore the global display option so a
        # failure doesn't leak the temporary 1000-column setting into the
        # interactive session.
        h = HTML(self.to_html())
    finally:
        config.reset_option("display.max_columns")
    return h
| mit |
magne-max/zipline-ja | zipline/data/bundles/yahoo.py | 1 | 6260 | import os
import numpy as np
import pandas as pd
from pandas_datareader.data import DataReader
import requests
from zipline.utils.cli import maybe_show_progress
from .core import register
def _cachpath(symbol, type_):
return '-'.join((symbol.replace(os.path.sep, '_'), type_))
def yahoo_equities(symbols, start=None, end=None):
    """Create a data bundle ingest function from a set of symbols loaded from
    yahoo.

    Parameters
    ----------
    symbols : iterable[str]
        The ticker symbols to load data for.
    start : datetime, optional
        The start date to query for. By default this pulls the full history
        for the calendar.
    end : datetime, optional
        The end date to query for. By default this pulls the full history
        for the calendar.

    Returns
    -------
    ingest : callable
        The bundle ingest function for the given set of symbols.

    Examples
    --------
    This code should be added to ~/.zipline/extension.py

    .. code-block:: python

       from zipline.data.bundles import yahoo_equities, register

       symbols = (
           'AAPL',
           'IBM',
           'MSFT',
       )
       register('my_bundle', yahoo_equities(symbols))

    Notes
    -----
    The sids for each symbol will be the index into the symbols sequence.
    """
    # strict this in memory so that we can reiterate over it
    symbols = tuple(symbols)

    def ingest(environ,
               asset_db_writer,
               minute_bar_writer,  # unused
               daily_bar_writer,
               adjustment_writer,
               calendar,
               start_session,
               end_session,
               cache,
               show_progress,
               output_dir,
               # pass these as defaults to make them 'nonlocal' in py2
               start=start,
               end=end):
        if start is None:
            start = start_session
        if end is None:
            # NOTE(review): this branch is a no-op (end stays None, meaning
            # "full history" for DataReader) — presumably intentional.
            end = None

        # One metadata row per symbol, filled in by _pricing_iter below.
        metadata = pd.DataFrame(np.empty(len(symbols), dtype=[
            ('start_date', 'datetime64[ns]'),
            ('end_date', 'datetime64[ns]'),
            ('auto_close_date', 'datetime64[ns]'),
            ('symbol', 'object'),
        ]))

        def _pricing_iter():
            # Yields (sid, ohlcv_frame) pairs for the daily bar writer and
            # fills the shared `metadata` frame as a side effect. The sid is
            # simply the position of the symbol in the input sequence.
            sid = 0
            with maybe_show_progress(
                    symbols,
                    show_progress,
                    label='Downloading Yahoo pricing data: ') as it, \
                    requests.Session() as session:
                for symbol in it:
                    path = _cachpath(symbol, 'ohlcv')
                    try:
                        # resume from the download cache if possible
                        df = cache[path]
                    except KeyError:
                        df = cache[path] = DataReader(
                            symbol,
                            'yahoo',
                            start,
                            end,
                            session=session,
                        ).sort_index()

                    # the start date is the date of the first trade and
                    # the end date is the date of the last trade
                    start_date = df.index[0]
                    end_date = df.index[-1]
                    # The auto_close date is the day after the last trade.
                    ac_date = end_date + pd.Timedelta(days=1)
                    metadata.iloc[sid] = start_date, end_date, ac_date, symbol

                    df.rename(
                        columns={
                            'Open': 'open',
                            'High': 'high',
                            'Low': 'low',
                            'Close': 'close',
                            'Volume': 'volume',
                        },
                        inplace=True,
                    )
                    yield sid, df
                    sid += 1

        daily_bar_writer.write(_pricing_iter(), show_progress=show_progress)

        # symbol -> sid mapping for the adjustment pass below
        symbol_map = pd.Series(metadata.symbol.index, metadata.symbol)

        # Hardcode the exchange to "YAHOO" for all assets and (elsewhere)
        # register "YAHOO" to resolve to the NYSE calendar, because these are
        # all equities and thus can use the NYSE calendar.
        metadata['exchange'] = "YAHOO"
        asset_db_writer.write(equities=metadata)

        adjustments = []
        with maybe_show_progress(
                symbols,
                show_progress,
                label='Downloading Yahoo adjustment data: ') as it, \
                requests.Session() as session:
            for symbol in it:
                path = _cachpath(symbol, 'adjustment')
                try:
                    df = cache[path]
                except KeyError:
                    df = cache[path] = DataReader(
                        symbol,
                        'yahoo-actions',
                        start,
                        end,
                        session=session,
                    ).sort_index()

                df['sid'] = symbol_map[symbol]
                adjustments.append(df)

        adj_df = pd.concat(adjustments)
        adj_df.index.name = 'date'
        adj_df.reset_index(inplace=True)

        # Split the combined action frame into the split/dividend tables
        # expected by the adjustment writer.
        splits = adj_df[adj_df.action == 'SPLIT']
        splits = splits.rename(
            columns={'value': 'ratio', 'date': 'effective_date'},
        )
        splits.drop('action', axis=1, inplace=True)

        dividends = adj_df[adj_df.action == 'DIVIDEND']
        dividends = dividends.rename(
            columns={'value': 'amount', 'date': 'ex_date'},
        )
        dividends.drop('action', axis=1, inplace=True)

        # we do not have this data in the yahoo dataset
        dividends['record_date'] = pd.NaT
        dividends['declared_date'] = pd.NaT
        dividends['pay_date'] = pd.NaT

        adjustment_writer.write(splits=splits, dividends=dividends)

    return ingest
# bundle used when creating test data
# Registered under the hidden name '.test'; the fixed date range keeps the
# generated test bundle reproducible.
register(
    '.test',
    yahoo_equities(
        (
            'AMD',
            'CERN',
            'COST',
            'DELL',
            'GPS',
            'INTC',
            'MMM',
            'AAPL',
            'MSFT',
        ),
        pd.Timestamp('2004-01-02', tz='utc'),
        pd.Timestamp('2015-01-01', tz='utc'),
    ),
)
| apache-2.0 |
bhargav/scikit-learn | benchmarks/bench_isolation_forest.py | 40 | 3136 | """
==========================================
IsolationForest benchmark
==========================================
A test of IsolationForest on classical anomaly detection datasets.
"""
print(__doc__)
from time import time
import numpy as np
import matplotlib.pyplot as plt
from sklearn.ensemble import IsolationForest
from sklearn.metrics import roc_curve, auc
from sklearn.datasets import fetch_kddcup99, fetch_covtype, fetch_mldata
from sklearn.preprocessing import LabelBinarizer
from sklearn.utils import shuffle as sh
np.random.seed(1)

# Datasets to benchmark; all but 'http' are disabled by default.
datasets = ['http']#, 'smtp', 'SA', 'SF', 'shuttle', 'forestcover']

for dat in datasets:
    # loading and vectorization
    print('loading data')
    if dat in ['http', 'smtp', 'SA', 'SF']:
        dataset = fetch_kddcup99(subset=dat, shuffle=True, percent10=True)
        X = dataset.data
        y = dataset.target

    if dat == 'shuttle':
        dataset = fetch_mldata('shuttle')
        X = dataset.data
        y = dataset.target
        sh(X, y)
        # we remove data with label 4
        # normal data are then those of class 1
        s = (y != 4)
        X = X[s, :]
        y = y[s]
        y = (y != 1).astype(int)

    if dat == 'forestcover':
        dataset = fetch_covtype(shuffle=True)
        X = dataset.data
        y = dataset.target
        # normal data are those with attribute 2
        # abnormal those with attribute 4
        s = (y == 2) + (y == 4)
        X = X[s, :]
        y = y[s]
        y = (y != 2).astype(int)

    print('vectorizing data')

    # One-hot encode the categorical KDD columns and binarize labels
    # (anomaly = anything but 'normal.').
    if dat == 'SF':
        lb = LabelBinarizer()
        lb.fit(X[:, 1])
        x1 = lb.transform(X[:, 1])
        X = np.c_[X[:, :1], x1, X[:, 2:]]
        y = (y != 'normal.').astype(int)

    if dat == 'SA':
        lb = LabelBinarizer()
        lb.fit(X[:, 1])
        x1 = lb.transform(X[:, 1])
        lb.fit(X[:, 2])
        x2 = lb.transform(X[:, 2])
        lb.fit(X[:, 3])
        x3 = lb.transform(X[:, 3])
        X = np.c_[X[:, :1], x1, x2, x3, X[:, 4:]]
        y = (y != 'normal.').astype(int)

    if dat == 'http' or dat == 'smtp':
        y = (y != 'normal.').astype(int)

    # First half of the (shuffled) data trains the model, second half tests.
    n_samples, n_features = np.shape(X)
    n_samples_train = n_samples // 2
    n_samples_test = n_samples - n_samples_train

    X = X.astype(float)
    X_train = X[:n_samples_train, :]
    X_test = X[n_samples_train:, :]
    y_train = y[:n_samples_train]
    y_test = y[n_samples_train:]

    print('IsolationForest processing...')
    model = IsolationForest(bootstrap=True, n_jobs=-1)
    tstart = time()
    model.fit(X_train)
    fit_time = time() - tstart
    tstart = time()

    scoring = model.predict(X_test)  # the lower, the more normal
    predict_time = time() - tstart
    fpr, tpr, thresholds = roc_curve(y_test, scoring)
    AUC = auc(fpr, tpr)
    plt.plot(fpr, tpr, lw=1, label='ROC for %s (area = %0.3f, train-time: %0.2fs, test-time: %0.2fs)' % (dat, AUC, fit_time, predict_time))

# Shared axes/legend for all per-dataset ROC curves plotted above.
plt.xlim([-0.05, 1.05])
plt.ylim([-0.05, 1.05])
plt.xlabel('False Positive Rate')
plt.ylabel('True Positive Rate')
plt.title('Receiver operating characteristic')
plt.legend(loc="lower right")
plt.show()
| bsd-3-clause |
ngoix/OCRF | examples/linear_model/plot_bayesian_ridge.py | 50 | 2733 | """
=========================
Bayesian Ridge Regression
=========================
Computes a Bayesian Ridge Regression on a synthetic dataset.
See :ref:`bayesian_ridge_regression` for more information on the regressor.
Compared to the OLS (ordinary least squares) estimator, the coefficient
weights are slightly shifted toward zeros, which stabilises them.
As the prior on the weights is a Gaussian prior, the histogram of the
estimated weights is Gaussian.
The estimation of the model is done by iteratively maximizing the
marginal log-likelihood of the observations.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from scipy import stats
from sklearn.linear_model import BayesianRidge, LinearRegression
###############################################################################
# Generating simulated data with Gaussian weights
np.random.seed(0)
n_samples, n_features = 100, 100
X = np.random.randn(n_samples, n_features)  # Create Gaussian data
# Create weights with a precision lambda_ of 4.
lambda_ = 4.
w = np.zeros(n_features)
# Only keep 10 weights of interest
relevant_features = np.random.randint(0, n_features, 10)
for i in relevant_features:
    w[i] = stats.norm.rvs(loc=0, scale=1. / np.sqrt(lambda_))
# Create noise with a precision alpha of 50.
alpha_ = 50.
noise = stats.norm.rvs(loc=0, scale=1. / np.sqrt(alpha_), size=n_samples)
# Create the target
y = np.dot(X, w) + noise

###############################################################################
# Fit the Bayesian Ridge Regression and an OLS for comparison
clf = BayesianRidge(compute_score=True)
clf.fit(X, y)

ols = LinearRegression()
ols.fit(X, y)

###############################################################################
# Plot true weights, estimated weights and histogram of the weights
lw = 2
plt.figure(figsize=(6, 5))
plt.title("Weights of the model")
plt.plot(clf.coef_, color='lightgreen', linewidth=lw,
         label="Bayesian Ridge estimate")
plt.plot(w, color='gold', linewidth=lw, label="Ground truth")
plt.plot(ols.coef_, color='navy', linestyle='--', label="OLS estimate")
plt.xlabel("Features")
plt.ylabel("Values of the weights")
plt.legend(loc="best", prop=dict(size=12))

plt.figure(figsize=(6, 5))
plt.title("Histogram of the weights")
plt.hist(clf.coef_, bins=n_features, color='gold', log=True)
plt.scatter(clf.coef_[relevant_features], 5 * np.ones(len(relevant_features)),
            color='navy', label="Relevant features")
plt.ylabel("Features")
plt.xlabel("Values of the weights")
plt.legend(loc="upper left")

# Marginal log-likelihood maximized iteratively during the fit.
plt.figure(figsize=(6, 5))
plt.title("Marginal log-likelihood")
plt.plot(clf.scores_, color='navy', linewidth=lw)
plt.ylabel("Score")
plt.xlabel("Iterations")
plt.show()
| bsd-3-clause |
nlpub/russe-evaluation | russe/evaluation/evaluate.py | 1 | 4899 | #!/usr/bin/env python
import argparse
from pandas import read_csv
from scipy.stats import pearsonr, spearmanr
from os.path import splitext
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import numpy as np
from sklearn.metrics import average_precision_score, precision_recall_curve, \
accuracy_score, roc_auc_score, classification_report
# Default submission filenames used when no path is supplied on the CLI.
HJ_FILE = "hj-submission.csv"
SRC_FILE = "src-submission.csv"
# When True, the precision-recall figure is also shown interactively.
SHOW = False
def hj_evaluation_args(args):
    # argparse dispatch shim: unpack the namespace for hj_evaluation()
    return hj_evaluation(args.hj_fpath)
def hj_evaluation(hj_fpath):
    """Evaluate a submission against human judgements (HJ).

    Reads a CSV with (at least) the columns ``usim`` (submitted
    similarity) and ``sim`` (gold human judgement) and reports Spearman's
    and Pearson's correlation between them.

    Returns Spearman's rho.
    """
    # Single-argument parenthesized prints behave identically on Python 2
    # and 3 (the original used py2-only print statements).
    print("=======================================================")
    print("Evaluation based on correlations with human judgements")
    print("See Section 1.1 of http://russe.nlpub.ru/task\n")
    print("Input file: " + hj_fpath)

    # sep as a keyword: positional sep is keyword-only in pandas >= 2.0.
    hj_df = read_csv(hj_fpath, sep=',', encoding='utf8')
    # Compute each correlation once (the original re-ran spearmanr for the
    # return value after already running it for the printout).
    rho, rho_p = spearmanr(hj_df.usim, hj_df.sim)
    r, r_p = pearsonr(hj_df.usim, hj_df.sim)
    print("Spearman's correlation with human judgements:\t%.5f (p-value = %.3f)" % (rho, rho_p))
    print("Pearson's correlation with human judgements:\t%.5f (p-value = %.3f)" % (r, r_p))
    return rho
def semantic_relation_classification_evaluation_args(args):
    # argparse dispatch shim for the 'src' subcommand
    return semantic_relation_classification_evaluation(args.src_fpath)
def semantic_relation_classification_evaluation(src_fpath):
    """Evaluate a submission via semantic relation classification.

    Derives binary predictions from the submitted similarities
    (predict_by_sim) and scores them (src_evaluation).

    Returns the (average_precision, accuracy) pair from src_evaluation.
    """
    # Single-argument parenthesized prints run on both Python 2 and 3
    # (the original used py2-only print statements).
    print("\n=======================================================")
    print("Evaluation based on semantic relation classificaton")  # (sic)
    print("See Section 1.2 of http://russe.nlpub.ru/task\n")

    y_test, y_predict, y_score = predict_by_sim(src_fpath)
    s = src_evaluation(y_test, y_predict, y_score, src_fpath)
    return s
def src_evaluation(y_test, y_predict, y_score, src_fpath, print_data=False):
    """Score binary relation-classification results.

    y_test : gold 0/1 labels; y_predict : hard predictions;
    y_score : submitted similarity scores used for ranking metrics.
    Prints the metrics, saves a precision-recall plot next to
    ``src_fpath`` and returns (average_precision, accuracy).
    """
    if print_data:
        print_y(y_test, y_score)

    # Compute performance metrics
    s = {}
    precision, recall, _ = precision_recall_curve(y_test, y_score)
    s["average_precision"] = average_precision_score(y_test, y_score)
    s["roc_auc"] = roc_auc_score(y_test, y_score)
    s["accuracy"] = accuracy_score(y_test, y_predict)

    for statistic in s:
        print "%s: %.3f" % (statistic, s[statistic])
    print classification_report(y_test, y_predict)

    # Plot Precision-Recall curve
    plt.clf()
    plt.plot(recall, precision, label='Precision-Recall curve')
    plt.xlabel('Recall')
    plt.ylabel('Precision')
    plt.ylim([0.0, 1.05])
    plt.xlim([0.0, 1.0])
    plt.title('Precision-Recall curve: AUC={0:0.5f}'.format(s["average_precision"]))
    if SHOW: plt.show()
    # saved beside the submission file as <name>-pr.png
    fig_fpath = splitext(src_fpath)[0] + "-pr.png"
    plt.savefig(fig_fpath)
    print "precision-recall plot:", fig_fpath

    return s["average_precision"], s["accuracy"]
def print_y(y_test, y_score):
    """Debug helper: print (gold label, score) pairs sorted by score.

    Both inputs are 1-D numpy arrays of equal length.
    """
    y_test = np.reshape(y_test, (y_test.shape[0], 1))
    y_score = np.reshape(y_score, (y_score.shape[0], 1))
    y = np.hstack([y_test, y_score])
    # BUG FIX: this was a Python 2-only print statement; the single-arg
    # parenthesized form behaves identically on Python 2 and 3.
    print(y[y[:, 1].argsort()][::-1])
def predict_by_sim(df_fpath):
    """Turn submitted similarities into hard 0/1 predictions.

    For each word1, its candidate word2 rows are ranked by descending
    usim; the top half are predicted 'related' (1), the rest 0 — half of
    each group is related by construction of the task data. Writes the
    frame with the added 'predict' column to <name>-predict.csv and
    returns (y_test, y_predict, y_score) lists.
    """
    df = read_csv(df_fpath, ',', encoding='utf8', error_bad_lines=False, warn_bad_lines=False)
    # by word2 in order to deal with many zeros and a pre-sorted frame
    df = df.sort_values(['word1', 'usim', 'word2'], ascending=[1, 0, 1])

    # number of candidate rows per word1 (count of the word2 column)
    df_group = df.groupby(["word1"], sort=False).count()
    rel_num = {r.name: r.word2 for i, r in df_group.iterrows()}

    # w_i is the 1-based rank of the current row within its word1 group;
    # the frame is already sorted so a group change resets the counter.
    y_predict = []
    w_i = 1
    w_prev = ""
    for i, r in df.iterrows():
        if r.word1 != w_prev: w_i = 1
        related = int(w_i <= float(rel_num[r.word1]) / 2)
        y_predict.append(related)
        w_i += 1
        w_prev = r.word1

    df["predict"] = y_predict
    output_fpath = splitext(df_fpath)[0] + "-predict.csv"
    df.to_csv(output_fpath, sep=',', encoding='utf-8', index=False)
    print "predict:", output_fpath

    # missing gold/score entries are treated as 0
    df = df.fillna(0.0)
    y_test = df.sim.values.tolist()
    y_predict = df.predict.values.tolist()
    y_score = df.usim.values.tolist()

    return y_test, y_predict, y_score
def main():
    """Command-line entry point: dispatch to the 'hj' or 'src' evaluation."""
    parser = argparse.ArgumentParser(description='RUSSE Evaluation Script. See http://russe.nlpub.ru for more details.')
    subparsers = parser.add_subparsers(description='Help for subcommand.')

    parser_hj = subparsers.add_parser('hj', description='Evaluation based on correlations with human judgements.')
    parser_hj.set_defaults(func=hj_evaluation_args)
    parser_hj.add_argument('--hj_fpath', help='A CSV file in the format "word1,word2,hj,sim" e.g. ' + HJ_FILE, default=HJ_FILE)

    parser_hj = subparsers.add_parser('src', description='Evaluation based on semantic relation classification.')
    parser_hj.set_defaults(func=semantic_relation_classification_evaluation_args)
    parser_hj.add_argument('--src_fpath', help='A CSV file in the format "word1,word2,related,sim" e.g. ' + SRC_FILE, default=SRC_FILE)

    args = parser.parse_args()
    # run whichever evaluation the chosen subcommand registered
    args.func(args)
if __name__ == '__main__':
    # CLI entry point
    main()
| mit |
mlassnig/pilot | Experiment.py | 3 | 37135 | # Class definition:
# Experiment
# This class is the main experiment class; ATLAS etc will inherit from this class
# Instances are generated with ExperimentFactory
# Subclasses should implement all needed methods prototyped in this class
# Note: not compatible with Singleton Design Pattern due to the subclassing
import os
import re
import time
import commands
from subprocess import Popen, PIPE
from PilotErrors import PilotErrors
from pUtil import tolog # Dump to pilot log
from pUtil import readpar # Used to read values from the schedconfig DB (queuedata)
from pUtil import getCmtconfig # cmtconfig (move to subclass)
from pUtil import getDirectAccessDic # Get the direct access dictionary
from pUtil import isBuildJob # Is the current job a build job?
from pUtil import remove # Used to remove redundant file before log file creation
from pUtil import getPilotlogFilename # Used in the subprocess arguments method
from pUtil import extractHPCInfo # Used by getSubprocessName() to determine HPC plug-in if necessary
class Experiment(object):
    """Base class for experiment-specific pilot behaviour.

    Instances are generated with ExperimentFactory; ATLAS etc. subclass
    this and override the hooks. The generic implementations below are
    mostly no-ops or safe defaults.
    """

    # experiment = "generic" # String defining the experiment

    # private data members
    __experiment = "generic"           # String defining the experiment
    __instance = None                  # Boolean used by subclasses to become a Singleton
    __error = PilotErrors()            # PilotErrors object
    __doFileLookups = False            # True for LFC based file lookups (basically a dummy data member here since singleton object is static)
    __cache = ""                       # Cache URL used e.g. by LSST

    # Required methods
    def __init__(self, *args, **kwargs):
        """ Default initialization """
        # Intentionally empty in the base class; subclasses may read
        # keyword arguments here, e.g.:
        # e.g. self.__errorLabel = errorLabel
        # self.experiment = kwargs.get('experiment')

        pass
    def getExperiment(self):
        """ Return a string with the experiment name """
        # return self.experiment
        # Reads the private (name-mangled) class attribute, "generic" here;
        # subclasses typically override this accessor.
        return self.__experiment
def getJobExecutionCommand(self):
""" Define and test the command(s) that will be used to execute the payload """
# E.g. cmd = "source <path>/setup.sh; <path>/python <script>"
cmd = ""
return cmd
    def getFileLookups(self):
        """ Return the file lookup boolean """
        # False by default in the base class (see __doFileLookups above).
        return self.__doFileLookups
    def doFileLookups(self, doFileLookups):
        """ Update the file lookups boolean """
        # Deliberately a no-op here.
        # Only implement this method if class really wants to update the __doFileLookups boolean
        # ATLAS wants to implement this, but not CMS
        # Method is used by Mover
        # self.__doFileLookups = doFileLookups
        pass
def willDoAlternativeFileLookups(self):
""" Should file lookups be done using alternative methods? """
# E.g. in the migration period where LFC lookups are halted in favour of other methods in the Rucio API
# (for ATLAS), this method could be useful. See the usage in Mover::getReplicaDictionary() which is called
# after Experiment::willDoFileLookups() defined above. The motivation is that direct LFC calls are not to be
# used any longer by the pilot, and in the migration period the actual LFC calls will be done in the Rucio
# API. Eventually this API will switch to alternative file lookups.
return False
    def willDoFileLookups(self):
        """ Should (LFC) file lookups be done by the pilot or not? """
        # Same flag as getFileLookups(); kept as a separate, more
        # descriptively named accessor (used by Mover).
        return self.__doFileLookups
def willDoFileRegistration(self):
""" Should (LFC) file registration be done by the pilot or not? """
return False
def getFileCatalog(self):
""" Return the default file catalog to use (e.g. for replica lookups) """
# See usage in Mover.py
# e.g. 'lfc://prod-lfc-atlas.cern.ch:/grid/atlas'
return ""
# Additional optional methods
# These methods are optional and can be left as they are here, or modified according to special needs
def verifyProxy(self, envsetup="", limit=None):
""" Check for a valid voms/grid proxy longer than N hours """
# Use 'limit' to set required length
tolog("(verifyProxy() is not implemented)")
exitcode = 0
pilotErrorDiag = ""
return exitcode, pilotErrorDiag
def removeRedundantFiles(self, workdir):
""" Remove redundant files and directories """
# List of files and directories to be removed from work directory prior to log file creation
# Make sure that any large files or directories that are not wanted in the log file are included in this list
dir_list = [
"buildJob*",
"external",
"fort.*",
"home",
"python",
"share",
"workdir",
"*.py",
"*.pyc",
"*.root*",
"JEM",
"tmp*",
"*.tmp",
"*.TMP",
"scratch",
]
for _dir in dir_list:
files = glob(os.path.join(workdir, _dir))
rc = remove(files)
if not rc:
tolog("IGNORE: Failed to remove redundant file(s): %s" % (files))
def getPayloadName(self, job):
""" Set a suitable name for the payload stdout """
# The payload <name> gets translated into <name>_stdout.txt
# which is the name of the stdout file produced by the payload execution
# (essentially commands.getoutput("<setup>; <payload executable> [options] > <name>_stdout.txt"))
# The job object can be used to create more precise stdout names (see e.g. the ATLASExperiment implementation)
return "payload"
def isOutOfMemory(self, **kwargs):
""" Try to identify out of memory errors in the stderr/out """
return False
def getNumberOfEvents(self, **kwargs):
""" Return the number of events """
return 0
def specialChecks(self, **kwargs):
""" Implement special checks here """
# Return False if fatal failure, otherwise return True
# The pilot will abort if this method returns a False
# On an HPC system, it might be good to skip certain checks (e.g. CVMFS, LFC, etc). Refer to schedconfig.resourcetype, set to 'hpc' on an HPC queue
status = False
tolog("No special checks for \'%s\'" % (self.experiment))
return True # obviously change this to 'status' once implemented
def checkSpecialEnvVars(self, sitename):
""" Check special environment variables """
ec = 0
tolog("No special env var checks for site %s" % (sitename))
return ec
def setINDS(self, realDatasetsIn):
""" Extract the dataset as set by pathena option --inDS and set the INDS environmental variable """
# Needed by pathena (move to ATLASExperiment later)
inDS = ""
for ds in realDatasetsIn:
if "DBRelease" not in ds and ".lib." not in ds:
inDS = ds
break
if inDS != "":
tolog("Setting INDS env variable to: %s" % (inDS))
os.environ['INDS'] = inDS
else:
tolog("INDS unknown")
def getValidBaseURLs(self, order=None):
""" Return list of valid base URLs """
# if order is defined, return given item first
# e.g. order=http://atlpan.web.cern.ch/atlpan -> ['http://atlpan.web.cern.ch/atlpan', ...]
validBaseURLs = []
_validBaseURLs = ["http://www.usatlas.bnl.gov",\
"https://www.usatlas.bnl.gov",\
"http://pandaserver.cern.ch",\
"http://atlpan.web.cern.ch/atlpan",\
"https://atlpan.web.cern.ch/atlpan",\
"http://classis01.roma1.infn.it",\
"http://atlas-install.roma1.infn.it"]
if order:
validBaseURLs.append(order)
for url in _validBaseURLs:
if url != order:
validBaseURLs.append(url)
else:
validBaseURLs = _validBaseURLs
tolog("getValidBaseURLs will return: %s" % str(validBaseURLs))
return validBaseURLs
    def downloadTrf(self, wgetCommand, jobTrf):
        """ Download the trf

        :param wgetCommand: wget executable (plus any options) used for the download
        :param jobTrf: URL of the transform to download
        :return: (status, pilotErrorDiag) where status is True on success
        """
        status = False
        pilotErrorDiag = ""
        cmd = "%s %s" % (wgetCommand, jobTrf)
        trial = 1
        max_trials = 3
        # try to download the trf a maximum of 3 times
        while trial <= max_trials:
            tolog("Executing command [Trial %d/%d]: %s" % (trial, max_trials, cmd))
            ec, rets = commands.getstatusoutput(cmd)
            if not rets:
                rets = "(None)"
            if ec != 0:
                # Analyze exit code / output
                from futil import check_syserr
                check_syserr(ec, rets)
                pilotErrorDiag = "wget command failed: %d, %s" % (ec, rets)
                tolog("!!WARNING!!3000!! %s" % (pilotErrorDiag))
                if trial == max_trials:
                    # All attempts exhausted; give up with status False
                    tolog("!!FAILED!!3000!! Could not download trf: %s" % (rets))
                    status = False
                    break
                else:
                    # Wait before the next attempt (the URL may be temporarily unavailable)
                    tolog("Will try again after 60s..")
                    from time import sleep
                    sleep(60)
            else:
                # Download succeeded; clear any diagnostics from earlier attempts
                pilotErrorDiag = ""
                tolog("wget command returned: %s" % (rets))
                status = True
                break
            trial += 1
        return status, pilotErrorDiag
    def getAnalysisTrf(self, wgetCommand, origTRF, pilot_initdir):
        """ Get the trf to be used for analysis jobs

        :param wgetCommand: wget executable used if the trf must be downloaded
        :param origTRF: original trf URL
        :param pilot_initdir: pilot init directory that may already contain the trf
        :return: (exit code, pilotErrorDiag, trfName) - trfName is "" on failure
        """
        pilotErrorDiag = ""
        trfName = origTRF.split('/')[-1]
        tolog("trfName = %s" % (trfName))
        origBaseURL = ""
        # Copy trf from pilot init dir if distributed with pilot code
        fname = os.path.join(pilot_initdir, trfName)
        status = False
        if os.path.exists(fname):
            from shutil import copy2
            try:
                copy2(fname, os.getcwd())
            except Exception, e:
                tolog("!!WARNING!!2999!! Could not copy trf from pilot init dir: %s" % str(e))
            else:
                tolog("Copied trf (%s) from pilot init dir" % (fname))
                status = True
        # Download trf
        if not status:
            # verify the base URL
            for baseURL in self.getValidBaseURLs():
                if origTRF.startswith(baseURL):
                    origBaseURL = baseURL
                    break
            if origBaseURL == "":
                pilotErrorDiag = "Invalid base URL: %s" % (origTRF)
                return self.__error.ERR_TRFDOWNLOAD, pilotErrorDiag, ""
            else:
                tolog("Verified the trf base URL: %s" % (origBaseURL))
            # try to download from the required location, if not - switch to backup
            for baseURL in self.getValidBaseURLs(order=origBaseURL):
                # Substitute the base URL so backups are tried in turn
                trf = re.sub(origBaseURL, baseURL, origTRF)
                tolog("Attempting to download trf: %s" % (trf))
                status, pilotErrorDiag = self.downloadTrf(wgetCommand, trf)
                if status:
                    break
            if not status:
                return self.__error.ERR_TRFDOWNLOAD, pilotErrorDiag, ""
            tolog("Successfully downloaded trf")
        # Make the trf executable (octal 0755, Python 2 literal)
        tolog("Changing permission of %s to 0755" % (trfName))
        try:
            os.chmod(trfName, 0755)
        except Exception, e:
            pilotErrorDiag = "Failed to chmod %s: %s" % (trfName, str(e))
            return self.__error.ERR_CHMODTRF, pilotErrorDiag, ""
        return 0, pilotErrorDiag, trfName
def getAnalysisRunCommand(self, job, jobSite, trfName):
""" Get the run command for analysis jobs """
# The run command is used to setup up the user job transform
ec = 0
pilotErrorDiag = ""
run_command = ""
return ec, pilotErrorDiag, run_command
def getFileTransferInfo(self, transferType, buildJob):
""" Get all relevant fields related to file transfer """
copysetup = readpar('copysetupin')
# create the direct access dictionary
fileTransferInfo = getDirectAccessDic(copysetup)
# if copysetupin did not contain direct access info, try the copysetup instead
if not fileTransferInfo:
copysetup = readpar('copysetup')
fileTransferInfo = getDirectAccessDic(copysetup)
# should the copytool be used?
useCopyTool = False
useFileStager = False
useDirectAccess = False
oldPrefix = ""
newPrefix = ""
dInfo = None
if fileTransferInfo:
dInfo = True
# no direct access / remote I/O, use standard copytool (copy-to-scratch)
if fileTransferInfo['useCopyTool']:
useCopyTool = True
# do not set the LFC host for file stager
if fileTransferInfo['useFileStager']:
useFileStager = True
if fileTransferInfo['directIn']:
useDirectAccess = True
oldPrefix = fileTransferInfo['oldPrefix']
newPrefix = fileTransferInfo['newPrefix']
# override settings for transferType direct
if transferType == 'direct':
useCopyTool = False
useFileStager = False
useDirectAccess = True
# should pilot create TURL based PFC? (not done here, but setup needs to be aware of it)
# if dInfo and useDirectAccess and oldPrefix == "" and newPrefix == "":
if (transferType == 'direct' or (useFileStager and useDirectAccess)) and (oldPrefix == "" and newPrefix == "") and not buildJob:
# if (transferType == 'direct' or (not useFileStager and useDirectAccess)) and (oldPrefix == "" and newPrefix == ""):
usePFCTurl = True
else:
usePFCTurl = False
# force usePFCTurl for all jobs
if not buildJob and useDirectAccess:
tolog("Forced usePFCTurl (reset old/newPrefix)")
usePFCTurl = True
oldPrefix = ""
newPrefix = ""
if os.environ.get("TestXRootD", 'False') == 'True':
import re
re.sub(r'\/xrootdsetup\.sh', '/xrootdsetup-dev.sh', copysetup)
return dInfo, useCopyTool, useDirectAccess, useFileStager, oldPrefix, newPrefix, copysetup, usePFCTurl
    def getGuidsFromJobPars(self, jobPars, inputFiles, inFilesGuids):
        """ Extract the correct guid from the input file list

        :param jobPars: job parameter string (human entered, so file order is not guaranteed)
        :param inputFiles: list of input file LFNs (same order as inFilesGuids)
        :param inFilesGuids: list of guids corresponding to inputFiles
        :return: list of guids for the files selected for direct reading
        """
        # the guids list is used for direct reading in an LFC environment
        # 1. extract input file list for direct reading from jobPars
        # 2. for each input file in this list, find the corresponding guid from the input file guid list
        # since jobPars is entered by a human, the order of the input files might not be the same
        guidList = []
        # Normalize the parameter string for easier pattern matching
        jobPars = jobPars.replace("'","")
        jobPars = jobPars.replace(", ",",")
        pattern = re.compile(r'\-i \"\[([A-Za-z0-9.,_-]+)\]\"')
        directReadingInputFiles = re.findall(pattern, jobPars)
        inFiles = []
        if directReadingInputFiles != []:
            inFiles = directReadingInputFiles[0].split(",")
        else:
            # Fall back to the -i <list> form without quotes
            match = re.search("-i ([A-Za-z0-9.\[\],_-]+) ", jobPars)
            if match != None:
                compactInFiles = match.group(1)
                # Expand the compact notation head[body]tail[attr] into full LFNs
                match = re.search('(.*)\[(.+)\](.*)\[(.+)\]', compactInFiles)
                if match != None:
                    inputFiles = []
                    head = match.group(1)
                    tail = match.group(3)
                    body = match.group(2).split(',')
                    attr = match.group(4).split(',')
                    for idx in range(len(body)):
                        lfn = '%s%s%s%s' % (head, body[idx], tail, attr[idx])
                        inputFiles.append(lfn)
                else:
                    inputFiles = [compactInFiles]
        if inFiles != []:
            for inFile in inFiles:
                # get the corresponding index from the inputFiles list, which has the same order as inFilesGuids
                try:
                    index = inputFiles.index(inFile)
                except Exception, e:
                    tolog("!!WARNING!!2999!! Exception caught: %s (direct reading will fail)" % str(e))
                else:
                    # add the corresponding guid to the list
                    guidList.append(inFilesGuids[index])
        return guidList
def getMetadataForRegistration(self, guid):
""" Return metadata for [LFC] file registration """
# This method can insert special metadata into the metadata.xml file
# E.g. it can add preliminary XML tags for info that will only be known
# at a later time, such as "<metadata att_name="surl" att_value="%s-surltobeset"/>\n' % (guid)"
# The <guid>-surltobeset will be replaced by the pilot by the appropriate value once it is known
# Inputs:
# guid = file guid
# Returns:
# metadata string
# See e.g. the CMSExperiment implementation
# The method is called from pUtil::PFCxml() during metadata file creation
return ""
def getAttrForRegistration(self):
""" Return the attribute of the metadata XML to be updated with surl value """
# Used in combination with Experiment::getMetadataForRegistration()
# The attribute (default 'surl') will be copied into the metadata string used for pattern matching
# E.g. re.compile('\<metadata att\_name\=\"%s\" att\_value\=\"([a-zA-Z0-9-]+)\-surltobeset\"\/\>' % (attribute))
return 'surl'
def getExpSpecificMetadata(self, job, workdir):
""" Return experiment specific metadata """
# Inputs:
# job = PanDA pilot job object (see Job class)
# workdir = relevant work directory where the metadata is located
# Returns:
# metadata xml string
# See e.g. implementation in CMSExperiment
return ""
def getFileCatalogHosts(self):
""" Return a list of file catalog hosts """
# The method is used in combination with federated xrootd (FAX).
# In case FAX is allowed on a given site, the pilot might need to lookup
# replica information in more than one LFC catalog. Normally a site has only
# one LFC (as set in schedconfig.lfchost). Providing a list of hosts will increase
# the probability that FAX will succeed
# See e.g. ATLASExperiment implementation
return []
def verifySwbase(self, appdir):
""" Confirm existence of appdir/swbase """
# appdir/swbase is a queuedata parameter specifying the base location of physics analysis / release software
# This method will simply verify that the corresponding directory exists
#
# Input:
# appdir = application/software/release directory (e.g. /cvmfs/atlas.cern.ch/repo/sw)
# Return:
# error code (0 for success)
return 0
def interpretPayloadStdout(self, job, res, getstatusoutput_was_interrupted, current_job_number, runCommandList, failureCode):
""" Payload error interpretation and handling """
# NOTE: TODO, hide argument complexity with kwargs**
# This method can be used to interpret special errors that only occur in actual payload stdout, e.g. memory errors that have
# caused the payload to crash
#
# Inputs:
# job = PanDA pilot job object (see Job class)
# res =
# getstatusoutput_was_interrupted = True in case the payload execution command was aborted (e.g. keyboard CTRL-C)
# current_job_number = current job number, in case of multi-trf (ATLAS)
# runCommandList = list of payload execution commands (e.g. used by ATLAS to get to a setup file)
# failureCode = signal error code
# Returns:
# Updated PanDA pilot job objectwith proper payload error information, if needed
#
# The following Job attributes can be updated here
# result = tuple of size 3 that contain the standard error info: result[0] = current job status (e.g. failed, finished, holding),
# result[1] = payload error code, result[2] = PanDA pilot error code
# pilotErrorDiag = error diagnostics (string of up to 256 characters that will appear on the PanDA monitor job web page for a failed job)
# exeError
return job
def getSubprocessName(self, eventService):
""" Select which subprocess is to be run by the Monitor """
# The default subprocess is RunJob (name='Normal', which performs payload setup, stage-in, payload execution and stage-out).
# An alternative subprocess is the runEvent module which downloads events from an Event Server, executes a payload
# and stages ou output files asynchronously as they are ready.
# Note: send the entire job object to this method since there might be other subprocesses created at a later time which
# will be identified by this method using some other job data member
# Default subprocess name
name = "RunJob"
# Select alternative subprocess names for HPCs
isHPC, _name = extractHPCInfo(readpar('catchall'))
if isHPC:
name = "RunJob" + _name # e.g. "RunJobTitan" is the proper subprocess name for the Titan plug-in
# for es merge jobs
if _name and _name.startswith("Hpc"):
name = "RunJob"
# Are we going to run an event service job?
if eventService:
tolog("Encountered an event service job")
if isHPC:
name = "RunJob%sEvent" % (_name)
else:
name = "RunJobEvent"
tolog("Selected subprocess: %s" % (name))
return name
def getSubprocessArguments(self, env, port, subprocessName="RunJob"):
""" Argument list needed to launch the subprocess by the pilot/Monitor """
# The pilot/Monitor is forking a subprocess which will be monitored for work dir size, hanging processes etc
# This method returns the arguments needed to execute the subprocess (python <subprocess name> <arguments>)
# By default the pilot has implementations for RunJob.py (standard job) and RunJobEvent.py (event server job)
# If a new subprocess module is added, it startup arguments need to be specified here
jobargs = None
tolog("Will set up subprocess arguments for type: %s" % (subprocessName))
url = '%s:%s/server/panda' % (env['pshttpurl'], str(env['psport']))
if subprocessName == "RunJobEvent":
jobargs = [env['pyexe'], "RunJobEvent.py",
"-a", env['thisSite'].appdir,
"-b", env['queuename'],
"-d", env['jobDic']["prod"][1].workdir,
"-g", env['inputDir'],
"-i", env['jobDic']["prod"][1].tarFileGuid,
"-k", getPilotlogFilename(),
"-l", env['pilot_initdir'],
"-m", env['outputDir'],
"-o", env['thisSite'].workdir,
"-p", str(port),
"-s", env['thisSite'].sitename,
"-t", str(env['proxycheckFlag']),
"-x", str(env['stageinretry']),
"-E", str(env['stageoutretry']),
"-F", env['experiment'],
"-H", env['cache'],
"-W", url]
else:
jobargs = [env['pyexe'], "%s.py" % (subprocessName),
"-a", env['thisSite'].appdir,
"-b", env['queuename'],
"-d", env['jobDic']["prod"][1].workdir,
"-g", env['inputDir'],
"-i", env['jobDic']["prod"][1].tarFileGuid,
"-k", getPilotlogFilename(),
"-l", env['pilot_initdir'],
"-m", env['outputDir'],
"-o", env['thisSite'].workdir,
"-p", str(port),
"-s", env['thisSite'].sitename,
"-t", str(env['proxycheckFlag']),
"-x", str(env['stageinretry']),
"-E", str(env['stageoutretry']),
"-F", env['experiment'],
"-H", env['cache'],
"-W", url]
if 'yodaNodes' in env and subprocessName == "RunJobHpcEvent":
jobargs.append("-N")
jobargs.append(str(env['yodaNodes']))
if 'yodaQueue' in env and subprocessName == "RunJobHpcEvent":
jobargs.append("-Q")
jobargs.append(str(env['yodaQueue']))
tolog("Will use arguments: %s" % str(jobargs))
return jobargs
# Optional
def doSpecialLogFileTransfer(self, **argdict):
""" Should the log file be transfered to a special SE? """
# The log file can at the end of the job be stored in a special SE - in addition to the normal stage-out of the log file
# If this method returns True, the JobLog class will attempt to store the log file in a secondary SE after the transfer of
# the log to the primary/normal SE. Additional information about the secondary SE is required and can be specified in
# another optional method defined in the *Experiment classes
# eventService = argdict.get('eventService', False)
return False
# Optional
def getSchedconfigURL(self, protocol="http://"):
""" Define the URL for the schedconfig / PanDA server"""
# This method gets called from SiteInformation in case the URL is not set (by the wrapper)
return protocol + "pandaserver.cern.ch"
# Optional
    def getSubprocess(self, cmd, stdout=None, stderr=None):
        """ Execute and return a subprocess

        :param cmd: shell command string to execute
        :param stdout: optional file object for redirecting the stdout stream
        :param stderr: optional file object for redirecting the stderr stream
        :return: the Popen object, or None if the launch failed
        """
        process = None
        try:
            tolog("Executing command: %s" % (cmd))
            if stdout and stderr:
                # use stdout/stderr file objects to redirect the stdout/stderr streams;
                # os.setsid puts the child in its own session so its process group can be signalled
                process = Popen(cmd, shell=True, stdout=stdout, stderr=stderr, preexec_fn=os.setsid)
            else:
                process = Popen(cmd, shell=True)
        except Exception, e:
            tolog("!!WARNING!!2344!! Caught exception: %s" % (e))
        else:
            tolog("Subprocess is running")
        return process
# Optional
def getJobExecutionCommand4EventService(self):
""" Define and test the command(s) that will be used to execute the payload for the event service """
# E.g. cmd = ["source <path>/setup.sh; <path>/python <script>"]
# The command returned from this method is executed using subprocess.Popen() from the runEvent module
# Note: this optional method only need to be defined in case the event service is to be used
# As of March 2014, this is not yet functional or documented.
# The actual command must be declared as a list since that is expected by Popen()
cmd = [""]
return cmd
# Optional
def postGetJobActions(self, job):
""" Perform any special post-job definition download actions here """
# This method is called after the getJob() method has successfully downloaded a new job (job definition) from
# the server. If the job definition e.g. contains information that contradicts WN specifics, this method can
# be used to fail the job
# Return any error code using ec, and any error message using pilotErrorDiag
ec = 0
pilotErrorDiag = ""
return ec, pilotErrorDiag
# Optional
    # NOTE(review): this method is defined again further down in the class with the
    # same name; this first definition is shadowed by the later one. Consider
    # removing one of the two.
    def useTracingService(self):
        return False
# Optional
def updateJobSetupScript(self, workdir, create=False, to_script=None):
""" Create or update the job setup script (used to recreate the job locally if needed) """
# If create=True, this step will only create the file with the script header (bash info)
if create:
filename = os.path.basename(self.getJobSetupScriptName(workdir))
tolog("Creating job setup script with stage-in and payload execution commands: %s" % (filename))
to_script = "#!/bin/bash\n# %s %s\n\n" % (filename, time.strftime("%d %b %Y %H:%M:%S", time.gmtime(time.time())))
# Add the string to the setup script
if to_script:
self.addToJobSetupScript(to_script, workdir)
# Optional
def getJobSetupScriptName(self, workdir):
""" return the name of the job setup file """
return os.path.join(workdir, "job_setup.sh")
# Optional
def addToJobSetupScript(self, cmd, workdir):
""" add/append command to job setup file """
filename = self.getJobSetupScriptName(workdir)
if not os.path.exists(filename):
try:
fp = open(filename, "w")
except OSError, e:
tolog("!!WARNING!!1880!! Could not open job setup file for writing: %s" % str(e))
else:
try:
fp = open(filename, "a")
except OSError, e:
tolog("!!WARNING!!1880!! Could not open job setup file for appending: %s" % str(e))
if fp:
fp.write(cmd)
fp.write("\n\n")
fp.close()
tolog("Updated %s: %s" % (filename, cmd))
# Optional
def getRelease(self, release):
""" Return a list of the software release id's """
# Assuming 'release' is a string that separates release id's with '\n'
# Used in the case of payload using multiple steps with different release versions
# E.g. release = "19.0.0\n19.1.0" -> ['19.0.0', '19.1.0']
return release.split("\n")
# Optional
def formatReleaseString(release):
""" Return a special formatted release string """
# E.g. release = "Atlas-19.0.0" -> "19.0.0"
# This method is required for ATLAS but is probably of no interest for any other PanDA user
return release
# Optional
    def setCache(self, cache):
        """ Cache URL """
        # Used e.g. by LSST; stores the URL on a class-private (name-mangled) attribute
        self.__cache = cache
# Optional
    def getCache(self):
        """ Return the cache URL """
        # Used e.g. by LSST; reads the class-private attribute set by setCache()
        return self.__cache
# Optional
def useTracingService(self):
""" Use the Rucio Tracing Service """
# A service provided by the Rucio system that allows for file transfer tracking; all file transfers
# are reported by the pilot to the Rucio Tracing Service if this method returns True
return False
# Optional
def updateJobDefinition(self, job, filename):
""" Update the job definition file and object before using it in RunJob """
# This method is called from Monitor, before RunJob is launched, which allows to make changes to the job object after it was downloaded from the job dispatcher
# (used within Monitor) and the job definition file (which is used from RunJob to recreate the same job object as is used in Monitor).
# 'job' is the job object, defined in Job.py, while 'filename' is the name of the file containing the job definition information.
return job
# Optional
def shouldExecuteUtility(self):
""" Determine whether a special utility should be executed """
# The RunJob class has the possibility to execute a special utility, e.g. a memory monitor, that runs in parallel
# to the payload (launched after the main payload process).
# The utility is executed if this method returns True. The utility is currently expected to produce
# a summary JSON file whose name is defined by the getUtilityJSONFilename() method. The contents of
# this file (ie. the full JSON dictionary) will be added to the job update.
#
# Example of summary JSON file (ATLAS case):
# {"Max":{"maxVMEM":40058624,"maxPSS":10340177,"maxRSS":16342012,"maxSwap":16235568},
# "Avg":{"avgVMEM":19384236,"avgPSS":5023500,"avgRSS":6501489,"avgSwap":5964997}}
#
# While running, the MemoryMonitor also produces a regularly updated text file with the following format: (tab separated)
# Time VMEM PSS RSS Swap (first line in file)
# 1447960494 16099644 3971809 6578312 1978060
return False
# Optional
def getUtilityOutputFilename(self):
""" Return the filename of a utility output file """
# For explanation, see shouldExecuteUtility()
return "memory_monitor_output.txt"
# Optional
def getUtilityJSONFilename(self):
""" Return the filename of a utility JSON file """
# For explanation, see shouldExecuteUtility()
return "utility_summary.json"
# Optional
def getUtilityInfo(self, workdir, pilot_initdir, allowTxtFile=False):
""" Add the utility info to the node structure if available """
# Extract the relevant information from the utility tool output and add it to the dictionary
# returned by this method. The dictionary will be merged with the node dictionary in
# PandaServerClient::getNodeStructure() and sent to the PanDA server
return {}
# Optional
def getUtilityCommand(self, **argdict):
""" Prepare a utility command string """
# This method can be used to prepare a setup string for an optional utility tool, e.g. a memory monitor,
# that will be executed by the pilot in parallel with the payload.
# The pilot will look for an output JSON file (summary.json) and will extract pre-determined fields
# from it and report them with the job updates. Currently the pilot expects to find fields related
# to memory information.
# pid = argdict.get('pid', 0)
return ""
# Optional
def getGUIDSourceFilename(self):
""" Return the filename of the file containing the GUIDs for the output files """
# In the case of ATLAS, Athena produces an XML file containing the GUIDs of the output files. The name of this
# file is PoolFileCatalog.xml. If this method returns an empty string (ie the default), the GUID generation will
# be done by the pilot in RunJobUtilities::getOutFilesGuids()
return ""
# Optional
    def buildFAXPath(self, **argdict):
        """ Build a proper FAX path

        Builds proper FAX paths; used in pure FAX mode (i.e. when FAX is used in
        forced mode, copytoolin='fax'), particularly when the PoolFileCatalog.xml
        is built prior to stage-in.

        Keyword arguments: lfn, scope, subpath, pandaID, sourceSite, computingSite.
        :return: full FAX path string (redirector + subpath + scope + ":" + lfn)
        """
        lfn = argdict.get('lfn', 'default_lfn')
        scope = argdict.get('scope', 'default_scope')
        subpath = argdict.get('subpath', 'atlas/rucio/')
        pandaID = argdict.get('pandaID', '')
        sourceSite = argdict.get('sourceSite', 'default_sourcesite')
        computingSite = argdict.get('computingSite', 'default_computingsite')
        # Get the proper FAX redirector (default ATLAS implementation)
        from FAXTools import getFAXRedirectors
        # First get the global redirectors (several, since the lib file might not be at the same place for overflow jobs)
        fax_redirectors_dictionary = getFAXRedirectors(computingSite, sourceSite, pandaID)
        tolog("fax_redirectors_dictionary=%s"%str(fax_redirectors_dictionary))
        # select the proper fax redirector (lib files live at the computing site,
        # everything else at the source site)
        if ".lib." in lfn:
            redirector = fax_redirectors_dictionary['computingsite']
        else:
            redirector = fax_redirectors_dictionary['sourcesite']
        # Make sure the redirector ends with a double slash
        if not redirector.endswith('//'):
            if redirector.endswith('/'):
                redirector += "/"
            else:
                redirector += "//"
        # Make sure that the subpath does not begin with a slash
        if subpath.startswith('/') and len(subpath) > 1:
            subpath = subpath[1:]
        tolog("redirector=%s"%(redirector))
        tolog("subpath=%s"%(subpath))
        tolog("scope=%s"%(scope))
        tolog("lfn=%s"%(lfn))
        return redirector + subpath + scope + ":" + lfn
if __name__ == "__main__":
    # Simple smoke test: instantiate the base experiment class and exercise the
    # default subprocess name selection (Python 2 print statement)
    a=Experiment()
    print a.getSubprocessName(False)
| apache-2.0 |
MRod5/pyturb | src/pyturb/gas_models/gas_mixture.py | 1 | 9198 | """
gas_mixture:
------------
Gas mixture of ideal gases. The approach for the ideal gases may be Perfect or Semiperfect.
MRodriguez 2020
"""
import pyturb.utils.constants as cts
from pyturb.gas_models.gas import Gas
from pyturb.gas_models.perfect_ideal_gas import PerfectIdealGas
from pyturb.gas_models.semiperfect_ideal_gas import SemiperfectIdealGas
import numpy as np
import pandas as pd
import warnings
class GasMixture(object):
"""
GasMixture:
-----------
"""
def __init__(self, gas_model = "perfect", mixture=None):
"""
"""
# Gas model selector:
if gas_model.lower() == "perfect":
self.gas_model = PerfectIdealGas
elif gas_model.lower() == "semi-perfect" or gas_model.lower() == "semiperfect":
self.gas_model = SemiperfectIdealGas
else:
self.gas_model = None
raise ValueError("gas_model may be 'perfect' or 'semi-perfect', instead received {}".format(gas_model))
self._n_species = 0
self._gas_species = "mixture"
self._mixture_gases_columns = ['gas_species', 'gas_properties', 'Ng', 'Mg', 'mg', 'Rg', 'molar_frac', 'mass_frac']
self._mixture_gases = pd.DataFrame(columns=self._mixture_gases_columns)
# Mixture initialization:
if not mixture is None:
if not type(mixture) is dict:
warnings.warn("mixture must be a dictionary with keys:=species and value:=moles. Instead received {}".format(mixture))
else:
#TODO: call add_gas for each pure substance in the mixture
print("")
return
@property
def gas_species(self):
"""
Gets the Name of the gas species selected. May be a pure substance or any of the
molecules and mixes considered in "NASA Glenn Coefficients for Calculating Thermodynamic
Properties of Individual Species".
"""
return self._gas_species
@property
def n_species(self):
"""
Number of different gas species in the gas mixture
"""
return self._n_species
@property
def mixture_gases(self):
"""
Dataframe with the thermodynamic properties of the gas species in the mixture.
"""
return self._mixture_gases
@property
def Ng(self):
"""
Moles of the mixture. [mol]
"""
return self._Ng
@property
def mg(self):
"""
Mass quantity of the mixture. [kg]
"""
return self._mg
@property
def Ru(self):
"""
Get the Ideal Gas Law constant Ru [J/mol/K]
"""
Ru = cts.Ru
return Ru
@property
def Rg(self):
"""
Get the Mixture Gas constant Rg = Ru/Mg [J/kg/K]
"""
Rg = self.Ru/self.Mg*1e3
return Rg
@property
def Mg(self):
"""
Get the Mixture molecular mass [g/mol]
"""
return self._Mg
def add_gas(self, species, moles=None, mass=None):
"""
add_gas:
--------
Adds a gas species to the mixture. The amount of gas must be specified
in moles or kilograms.
- species: string. Gas species.
- moles: float. Number of moles of the selected gas. If None, then mass must be provided
- mass: float. Mass quantity of the selected gas. If none, then moles must be provided
"""
if moles is None and mass is None:
raise ValueError('Quantity (moles or mass) of gas must be specified in add_gas.')
else:
pure_substance = self.gas_model(species)
if mass is None:
moles_ = moles # mol
mass_ = moles_ * pure_substance.Rg * 1e-3 # kg
elif moles is None:
mass_ = mass #kg
moles_ = mass_ / pure_substance.Rg * 1e3 # mol
else:
warnings.warn("mass ({0}kg) will be dismised and recalculated with the moles quantity provided: {1}mol.".format(mass, moles))
moles_ = moles
mass_ = mass_ = moles_ * pure_substance.Rg * 1e3 # kg
subst_props = {'gas_species': pure_substance.gas_species, 'gas_properties': pure_substance, 'Ng': moles_, 'Mg': pure_substance.thermo_prop.Mg, 'mg': mass_, 'Rg': pure_substance.Rg, 'molar_frac': np.nan, 'mass_frac': np.nan}
new_gas = len(self._mixture_gases.index)
self._mixture_gases.loc[new_gas] = subst_props
self._update_mixture_properties()
return
def _update_mixture_properties(self):
"""
Updates mixture properties of the gas mixture
"""
self._n_species = len(self.mixture_gases.index)
self._Ng = np.sum(self.mixture_gases['Ng'])
self._mg = np.sum(self.mixture_gases['mg'])
self._mixture_gases['molar_frac'] = self.mixture_gases['Ng'] / self.Ng
self._mixture_gases['mass_frac'] = self.mixture_gases['mg'] / self.mg
self._Mg = 0
for ii, xi in enumerate(self.mixture_gases['molar_frac']):
self._Mg += xi * self.mixture_gases.loc[ii]['Mg']
return
def cp_molar(self, temperature=None):
"""
Molar heat capacity ratio at constant pressure [J/mol/K].
As a semiperfect gas, cp is a function of the temperature. It is calculated as a
7 coefficients polynomial:
cp/Rg = a1*T**(-2) + a2*T**(-1) + a3 + a4*T**(1) + a5*T**(2) + a6*T**(3) + a7*T**(4)
As a perfect gas, cp is considered invariant with temperature. It is calculated as a
semi-perfect gas (which means cp(T)) at T_ref temperature for any temperature.
"""
if temperature is None:
if isinstance(self.gas_model, SemiperfectIdealGas):
raise ValueError("If gas model is semi-perfect a temperature must be provided to calculate the cp.")
cp_ = 0
for ii, xi in enumerate(self.mixture_gases['molar_frac']):
cp_ += xi * self.mixture_gases.loc[ii]['gas_properties'].cp_molar(temperature)
return cp_
def cp(self, temperature=None):
"""
Heat capacity ratio at constant pressure [J/kg/K].
As a semiperfect gas, cp is a function of the temperature. It is calculated as a
7 coefficients polynomial:
cp/Rg = a1*T**(-2) + a2*T**(-1) + a3 + a4*T**(1) + a5*T**(2) + a6*T**(3) + a7*T**(4)
As a perfect gas, cp is considered invariant with temperature. It is calculated as a
semi-perfect gas (which means cp(T)) at T_ref temperature for any temperature.
"""
cp_ = self.cp_molar(temperature)
cp_ *= 1e3/self.Mg
return cp_
def cv_molar(self, temperature=None):
"""
Molar heat capacity ratio at constant volume [J/mol/K].
As a semiperfect gas, cp is a function of the temperature. It is calculated as a
7 coefficients polynomial:
cp/Rg = a1*T**(-2) + a2*T**(-1) + a3 + a4*T**(1) + a5*T**(2) + a6*T**(3) + a7*T**(4)
As a semiperfect gas, cv is a function of tempeature. cv is calculated with the
Mayer equation: cv(T) = cp(T) - Rg (ideal gas).
"""
cv_ = self.cp_molar(temperature) - self.Ru
return cv_
def cv(self, temperature=None):
"""
Heat capacity ratio at constant volume [J/kg/K]
As an ideal gas, cv is invariant with tempeature. cv is calculated with the
Mayer equation: cv = cp - Rg.
"""
cv_ = self.cp(temperature) - self.Rg
return cv_
def gamma(self, temperature=None):
"""
Heat capacity ratio cp/cv [-].
As a perfect gas, gamma is considered invariant with temperature. gamma is calculated
as gamma = cp/cv.
As a semiperfect gas, gamma is a function of tempeature. Gamma is calculated with the
Mayer equation: cv(T) = cp(T) - Rg (ideal gas).
"""
gamma_ = self.cp_molar(temperature) / self.cv_molar(temperature)
return gamma_
def h0_molar(self, temperature):
    """Assigned molar enthalpy [J/mol].

    h0(T) = deltaHf(T_ref) + (h0(T) - h0(T_ref))

    Mole-fraction-weighted sum of the component gases' assigned molar
    enthalpies. ``self.mixture_gases`` is assumed to be a DataFrame-like
    table with ``'molar_frac'`` and ``'gas_properties'`` columns.

    Raises
    ------
    ValueError
        If ``temperature`` is None and the gas model is semiperfect
        (the enthalpy is then temperature-dependent).
    """
    if temperature is None and isinstance(self.gas_model, SemiperfectIdealGas):
        raise ValueError("If gas model is semi-perfect a temperature must be provided to calculate the cp.")
    total = 0
    for row_idx, frac in enumerate(self.mixture_gases['molar_frac']):
        component = self.mixture_gases.loc[row_idx]['gas_properties']
        total += frac * component.h0_molar(temperature)
    return total
def h0(self, temperature):
    """Assigned specific enthalpy [J/kg].

    h0(T) = deltaHf(T_ref) + (h0(T) - h0(T_ref))

    Converts the molar value from ``h0_molar`` (J/mol) to a mass basis
    using the mixture molar mass ``self.Mg``.

    Raises
    ------
    ValueError
        If ``temperature`` is None and the gas model is semiperfect.
    """
    # Fix: original line carried stray dataset residue ("| mit |") glued
    # onto the return statement, which is a syntax error.
    if temperature is None:
        if isinstance(self.gas_model, SemiperfectIdealGas):
            raise ValueError("If gas model is semi-perfect a temperature must be provided to calculate the cp.")
    # 1e3 converts J/mol -> J/kg given Mg in g/mol.
    h0_ = self.h0_molar(temperature) * 1e3/self.Mg
    return h0_
marianotepper/dask | dask/array/core.py | 2 | 59930 | from __future__ import absolute_import, division, print_function
import operator
from operator import add, getitem
import inspect
from numbers import Number
from collections import Iterable
from bisect import bisect
from itertools import product, count
from collections import Iterator
from functools import partial, wraps
from toolz.curried import (pipe, partition, concat, unique, pluck, join, first,
memoize, map, groupby, valmap, accumulate, merge,
curry, reduce, interleave, sliding_window)
import numpy as np
from threading import Lock
from . import chunk
from .slicing import slice_array
from . import numpy_compat
from ..utils import deepmap, ignoring, repr_long_list, concrete
from ..compatibility import unicode
from .. import threaded, core
from ..context import _globals
# Infinite generators of fresh identifiers used to name intermediate
# arrays ('x_1', 'x_2', ...) and task tokens ('-1', '-2', ...).
names = ('x_%d' % i for i in count(1))
tokens = ('-%d' % i for i in count(1))
def getarray(a, b, lock=None):
    """Index ``a`` with ``b`` and coerce the result to ``np.ndarray``.

    Like ``operator.getitem`` but guarantees an ndarray result; if a
    ``lock`` is given, the read is performed while holding it (for data
    stores that do not support concurrent reads).

    >>> getarray([1, 2, 3, 4, 5], slice(1, 4))
    array([2, 3, 4])
    """
    if lock:
        lock.acquire()
    try:
        piece = a[b]
        # Exact-type check (not isinstance): ndarray subclasses are
        # still funneled through asarray, matching the original intent.
        if type(piece) is not np.ndarray:
            piece = np.asarray(piece)
    finally:
        if lock:
            lock.release()
    return piece
from .optimization import optimize
def slices_from_chunks(chunks):
    """Translate a chunks tuple into per-block slices, in product order.

    >>> slices_from_chunks(((2, 2), (3, 3, 3)))  # doctest: +NORMALIZE_WHITESPACE
    [(slice(0, 2, None), slice(0, 3, None)),
     (slice(0, 2, None), slice(3, 6, None)),
     (slice(0, 2, None), slice(6, 9, None)),
     (slice(2, 4, None), slice(0, 3, None)),
     (slice(2, 4, None), slice(3, 6, None)),
     (slice(2, 4, None), slice(6, 9, None))]
    """
    # Per-dimension start offsets: running sum of block sizes, beginning at 0.
    offsets = []
    for dim_chunks in chunks:
        running, starts_here = 0, []
        for size in dim_chunks:
            starts_here.append(running)
            running += size
        offsets.append(starts_here)
    out = []
    for starts, sizes in zip(product(*offsets), product(*chunks)):
        out.append(tuple(slice(begin, begin + size)
                         for begin, size in zip(starts, sizes)))
    return out
def getem(arr, chunks, shape=None):
    """Build the dask graph that pulls each chunk out of an array-like.

    >>> getem('X', chunks=(2, 3), shape=(4, 6))  # doctest: +SKIP
    {('X', 0, 0): (getarray, 'X', (slice(0, 2), slice(0, 3))),
     ('X', 1, 0): (getarray, 'X', (slice(2, 4), slice(0, 3))),
     ('X', 1, 1): (getarray, 'X', (slice(2, 4), slice(3, 6))),
     ('X', 0, 1): (getarray, 'X', (slice(0, 2), slice(3, 6)))}

    ``chunks`` may be a blockshape (with ``shape``) or explicit block sizes.
    """
    chunks = normalize_chunks(chunks, shape)
    block_indices = product([arr], *[range(len(dim)) for dim in chunks])
    graph = {}
    for key, slc in zip(block_indices, slices_from_chunks(chunks)):
        graph[key] = (getarray, arr, slc)
    return graph
def dotmany(A, B, leftfunc=None, rightfunc=None, **kwargs):
    """Sum of pairwise dot products of aligned chunk sequences.

    >>> x = np.array([[1, 2], [1, 2]])
    >>> y = np.array([[10, 20], [10, 20]])
    >>> dotmany([x, x, x], [y, y, y])
    array([[ 90, 180],
           [ 90, 180]])

    ``leftfunc``/``rightfunc`` are optionally applied to each left/right
    chunk before multiplying (e.g. ``np.transpose``). Extra keyword
    arguments are forwarded to ``np.dot``.
    """
    if leftfunc:
        A = [leftfunc(chunk_) for chunk_ in A]
    if rightfunc:
        B = [rightfunc(chunk_) for chunk_ in B]
    return sum(np.dot(left, right, **kwargs) for left, right in zip(A, B))
def lol_tuples(head, ind, values, dummies):
    """Recursively expand index letters into (nested lists of) key tuples.

    Parameters
    ----------
    head : tuple
        The key prefix accumulated so far.
    ind : Iterable
        Index letters not yet consumed.
    values : dict
        Concrete value for each non-dummy index.
    dummies : dict
        Iterable of values for each dummy (contracted) index; each dummy
        index adds one level of list nesting to the result.

    Examples
    --------
    >>> lol_tuples(('x',), 'ij', {'i': 1, 'j': 0}, {})
    ('x', 1, 0)
    >>> lol_tuples(('x',), 'ij', {'i': 1}, {'j': range(3)})
    [('x', 1, 0), ('x', 1, 1), ('x', 1, 2)]
    >>> lol_tuples(('x',), 'ijk', {'i': 1}, {'j': [0, 1, 2], 'k': [0, 1]})  # doctest: +NORMALIZE_WHITESPACE
    [[('x', 1, 0, 0), ('x', 1, 0, 1)],
     [('x', 1, 1, 0), ('x', 1, 1, 1)],
     [('x', 1, 2, 0), ('x', 1, 2, 1)]]
    """
    if not ind:
        return head
    letter, remaining = ind[0], ind[1:]
    if letter in dummies:
        # Dummy index: fan out over every value, one list level per dummy.
        return [lol_tuples(head + (choice,), remaining, values, dummies)
                for choice in dummies[letter]]
    return lol_tuples(head + (values[letter],), remaining, values, dummies)
def zero_broadcast_dimensions(lol, nblocks):
    """Map block indices on broadcast (singleton) dimensions down to 0.

    >>> lol = [('x', 1, 0), ('x', 1, 1), ('x', 1, 2)]
    >>> nblocks = (4, 1, 2)  # note singleton dimension in second place
    >>> lol = [[('x', 1, 0, 0), ('x', 1, 0, 1)],
    ...        [('x', 1, 1, 0), ('x', 1, 1, 1)],
    ...        [('x', 1, 2, 0), ('x', 1, 2, 1)]]
    >>> zero_broadcast_dimensions(lol, nblocks)  # doctest: +NORMALIZE_WHITESPACE
    [[('x', 1, 0, 0), ('x', 1, 0, 1)],
     [('x', 1, 0, 0), ('x', 1, 0, 1)],
     [('x', 1, 0, 0), ('x', 1, 0, 1)]]

    See Also
    --------
    lol_tuples
    """
    def _clamp(key):
        # A dimension with a single block always contributes index 0.
        fixed = tuple(0 if nb == 1 else idx
                      for idx, nb in zip(key[1:], nblocks))
        return (key[0],) + fixed
    return deepmap(_clamp, lol)
def broadcast_dimensions(argpairs, numblocks, sentinels=(1, (1,))):
    """Resolve a single dimension value per index letter across arguments.

    Parameters
    ----------
    argpairs: iterable
        name, ijk index pairs
    numblocks: dict
        maps {name: number of blocks}
    sentinels: iterable (optional)
        values for singleton dimensions, discarded under numpy
        broadcasting rules when a non-singleton value is also present

    Examples
    --------
    >>> argpairs = [('x', 'ij'), ('y', 'ji')]
    >>> numblocks = {'x': (2, 3), 'y': (3, 2)}
    >>> broadcast_dimensions(argpairs, numblocks)
    {'i': 2, 'j': 3}

    Supports numpy broadcasting rules

    >>> argpairs = [('x', 'ij'), ('y', 'ij')]
    >>> numblocks = {'x': (2, 1), 'y': (1, 3)}
    >>> broadcast_dimensions(argpairs, numblocks)
    {'i': 2, 'j': 3}

    Works in other contexts too

    >>> argpairs = [('x', 'ij'), ('y', 'ij')]
    >>> d = {'x': ('Hello', 1), 'y': (1, (2, 3))}
    >>> broadcast_dimensions(argpairs, d)
    {'i': 'Hello', 'j': (2, 3)}

    Raises
    ------
    ValueError
        If, after discarding sentinels, any index still has more than
        one candidate dimension.
    """
    # Collect the set of candidate dimensions seen for each index letter.
    candidates = {}
    for name, indices in argpairs:
        if name not in numblocks:
            continue
        for letter, dim in zip(indices, numblocks[name]):
            candidates.setdefault(letter, set()).add(dim)
    # Drop sentinel (broadcastable) values whenever a real value competes.
    pruned = dict((letter, dims - set(sentinels) if len(dims) > 1 else dims)
                  for letter, dims in candidates.items())
    if pruned and set(len(dims) for dims in pruned.values()) != set([1]):
        raise ValueError("Shapes do not align %s" % candidates)
    return dict((letter, next(iter(dims))) for letter, dims in pruned.items())
def top(func, output, out_indices, *arrind_pairs, **kwargs):
    """ Tensor operation
    Applies a function, ``func``, across blocks from many different input
    dasks.  We arrange the pattern with which those blocks interact with sets
    of matching indices.  E.g.
        top(func, 'z', 'i', 'x', 'i', 'y', 'i')
    yield an embarassingly parallel communication pattern and is read as
        z_i = func(x_i, y_i)
    More complex patterns may emerge, including multiple indices
        top(func, 'z', 'ij', 'x', 'ij', 'y', 'ji')
        $$ z_{ij} = func(x_{ij}, y_{ji}) $$
    Indices missing in the output but present in the inputs results in many
    inputs being sent to one function (see examples).
    Examples
    --------
    Simple embarassing map operation
    >>> inc = lambda x: x + 1
    >>> top(inc, 'z', 'ij', 'x', 'ij', numblocks={'x': (2, 2)})  # doctest: +SKIP
    {('z', 0, 0): (inc, ('x', 0, 0)),
     ('z', 0, 1): (inc, ('x', 0, 1)),
     ('z', 1, 0): (inc, ('x', 1, 0)),
     ('z', 1, 1): (inc, ('x', 1, 1))}
    Simple operation on two datasets
    >>> add = lambda x, y: x + y
    >>> top(add, 'z', 'ij', 'x', 'ij', 'y', 'ij', numblocks={'x': (2, 2),
    ...                                                      'y': (2, 2)})  # doctest: +SKIP
    {('z', 0, 0): (add, ('x', 0, 0), ('y', 0, 0)),
     ('z', 0, 1): (add, ('x', 0, 1), ('y', 0, 1)),
     ('z', 1, 0): (add, ('x', 1, 0), ('y', 1, 0)),
     ('z', 1, 1): (add, ('x', 1, 1), ('y', 1, 1))}
    Operation that flips one of the datasets
    >>> addT = lambda x, y: x + y.T  # Transpose each chunk
    >>> # z_ij ~ x_ij y_ji
    >>> # ..         .. .. notice swap
    >>> top(addT, 'z', 'ij', 'x', 'ij', 'y', 'ji', numblocks={'x': (2, 2),
    ...                                                       'y': (2, 2)})  # doctest: +SKIP
    {('z', 0, 0): (add, ('x', 0, 0), ('y', 0, 0)),
     ('z', 0, 1): (add, ('x', 0, 1), ('y', 1, 0)),
     ('z', 1, 0): (add, ('x', 1, 0), ('y', 0, 1)),
     ('z', 1, 1): (add, ('x', 1, 1), ('y', 1, 1))}
    Dot product with contraction over ``j`` index.  Yields list arguments
    >>> top(dotmany, 'z', 'ik', 'x', 'ij', 'y', 'jk', numblocks={'x': (2, 2),
    ...                                                          'y': (2, 2)})  # doctest: +SKIP
    {('z', 0, 0): (dotmany, [('x', 0, 0), ('x', 0, 1)],
                            [('y', 0, 0), ('y', 1, 0)]),
     ('z', 0, 1): (dotmany, [('x', 0, 0), ('x', 0, 1)],
                            [('y', 0, 1), ('y', 1, 1)]),
     ('z', 1, 0): (dotmany, [('x', 1, 0), ('x', 1, 1)],
                            [('y', 0, 0), ('y', 1, 0)]),
     ('z', 1, 1): (dotmany, [('x', 1, 0), ('x', 1, 1)],
                            [('y', 0, 1), ('y', 1, 1)])}
    Supports Broadcasting rules
    >>> top(add, 'z', 'ij', 'x', 'ij', 'y', 'ij', numblocks={'x': (1, 2),
    ...                                                      'y': (2, 2)})  # doctest: +SKIP
    {('z', 0, 0): (add, ('x', 0, 0), ('y', 0, 0)),
     ('z', 0, 1): (add, ('x', 0, 1), ('y', 0, 1)),
     ('z', 1, 0): (add, ('x', 0, 0), ('y', 1, 0)),
     ('z', 1, 1): (add, ('x', 0, 1), ('y', 1, 1))}
    """
    numblocks = kwargs['numblocks']
    argpairs = list(partition(2, arrind_pairs))
    # Every named input must come with a numblocks entry, and vice versa.
    assert set(numblocks) == set(pluck(0, argpairs))
    all_indices = pipe(argpairs, pluck(1), concat, set)
    # Indices present in inputs but absent from the output are contracted.
    dummy_indices = all_indices - set(out_indices)
    # Dictionary mapping {i: 3, j: 4, ...} for i, j, ... the dimensions
    dims = broadcast_dimensions(argpairs, numblocks)
    # (0, 0), (0, 1), (0, 2), (1, 0), ...
    keytups = list(product(*[range(dims[i]) for i in out_indices]))
    # {i: 0, j: 0}, {i: 0, j: 1}, ...
    keydicts = [dict(zip(out_indices, tup)) for tup in keytups]
    # {j: [1, 2, 3], ...} For j a dummy index of dimension 3
    dummies = dict((i, list(range(dims[i]))) for i in dummy_indices)
    # Create argument lists
    valtups = []
    for kd in keydicts:
        args = []
        for arg, ind in argpairs:
            # Expand this input's index pattern into (lists of) block keys,
            # then clamp indices on broadcast singleton dimensions to 0.
            tups = lol_tuples((arg,), ind, kd, dummies)
            tups2 = zero_broadcast_dimensions(tups, numblocks[arg])
            args.append(tups2)
        valtups.append(tuple(args))
    # Add heads to tuples
    keys = [(output,) + kt for kt in keytups]
    vals = [(func,) + vt for vt in valtups]
    return dict(zip(keys, vals))
def _concatenate2(arrays, axes=[]):
""" Recursively Concatenate nested lists of arrays along axes
Each entry in axes corresponds to each level of the nested list. The
length of axes should correspond to the level of nesting of arrays.
>>> x = np.array([[1, 2], [3, 4]])
>>> _concatenate2([x, x], axes=[0])
array([[1, 2],
[3, 4],
[1, 2],
[3, 4]])
>>> _concatenate2([x, x], axes=[1])
array([[1, 2, 1, 2],
[3, 4, 3, 4]])
>>> _concatenate2([[x, x], [x, x]], axes=[0, 1])
array([[1, 2, 1, 2],
[3, 4, 3, 4],
[1, 2, 1, 2],
[3, 4, 3, 4]])
Supports Iterators
>>> _concatenate2(iter([x, x]), axes=[1])
array([[1, 2, 1, 2],
[3, 4, 3, 4]])
"""
if isinstance(arrays, Iterator):
arrays = list(arrays)
if len(axes) > 1:
arrays = [_concatenate2(a, axes=axes[1:]) for a in arrays]
return np.concatenate(arrays, axis=axes[0])
def map_blocks(func, *arrs, **kwargs):
    """ Map a function across all blocks of a dask array
    You must also specify the chunks of the resulting array.  If you don't then
    we assume that the resulting array has the same block structure as the
    input.
    >>> import dask.array as da
    >>> x = da.arange(6, chunks=3)
    >>> x.map_blocks(lambda x: x * 2).compute()
    array([ 0,  2,  4,  6,  8, 10])
    The ``da.map_blocks`` function can also accept multiple arrays
    >>> d = da.arange(5, chunks=2)
    >>> e = da.arange(5, chunks=2)
    >>> f = map_blocks(lambda a, b: a + b**2, d, e)
    >>> f.compute()
    array([ 0,  2,  6, 12, 20])
    If function changes shape of the blocks then please provide chunks
    explicitly.
    >>> y = x.map_blocks(lambda x: x[::2], chunks=((2, 2),))
    Your block function can learn where in the array it is if it supports a
    ``block_id`` keyword argument.  This will receive entries like (2, 0, 1),
    the position of the block in the dask array.
    >>> def func(block, block_id=None):
    ...     pass
    """
    if not callable(func):
        raise TypeError("First argument must be callable function, not %s\n"
                        "Usage:   da.map_blocks(function, x)\n"
                        "   or:   da.map_blocks(function, x, y, z)" %
                        type(func).__name__)
    dtype = kwargs.get('dtype')
    assert all(isinstance(arr, Array) for arr in arrs)
    # Reversed index ranges give numpy-style (trailing-axes) broadcasting.
    inds = [tuple(range(x.ndim))[::-1] for x in arrs]
    args = list(concat(zip(arrs, inds)))
    out_ind = tuple(range(max(x.ndim for x in arrs)))[::-1]
    result = atop(func, out_ind, *args, dtype=dtype)
    # If func has block_id as an argument then swap out func
    # for func with block_id partialed in.
    # Fix: ``inspect.getargspec`` was deprecated and removed in Python 3.11;
    # prefer ``getfullargspec`` when available (both expose ``.args``).
    # Fix: the bare ``except:`` also swallowed KeyboardInterrupt/SystemExit.
    argspec_fn = getattr(inspect, 'getfullargspec', None) or inspect.getargspec
    try:
        spec = argspec_fn(func)
    except Exception:
        # Builtins and some callables are not introspectable; treat them
        # as having no block_id parameter.
        spec = None
    if spec and 'block_id' in spec.args:
        for k in core.flatten(result._keys()):
            result.dask[k] = (partial(func, block_id=k[1:]),) + result.dask[k][1:]
    # Apply user-specified chunks; expand a blockshape-style spec
    # (one size per dimension) into explicit per-block sizes.
    chunks = kwargs.get('chunks')
    if chunks is not None and chunks and not isinstance(chunks[0], tuple):
        chunks = tuple([nb * (bs,)
                        for nb, bs in zip(result.numblocks, chunks)])
    if chunks is not None:
        result.chunks = chunks
    return result
@wraps(np.squeeze)
def squeeze(a, axis=None):
    # Remove singleton dimensions. Default: squeeze every axis of extent 1.
    if axis is None:
        axis = tuple(dim for dim, extent in enumerate(a.shape) if extent == 1)
    # Squeeze each block in-memory, then drop the corresponding singleton
    # chunk dimensions and rekey the graph to the reduced dimensionality.
    squeezed = a.map_blocks(partial(np.squeeze, axis=axis), dtype=a.dtype)
    new_chunks = tuple(bd for bd in squeezed.chunks if bd != (1,))
    old_keys = list(product([squeezed.name], *[range(len(bd)) for bd in squeezed.chunks]))
    new_keys = list(product([squeezed.name], *[range(len(bd)) for bd in new_chunks]))
    graph = squeezed.dask.copy()
    for stale, fresh in zip(old_keys, new_keys):
        graph[fresh] = graph[stale]
        del graph[stale]
    return Array(graph, squeezed.name, new_chunks, dtype=a.dtype)
def topk(k, x):
    """Return the ``k`` greatest elements of a 1-d array, sorted descending.

    >>> x = np.array([5, 1, 3, 6])
    >>> d = from_array(x, chunks=2)
    >>> d.topk(2).compute()
    array([6, 5])

    Runs in near linear time; all results land in a single chunk, so the
    ``k`` selected elements must fit in memory.

    Raises
    ------
    ValueError
        If ``x`` has more than one dimension.
    """
    if x.ndim != 1:
        raise ValueError("Topk only works on arrays of one dimension")
    # Stage 1: per-chunk top-k candidates.
    stage1 = next(names)
    graph = {}
    for i, key in enumerate(x._keys()):
        graph[(stage1, i)] = (chunk.topk, k, key)
    # Stage 2: gather candidates, sort, and slice off the k largest.
    stage2 = next(names)
    graph[(stage2, 0)] = (getitem,
                          (np.sort, (np.concatenate, (list, list(graph)))),
                          slice(-1, -k - 1, -1))
    return Array(merge(graph, x.dask), stage2, ((k,),), dtype=x.dtype)
def compute(*args, **kwargs):
    """Evaluate several dask arrays at once.

    Always returns a tuple of numpy arrays. To evaluate a single dask
    array use ``myarray.compute()`` or simply ``np.array(myarray)``.

    Examples
    --------
    >>> import dask.array as da
    >>> d = da.ones((4, 4), chunks=(2, 2))
    >>> a = d + 1  # two different dask arrays
    >>> b = d + 2
    >>> A, B = da.compute(a, b)  # Compute both simultaneously
    """
    # Evaluate all graphs in one scheduler call so shared work is reused.
    graph = merge(*[arg.dask for arg in args])
    keys = [arg._keys() for arg in args]
    blocks = get(graph, keys, **kwargs)
    assembled = []
    for result, arg in zip(blocks, args):
        if arg.shape:
            assembled.append(concatenate3(result))
        else:
            # Zero-dimensional arrays: peel nesting down to the scalar block.
            assembled.append(unpack_singleton(result))
    return tuple(assembled)
def store(sources, targets, **kwargs):
    """Store dask arrays into array-like objects, overwriting target data.

    Targets must support numpy-style setitem indexing
    (``target[10:20] = ...``). Values are written chunk by chunk so the
    whole array never has to fit in memory; for best performance align
    the storage target's block size with the array's.

    If your data fits in memory you may prefer ``np.array(myarray)``.

    Parameters
    ----------
    sources: Array or iterable of Arrays
    targets: array-like or iterable of array-likes

    Examples
    --------
    >>> x = ...  # doctest: +SKIP
    >>> import h5py  # doctest: +SKIP
    >>> f = h5py.File('myfile.hdf5')  # doctest: +SKIP
    >>> dset = f.create_dataset('/data', shape=x.shape,
    ...                                  chunks=x.chunks,
    ...                                  dtype='f8')  # doctest: +SKIP
    >>> store(x, dset)  # doctest: +SKIP

    Alternatively store many arrays at the same time

    >>> store([x, y, z], [dset1, dset2, dset3])  # doctest: +SKIP

    Raises
    ------
    ValueError
        If any source is not a dask Array, or if the number of sources
        and targets differ.
    """
    # Accept a single (source, target) pair for convenience.
    if isinstance(sources, Array):
        sources, targets = [sources], [targets]
    if any(not isinstance(s, Array) for s in sources):
        raise ValueError("All sources must be dask array objects")
    if len(sources) != len(targets):
        raise ValueError("Different number of sources [%d] and targets [%d]"
                         % (len(sources), len(targets)))
    updates = [insert_to_ooc(tgt, src) for tgt, src in zip(targets, sources)]
    graph = merge([src.dask for src in sources] + updates)
    write_keys = [key for update in updates for key in update]
    get(graph, write_keys, **kwargs)
def blockdims_from_blockshape(shape, blockshape):
    """Expand a uniform blockshape into explicit per-block sizes.

    >>> blockdims_from_blockshape((10, 10), (4, 3))
    ((4, 4, 2), (3, 3, 3, 1))

    Raises
    ------
    TypeError
        If either ``blockshape`` or ``shape`` is None.
    """
    if blockshape is None:
        raise TypeError("Must supply chunks= keyword argument")
    if shape is None:
        raise TypeError("Must supply shape= keyword argument")
    dims = []
    for extent, block in zip(shape, blockshape):
        full, leftover = divmod(extent, block)
        sizes = (block,) * full
        if leftover:
            # Final, partial block at the end of the dimension.
            sizes = sizes + (leftover,)
        dims.append(sizes)
    return tuple(dims)
class Array(object):
    """ Parallel Array
    Parameters
    ----------
    dask : dict
        Task dependency graph
    name : string
        Name of array in dask
    shape : tuple of ints
        Shape of the entire array
    chunks: iterable of tuples
        block sizes along each dimension
    """
    # Fixed attribute set: keeps the many intermediate Array objects created
    # during graph construction small and prevents accidental attributes.
    __slots__ = 'dask', 'name', 'chunks', '_dtype'

    def __init__(self, dask, name, chunks, dtype=None, shape=None):
        self.dask = dask
        self.name = name
        # Canonicalize chunks into the tuple-of-tuples form.
        self.chunks = normalize_chunks(chunks, shape)
        if dtype is not None:
            dtype = np.dtype(dtype)
        self._dtype = dtype

    @property
    def _args(self):
        # Tuple of constructor arguments; note this uses the (possibly
        # computed) ``dtype`` property, not the raw ``_dtype`` slot.
        return (self.dask, self.name, self.chunks, self.dtype)

    @property
    def numblocks(self):
        # Number of blocks along each dimension.
        return tuple(map(len, self.chunks))

    @property
    def shape(self):
        # Shape is derived: total extent per dimension is the sum of its
        # block sizes.
        return tuple(map(sum, self.chunks))

    def __len__(self):
        # Length along the first dimension only (numpy convention).
        return sum(self.chunks[0])

    def _visualize(self, optimize_graph=False):
        # Render the task graph with graphviz, optionally after optimization.
        from dask.dot import dot_graph
        if optimize_graph:
            dot_graph(optimize(self.dask, self._keys()))
        else:
            dot_graph(self.dask)

    @property
    @memoize(key=lambda args, kwargs: (id(args[0]), args[0].name, args[0].chunks))
    def dtype(self):
        # NOTE(review): memoizing on id(self) caches for the process
        # lifetime even after the instance is garbage collected — possible
        # unbounded cache growth; confirm against upstream.
        if self._dtype is not None:
            return self._dtype
        if self.shape:
            # Unknown dtype: compute a single corner block to discover it.
            return self[(0,) * self.ndim].compute().dtype
        else:
            return self.compute().dtype

    def __repr__(self):
        chunks = '(' + ', '.join(map(repr_long_list, self.chunks)) + ')'
        return ("dask.array<%s, shape=%s, chunks=%s, dtype=%s>" %
                (self.name, self.shape, chunks, self._dtype))

    @property
    def ndim(self):
        return len(self.shape)

    @property
    def size(self):
        """ Number of elements in array """
        return np.prod(self.shape)

    @property
    def nbytes(self):
        """ Number of bytes in array """
        return self.size * self.dtype.itemsize

    def _keys(self, *args):
        # Nested lists of block keys matching the block layout, e.g.
        # [[('x', 0, 0), ('x', 0, 1)], [('x', 1, 0), ('x', 1, 1)]].
        if self.ndim == 0:
            return [(self.name,)]
        ind = len(args)
        if ind + 1 == self.ndim:
            return [(self.name,) + args + (i,)
                    for i in range(self.numblocks[ind])]
        else:
            return [self._keys(*(args + (i,)))
                    for i in range(self.numblocks[ind])]

    def __array__(self, dtype=None, **kwargs):
        # numpy protocol: lets np.array(self) trigger a full compute.
        x = self.compute()
        if dtype and x.dtype != dtype:
            x = x.astype(dtype)
        if not isinstance(x, np.ndarray):
            x = np.array(x)
        return x

    @wraps(store)
    def store(self, target, **kwargs):
        return store([self], [target], **kwargs)

    def to_hdf5(self, filename, datapath, **kwargs):
        """ Store array in HDF5 file
        >>> x.to_hdf5('myfile.hdf5', '/x') # doctest: +SKIP
        Optionally provide arguments as though to ``h5py.File.create_dataset``
        >>> x.to_hdf5('myfile.hdf5', '/x', compression='lzf', shuffle=True) # doctest: +SKIP
        See also
        --------
        da.store
        h5py.File.create_dataset
        """
        import h5py
        with h5py.File(filename) as f:
            if 'chunks' not in kwargs:
                # Default HDF5 chunking to this array's first block sizes.
                kwargs['chunks'] = tuple([c[0] for c in self.chunks])
            d = f.require_dataset(datapath, shape=self.shape, dtype=self.dtype, **kwargs)
        slices = slices_from_chunks(self.chunks)
        name = next(names)
        # One write task per block; each writes its slice of the dataset.
        # NOTE(review): write_hdf5_chunk is defined elsewhere in this module.
        dsk = dict(((name,) + t[1:], (write_hdf5_chunk, filename, datapath, slc, t))
                   for t, slc in zip(core.flatten(self._keys()), slices))
        myget = kwargs.get('get', get)
        myget(merge(dsk, self.dask), list(dsk.keys()))

    @wraps(compute)
    def compute(self, **kwargs):
        result, = compute(self, **kwargs)
        return result

    # Scalar-conversion protocol: each computes the whole array first.
    def __int__(self):
        return int(self.compute())
    def __bool__(self):
        return bool(self.compute())
    __nonzero__ = __bool__  # python 2
    def __float__(self):
        return float(self.compute())
    def __complex__(self):
        return complex(self.compute())

    def __getitem__(self, index):
        # Field access, e.g. x['a'] or x[['a', 'b']]
        if (isinstance(index, (str, unicode)) or
            (isinstance(index, list)
             and all(isinstance(i, (str, unicode)) for i in index))):
            # Derive the field's dtype from the structured dtype if known.
            if self._dtype is not None and isinstance(index, (str, unicode)):
                dt = self._dtype[index]
            elif self._dtype is not None and isinstance(index, list):
                dt = np.dtype([(name, self._dtype[name]) for name in index])
            else:
                dt = None
            return elemwise(getarray, self, index, dtype=dt)
        # Slicing
        out = next(names)
        if not isinstance(index, tuple):
            index = (index,)
        # Full-slice no-op: x[:, :] returns self unchanged.
        if all(isinstance(i, slice) and i == slice(None) for i in index):
            return self
        dsk, chunks = slice_array(out, self.name, self.chunks, index)
        return Array(merge(self.dask, dsk), out, chunks, dtype=self._dtype)

    @wraps(np.dot)
    def dot(self, other):
        return tensordot(self, other, axes=((self.ndim-1,), (other.ndim-2,)))

    @property
    def T(self):
        return transpose(self)

    @wraps(np.transpose)
    def transpose(self, axes=None):
        return transpose(self, axes)

    @wraps(topk)
    def topk(self, k):
        return topk(k, self)

    def astype(self, dtype, **kwargs):
        """ Copy of the array, cast to a specified type """
        return elemwise(partial(np.ndarray.astype, dtype=dtype, **kwargs),
                        self, dtype=dtype)

    # Arithmetic/comparison operators: each builds a lazy elementwise graph.
    # NOTE: __eq__/__ne__ return Arrays (numpy-style), not booleans.
    def __abs__(self):
        return elemwise(operator.abs, self)
    def __add__(self, other):
        return elemwise(operator.add, self, other)
    def __radd__(self, other):
        return elemwise(operator.add, other, self)
    def __and__(self, other):
        return elemwise(operator.and_, self, other)
    def __rand__(self, other):
        return elemwise(operator.and_, other, self)
    # operator.div exists only on Python 2; __truediv__ below covers Python 3.
    def __div__(self, other):
        return elemwise(operator.div, self, other)
    def __rdiv__(self, other):
        return elemwise(operator.div, other, self)
    def __eq__(self, other):
        return elemwise(operator.eq, self, other)
    def __gt__(self, other):
        return elemwise(operator.gt, self, other)
    def __ge__(self, other):
        return elemwise(operator.ge, self, other)
    def __invert__(self):
        return elemwise(operator.invert, self)
    def __lshift__(self, other):
        return elemwise(operator.lshift, self, other)
    def __rlshift__(self, other):
        return elemwise(operator.lshift, other, self)
    def __lt__(self, other):
        return elemwise(operator.lt, self, other)
    def __le__(self, other):
        return elemwise(operator.le, self, other)
    def __mod__(self, other):
        return elemwise(operator.mod, self, other)
    def __rmod__(self, other):
        return elemwise(operator.mod, other, self)
    def __mul__(self, other):
        return elemwise(operator.mul, self, other)
    def __rmul__(self, other):
        return elemwise(operator.mul, other, self)
    def __ne__(self, other):
        return elemwise(operator.ne, self, other)
    def __neg__(self):
        return elemwise(operator.neg, self)
    def __or__(self, other):
        return elemwise(operator.or_, self, other)
    def __pos__(self):
        return self
    def __ror__(self, other):
        return elemwise(operator.or_, other, self)
    def __pow__(self, other):
        return elemwise(operator.pow, self, other)
    def __rpow__(self, other):
        return elemwise(operator.pow, other, self)
    def __rshift__(self, other):
        return elemwise(operator.rshift, self, other)
    def __rrshift__(self, other):
        return elemwise(operator.rshift, other, self)
    def __sub__(self, other):
        return elemwise(operator.sub, self, other)
    def __rsub__(self, other):
        return elemwise(operator.sub, other, self)
    def __truediv__(self, other):
        return elemwise(operator.truediv, self, other)
    def __rtruediv__(self, other):
        return elemwise(operator.truediv, other, self)
    def __floordiv__(self, other):
        return elemwise(operator.floordiv, self, other)
    def __rfloordiv__(self, other):
        return elemwise(operator.floordiv, other, self)
    def __xor__(self, other):
        return elemwise(operator.xor, self, other)
    def __rxor__(self, other):
        return elemwise(operator.xor, other, self)

    # Reductions: thin wrappers delegating to dask.array.reductions.
    @wraps(np.any)
    def any(self, axis=None, keepdims=False):
        from .reductions import any
        return any(self, axis=axis, keepdims=keepdims)
    @wraps(np.all)
    def all(self, axis=None, keepdims=False):
        from .reductions import all
        return all(self, axis=axis, keepdims=keepdims)
    @wraps(np.min)
    def min(self, axis=None, keepdims=False):
        from .reductions import min
        return min(self, axis=axis, keepdims=keepdims)
    @wraps(np.max)
    def max(self, axis=None, keepdims=False):
        from .reductions import max
        return max(self, axis=axis, keepdims=keepdims)
    @wraps(np.argmin)
    def argmin(self, axis=None):
        from .reductions import argmin
        return argmin(self, axis=axis)
    @wraps(np.argmax)
    def argmax(self, axis=None):
        from .reductions import argmax
        return argmax(self, axis=axis)
    @wraps(np.sum)
    def sum(self, axis=None, dtype=None, keepdims=False):
        from .reductions import sum
        return sum(self, axis=axis, dtype=dtype, keepdims=keepdims)
    @wraps(np.prod)
    def prod(self, axis=None, dtype=None, keepdims=False):
        from .reductions import prod
        return prod(self, axis=axis, dtype=dtype, keepdims=keepdims)
    @wraps(np.mean)
    def mean(self, axis=None, dtype=None, keepdims=False):
        from .reductions import mean
        return mean(self, axis=axis, dtype=dtype, keepdims=keepdims)
    @wraps(np.std)
    def std(self, axis=None, dtype=None, keepdims=False, ddof=0):
        from .reductions import std
        return std(self, axis=axis, dtype=dtype, keepdims=keepdims, ddof=ddof)
    @wraps(np.var)
    def var(self, axis=None, dtype=None, keepdims=False, ddof=0):
        from .reductions import var
        return var(self, axis=axis, dtype=dtype, keepdims=keepdims, ddof=ddof)

    def vnorm(self, ord=None, axis=None, keepdims=False):
        """ Vector norm """
        from .reductions import vnorm
        return vnorm(self, ord=ord, axis=axis, keepdims=keepdims)

    @wraps(map_blocks)
    def map_blocks(self, func, chunks=None, dtype=None):
        return map_blocks(func, self, chunks=chunks, dtype=dtype)

    def map_overlap(self, func, depth, boundary=None, trim=True, **kwargs):
        """ Map a function over blocks of the array with some overlap
        We share neighboring zones between blocks of the array, then map a
        function, then trim away the neighboring strips.
        Parameters
        ----------
        func: function
            The function to apply to each extended block
        depth: int, tuple, or dict
            The number of cells that each block should share with its neighbors
            If a tuple or dict this can be different per axis
        boundary: str
            how to handle the boundaries.  Values include 'reflect', 'periodic'
            or any constant value like 0 or np.nan
        trim: bool
            Whether or not to trim the excess after the map function.  Set this
            to false if your mapping function does this for you.
        **kwargs:
            Other keyword arguments valid in ``map_blocks``
        Examples
        --------
        >>> x = np.array([1, 1, 2, 3, 3, 3, 2, 1, 1])
        >>> x = from_array(x, chunks=5)
        >>> def derivative(x):
        ...     return x - np.roll(x, 1)
        >>> y = x.map_overlap(derivative, depth=1, boundary=0)
        >>> y.compute()
        array([ 1,  0,  1,  1,  0,  0, -1, -1,  0])
        """
        from .ghost import map_overlap
        return map_overlap(self, func, depth, boundary, trim, **kwargs)

    @wraps(squeeze)
    def squeeze(self):
        return squeeze(self)

    def rechunk(self, chunks):
        # Re-block the array into a new chunk structure.
        from .rechunk import rechunk
        return rechunk(self, chunks)
def normalize_chunks(chunks, shape=None):
    """Normalize any accepted chunks specification to a tuple of tuples.

    >>> normalize_chunks((2, 2), shape=(5, 6))
    ((2, 2, 1), (2, 2, 2))
    >>> normalize_chunks(((2, 2, 1), (2, 2, 2)), shape=(4, 6))  # Idempotent
    ((2, 2, 1), (2, 2, 2))
    >>> normalize_chunks([[2, 2], [3, 3]])  # Cleans up lists to tuples
    ((2, 2), (3, 3))
    >>> normalize_chunks(10, shape=(30, 5))  # Supports integer inputs
    ((10, 10, 10), (5,))
    >>> normalize_chunks((), shape=(0, 0))  #  respects null dimensions
    ((), ())
    """
    if isinstance(chunks, list):
        chunks = tuple(chunks)
    if isinstance(chunks, Number):
        # One uniform blocksize for every dimension.
        chunks = (chunks,) * len(shape)
    if not chunks:
        chunks = () if shape is None else ((),) * len(shape)
    if chunks and not isinstance(chunks[0], (tuple, list)):
        # A blockshape (one size per dimension): expand to explicit sizes.
        chunks = blockdims_from_blockshape(shape, chunks)
    return tuple(tuple(bd) for bd in chunks)
def from_array(x, chunks, name=None, lock=False, **kwargs):
    """Create a dask array from something that looks like an array.

    Input must have a ``.shape`` and support numpy-style slicing.

    The ``chunks`` argument must be one of the following forms:
    -   a blocksize like 1000
    -   a blockshape like (1000, 1000)
    -   explicit sizes of all blocks along all dimensions
        like ((1000, 1000, 500), (400, 400)).

    Examples
    --------
    >>> x = h5py.File('...')['/data/path']  # doctest: +SKIP
    >>> a = da.from_array(x, chunks=(1000, 1000))  # doctest: +SKIP

    If your underlying datastore does not support concurrent reads then include
    the ``lock=True`` keyword argument or ``lock=mylock`` if you want multiple
    arrays to coordinate around the same lock.

    >>> a = da.from_array(x, chunks=(1000, 1000), lock=True)  # doctest: +SKIP
    """
    chunks = normalize_chunks(chunks, x.shape)
    if name is None:
        name = next(names)
    graph = getem(name, chunks)
    if lock is True:
        lock = Lock()
    if lock:
        # Append the lock to every getter task so reads are serialized.
        graph = dict((key, task + (lock,)) for key, task in graph.items())
    return Array(merge({name: x}, graph), name, chunks, dtype=x.dtype)
def atop(func, out_ind, *args, **kwargs):
    """ Tensor operation: Generalized inner and outer products
    A broad class of blocked algorithms and patterns can be specified with a
    concise multi-index notation.  The ``atop`` function applies an in-memory
    function across multiple blocks of multiple inputs in a variety of ways.
    Parameters
    ----------
    func: callable
        Function to apply to individual tuples of blocks
    out_ind: iterable
        Block pattern of the output, something like 'ijk' or (1, 2, 3)
    *args: sequence of Array, index pairs
        Sequence like (x, 'ij', y, 'jk', z, 'i')
    This is best explained through example.  Consider the following examples:
    Examples
    --------
    2D embarassingly parallel operation from two arrays, x, and y.
    >>> z = atop(operator.add, 'ij', x, 'ij', y, 'ij')  # z = x + y  # doctest: +SKIP
    Outer product multiplying x by y, two 1-d vectors
    >>> z = atop(operator.mul, 'ij', x, 'i', y, 'j')  # doctest: +SKIP
    z = x.T
    >>> z = atop(np.transpose, 'ji', x, 'ij')  # doctest: +SKIP
    The transpose case above is illustrative because it does same transposition
    both on each in-memory block by calling ``np.transpose`` and on the order
    of the blocks themselves, by switching the order of the index ``ij -> ji``.
    We can compose these same patterns with more variables and more complex
    in-memory functions
    z = X + Y.T
    >>> z = atop(lambda x, y: x + y.T, 'ij', x, 'ij', y, 'ji')  # doctest: +SKIP
    Any index, like ``i`` missing from the output index is interpreted as a
    contraction (note that this differs from Einstein convention; repeated
    indices do not imply contraction.)  In the case of a contraction the passed
    function should expect an iterator of blocks on any array that holds that
    index.
    Inner product multiplying x by y, two 1-d vectors
    >>> def sequence_dot(x_blocks, y_blocks):
    ...     result = 0
    ...     for x, y in zip(x_blocks, y_blocks):
    ...         result += x.dot(y)
    ...     return result
    >>> z = atop(sequence_dot, '', x, 'i', y, 'i')  # doctest: +SKIP
    Many dask.array operations are special cases of atop.  These tensor
    operations cover a broad subset of NumPy and this function has been battle
    tested, supporting tricky concepts like broadcasting.
    See also:
        top - dict formulation of this function, contains most logic
    """
    out = kwargs.pop('name', None) or next(names)
    dtype = kwargs.get('dtype', None)
    arginds = list(partition(2, args))  # [x, ij, y, jk] -> [(x, ij), (y, jk)]
    numblocks = dict([(a.name, a.numblocks) for a, ind in arginds])
    argindsstr = list(concat([(a.name, ind) for a, ind in arginds]))
    # Delegate the task-graph construction (keys, contraction, broadcasting)
    # to ``top``, passing arrays by name only.
    dsk = top(func, out, out_ind, *argindsstr, numblocks=numblocks)
    # Dictionary mapping {i: 3, j: 4, ...} for i, j, ... the dimensions
    shapes = dict((a.name, a.shape) for a, _ in arginds)
    nameinds = [(a.name, i) for a, i in arginds]
    dims = broadcast_dimensions(nameinds, shapes)
    shape = tuple(dims[i] for i in out_ind)
    # Resolve the output chunk structure the same way the shape is resolved.
    blockdim_dict = dict((a.name, a.chunks) for a, _ in arginds)
    chunkss = broadcast_dimensions(nameinds, blockdim_dict)
    chunks = tuple(chunkss[i] for i in out_ind)
    # Merge all input graphs with the new tasks into one graph.
    dsks = [a.dask for a, _ in arginds]
    return Array(merge(dsk, *dsks), out, chunks, dtype=dtype)
def get(dsk, keys, get=None, **kwargs):
    """Compute graph ``dsk`` for ``keys`` with array-aware optimizations.

    The graph is first rewritten by ``optimize`` (inlining etc.) and then
    executed by the chosen scheduler: the explicit ``get`` argument if
    supplied, otherwise the globally configured scheduler, otherwise the
    threaded scheduler.
    """
    scheduler = get or _globals['get'] or threaded.get
    optimized = optimize(dsk, keys, **kwargs)
    return scheduler(optimized, keys, **kwargs)
def unpack_singleton(x):
    """Repeatedly index position 0 until that is no longer possible.

    Unwraps arbitrarily nested single-element containers; a non-indexable
    or empty object is returned unchanged.

    >>> unpack_singleton([[[[1]]]])
    1
    >>> unpack_singleton(np.array(np.datetime64('2000-01-01')))
    array(datetime.date(2000, 1, 1), dtype='datetime64[D]')
    """
    current = x
    while True:
        try:
            current = current[0]
        except (IndexError, TypeError, KeyError):
            return current
stacked_names = ('stack-%d' % i for i in count(1))
def stack(seq, axis=0):
    """
    Stack arrays along a new axis

    Given a sequence of dask Arrays form a new dask Array by stacking them
    along a new dimension (axis=0 by default)

    Examples
    --------
    Create slices

    >>> import dask.array as da
    >>> import numpy as np

    >>> data = [from_array(np.ones((4, 4)), chunks=(2, 2))
    ...         for i in range(3)]

    >>> x = da.stack(data, axis=0)
    >>> x.shape
    (3, 4, 4)

    >>> da.stack(data, axis=1).shape
    (4, 3, 4)

    >>> da.stack(data, axis=-1).shape
    (4, 4, 3)

    Result is a new dask Array

    See Also
    --------
    concatenate
    """
    n = len(seq)
    ndim = len(seq[0].shape)
    if axis < 0:
        # Negative axes count from the end; +1 because the result gains
        # one extra dimension.
        axis = ndim + axis + 1
    if axis > ndim:
        raise ValueError("Axis must not be greater than number of dimensions"
                         "\nData has %d dimensions, but got axis=%d" % (ndim, axis))

    # All inputs must share an identical chunk layout.
    assert len(set(a.chunks for a in seq)) == 1  # same chunks

    shape = seq[0].shape[:axis] + (len(seq),) + seq[0].shape[axis:]
    # The new axis is chunked into n chunks of size 1, one per input array.
    chunks = (seq[0].chunks[:axis]
              + ((1,) * n,)
              + seq[0].chunks[axis:])

    name = next(stacked_names)
    keys = list(product([name], *[range(len(bd)) for bd in chunks]))

    names = [a.name for a in seq]
    # The block coordinate along the new axis (key[axis+1]) selects which
    # input array a block comes from; it is dropped from the input key.
    inputs = [(names[key[axis + 1]],) + key[1:axis + 1] + key[axis + 2:]
              for key in keys]
    # Each task slices the source block with a ``None`` inserted at the
    # stacking position, adding the new length-1 axis in-memory.
    values = [(getarray, inp, (slice(None, None, None),) * axis
               + (None,)
               + (slice(None, None, None),) * (ndim - axis))
              for inp in inputs]

    dsk = dict(zip(keys, values))
    dsk2 = merge(dsk, *[a.dask for a in seq])

    # Result dtype: promotion over all inputs, or unknown if any is unknown.
    if all(a._dtype is not None for a in seq):
        dt = reduce(np.promote_types, [a._dtype for a in seq])
    else:
        dt = None

    return Array(dsk2, name, chunks, dtype=dt)
concatenate_names = ('concatenate-%d' % i for i in count(1))
def concatenate(seq, axis=0):
    """
    Concatenate arrays along an existing axis

    Given a sequence of dask Arrays form a new dask Array by stacking them
    along an existing dimension (axis=0 by default)

    Examples
    --------
    Create slices

    >>> import dask.array as da
    >>> import numpy as np

    >>> data = [from_array(np.ones((4, 4)), chunks=(2, 2))
    ...         for i in range(3)]

    >>> x = da.concatenate(data, axis=0)
    >>> x.shape
    (12, 4)

    >>> da.concatenate(data, axis=1).shape
    (4, 12)

    Result is a new dask Array

    See Also
    --------
    stack
    """
    n = len(seq)
    ndim = len(seq[0].shape)
    if axis < 0:
        # Normalize a negative axis against the existing dimensionality.
        axis = ndim + axis
    if axis >= ndim:
        raise ValueError("Axis must be less than than number of dimensions"
                         "\nData has %d dimensions, but got axis=%d" % (ndim, axis))

    bds = [a.chunks for a in seq]

    # All arrays must agree on chunk sizes along every non-concatenated axis.
    if not all(len(set(bds[i][j] for i in range(n))) == 1
               for j in range(len(bds[0])) if j != axis):
        raise ValueError("Block shapes do not align")

    shape = (seq[0].shape[:axis]
             + (sum(a.shape[axis] for a in seq),)
             + seq[0].shape[axis + 1:])
    # Along ``axis`` the output chunks are the inputs' chunk runs joined
    # end to end; other axes keep the first array's chunking.
    chunks = (seq[0].chunks[:axis]
              + (sum([bd[axis] for bd in bds], ()),)
              + seq[0].chunks[axis + 1:])

    name = next(concatenate_names)
    keys = list(product([name], *[range(len(bd)) for bd in chunks]))

    # cum_dims[i] is the first output block index (along ``axis``) owned by
    # seq[i]; bisect maps each output block index back to its source array.
    cum_dims = [0] + list(accumulate(add, [len(a.chunks[axis]) for a in seq]))
    names = [a.name for a in seq]
    # Rewrite the output key into the source array's key: same coordinates,
    # except the axis coordinate is shifted into that array's local range.
    values = [(names[bisect(cum_dims, key[axis + 1]) - 1],)
              + key[1:axis + 1]
              + (key[axis + 1] - cum_dims[bisect(cum_dims, key[axis + 1]) - 1],)
              + key[axis + 2:]
              for key in keys]

    dsk = dict(zip(keys, values))
    dsk2 = merge(dsk, *[a.dask for a in seq])

    # Result dtype: promotion over all inputs, or unknown if any is unknown.
    if all(a._dtype is not None for a in seq):
        dt = reduce(np.promote_types, [a._dtype for a in seq])
    else:
        dt = None

    return Array(dsk2, name, chunks, dtype=dt)
@wraps(np.take)
def take(a, indices, axis):
    """Select ``indices`` along ``axis`` via fancy indexing."""
    ndim = a.ndim
    if axis < -ndim or axis >= ndim:
        raise ValueError('axis=(%s) out of bounds' % axis)
    if axis < 0:
        axis = axis + ndim
    # Full slices up to ``axis``, then the index object itself.
    selector = (slice(None),) * axis + (indices,)
    return a[selector]
@wraps(np.transpose)
def transpose(a, axes=None):
    # Default matches np.transpose: reverse all axes.
    axes = axes or tuple(range(a.ndim))[::-1]
    # Using the permuted axes as the output index makes atop reorder the
    # blocks, while the curried np.transpose permutes each in-memory block.
    return atop(curry(np.transpose, axes=axes),
                axes,
                a, tuple(range(a.ndim)), dtype=a._dtype)
@curry
def many(a, b, binop=None, reduction=None, **kwargs):
    """Combine two sequences pairwise with ``binop``, then reduce the results.

    >>> many([1, 2, 3], [10, 20, 30], mul, sum)  # dot product
    140
    """
    combined = map(curry(binop, **kwargs), a, b)
    return reduction(combined)
# Index symbols used by tensordot: lowercase letters label the left
# operand's axes, uppercase letters the right operand's.
alphabet = 'abcdefghijklmnopqrstuvwxyz'
ALPHABET = alphabet.upper()
@wraps(np.tensordot)
def tensordot(lhs, rhs, axes=2):
    # Normalize ``axes`` into two tuples of contraction axes, mirroring the
    # np.tensordot convention (an int N means: last N axes of lhs against
    # the first N axes of rhs).
    if isinstance(axes, Iterable):
        left_axes, right_axes = axes
    else:
        left_axes = tuple(range(lhs.ndim - 1, lhs.ndim - axes - 1, -1))
        right_axes = tuple(range(0, axes))

    if isinstance(left_axes, int):
        left_axes = (left_axes,)
    if isinstance(right_axes, int):
        right_axes = (right_axes,)
    if isinstance(left_axes, list):
        left_axes = tuple(left_axes)
    if isinstance(right_axes, list):
        right_axes = tuple(right_axes)

    if len(left_axes) > 1:
        raise NotImplementedError("Simultaneous Contractions of multiple "
                                  "indices not yet supported")

    # Label lhs axes with lowercase and rhs axes with uppercase letters,
    # then rename each contracted rhs axis to its lhs partner so that atop
    # sees the pair as one shared (and, being absent from out_index,
    # contracted) index.
    left_index = list(alphabet[:lhs.ndim])
    right_index = list(ALPHABET[:rhs.ndim])
    out_index = left_index + right_index
    for l, r in zip(left_axes, right_axes):
        out_index.remove(right_index[r])
        out_index.remove(left_index[l])
        right_index[r] = left_index[l]

    if lhs._dtype is not None and rhs._dtype is not None:
        dt = np.promote_types(lhs._dtype, rhs._dtype)
    else:
        dt = None

    # Per-block work: np.tensordot each pair of blocks, then sum partials.
    func = many(binop=np.tensordot, reduction=sum,
                axes=(left_axes, right_axes))
    return atop(func,
                out_index,
                lhs, tuple(left_index),
                rhs, tuple(right_index), dtype=dt)
def insert_to_ooc(out, arr):
    """Build a graph that writes ``arr``'s blocks into ``out``.

    ``out`` is any object supporting numpy-style slice assignment.  All
    writes are serialized through a single shared lock.
    """
    lock = Lock()

    def store(x, index):
        with lock:
            out[index] = np.asanyarray(x)
        return None

    slices = slices_from_chunks(arr.chunks)

    name = 'store-%s' % arr.name
    # One store task per block, keyed parallel to the block it writes.
    dsk = dict(((name,) + t[1:], (store, t, slc))
               for t, slc in zip(core.flatten(arr._keys()), slices))
    return dsk
def partial_by_order(op, other):
    """Partially apply ``op`` with values pinned at fixed argument positions.

    ``other`` is a sequence of ``(index, value)`` pairs; each value is
    inserted at its index into the call-time argument list before ``op``
    is invoked.

    >>> f = partial_by_order(add, [(1, 10)])
    >>> f(5)
    15
    """
    def call(*args):
        full_args = list(args)
        for position, pinned in other:
            full_args.insert(position, pinned)
        return op(*full_args)
    return call
def elemwise(op, *args, **kwargs):
    """ Apply elementwise function across arguments

    Respects broadcasting rules

    Examples
    --------
    >>> elemwise(add, x, y)  # doctest: +SKIP
    >>> elemwise(sin, x)  # doctest: +SKIP

    See also
    --------
    atop
    """
    name = kwargs.get('name') or next(names)
    # The output rank is the largest rank among the dask-array arguments;
    # non-Array arguments contribute rank 0 (treated as scalars).
    out_ndim = max(len(arg.shape) if isinstance(arg, Array) else 0
                   for arg in args)
    expr_inds = tuple(range(out_ndim))[::-1]

    arrays = [arg for arg in args if isinstance(arg, Array)]
    other = [(i, arg) for i, arg in enumerate(args) if not isinstance(arg, Array)]

    if 'dtype' in kwargs:
        dt = kwargs['dtype']
    elif not all(a._dtype is not None for a in arrays):
        dt = None
    else:
        # Infer the result dtype by applying ``op`` to tiny empty arrays
        # of the inputs' dtypes and inspecting the result.
        vals = [np.empty((1,) * a.ndim, dtype=a.dtype)
                if hasattr(a, 'dtype') else a
                for a in args]
        try:
            dt = op(*vals).dtype
        except AttributeError:
            dt = None

    if other:
        # Re-insert the scalar/non-Array arguments at their original
        # positions inside each per-block call.
        op2 = partial_by_order(op, other)
    else:
        op2 = op

    return atop(op2, expr_inds,
                *concat((a, tuple(range(a.ndim)[::-1])) for a in arrays),
                dtype=dt, name=name)
def wrap_elemwise(func, **kwargs):
    """Lift a NumPy elementwise function into a dask.array function."""
    wrapped = partial(elemwise, func, **kwargs)
    wrapped.__doc__ = func.__doc__
    wrapped.__name__ = func.__name__
    return wrapped
# ufuncs, copied from this page:
# http://docs.scipy.org/doc/numpy/reference/ufuncs.html

# math operations
logaddexp = wrap_elemwise(np.logaddexp)
logaddexp2 = wrap_elemwise(np.logaddexp2)
conj = wrap_elemwise(np.conj)
exp = wrap_elemwise(np.exp)
log = wrap_elemwise(np.log)
log2 = wrap_elemwise(np.log2)
log10 = wrap_elemwise(np.log10)
log1p = wrap_elemwise(np.log1p)
expm1 = wrap_elemwise(np.expm1)
sqrt = wrap_elemwise(np.sqrt)
square = wrap_elemwise(np.square)

# trigonometric functions
sin = wrap_elemwise(np.sin)
cos = wrap_elemwise(np.cos)
tan = wrap_elemwise(np.tan)
arcsin = wrap_elemwise(np.arcsin)
arccos = wrap_elemwise(np.arccos)
arctan = wrap_elemwise(np.arctan)
arctan2 = wrap_elemwise(np.arctan2)
hypot = wrap_elemwise(np.hypot)
sinh = wrap_elemwise(np.sinh)
cosh = wrap_elemwise(np.cosh)
tanh = wrap_elemwise(np.tanh)
arcsinh = wrap_elemwise(np.arcsinh)
arccosh = wrap_elemwise(np.arccosh)
arctanh = wrap_elemwise(np.arctanh)
deg2rad = wrap_elemwise(np.deg2rad)
rad2deg = wrap_elemwise(np.rad2deg)

# comparison functions
logical_and = wrap_elemwise(np.logical_and, dtype='bool')
logical_or = wrap_elemwise(np.logical_or, dtype='bool')
logical_xor = wrap_elemwise(np.logical_xor, dtype='bool')
logical_not = wrap_elemwise(np.logical_not, dtype='bool')
maximum = wrap_elemwise(np.maximum)
minimum = wrap_elemwise(np.minimum)
fmax = wrap_elemwise(np.fmax)
fmin = wrap_elemwise(np.fmin)

# floating functions
isreal = wrap_elemwise(np.isreal, dtype='bool')
iscomplex = wrap_elemwise(np.iscomplex, dtype='bool')
isfinite = wrap_elemwise(np.isfinite, dtype='bool')
isinf = wrap_elemwise(np.isinf, dtype='bool')
isnan = wrap_elemwise(np.isnan, dtype='bool')
signbit = wrap_elemwise(np.signbit, dtype='bool')
copysign = wrap_elemwise(np.copysign)
nextafter = wrap_elemwise(np.nextafter)
# modf: see below
ldexp = wrap_elemwise(np.ldexp)
# frexp: see below
fmod = wrap_elemwise(np.fmod)
floor = wrap_elemwise(np.floor)
ceil = wrap_elemwise(np.ceil)
trunc = wrap_elemwise(np.trunc)

# more math routines, from this page:
# http://docs.scipy.org/doc/numpy/reference/routines.math.html
degrees = wrap_elemwise(np.degrees)
radians = wrap_elemwise(np.radians)

rint = wrap_elemwise(np.rint)
fix = wrap_elemwise(np.fix)

angle = wrap_elemwise(np.angle)
real = wrap_elemwise(np.real)
imag = wrap_elemwise(np.imag)

clip = wrap_elemwise(np.clip)
fabs = wrap_elemwise(np.fabs)
# BUG FIX: previously wrapped np.fabs (copy-paste from the line above),
# which made da.sign return absolute values instead of signs.
sign = wrap_elemwise(np.sign)
def frexp(x):
    # Blockwise np.frexp: each block maps to a (mantissa, exponent) pair;
    # split the pair into two separate dask arrays sharing the input graph.
    tmp = elemwise(np.frexp, x)
    left = next(names)
    right = next(names)
    ldsk = dict(((left,) + key[1:], (getitem, key, 0))
                for key in core.flatten(tmp._keys()))
    rdsk = dict(((right,) + key[1:], (getitem, key, 1))
                for key in core.flatten(tmp._keys()))

    if x._dtype is not None:
        # Probe np.frexp on a tiny array to learn the two output dtypes.
        a = np.empty((1,), dtype=x._dtype)
        l, r = np.frexp(a)
        ldt = l.dtype
        rdt = r.dtype
    else:
        ldt = None
        rdt = None

    L = Array(merge(tmp.dask, ldsk), left, chunks=tmp.chunks,
              dtype=ldt)

    R = Array(merge(tmp.dask, rdsk), right, chunks=tmp.chunks,
              dtype=rdt)

    return L, R

# BUG FIX: copy the docstring string; previously the ufunc object itself
# was assigned to __doc__.
frexp.__doc__ = np.frexp.__doc__
def modf(x):
    # Blockwise np.modf: each block maps to a (fractional, integral) pair;
    # split the pair into two separate dask arrays sharing the input graph.
    tmp = elemwise(np.modf, x)
    left = next(names)
    right = next(names)
    ldsk = dict(((left,) + key[1:], (getitem, key, 0))
                for key in core.flatten(tmp._keys()))
    rdsk = dict(((right,) + key[1:], (getitem, key, 1))
                for key in core.flatten(tmp._keys()))

    if x._dtype is not None:
        # Probe np.modf on a tiny array to learn the two output dtypes.
        a = np.empty((1,), dtype=x._dtype)
        l, r = np.modf(a)
        ldt = l.dtype
        rdt = r.dtype
    else:
        ldt = None
        rdt = None

    L = Array(merge(tmp.dask, ldsk), left, chunks=tmp.chunks,
              dtype=ldt)

    R = Array(merge(tmp.dask, rdsk), right, chunks=tmp.chunks,
              dtype=rdt)

    return L, R

# BUG FIX: copy the docstring string; previously the ufunc object itself
# was assigned to __doc__.
modf.__doc__ = np.modf.__doc__
@wraps(np.around)
def around(x, decimals=0):
    """Blockwise ``np.around``; the result keeps the input's dtype."""
    rounder = partial(np.around, decimals=decimals)
    return map_blocks(rounder, x, dtype=x.dtype)
def isnull(values):
    """ pandas.isnull for dask arrays """
    # Imported lazily so dask.array does not require pandas at import time.
    import pandas as pd
    return elemwise(pd.isnull, values, dtype='bool')
def notnull(values):
    """ pandas.notnull for dask arrays """
    # Defined as the elementwise negation of isnull.
    return ~isnull(values)
@wraps(numpy_compat.isclose)
def isclose(arr1, arr2, rtol=1e-5, atol=1e-8, equal_nan=False):
    # Bind the tolerance options, then apply blockwise with boolean output.
    func = partial(numpy_compat.isclose, rtol=rtol, atol=atol, equal_nan=equal_nan)
    return elemwise(func, arr1, arr2, dtype='bool')
def variadic_choose(a, *choices):
    """Adapter: ``np.choose`` taking the choice arrays as varargs."""
    return np.choose(a, list(choices))
@wraps(np.choose)
def choose(a, choices):
    # elemwise takes positional arrays, so unpack ``choices`` through the
    # variadic adapter.
    return elemwise(variadic_choose, a, *choices)
where_error_message = """
The dask.array version of where only handles the three argument case.
da.where(x > 0, x, 0)
and not the single argument case
da.where(x > 0)
This is because dask.array operations must be able to infer the shape of their
outputs prior to execution. The number of positive elements of x requires
execution. See the ``np.where`` docstring for examples and the following link
for a more thorough explanation:
http://dask.pydata.org/en/latest/array-overview.html#construct
""".strip()
@wraps(np.where)
def where(condition, x=None, y=None):
    """Three-argument ``np.where``; the one-argument form is unsupported."""
    if y is None or x is None:
        raise TypeError(where_error_message)
    # choose picks index 0 (-> y) where condition is falsey, 1 (-> x) where
    # it is truthy.
    return choose(condition, [y, x])
@wraps(chunk.coarsen)
def coarsen(reduction, x, axes):
    # Every chunk along a coarsened axis must be divisible by its factor so
    # blocks can be reduced independently.
    if not all(bd % div == 0 for i, div in axes.items()
               for bd in x.chunks[i]):
        raise ValueError(
            "Coarsening factor does not align with block dimensions")

    # If a dask-level reduction was passed, swap in the numpy function of
    # the same name (detected, hackily, by the source file of the callable).
    if 'dask' in inspect.getfile(reduction):
        reduction = getattr(np, reduction.__name__)

    name = next(names)
    dsk = dict(((name,) + key[1:], (chunk.coarsen, reduction, key, axes))
               for key in core.flatten(x._keys()))
    # Each chunk shrinks by its axis's coarsening factor (default 1).
    chunks = tuple(tuple(int(bd / axes.get(i, 1)) for bd in bds)
                   for i, bds in enumerate(x.chunks))

    if x._dtype is not None:
        # Probe the reduction on a tiny array to learn the output dtype.
        dt = reduction(np.empty((1,) * x.ndim, dtype=x.dtype)).dtype
    else:
        dt = None
    return Array(merge(x.dask, dsk), name, chunks, dtype=dt)
def split_at_breaks(array, breaks, axis=0):
    """ Split an array into a list of arrays (using slices) at the given breaks

    >>> split_at_breaks(np.arange(6), [3, 5])
    [array([0, 1, 2]), array([3, 4]), array([5])]
    """
    # Bracket the break points with open-ended boundaries, then slice
    # between each consecutive pair along ``axis``.
    boundaries = [None] + list(breaks) + [None]
    lead = (slice(None),) * axis
    return [array[lead + (slice(lo, hi),)]
            for lo, hi in zip(boundaries, boundaries[1:])]
@wraps(np.insert)
def insert(arr, obj, values, axis):
    # axis is a required argument here to avoid needing to deal with the numpy
    # default case (which reshapes the array to make it flat)
    if not -arr.ndim <= axis < arr.ndim:
        raise IndexError('axis %r is out of bounds for an array of dimension '
                         '%s' % (axis, arr.ndim))
    if axis < 0:
        axis += arr.ndim

    # Normalize ``obj`` to a 1-d array of non-negative insertion positions.
    if isinstance(obj, slice):
        obj = np.arange(*obj.indices(arr.shape[axis]))
    obj = np.asarray(obj)
    scalar_obj = obj.ndim == 0
    if scalar_obj:
        obj = np.atleast_1d(obj)

    obj = np.where(obj < 0, obj + arr.shape[axis], obj)
    if (np.diff(obj) < 0).any():
        raise NotImplementedError(
            'da.insert only implemented for monotonic ``obj`` argument')

    # Cut ``arr`` at each distinct insertion position.
    split_arr = split_at_breaks(arr, np.unique(obj), axis)

    if getattr(values, 'ndim', 0) == 0:
        # we need to turn values into a dask array
        name = next(names)
        dtype = getattr(values, 'dtype', type(values))
        values = Array({(name,): values}, name, chunks=(), dtype=dtype)

        values_shape = tuple(len(obj) if axis == n else s
                             for n, s in enumerate(arr.shape))
        values = broadcast_to(values, values_shape)
    elif scalar_obj:
        # A scalar ``obj`` with array ``values`` needs an explicit
        # length-1 insertion axis.
        values = values[(slice(None),) * axis + (None,)]

    # Align ``values`` chunking with ``arr`` on all non-insert axes.
    values_chunks = tuple(values_bd if axis == n else arr_bd
                          for n, (arr_bd, values_bd)
                          in enumerate(zip(arr.chunks,
                                           values.chunks)))
    values = values.rechunk(values_chunks)

    # Group the inserted values by distinct insertion position, then
    # interleave value groups with the pieces of the original array.
    counts = np.bincount(obj)[:-1]
    values_breaks = np.cumsum(counts[counts > 0])
    split_values = split_at_breaks(values, values_breaks, axis)

    interleaved = list(interleave([split_arr, split_values]))
    interleaved = [i for i in interleaved if i.nbytes]
    return concatenate(interleaved, axis=axis)
@wraps(chunk.broadcast_to)
def broadcast_to(x, shape):
    shape = tuple(shape)
    ndim_new = len(shape) - x.ndim
    # Only new leading dimensions, or existing size-1 dimensions, may be
    # broadcast; anything else is an error.
    if ndim_new < 0 or any(new != old
                           for new, old in zip(shape[ndim_new:], x.shape)
                           if old != 1):
        raise ValueError('cannot broadcast shape %s to shape %s'
                         % (x.shape, shape))

    name = next(names)
    # New leading axes get a single chunk spanning their full extent;
    # broadcast (formerly size-1) axes likewise become one full chunk.
    chunks = (tuple((s,) for s in shape[:ndim_new])
              + tuple(bd if old > 1 else (new,)
                      for bd, old, new in zip(x.chunks, x.shape,
                                              shape[ndim_new:])))
    # Every output block broadcasts its source block to the per-block shape
    # implied by ``chunks``; leading block coordinates are all 0.
    dsk = dict(((name,) + (0,) * ndim_new + key[1:],
                (chunk.broadcast_to, key,
                 shape[:ndim_new] +
                 tuple(bd[i] for i, bd in zip(key[1:], chunks[ndim_new:]))))
               for key in core.flatten(x._keys()))
    return Array(merge(dsk, x.dask), name, chunks, dtype=x.dtype)
def offset_func(func, offset, *args):
    """Wrap ``func`` so each positional argument is shifted by ``offset``.

    >>> double = lambda x: x * 2
    >>> f = offset_func(double, (10,))
    >>> f(1)
    22
    >>> f(300)
    620
    """
    def _offset(*inner_args):
        shifted = [arg + off for arg, off in zip(inner_args, offset)]
        return func(*shifted)

    try:
        _offset.__name__ = 'offset_' + func.__name__
    except Exception:
        # Best effort only: some callables have no usable __name__.
        pass
    return _offset
fromfunction_names = ('fromfunction-%d' % i for i in count(1))
@wraps(np.fromfunction)
def fromfunction(func, chunks=None, shape=None, dtype=None):
    name = next(fromfunction_names)
    if chunks:
        chunks = normalize_chunks(chunks, shape)
    # NOTE(review): if ``chunks`` is falsy it is never normalized and the
    # comprehensions below iterate over None/() — confirm callers always
    # supply ``chunks``.
    keys = list(product([name], *[range(len(bd)) for bd in chunks]))
    # Per-dimension starting offsets of each block (cumulative chunk sums).
    aggdims = [list(accumulate(add, (0,) + bd[:-1])) for bd in chunks]
    offsets = list(product(*aggdims))
    shapes = list(product(*chunks))
    # Each block evaluates func over its own extent, with indices shifted
    # by the block's global offset.  (``shape`` here is the per-block shape
    # from the zip, shadowing the function parameter.)
    values = [(np.fromfunction, offset_func(func, offset), shape)
              for offset, shape in zip(offsets, shapes)]

    dsk = dict(zip(keys, values))

    return Array(dsk, name, chunks, dtype=dtype)
@wraps(np.unique)
def unique(x):
    name = next(names)
    # First deduplicate within each block...
    dsk = dict(((name, i), (np.unique, key)) for i, key in enumerate(x._keys()))
    parts = get(merge(dsk, x.dask), list(dsk.keys()))
    # ...then across the per-block results.  Note this computes eagerly and
    # returns a concrete numpy array, not a dask array.
    return np.unique(np.concatenate(parts))
def write_hdf5_chunk(fn, datapath, index, data):
    """Write ``data`` at ``index`` into dataset ``datapath`` of HDF5 file ``fn``."""
    # Imported lazily so h5py is only required when HDF5 output is used.
    import h5py
    with h5py.File(fn) as f:
        d = f[datapath]
        d[index] = data
@wraps(np.bincount)
def bincount(x, weights=None, minlength=None):
    if minlength is None:
        # A fixed output length is required so the result's chunk structure
        # is known ahead of execution.
        raise TypeError("Must specify minlength argument in da.bincount")
    assert x.ndim == 1
    if weights is not None:
        assert weights.chunks == x.chunks

    # Call np.bincount on each block, possibly with weights
    name = 'bincount' + next(tokens)
    if weights is not None:
        dsk = dict(((name, i),
                    (np.bincount, (x.name, i), (weights.name, i), minlength))
                   for i, _ in enumerate(x._keys()))
        dtype = 'f8'
    else:
        dsk = dict(((name, i),
                    (np.bincount, (x.name, i), None, minlength))
                   for i, _ in enumerate(x._keys()))
        dtype = 'i8'

    # Sum up all of the intermediate bincounts per block
    name = 'bincount-sum' + next(tokens)
    dsk[(name, 0)] = (np.sum, (list, list(dsk)), 0)

    # Single output block of length minlength.
    chunks = ((minlength,),)

    dsk.update(x.dask)
    if weights is not None:
        dsk.update(weights.dask)

    return Array(dsk, name, chunks, dtype)
def chunks_from_arrays(arrays):
    """Infer a chunks tuple from a nested list of in-memory array blocks.

    >>> x = np.array([1, 2])
    >>> chunks_from_arrays([x, x])
    ((2, 2),)

    >>> x = np.array([[1, 2]])
    >>> chunks_from_arrays([[x], [x]])
    ((1, 1), (2,))

    >>> x = np.array([[1, 2]])
    >>> chunks_from_arrays([[x, x]])
    ((1,), (2, 2))
    """
    chunks = []
    level = arrays
    depth = 0
    # Each nesting level corresponds to one dimension; the chunk sizes along
    # that dimension come from the first block of each sibling subtree.
    while isinstance(level, (list, tuple)):
        chunks.append(tuple(deepfirst(item).shape[depth] for item in level))
        level = level[0]
        depth += 1
    return tuple(chunks)
def deepfirst(seq):
    """Return the first non-list/tuple element of an arbitrarily nested sequence.

    >>> deepfirst([[[1, 2], [3, 4]], [5, 6], [7, 8]])
    1
    """
    current = seq
    while isinstance(current, (list, tuple)):
        current = current[0]
    return current
def ndimlist(seq):
    """Nesting depth of lists/tuples (0 for a non-sequence)."""
    depth = 0
    current = seq
    while isinstance(current, (list, tuple)):
        depth += 1
        current = current[0]
    return depth
def concatenate3(arrays):
    """ Recursive np.concatenate

    Input should be a nested list of numpy arrays arranged in the order they
    should appear in the array itself.  Each array should have the same number
    of dimensions as the desired output and the nesting of the lists.

    >>> x = np.array([[1, 2]])
    >>> concatenate3([[x, x, x], [x, x, x]])
    array([[1, 2, 1, 2, 1, 2],
           [1, 2, 1, 2, 1, 2]])

    >>> concatenate3([[x, x], [x, x], [x, x]])
    array([[1, 2, 1, 2],
           [1, 2, 1, 2],
           [1, 2, 1, 2]])
    """
    arrays = concrete(arrays)
    ndim = ndimlist(arrays)
    if not ndim:
        # Not a nested list at all; return the input unchanged.
        return arrays
    chunks = chunks_from_arrays(arrays)
    shape = tuple(map(sum, chunks))
    # Allocate the full output once, then copy each block into its slot
    # (avoids repeated intermediate concatenations).
    result = np.empty(shape=shape, dtype=deepfirst(arrays).dtype)
    for (idx, arr) in zip(slices_from_chunks(chunks), core.flatten(arrays)):
        # Pad missing leading axes so the block matches the output rank.
        while arr.ndim < ndim:
            arr = arr[None, ...]
        result[idx] = arr
    return result
| bsd-3-clause |
pprett/scikit-learn | sklearn/datasets/tests/test_samples_generator.py | 25 | 16022 | from __future__ import division
from collections import defaultdict
from functools import partial
import numpy as np
import scipy.sparse as sp
from sklearn.externals.six.moves import zip
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import assert_raises
from sklearn.datasets import make_classification
from sklearn.datasets import make_multilabel_classification
from sklearn.datasets import make_hastie_10_2
from sklearn.datasets import make_regression
from sklearn.datasets import make_blobs
from sklearn.datasets import make_friedman1
from sklearn.datasets import make_friedman2
from sklearn.datasets import make_friedman3
from sklearn.datasets import make_low_rank_matrix
from sklearn.datasets import make_moons
from sklearn.datasets import make_sparse_coded_signal
from sklearn.datasets import make_sparse_uncorrelated
from sklearn.datasets import make_spd_matrix
from sklearn.datasets import make_swiss_roll
from sklearn.datasets import make_s_curve
from sklearn.datasets import make_biclusters
from sklearn.datasets import make_checkerboard
from sklearn.utils.validation import assert_all_finite
def test_make_classification():
    """Check output shapes, class count, and weighted per-class sample counts."""
    X, y = make_classification(n_samples=100, n_features=20, n_informative=5,
                               n_redundant=1, n_repeated=1, n_classes=3,
                               n_clusters_per_class=1, hypercube=False,
                               shift=None, scale=None, weights=[0.1, 0.25],
                               random_state=0)
    assert_equal(X.shape, (100, 20), "X shape mismatch")
    assert_equal(y.shape, (100,), "y shape mismatch")
    assert_equal(np.unique(y).shape, (3,), "Unexpected number of classes")
    # weights=[0.1, 0.25] -> classes 0/1/2 get 10/25/65 of 100 samples.
    assert_equal(sum(y == 0), 10, "Unexpected number of samples in class #0")
    assert_equal(sum(y == 1), 25, "Unexpected number of samples in class #1")
    assert_equal(sum(y == 2), 65, "Unexpected number of samples in class #2")
def test_make_classification_informative_features():
    """Test the construction of informative features in make_classification

    Also tests `n_clusters_per_class`, `n_classes`, `hypercube` and
    fully-specified `weights`.
    """
    # Create very separate clusters; check that vertices are unique and
    # correspond to classes
    class_sep = 1e6
    make = partial(make_classification, class_sep=class_sep, n_redundant=0,
                   n_repeated=0, flip_y=0, shift=0, scale=1, shuffle=False)

    for n_informative, weights, n_clusters_per_class in [(2, [1], 1),
                                                         (2, [1/3] * 3, 1),
                                                         (2, [1/4] * 4, 1),
                                                         (2, [1/2] * 2, 2),
                                                         (2, [3/4, 1/4], 2),
                                                         (10, [1/3] * 3, 10)
                                                         ]:
        n_classes = len(weights)
        n_clusters = n_classes * n_clusters_per_class
        n_samples = n_clusters * 50

        for hypercube in (False, True):
            X, y = make(n_samples=n_samples, n_classes=n_classes,
                        weights=weights, n_features=n_informative,
                        n_informative=n_informative,
                        n_clusters_per_class=n_clusters_per_class,
                        hypercube=hypercube, random_state=0)

            assert_equal(X.shape, (n_samples, n_informative))
            assert_equal(y.shape, (n_samples,))

            # Cluster by sign, viewed as strings to allow uniquing
            signs = np.sign(X)
            signs = signs.view(dtype='|S{0}'.format(signs.strides[0]))
            unique_signs, cluster_index = np.unique(signs,
                                                    return_inverse=True)

            assert_equal(len(unique_signs), n_clusters,
                         "Wrong number of clusters, or not in distinct "
                         "quadrants")

            # Each class should own exactly n_clusters_per_class clusters.
            clusters_by_class = defaultdict(set)
            for cluster, cls in zip(cluster_index, y):
                clusters_by_class[cls].add(cluster)
            for clusters in clusters_by_class.values():
                assert_equal(len(clusters), n_clusters_per_class,
                             "Wrong number of clusters per class")
            assert_equal(len(clusters_by_class), n_classes,
                         "Wrong number of classes")

            assert_array_almost_equal(np.bincount(y) / len(y) // weights,
                                      [1] * n_classes,
                                      err_msg="Wrong number of samples "
                                      "per class")

            # Ensure on vertices of hypercube
            for cluster in range(len(unique_signs)):
                centroid = X[cluster_index == cluster].mean(axis=0)
                if hypercube:
                    assert_array_almost_equal(np.abs(centroid),
                                              [class_sep] * n_informative,
                                              decimal=0,
                                              err_msg="Clusters are not "
                                              "centered on hypercube "
                                              "vertices")
                else:
                    assert_raises(AssertionError,
                                  assert_array_almost_equal,
                                  np.abs(centroid),
                                  [class_sep] * n_informative,
                                  decimal=0,
                                  err_msg="Clusters should not be cenetered "
                                  "on hypercube vertices")

    # Requesting more classes*clusters than hypercube vertices must fail.
    assert_raises(ValueError, make, n_features=2, n_informative=2, n_classes=5,
                  n_clusters_per_class=1)
    assert_raises(ValueError, make, n_features=2, n_informative=2, n_classes=3,
                  n_clusters_per_class=2)
def test_make_multilabel_classification_return_sequences():
    """Check label-sequence output (return_indicator=False) shapes and bounds."""
    for allow_unlabeled, min_length in zip((True, False), (0, 1)):
        X, Y = make_multilabel_classification(n_samples=100, n_features=20,
                                              n_classes=3, random_state=0,
                                              return_indicator=False,
                                              allow_unlabeled=allow_unlabeled)
        assert_equal(X.shape, (100, 20), "X shape mismatch")
        if not allow_unlabeled:
            assert_equal(max([max(y) for y in Y]), 2)
        assert_equal(min([len(y) for y in Y]), min_length)
        assert_true(max([len(y) for y in Y]) <= 3)
def test_make_multilabel_classification_return_indicator():
    """Check indicator-matrix output shapes, plus return_distributions extras."""
    for allow_unlabeled, min_length in zip((True, False), (0, 1)):
        X, Y = make_multilabel_classification(n_samples=25, n_features=20,
                                              n_classes=3, random_state=0,
                                              allow_unlabeled=allow_unlabeled)
        assert_equal(X.shape, (25, 20), "X shape mismatch")
        assert_equal(Y.shape, (25, 3), "Y shape mismatch")
        assert_true(np.all(np.sum(Y, axis=0) > min_length))

    # Also test return_distributions and return_indicator with True
    X2, Y2, p_c, p_w_c = make_multilabel_classification(
        n_samples=25, n_features=20, n_classes=3, random_state=0,
        allow_unlabeled=allow_unlabeled, return_distributions=True)

    # Same seed -> same data, and the returned distributions are normalized.
    assert_array_equal(X, X2)
    assert_array_equal(Y, Y2)
    assert_equal(p_c.shape, (3,))
    assert_almost_equal(p_c.sum(), 1)
    assert_equal(p_w_c.shape, (20, 3))
    assert_almost_equal(p_w_c.sum(axis=0), [1] * 3)
def test_make_multilabel_classification_return_indicator_sparse():
    """Check that return_indicator='sparse' yields a sparse label matrix."""
    for allow_unlabeled, min_length in zip((True, False), (0, 1)):
        X, Y = make_multilabel_classification(n_samples=25, n_features=20,
                                              n_classes=3, random_state=0,
                                              return_indicator='sparse',
                                              allow_unlabeled=allow_unlabeled)
        assert_equal(X.shape, (25, 20), "X shape mismatch")
        assert_equal(Y.shape, (25, 3), "Y shape mismatch")
        assert_true(sp.issparse(Y))
def test_make_hastie_10_2():
    """Check shapes and binary targets of the Hastie et al. 10.2 dataset."""
    X, y = make_hastie_10_2(n_samples=100, random_state=0)
    assert_equal(X.shape, (100, 10), "X shape mismatch")
    assert_equal(y.shape, (100,), "y shape mismatch")
    assert_equal(np.unique(y).shape, (2,), "Unexpected number of classes")
def test_make_regression():
    """Check shapes, sparsity of coefficients, and the noise model."""
    X, y, c = make_regression(n_samples=100, n_features=10, n_informative=3,
                              effective_rank=5, coef=True, bias=0.0,
                              noise=1.0, random_state=0)

    assert_equal(X.shape, (100, 10), "X shape mismatch")
    assert_equal(y.shape, (100,), "y shape mismatch")
    assert_equal(c.shape, (10,), "coef shape mismatch")
    assert_equal(sum(c != 0.0), 3, "Unexpected number of informative features")

    # Test that y ~= np.dot(X, c) + bias + N(0, 1.0).
    assert_almost_equal(np.std(y - np.dot(X, c)), 1.0, decimal=1)

    # Test with small number of features.
    X, y = make_regression(n_samples=100, n_features=1)  # n_informative=3
    assert_equal(X.shape, (100, 1))
def test_make_regression_multitarget():
    """Check make_regression with multiple targets (n_targets=3)."""
    X, y, c = make_regression(n_samples=100, n_features=10, n_informative=3,
                              n_targets=3, coef=True, noise=1., random_state=0)

    assert_equal(X.shape, (100, 10), "X shape mismatch")
    assert_equal(y.shape, (100, 3), "y shape mismatch")
    assert_equal(c.shape, (10, 3), "coef shape mismatch")
    assert_array_equal(sum(c != 0.0), 3,
                       "Unexpected number of informative features")

    # Test that y ~= np.dot(X, c) + bias + N(0, 1.0)
    assert_almost_equal(np.std(y - np.dot(X, c)), 1.0, decimal=1)
def test_make_blobs():
    """Check blob shapes and that each cluster matches its center and std."""
    cluster_stds = np.array([0.05, 0.2, 0.4])
    cluster_centers = np.array([[0.0, 0.0], [1.0, 1.0], [0.0, 1.0]])
    X, y = make_blobs(random_state=0, n_samples=50, n_features=2,
                      centers=cluster_centers, cluster_std=cluster_stds)

    assert_equal(X.shape, (50, 2), "X shape mismatch")
    assert_equal(y.shape, (50,), "y shape mismatch")
    assert_equal(np.unique(y).shape, (3,), "Unexpected number of blobs")
    for i, (ctr, std) in enumerate(zip(cluster_centers, cluster_stds)):
        assert_almost_equal((X[y == i] - ctr).std(), std, 1, "Unexpected std")
def test_make_friedman1():
    """Check shapes and the closed-form Friedman #1 target (noise=0)."""
    X, y = make_friedman1(n_samples=5, n_features=10, noise=0.0,
                          random_state=0)

    assert_equal(X.shape, (5, 10), "X shape mismatch")
    assert_equal(y.shape, (5,), "y shape mismatch")

    assert_array_almost_equal(y,
                              10 * np.sin(np.pi * X[:, 0] * X[:, 1])
                              + 20 * (X[:, 2] - 0.5) ** 2
                              + 10 * X[:, 3] + 5 * X[:, 4])
def test_make_friedman2():
    """Check shapes and the closed-form Friedman #2 target (noise=0)."""
    X, y = make_friedman2(n_samples=5, noise=0.0, random_state=0)

    assert_equal(X.shape, (5, 4), "X shape mismatch")
    assert_equal(y.shape, (5,), "y shape mismatch")

    assert_array_almost_equal(y,
                              (X[:, 0] ** 2
                               + (X[:, 1] * X[:, 2] - 1
                                  / (X[:, 1] * X[:, 3])) ** 2) ** 0.5)
def test_make_friedman3():
    """Check shapes and the closed-form Friedman #3 target (noise=0)."""
    X, y = make_friedman3(n_samples=5, noise=0.0, random_state=0)

    assert_equal(X.shape, (5, 4), "X shape mismatch")
    assert_equal(y.shape, (5,), "y shape mismatch")

    assert_array_almost_equal(y, np.arctan((X[:, 1] * X[:, 2]
                                            - 1 / (X[:, 1] * X[:, 3]))
                                           / X[:, 0]))
def test_make_low_rank_matrix():
    """Check shape and that the singular values sum to roughly the rank."""
    X = make_low_rank_matrix(n_samples=50, n_features=25, effective_rank=5,
                             tail_strength=0.01, random_state=0)

    assert_equal(X.shape, (50, 25), "X shape mismatch")

    from numpy.linalg import svd
    u, s, v = svd(X)
    assert_less(sum(s) - 5, 0.1, "X rank is not approximately 5")
def test_make_sparse_coded_signal():
    """Check Y = D X factorization shapes, sparsity, and unit-norm atoms."""
    Y, D, X = make_sparse_coded_signal(n_samples=5, n_components=8,
                                       n_features=10, n_nonzero_coefs=3,
                                       random_state=0)
    assert_equal(Y.shape, (10, 5), "Y shape mismatch")
    assert_equal(D.shape, (10, 8), "D shape mismatch")
    assert_equal(X.shape, (8, 5), "X shape mismatch")
    for col in X.T:
        assert_equal(len(np.flatnonzero(col)), 3, 'Non-zero coefs mismatch')
    assert_array_almost_equal(np.dot(D, X), Y)
    # Dictionary atoms have unit L2 norm.
    assert_array_almost_equal(np.sqrt((D ** 2).sum(axis=0)),
                              np.ones(D.shape[1]))
def test_make_sparse_uncorrelated():
    """Smoke-test output shapes of make_sparse_uncorrelated."""
    X, y = make_sparse_uncorrelated(n_samples=5, n_features=10, random_state=0)

    assert_equal(X.shape, (5, 10), "X shape mismatch")
    assert_equal(y.shape, (5,), "y shape mismatch")
def test_make_spd_matrix():
    """Check the generated matrix is symmetric positive-definite."""
    X = make_spd_matrix(n_dim=5, random_state=0)

    assert_equal(X.shape, (5, 5), "X shape mismatch")
    assert_array_almost_equal(X, X.T)

    from numpy.linalg import eig
    eigenvalues, _ = eig(X)
    assert_array_equal(eigenvalues > 0, np.array([True] * 5),
                       "X is not positive-definite")
def test_make_swiss_roll():
    """Check shapes and the parametric swiss-roll coordinates (noise=0)."""
    X, t = make_swiss_roll(n_samples=5, noise=0.0, random_state=0)

    assert_equal(X.shape, (5, 3), "X shape mismatch")
    assert_equal(t.shape, (5,), "t shape mismatch")
    assert_array_almost_equal(X[:, 0], t * np.cos(t))
    assert_array_almost_equal(X[:, 2], t * np.sin(t))
def test_make_s_curve():
    """Check shapes and the parametric S-curve coordinates (noise=0)."""
    X, t = make_s_curve(n_samples=5, noise=0.0, random_state=0)

    assert_equal(X.shape, (5, 3), "X shape mismatch")
    assert_equal(t.shape, (5,), "t shape mismatch")
    assert_array_almost_equal(X[:, 0], np.sin(t))
    assert_array_almost_equal(X[:, 2], np.sign(t) * (np.cos(t) - 1))
def test_make_biclusters():
    """Check bicluster shapes, finiteness, and seeded reproducibility."""
    X, rows, cols = make_biclusters(
        shape=(100, 100), n_clusters=4, shuffle=True, random_state=0)
    assert_equal(X.shape, (100, 100), "X shape mismatch")
    assert_equal(rows.shape, (4, 100), "rows shape mismatch")
    assert_equal(cols.shape, (4, 100,), "columns shape mismatch")
    assert_all_finite(X)
    assert_all_finite(rows)
    assert_all_finite(cols)

    # Same random_state must reproduce the same data.
    X2, _, _ = make_biclusters(shape=(100, 100), n_clusters=4,
                               shuffle=True, random_state=0)
    assert_array_almost_equal(X, X2)
def test_make_checkerboard():
    """Check checkerboard shapes, finiteness, and seeded reproducibility."""
    X, rows, cols = make_checkerboard(
        shape=(100, 100), n_clusters=(20, 5),
        shuffle=True, random_state=0)
    assert_equal(X.shape, (100, 100), "X shape mismatch")
    assert_equal(rows.shape, (100, 100), "rows shape mismatch")
    assert_equal(cols.shape, (100, 100,), "columns shape mismatch")

    X, rows, cols = make_checkerboard(
        shape=(100, 100), n_clusters=2, shuffle=True, random_state=0)
    assert_all_finite(X)
    assert_all_finite(rows)
    assert_all_finite(cols)

    # Same random_state must reproduce the same data.
    X1, _, _ = make_checkerboard(shape=(100, 100), n_clusters=2,
                                 shuffle=True, random_state=0)
    X2, _, _ = make_checkerboard(shape=(100, 100), n_clusters=2,
                                 shuffle=True, random_state=0)
    assert_array_equal(X1, X2)
def test_make_moons():
    """Check each point lies on the unit circle around its moon's center."""
    X, y = make_moons(3, shuffle=False)
    for x, label in zip(X, y):
        center = [0.0, 0.0] if label == 0 else [1.0, 0.5]
        dist_sqr = ((x - center) ** 2).sum()
        assert_almost_equal(dist_sqr, 1.0,
                            err_msg="Point is not on expected unit circle")
| bsd-3-clause |
mxjl620/scikit-learn | sklearn/discriminant_analysis.py | 32 | 27308 | """
Linear Discriminant Analysis and Quadratic Discriminant Analysis
"""
# Authors: Clemens Brunner
# Martin Billinger
# Matthieu Perrot
# Mathieu Blondel
# License: BSD 3-Clause
from __future__ import print_function
import warnings
import numpy as np
from scipy import linalg
from .externals.six import string_types
from .externals.six.moves import xrange
from .base import BaseEstimator, TransformerMixin, ClassifierMixin
from .linear_model.base import LinearClassifierMixin
from .covariance import ledoit_wolf, empirical_covariance, shrunk_covariance
from .utils.multiclass import unique_labels
from .utils import check_array, check_X_y
from .utils.validation import check_is_fitted
from .utils.fixes import bincount
from .preprocessing import StandardScaler
__all__ = ['LinearDiscriminantAnalysis', 'QuadraticDiscriminantAnalysis']
def _cov(X, shrinkage=None):
    """Estimate covariance matrix (using optional shrinkage).
    Parameters
    ----------
    X : array-like, shape (n_samples, n_features)
        Input data.
    shrinkage : string or float, optional
        Shrinkage parameter, possible values:
          - None or 'empirical': no shrinkage (default).
          - 'auto': automatic shrinkage using the Ledoit-Wolf lemma.
          - float between 0 and 1: fixed shrinkage parameter.
    Returns
    -------
    s : array, shape (n_features, n_features)
        Estimated covariance matrix.
    Raises
    ------
    ValueError
        If a string shrinkage is unknown or a numeric one is outside [0, 1].
    TypeError
        If shrinkage is neither a string nor a number.
    """
    shrinkage = "empirical" if shrinkage is None else shrinkage
    if isinstance(shrinkage, string_types):
        if shrinkage == 'auto':
            sc = StandardScaler()  # standardize features
            X = sc.fit_transform(X)
            # Ledoit-Wolf is computed on standardized data; multiply by the
            # per-feature std on both sides to return to the original scale.
            s = sc.std_ * ledoit_wolf(X)[0] * sc.std_  # scale back
        elif shrinkage == 'empirical':
            s = empirical_covariance(X)
        else:
            raise ValueError('unknown shrinkage parameter')
    elif isinstance(shrinkage, (int, float)):
        if shrinkage < 0 or shrinkage > 1:
            raise ValueError('shrinkage parameter must be between 0 and 1')
        s = shrunk_covariance(empirical_covariance(X), shrinkage)
    else:
        # BUG FIX: the message used to claim 'string or int type' even though
        # floats are the documented (and primary) numeric shrinkage type.
        raise TypeError('shrinkage must be a string or a number '
                        'between 0 and 1')
    return s
def _class_means(X, y):
"""Compute class means.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Input data.
y : array-like, shape (n_samples,) or (n_samples, n_targets)
Target values.
Returns
-------
means : array-like, shape (n_features,)
Class means.
"""
means = []
classes = np.unique(y)
for group in classes:
Xg = X[y == group, :]
means.append(Xg.mean(0))
return np.asarray(means)
def _class_cov(X, y, priors=None, shrinkage=None):
    """Compute the (weighted) average within-class covariance matrix.
    Parameters
    ----------
    X : array-like, shape (n_samples, n_features)
        Input data.
    y : array-like, shape (n_samples,) or (n_samples, n_targets)
        Target values.
    priors : array-like, shape (n_classes,)
        Class priors used as averaging weights (unweighted mean if None).
    shrinkage : string or float, optional
        Shrinkage parameter, possible values:
          - None: no shrinkage (default).
          - 'auto': automatic shrinkage using the Ledoit-Wolf lemma.
          - float between 0 and 1: fixed shrinkage parameter.
    Returns
    -------
    cov : array-like, shape (n_features, n_features)
        Prior-weighted average of the per-class covariance matrices.
    """
    classes = np.unique(y)
    covs = []
    for group in classes:
        Xg = X[y == group, :]
        # atleast_2d keeps the single-feature case a proper matrix so that
        # np.average receives consistently shaped inputs.
        covs.append(np.atleast_2d(_cov(Xg, shrinkage)))
    return np.average(covs, axis=0, weights=priors)
class LinearDiscriminantAnalysis(BaseEstimator, LinearClassifierMixin,
TransformerMixin):
"""Linear Discriminant Analysis
A classifier with a linear decision boundary, generated by fitting class
conditional densities to the data and using Bayes' rule.
The model fits a Gaussian density to each class, assuming that all classes
share the same covariance matrix.
The fitted model can also be used to reduce the dimensionality of the input
by projecting it to the most discriminative directions.
Parameters
----------
solver : string, optional
Solver to use, possible values:
- 'svd': Singular value decomposition (default). Does not compute the
covariance matrix, therefore this solver is recommended for
data with a large number of features.
- 'lsqr': Least squares solution, can be combined with shrinkage.
- 'eigen': Eigenvalue decomposition, can be combined with shrinkage.
shrinkage : string or float, optional
Shrinkage parameter, possible values:
- None: no shrinkage (default).
- 'auto': automatic shrinkage using the Ledoit-Wolf lemma.
- float between 0 and 1: fixed shrinkage parameter.
Note that shrinkage works only with 'lsqr' and 'eigen' solvers.
priors : array, optional, shape (n_classes,)
Class priors.
n_components : int, optional
Number of components (< n_classes - 1) for dimensionality reduction.
store_covariance : bool, optional
Additionally compute class covariance matrix (default False).
tol : float, optional
Threshold used for rank estimation in SVD solver.
Attributes
----------
coef_ : array, shape (n_features,) or (n_classes, n_features)
Weight vector(s).
intercept_ : array, shape (n_features,)
Intercept term.
covariance_ : array-like, shape (n_features, n_features)
Covariance matrix (shared by all classes).
explained_variance_ratio_ : array, shape (n_components,)
Percentage of variance explained by each of the selected components.
If ``n_components`` is not set then all components are stored and the
sum of explained variances is equal to 1.0. Only available when eigen
solver is used.
means_ : array-like, shape (n_classes, n_features)
Class means.
priors_ : array-like, shape (n_classes,)
Class priors (sum to 1).
scalings_ : array-like, shape (rank, n_classes - 1)
Scaling of the features in the space spanned by the class centroids.
xbar_ : array-like, shape (n_features,)
Overall mean.
classes_ : array-like, shape (n_classes,)
Unique class labels.
See also
--------
sklearn.discriminant_analysis.QuadraticDiscriminantAnalysis: Quadratic
Discriminant Analysis
Notes
-----
The default solver is 'svd'. It can perform both classification and
transform, and it does not rely on the calculation of the covariance
matrix. This can be an advantage in situations where the number of features
is large. However, the 'svd' solver cannot be used with shrinkage.
The 'lsqr' solver is an efficient algorithm that only works for
classification. It supports shrinkage.
The 'eigen' solver is based on the optimization of the between class
scatter to within class scatter ratio. It can be used for both
classification and transform, and it supports shrinkage. However, the
'eigen' solver needs to compute the covariance matrix, so it might not be
suitable for situations with a high number of features.
Examples
--------
>>> import numpy as np
>>> from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
>>> X = np.array([[-1, -1], [-2, -1], [-3, -2], [1, 1], [2, 1], [3, 2]])
>>> y = np.array([1, 1, 1, 2, 2, 2])
>>> clf = LinearDiscriminantAnalysis()
>>> clf.fit(X, y)
LinearDiscriminantAnalysis(n_components=None, priors=None, shrinkage=None,
solver='svd', store_covariance=False, tol=0.0001)
>>> print(clf.predict([[-0.8, -1]]))
[1]
"""
    def __init__(self, solver='svd', shrinkage=None, priors=None,
                 n_components=None, store_covariance=False, tol=1e-4):
        # Parameters are stored unmodified (scikit-learn convention);
        # validation and any conversion happen in fit().
        self.solver = solver
        self.shrinkage = shrinkage
        self.priors = priors
        self.n_components = n_components
        self.store_covariance = store_covariance  # used only in svd solver
        self.tol = tol  # used only in svd solver
    def _solve_lsqr(self, X, y, shrinkage):
        """Least squares solver.
        The least squares solver computes a straightforward solution of the
        optimal decision rule based directly on the discriminant functions. It
        can only be used for classification (with optional shrinkage), because
        estimation of eigenvectors is not performed. Therefore, dimensionality
        reduction with the transform is not supported.
        Parameters
        ----------
        X : array-like, shape (n_samples, n_features)
            Training data.
        y : array-like, shape (n_samples,) or (n_samples, n_classes)
            Target values.
        shrinkage : string or float, optional
            Shrinkage parameter, possible values:
              - None: no shrinkage (default).
              - 'auto': automatic shrinkage using the Ledoit-Wolf lemma.
              - float between 0 and 1: fixed shrinkage parameter.
        Notes
        -----
        This solver is based on [1]_, section 2.6.2, pp. 39-41.
        References
        ----------
        .. [1] R. O. Duda, P. E. Hart, D. G. Stork. Pattern Classification
           (Second Edition). John Wiley & Sons, Inc., New York, 2001. ISBN
           0-471-05669-3.
        """
        self.means_ = _class_means(X, y)
        self.covariance_ = _class_cov(X, y, self.priors_, shrinkage)
        # Solve Sigma * coef.T = means.T via lstsq rather than inverting the
        # covariance explicitly (numerically safer, handles singular Sigma).
        self.coef_ = linalg.lstsq(self.covariance_, self.means_.T)[0].T
        # Discriminant intercepts: -0.5 * mu_k' Sigma^-1 mu_k + log(prior_k).
        self.intercept_ = (-0.5 * np.diag(np.dot(self.means_, self.coef_.T))
                           + np.log(self.priors_))
    def _solve_eigen(self, X, y, shrinkage):
        """Eigenvalue solver.
        The eigenvalue solver computes the optimal solution of the Rayleigh
        coefficient (basically the ratio of between class scatter to within
        class scatter). This solver supports both classification and
        dimensionality reduction (with optional shrinkage).
        Parameters
        ----------
        X : array-like, shape (n_samples, n_features)
            Training data.
        y : array-like, shape (n_samples,) or (n_samples, n_targets)
            Target values.
        shrinkage : string or float, optional
            Shrinkage parameter, possible values:
              - None: no shrinkage (default).
              - 'auto': automatic shrinkage using the Ledoit-Wolf lemma.
              - float between 0 and 1: fixed shrinkage constant.
        Notes
        -----
        This solver is based on [1]_, section 3.8.3, pp. 121-124.
        References
        ----------
        .. [1] R. O. Duda, P. E. Hart, D. G. Stork. Pattern Classification
           (Second Edition). John Wiley & Sons, Inc., New York, 2001. ISBN
           0-471-05669-3.
        """
        self.means_ = _class_means(X, y)
        self.covariance_ = _class_cov(X, y, self.priors_, shrinkage)
        Sw = self.covariance_  # within scatter
        St = _cov(X, shrinkage)  # total scatter
        Sb = St - Sw  # between scatter
        # Generalized symmetric eigenproblem Sb v = lambda Sw v maximizes the
        # between/within scatter ratio along each eigenvector.
        evals, evecs = linalg.eigh(Sb, Sw)
        self.explained_variance_ratio_ = np.sort(evals / np.sum(evals))[::-1]
        evecs = evecs[:, np.argsort(evals)[::-1]]  # sort eigenvectors
        # evecs /= np.linalg.norm(evecs, axis=0)  # doesn't work with numpy 1.6
        evecs /= np.apply_along_axis(np.linalg.norm, 0, evecs)
        self.scalings_ = evecs
        # Project the class means onto the eigenbasis and back to get the
        # linear decision weights in the original feature space.
        self.coef_ = np.dot(self.means_, evecs).dot(evecs.T)
        self.intercept_ = (-0.5 * np.diag(np.dot(self.means_, self.coef_.T))
                           + np.log(self.priors_))
    def _solve_svd(self, X, y):
        """SVD solver.
        Computes the discriminant directions without ever forming the
        covariance matrix, which keeps it usable when n_features is large.
        Parameters
        ----------
        X : array-like, shape (n_samples, n_features)
            Training data.
        y : array-like, shape (n_samples,) or (n_samples, n_targets)
            Target values.
        """
        n_samples, n_features = X.shape
        n_classes = len(self.classes_)
        self.means_ = _class_means(X, y)
        if self.store_covariance:
            self.covariance_ = _class_cov(X, y, self.priors_)
        Xc = []
        for idx, group in enumerate(self.classes_):
            Xg = X[y == group, :]
            Xc.append(Xg - self.means_[idx])
        # Overall mean, weighted by class priors.
        self.xbar_ = np.dot(self.priors_, self.means_)
        Xc = np.concatenate(Xc, axis=0)
        # 1) within (univariate) scaling by within-class std-dev
        std = Xc.std(axis=0)
        # avoid division by zero in normalization
        std[std == 0] = 1.
        fac = 1. / (n_samples - n_classes)
        # 2) Within variance scaling
        X = np.sqrt(fac) * (Xc / std)
        # SVD of centered (within)scaled data
        U, S, V = linalg.svd(X, full_matrices=False)
        # Singular values below tol are treated as numerically zero.
        rank = np.sum(S > self.tol)
        if rank < n_features:
            warnings.warn("Variables are collinear.")
        # Scaling of within covariance is: V' 1/S
        scalings = (V[:rank] / std).T / S[:rank]
        # 3) Between variance scaling
        # Scale weighted centers
        X = np.dot(((np.sqrt((n_samples * self.priors_) * fac)) *
                    (self.means_ - self.xbar_).T).T, scalings)
        # Centers are living in a space with n_classes-1 dim (maximum)
        # Use SVD to find projection in the space spanned by the
        # (n_classes) centers
        _, S, V = linalg.svd(X, full_matrices=0)
        # Relative tolerance here (tol * S[0]), unlike the absolute one above.
        rank = np.sum(S > self.tol * S[0])
        self.scalings_ = np.dot(scalings, V.T[:, :rank])
        coef = np.dot(self.means_ - self.xbar_, self.scalings_)
        self.intercept_ = (-0.5 * np.sum(coef ** 2, axis=1)
                           + np.log(self.priors_))
        self.coef_ = np.dot(coef, self.scalings_.T)
        # Fold the centering on xbar_ into the intercept.
        self.intercept_ -= np.dot(self.xbar_, self.coef_.T)
    def fit(self, X, y, store_covariance=None, tol=None):
        """Fit LinearDiscriminantAnalysis model according to the given
        training data and parameters.
        Parameters
        ----------
        X : array-like, shape (n_samples, n_features)
            Training data.
        y : array, shape (n_samples,)
            Target values.
        store_covariance : bool, optional
            Deprecated; set via __init__ / set_params instead.
        tol : float, optional
            Deprecated; set via __init__ / set_params instead.
        Returns
        -------
        self : object
            The fitted estimator.
        """
        # Deprecated fit-time parameters still take effect (overriding the
        # constructor values) but emit a DeprecationWarning.
        if store_covariance:
            warnings.warn("The parameter 'store_covariance' is deprecated as "
                          "of version 0.17 and will be removed in 0.19. The "
                          "parameter is no longer necessary because the value "
                          "is set via the estimator initialisation or "
                          "set_params method.", DeprecationWarning)
            self.store_covariance = store_covariance
        if tol:
            warnings.warn("The parameter 'tol' is deprecated as of version "
                          "0.17 and will be removed in 0.19. The parameter is "
                          "no longer necessary because the value is set via "
                          "the estimator initialisation or set_params method.",
                          DeprecationWarning)
            self.tol = tol
        X, y = check_X_y(X, y, ensure_min_samples=2)
        self.classes_ = unique_labels(y)
        if self.priors is None:  # estimate priors from sample
            _, y_t = np.unique(y, return_inverse=True)  # non-negative ints
            self.priors_ = bincount(y_t) / float(len(y))
        else:
            self.priors_ = np.asarray(self.priors)
        if (self.priors_ < 0).any():
            raise ValueError("priors must be non-negative")
        if self.priors_.sum() != 1:
            warnings.warn("The priors do not sum to 1. Renormalizing",
                          UserWarning)
            self.priors_ = self.priors_ / self.priors_.sum()
        # Dispatch to the requested solver; only svd forbids shrinkage.
        if self.solver == 'svd':
            if self.shrinkage is not None:
                raise NotImplementedError('shrinkage not supported')
            self._solve_svd(X, y)
        elif self.solver == 'lsqr':
            self._solve_lsqr(X, y, shrinkage=self.shrinkage)
        elif self.solver == 'eigen':
            self._solve_eigen(X, y, shrinkage=self.shrinkage)
        else:
            raise ValueError("unknown solver {} (valid solvers are 'svd', "
                             "'lsqr', and 'eigen').".format(self.solver))
        if self.classes_.size == 2:  # treat binary case as a special case
            # Collapse the two per-class discriminants into a single
            # difference vector, as for other binary linear classifiers.
            self.coef_ = np.array(self.coef_[1, :] - self.coef_[0, :], ndmin=2)
            self.intercept_ = np.array(self.intercept_[1] - self.intercept_[0],
                                       ndmin=1)
        return self
    def transform(self, X):
        """Project data to maximize class separation.
        Parameters
        ----------
        X : array-like, shape (n_samples, n_features)
            Input data.
        Returns
        -------
        X_new : array, shape (n_samples, n_components)
            Transformed data.
        Raises
        ------
        NotImplementedError
            If the estimator was configured with the 'lsqr' solver, which
            does not compute projection directions.
        """
        if self.solver == 'lsqr':
            raise NotImplementedError("transform not implemented for 'lsqr' "
                                      "solver (use 'svd' or 'eigen').")
        # Either attribute proves fit() ran: xbar_ is set by the svd solver,
        # scalings_ by both svd and eigen (hence all_or_any=any).
        check_is_fitted(self, ['xbar_', 'scalings_'], all_or_any=any)
        X = check_array(X)
        if self.solver == 'svd':
            # svd solver centers data on the prior-weighted overall mean.
            X_new = np.dot(X - self.xbar_, self.scalings_)
        elif self.solver == 'eigen':
            X_new = np.dot(X, self.scalings_)
        # Default: keep all available discriminant directions.
        n_components = X.shape[1] if self.n_components is None \
            else self.n_components
        return X_new[:, :n_components]
    def predict_proba(self, X):
        """Estimate probability.
        Parameters
        ----------
        X : array-like, shape (n_samples, n_features)
            Input data.
        Returns
        -------
        C : array, shape (n_samples, n_classes)
            Estimated probabilities.
        """
        prob = self.decision_function(X)
        # In-place logistic sigmoid: prob = 1 / (1 + exp(-decision)).
        prob *= -1
        np.exp(prob, prob)
        prob += 1
        np.reciprocal(prob, prob)
        if len(self.classes_) == 2:  # binary case
            return np.column_stack([1 - prob, prob])
        else:
            # OvR normalization, like LibLinear's predict_probability
            prob /= prob.sum(axis=1).reshape((prob.shape[0], -1))
            return prob
    def predict_log_proba(self, X):
        """Estimate log probability.
        Parameters
        ----------
        X : array-like, shape (n_samples, n_features)
            Input data.
        Returns
        -------
        C : array, shape (n_samples, n_classes)
            Estimated log probabilities.
        """
        # Simple wrapper; zero probabilities will map to -inf via np.log.
        return np.log(self.predict_proba(X))
class QuadraticDiscriminantAnalysis(BaseEstimator, ClassifierMixin):
"""
Quadratic Discriminant Analysis
A classifier with a quadratic decision boundary, generated
by fitting class conditional densities to the data
and using Bayes' rule.
The model fits a Gaussian density to each class.
Parameters
----------
priors : array, optional, shape = [n_classes]
Priors on classes
reg_param : float, optional
Regularizes the covariance estimate as
``(1-reg_param)*Sigma + reg_param*np.eye(n_features)``
Attributes
----------
covariances_ : list of array-like, shape = [n_features, n_features]
Covariance matrices of each class.
means_ : array-like, shape = [n_classes, n_features]
Class means.
priors_ : array-like, shape = [n_classes]
Class priors (sum to 1).
rotations_ : list of arrays
For each class k an array of shape [n_features, n_k], with
``n_k = min(n_features, number of elements in class k)``
It is the rotation of the Gaussian distribution, i.e. its
principal axis.
scalings_ : list of arrays
For each class k an array of shape [n_k]. It contains the scaling
of the Gaussian distributions along its principal axes, i.e. the
variance in the rotated coordinate system.
store_covariances : boolean
If True the covariance matrices are computed and stored in the
`self.covariances_` attribute.
tol : float, optional, default 1.0e-4
Threshold used for rank estimation.
Examples
--------
>>> from sklearn.discriminant_analysis import QuadraticDiscriminantAnalysis
>>> import numpy as np
>>> X = np.array([[-1, -1], [-2, -1], [-3, -2], [1, 1], [2, 1], [3, 2]])
>>> y = np.array([1, 1, 1, 2, 2, 2])
>>> clf = QuadraticDiscriminantAnalysis()
>>> clf.fit(X, y)
... # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
QuadraticDiscriminantAnalysis(priors=None, reg_param=0.0,
store_covariances=False, tol=0.0001)
>>> print(clf.predict([[-0.8, -1]]))
[1]
See also
--------
sklearn.discriminant_analysis.LinearDiscriminantAnalysis: Linear
Discriminant Analysis
"""
    def __init__(self, priors=None, reg_param=0., store_covariances=False,
                 tol=1.0e-4):
        # NOTE(review): converting priors at __init__ time deviates from the
        # usual scikit-learn "validate in fit" convention; kept as-is because
        # fit() reads self.priors directly.
        self.priors = np.asarray(priors) if priors is not None else None
        self.reg_param = reg_param
        self.store_covariances = store_covariances
        self.tol = tol
    def fit(self, X, y, store_covariances=None, tol=None):
        """Fit the model according to the given training data and parameters.
        Parameters
        ----------
        X : array-like, shape = [n_samples, n_features]
            Training vector, where n_samples in the number of samples and
            n_features is the number of features.
        y : array, shape = [n_samples]
            Target values (integers)
        store_covariances : bool, optional
            Deprecated; set via __init__ / set_params instead.
        tol : float, optional
            Deprecated; set via __init__ / set_params instead.
        Returns
        -------
        self : object
            The fitted estimator.
        """
        # Deprecated fit-time parameters still take effect (overriding the
        # constructor values) but emit a DeprecationWarning.
        if store_covariances:
            warnings.warn("The parameter 'store_covariances' is deprecated as "
                          "of version 0.17 and will be removed in 0.19. The "
                          "parameter is no longer necessary because the value "
                          "is set via the estimator initialisation or "
                          "set_params method.", DeprecationWarning)
            self.store_covariances = store_covariances
        if tol:
            warnings.warn("The parameter 'tol' is deprecated as of version "
                          "0.17 and will be removed in 0.19. The parameter is "
                          "no longer necessary because the value is set via "
                          "the estimator initialisation or set_params method.",
                          DeprecationWarning)
            self.tol = tol
        X, y = check_X_y(X, y)
        # y is re-encoded as indices into classes_ from here on.
        self.classes_, y = np.unique(y, return_inverse=True)
        n_samples, n_features = X.shape
        n_classes = len(self.classes_)
        if n_classes < 2:
            raise ValueError('y has less than 2 classes')
        if self.priors is None:
            self.priors_ = bincount(y) / float(n_samples)
        else:
            self.priors_ = self.priors
        cov = None
        if self.store_covariances:
            cov = []
        means = []
        scalings = []
        rotations = []
        # Per-class Gaussian: estimate mean, then the covariance's
        # eigenstructure via SVD of the centered class samples.
        for ind in xrange(n_classes):
            Xg = X[y == ind, :]
            meang = Xg.mean(0)
            means.append(meang)
            if len(Xg) == 1:
                raise ValueError('y has only 1 sample in class %s, covariance '
                                 'is ill defined.' % str(self.classes_[ind]))
            Xgc = Xg - meang
            # Xgc = U * S * V.T
            U, S, Vt = np.linalg.svd(Xgc, full_matrices=False)
            rank = np.sum(S > self.tol)
            if rank < n_features:
                warnings.warn("Variables are collinear")
            # Eigenvalues of the class covariance, with reg_param shrinking
            # them towards the identity.
            S2 = (S ** 2) / (len(Xg) - 1)
            S2 = ((1 - self.reg_param) * S2) + self.reg_param
            if self.store_covariances:
                # cov = V * (S^2 / (n-1)) * V.T
                cov.append(np.dot(S2 * Vt.T, Vt))
            scalings.append(S2)
            rotations.append(Vt.T)
        if self.store_covariances:
            self.covariances_ = cov
        self.means_ = np.asarray(means)
        self.scalings_ = scalings
        self.rotations_ = rotations
        return self
    def _decision_function(self, X):
        """Return per-class log-posterior values (up to a shared constant)."""
        check_is_fitted(self, 'classes_')
        X = check_array(X)
        norm2 = []
        for i in range(len(self.classes_)):
            R = self.rotations_[i]
            S = self.scalings_[i]
            Xm = X - self.means_[i]
            # Rotate and whiten: the squared Mahalanobis distance to the
            # class mean becomes a plain squared Euclidean norm.
            X2 = np.dot(Xm, R * (S ** (-0.5)))
            norm2.append(np.sum(X2 ** 2, 1))
        norm2 = np.array(norm2).T  # shape = [len(X), n_classes]
        # log|Sigma_k| computed as the sum of log eigenvalues.
        u = np.asarray([np.sum(np.log(s)) for s in self.scalings_])
        return (-0.5 * (norm2 + u) + np.log(self.priors_))
    def decision_function(self, X):
        """Apply decision function to an array of samples.
        Parameters
        ----------
        X : array-like, shape = [n_samples, n_features]
            Array of samples (test vectors).
        Returns
        -------
        C : array, shape = [n_samples, n_classes] or [n_samples,]
            Decision function values related to each class, per sample.
            In the two-class case, the shape is [n_samples,], giving the
            log likelihood ratio of the positive class.
        """
        dec_func = self._decision_function(X)
        # handle special case of two classes
        if len(self.classes_) == 2:
            return dec_func[:, 1] - dec_func[:, 0]
        return dec_func
    def predict(self, X):
        """Perform classification on an array of test vectors X.
        The predicted class C for each sample in X is returned.
        Parameters
        ----------
        X : array-like, shape = [n_samples, n_features]
        Returns
        -------
        C : array, shape = [n_samples]
        """
        d = self._decision_function(X)
        # Map the argmax over class columns back to the original labels.
        y_pred = self.classes_.take(d.argmax(1))
        return y_pred
    def predict_proba(self, X):
        """Return posterior probabilities of classification.
        Parameters
        ----------
        X : array-like, shape = [n_samples, n_features]
            Array of samples/test vectors.
        Returns
        -------
        C : array, shape = [n_samples, n_classes]
            Posterior probabilities of classification per class.
        """
        values = self._decision_function(X)
        # compute the likelihood of the underlying gaussian models
        # up to a multiplicative constant.
        # Subtracting the row max before exp avoids overflow (softmax trick).
        likelihood = np.exp(values - values.max(axis=1)[:, np.newaxis])
        # compute posterior probabilities
        return likelihood / likelihood.sum(axis=1)[:, np.newaxis]
    def predict_log_proba(self, X):
        """Return posterior probabilities of classification.
        Parameters
        ----------
        X : array-like, shape = [n_samples, n_features]
            Array of samples/test vectors.
        Returns
        -------
        C : array, shape = [n_samples, n_classes]
            Posterior log-probabilities of classification per class.
        """
        # XXX : can do better to avoid precision overflows
        # (log of an already-normalized softmax loses accuracy for tiny p).
        probas_ = self.predict_proba(X)
        return np.log(probas_)
| bsd-3-clause |
ilyes14/scikit-learn | examples/svm/plot_separating_hyperplane_unbalanced.py | 329 | 1850 | """
=================================================
SVM: Separating hyperplane for unbalanced classes
=================================================
Find the optimal separating hyperplane using an SVC for classes that
are unbalanced.
We first find the separating plane with a plain SVC and then plot
(dashed) the separating hyperplane with automatically correction for
unbalanced classes.
.. currentmodule:: sklearn.linear_model
.. note::
This example will also work by replacing ``SVC(kernel="linear")``
with ``SGDClassifier(loss="hinge")``. Setting the ``loss`` parameter
of the :class:`SGDClassifier` equal to ``hinge`` will yield behaviour
such as that of a SVC with a linear kernel.
For example try instead of the ``SVC``::
clf = SGDClassifier(n_iter=100, alpha=0.01)
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn import svm
#from sklearn.linear_model import SGDClassifier
# create two unbalanced, linearly separable blobs:
# 1000 points around the origin vs 100 points around (2, 2)
rng = np.random.RandomState(0)
n_samples_1 = 1000
n_samples_2 = 100
X = np.r_[1.5 * rng.randn(n_samples_1, 2),
          0.5 * rng.randn(n_samples_2, 2) + [2, 2]]
y = [0] * (n_samples_1) + [1] * (n_samples_2)
# fit the model and get the separating hyperplane
clf = svm.SVC(kernel='linear', C=1.0)
clf.fit(X, y)
w = clf.coef_[0]
# slope/intercept of the line w . x + b = 0 solved for the second coordinate
a = -w[0] / w[1]
xx = np.linspace(-5, 5)
yy = a * xx - clf.intercept_[0] / w[1]
# get the separating hyperplane using weighted classes
# (class 1 errors weighted 10x, shifting the boundary toward the majority)
wclf = svm.SVC(kernel='linear', class_weight={1: 10})
wclf.fit(X, y)
ww = wclf.coef_[0]
wa = -ww[0] / ww[1]
wyy = wa * xx - wclf.intercept_[0] / ww[1]
# plot separating hyperplanes and samples
h0 = plt.plot(xx, yy, 'k-', label='no weights')
h1 = plt.plot(xx, wyy, 'k--', label='with weights')
plt.scatter(X[:, 0], X[:, 1], c=y, cmap=plt.cm.Paired)
plt.legend()
plt.axis('tight')
plt.show()
| bsd-3-clause |
mhue/scikit-learn | sklearn/ensemble/__init__.py | 217 | 1307 | """
The :mod:`sklearn.ensemble` module includes ensemble-based methods for
classification and regression.
"""
from .base import BaseEnsemble
from .forest import RandomForestClassifier
from .forest import RandomForestRegressor
from .forest import RandomTreesEmbedding
from .forest import ExtraTreesClassifier
from .forest import ExtraTreesRegressor
from .bagging import BaggingClassifier
from .bagging import BaggingRegressor
from .weight_boosting import AdaBoostClassifier
from .weight_boosting import AdaBoostRegressor
from .gradient_boosting import GradientBoostingClassifier
from .gradient_boosting import GradientBoostingRegressor
from .voting_classifier import VotingClassifier
from . import bagging
from . import forest
from . import weight_boosting
from . import gradient_boosting
from . import partial_dependence
__all__ = ["BaseEnsemble",
"RandomForestClassifier", "RandomForestRegressor",
"RandomTreesEmbedding", "ExtraTreesClassifier",
"ExtraTreesRegressor", "BaggingClassifier",
"BaggingRegressor", "GradientBoostingClassifier",
"GradientBoostingRegressor", "AdaBoostClassifier",
"AdaBoostRegressor", "VotingClassifier",
"bagging", "forest", "gradient_boosting",
"partial_dependence", "weight_boosting"]
| bsd-3-clause |
cyber-meow/Robotic_state_representation_learning | inter/interaction.py | 1 | 2565 |
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.animation as animation
from inter.interfaces import Environment, Bot
class Interaction(object):
    """Run a bot against an environment and record the exchange.
    This is rather for test purposes once the robot has learned something
    from its experience.
    NOTE(review): the original docstring says "For the main interaction
    class please look at Interaction", which is self-referential --
    presumably it should point to a class in another module; verify.
    """
    def __init__(self, env, bot):
        # NOTE(review): asserts are stripped under `python -O`; consider
        # raising TypeError for input validation instead.
        assert isinstance(env, Environment)
        assert isinstance(bot, Bot)
        # Order matters: the bot setter below reads self.env.actions,
        # so env must be assigned first.
        self.env = env
        self.bot = bot
    @property
    def bot(self):
        return self._bot
    @bot.setter
    def bot(self, bot):
        # Binding a bot wires it to the environment's action set and
        # resets the pending reward.
        self._bot = bot
        self._bot.actions = self.env.actions
        self._reward = 0
    @property
    def env(self):
        return self._env
    @env.setter
    def env(self, env):
        # Binding an environment grabs its initial observation and clears
        # the recorded state history.
        self._env = env
        self._observation = env.observation()
        self._env_state_his = []
    @property
    def env_state_his(self):
        # Environment states recorded before each interaction step.
        return self._env_state_his
    def interact_no_learn(self):
        """Perform one env/bot step; return (observation, action, reward)."""
        self._env_state_his.append(self.env.state)
        action = self.bot.decision(self._observation)
        # The experience pairs the *previous* observation/reward with the
        # action chosen from it.
        exp = self._observation, action, self._reward
        self._observation, self._reward = self.env.act(action)
        return exp
    def interact(self):
        """One step plus a learning update on the resulting experience."""
        exp = self.interact_no_learn()
        self.bot.learn_from_experience(exp)
    def interact_serie(self, iter_num):
        """Run `iter_num` learning interactions back to back."""
        for _ in range(iter_num):
            self.interact()
    def observation_serie(self, print_reward=True, path=None):
        """Animate 200 no-learning steps; optionally save to `path`."""
        fig, ax = plt.subplots()
        ax.axis('off')
        obs = self.env.show_observation(self._observation)
        im = plt.imshow(obs, interpolation='none', origin='lower')
        def animate(*args):
            # Advance the interaction and refresh the displayed frame.
            self.interact_no_learn()
            if print_reward:
                print("reward: {}".format(self._reward))
            im.set_array(self.env.show_observation(self._observation))
            return im,
        ani = animation.FuncAnimation(
            fig, animate, 200, interval=120, blit=True)
        if path is not None:
            ani.save(path, writer='ffmpeg')
        plt.show()
        return ani
    def compute_avg_reward(self, num_episode, num_step):
        """Return (mean, std) of total reward over fresh episodes.
        Each episode re-initializes the environment in place and runs
        `num_step` no-learning interactions.
        """
        rewards = []
        for _ in range(num_episode):
            # Re-running __init__ resets the environment without rebinding
            # self.env (so observation/history are NOT reset here).
            self.env.__init__()
            reward_sum = 0
            for _ in range(num_step):
                self.interact_no_learn()
                reward_sum += self._reward
            rewards.append(reward_sum)
        return np.mean(rewards), np.std(rewards)
| mit |
f3r/scikit-learn | sklearn/metrics/cluster/tests/test_supervised.py | 41 | 8901 | import numpy as np
from sklearn.metrics.cluster import adjusted_rand_score
from sklearn.metrics.cluster import homogeneity_score
from sklearn.metrics.cluster import completeness_score
from sklearn.metrics.cluster import v_measure_score
from sklearn.metrics.cluster import homogeneity_completeness_v_measure
from sklearn.metrics.cluster import adjusted_mutual_info_score
from sklearn.metrics.cluster import normalized_mutual_info_score
from sklearn.metrics.cluster import mutual_info_score
from sklearn.metrics.cluster import expected_mutual_information
from sklearn.metrics.cluster import contingency_matrix
from sklearn.metrics.cluster import entropy
from sklearn.utils.testing import assert_raise_message
from nose.tools import assert_almost_equal
from nose.tools import assert_equal
from numpy.testing import assert_array_almost_equal
score_funcs = [
adjusted_rand_score,
homogeneity_score,
completeness_score,
v_measure_score,
adjusted_mutual_info_score,
normalized_mutual_info_score,
]
def test_error_messages_on_wrong_input():
    # Every supervised metric must reject malformed label inputs with a
    # specific, informative ValueError.
    for score_func in score_funcs:
        # Mismatched lengths.
        expected = ('labels_true and labels_pred must have same size,'
                    ' got 2 and 3')
        assert_raise_message(ValueError, expected, score_func,
                             [0, 1], [1, 1, 1])
        # 2-D labels_true is rejected.
        expected = "labels_true must be 1D: shape is (2"
        assert_raise_message(ValueError, expected, score_func,
                             [[0, 1], [1, 0]], [1, 1, 1])
        # 2-D labels_pred is rejected.
        expected = "labels_pred must be 1D: shape is (2"
        assert_raise_message(ValueError, expected, score_func,
                             [0, 1, 0], [[1, 1], [0, 0]])
def test_perfect_matches():
    """All metrics must score 1.0 for labelings that agree up to renaming."""
    perfect_pairs = [
        ([], []),
        ([0], [1]),
        ([0, 0, 0], [0, 0, 0]),
        ([0, 1, 0], [42, 7, 42]),
        ([0., 1., 0.], [42., 7., 42.]),
        ([0., 1., 2.], [42., 7., 2.]),
        ([0, 1, 2], [42, 7, 2]),
    ]
    for score_func in score_funcs:
        for labels_true, labels_pred in perfect_pairs:
            assert_equal(score_func(labels_true, labels_pred), 1.0)
def test_homogeneous_but_not_complete_labeling():
    """Homogeneous (pure clusters) but incomplete (one class is split)."""
    homogeneity, completeness, v_measure = homogeneity_completeness_v_measure(
        [0, 0, 0, 1, 1, 1],
        [0, 0, 0, 1, 2, 2])
    assert_almost_equal(homogeneity, 1.00, 2)
    assert_almost_equal(completeness, 0.69, 2)
    assert_almost_equal(v_measure, 0.81, 2)
def test_complete_but_not_homogeneous_labeling():
    """Complete (classes never split) but impure (clusters mix classes)."""
    homogeneity, completeness, v_measure = homogeneity_completeness_v_measure(
        [0, 0, 1, 1, 2, 2],
        [0, 0, 1, 1, 1, 1])
    assert_almost_equal(homogeneity, 0.58, 2)
    assert_almost_equal(completeness, 1.00, 2)
    assert_almost_equal(v_measure, 0.73, 2)
def test_not_complete_and_not_homogeneous_labeling():
    """Neither complete nor homogeneous, but not a terrible clustering."""
    homogeneity, completeness, v_measure = homogeneity_completeness_v_measure(
        [0, 0, 0, 1, 1, 1],
        [0, 1, 0, 1, 2, 2])
    assert_almost_equal(homogeneity, 0.67, 2)
    assert_almost_equal(completeness, 0.42, 2)
    assert_almost_equal(v_measure, 0.52, 2)
# NOTE(review): function name has a typo ('consicutive' -> 'consecutive');
# kept as-is since test discovery references the current name.
def test_non_consicutive_labels():
    # regression tests for labels with gaps
    # Scores must be invariant to gaps in the *true* label values...
    h, c, v = homogeneity_completeness_v_measure(
        [0, 0, 0, 2, 2, 2],
        [0, 1, 0, 1, 2, 2])
    assert_almost_equal(h, 0.67, 2)
    assert_almost_equal(c, 0.42, 2)
    assert_almost_equal(v, 0.52, 2)
    # ...and to gaps in the *predicted* label values.
    h, c, v = homogeneity_completeness_v_measure(
        [0, 0, 0, 1, 1, 1],
        [0, 4, 0, 4, 2, 2])
    assert_almost_equal(h, 0.67, 2)
    assert_almost_equal(c, 0.42, 2)
    assert_almost_equal(v, 0.52, 2)
    # ARI must agree across gapped and contiguous encodings too.
    ari_1 = adjusted_rand_score([0, 0, 0, 1, 1, 1], [0, 1, 0, 1, 2, 2])
    ari_2 = adjusted_rand_score([0, 0, 0, 1, 1, 1], [0, 4, 0, 4, 2, 2])
    assert_almost_equal(ari_1, 0.24, 2)
    assert_almost_equal(ari_2, 0.24, 2)
def uniform_labelings_scores(score_func, n_samples, k_range, n_runs=10,
                             seed=42):
    """Score pairs of random uniform labelings.
    Parameters
    ----------
    score_func : callable
        Metric taking (labels_a, labels_b) and returning a float.
    n_samples : int
        Length of each random labeling.
    k_range : sequence of int
        Numbers of distinct labels to try.
    n_runs : int, optional
        Independent repetitions per value of k.
    seed : int, optional
        Seed for the reproducible random stream.
    Returns
    -------
    scores : array, shape (len(k_range), n_runs)
        score_func evaluated on each random pair.
    """
    rng = np.random.RandomState(seed)
    scores = np.zeros((len(k_range), n_runs))
    for i, k in enumerate(k_range):
        for j in range(n_runs):
            # FIX: RandomState.random_integers is deprecated (numpy >= 1.11).
            # randint's upper bound is exclusive, so randint(0, k) draws the
            # same values from the same stream as random_integers(0, k - 1).
            labels_a = rng.randint(low=0, high=k, size=n_samples)
            labels_b = rng.randint(low=0, high=k, size=n_samples)
            scores[i, j] = score_func(labels_a, labels_b)
    return scores
def test_adjustment_for_chance():
    # Check that adjusted scores are almost zero on random labels
    # (that is precisely what "adjusted for chance" means for ARI).
    n_clusters_range = [2, 10, 50, 90]
    n_samples = 100
    n_runs = 10
    scores = uniform_labelings_scores(
        adjusted_rand_score, n_samples, n_clusters_range, n_runs)
    # Worst absolute score per k stays tiny for every cluster count.
    max_abs_scores = np.abs(scores).max(axis=1)
    assert_array_almost_equal(max_abs_scores, [0.02, 0.03, 0.03, 0.02], 2)
def test_adjusted_mutual_info_score():
    # Compute the Adjusted Mutual Information and test against known values
    labels_a = np.array([1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 3, 3, 3, 3, 3])
    labels_b = np.array([1, 1, 1, 1, 2, 1, 2, 2, 2, 2, 3, 1, 3, 3, 3, 2, 2])
    # Mutual information
    mi = mutual_info_score(labels_a, labels_b)
    assert_almost_equal(mi, 0.41022, 5)
    # Expected mutual information (the chance baseline AMI subtracts).
    C = contingency_matrix(labels_a, labels_b)
    n_samples = np.sum(C)
    emi = expected_mutual_information(C, n_samples)
    assert_almost_equal(emi, 0.15042, 5)
    # Adjusted mutual information
    ami = adjusted_mutual_info_score(labels_a, labels_b)
    assert_almost_equal(ami, 0.27502, 5)
    # Exact match up to label renaming scores 1.0.
    ami = adjusted_mutual_info_score([1, 1, 2, 2], [2, 2, 3, 3])
    assert_equal(ami, 1.0)
    # Test with a very large array
    a110 = np.array([list(labels_a) * 110]).flatten()
    b110 = np.array([list(labels_b) * 110]).flatten()
    ami = adjusted_mutual_info_score(a110, b110)
    # This is not accurate to more than 2 places
    assert_almost_equal(ami, 0.37, 2)
def test_entropy():
    """Entropy of a labeling, including the degenerate empty case."""
    assert_almost_equal(entropy([0, 0, 42.]), 0.6365141, 5)
    # By convention this implementation returns 1 for an empty labeling.
    assert_almost_equal(entropy([]), 1)
def test_contingency_matrix():
    """contingency_matrix must agree with a plain 2-d histogram."""
    labels_a = np.array([1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 3, 3, 3, 3, 3])
    labels_b = np.array([1, 1, 1, 1, 2, 1, 2, 2, 2, 2, 3, 1, 3, 3, 3, 2, 2])
    bins = np.arange(1, 5)
    expected = np.histogram2d(labels_a, labels_b, bins=(bins, bins))[0]
    assert_array_almost_equal(contingency_matrix(labels_a, labels_b),
                              expected)
    # The eps smoothing term is simply added to every cell.
    assert_array_almost_equal(
        contingency_matrix(labels_a, labels_b, eps=.1), expected + .1)
def test_exactly_zero_info_score():
    """Scores must be numerically exactly 0.0 when information is zero.

    A constant labeling and an all-singletons labeling share exactly zero
    information, so every adjusted/normalized score must return 0.0 without
    floating point noise.
    """
    # ``np.int`` was deprecated and removed from NumPy; the builtin ``int``
    # is the documented drop-in replacement.
    for i in np.logspace(1, 4, 4).astype(int):
        labels_a = np.ones(i, dtype=int)
        labels_b = np.arange(i, dtype=int)
        assert_equal(normalized_mutual_info_score(labels_a, labels_b,
                                                  max_n_classes=1e4), 0.0)
        assert_equal(v_measure_score(labels_a, labels_b,
                                     max_n_classes=1e4), 0.0)
        # NOTE: the original repeated the normalized_mutual_info_score
        # assertion twice; the duplicate has been removed.
        assert_equal(adjusted_mutual_info_score(labels_a, labels_b,
                                                max_n_classes=1e4), 0.0)
def test_v_measure_and_mutual_information(seed=36):
    """V-measure equals 2 * MI / (H(a) + H(b)) on random labelings."""
    # ``np.int`` and ``RandomState.random_integers`` were removed from
    # NumPy; ``int`` and ``randint`` are the replacements.  ``randint`` has
    # an exclusive upper bound, so 11 reproduces the original [0, 10] range.
    for i in np.logspace(1, 4, 4).astype(int):
        random_state = np.random.RandomState(seed)
        labels_a = random_state.randint(0, 11, i)
        labels_b = random_state.randint(0, 11, i)
        assert_almost_equal(v_measure_score(labels_a, labels_b),
                            2.0 * mutual_info_score(labels_a, labels_b) /
                            (entropy(labels_a) + entropy(labels_b)), 0)
def test_max_n_classes():
    """Every metric must reject inputs exceeding max_n_classes."""
    rng = np.random.RandomState(seed=0)
    # Nearly all-distinct float labels -> far more than 50 classes.
    labels_true = rng.rand(53)
    labels_pred = rng.rand(53)
    labels_zero = np.zeros(53)
    labels_true[:2] = 0
    labels_zero[:3] = 1
    labels_pred[:2] = 0

    too_many_classes = ("Too many classes for a clustering metric. If you "
                        "want to increase the limit, pass parameter "
                        "max_n_classes to the scoring function")
    too_many_clusters = ("Too many clusters for a clustering metric. If you "
                         "want to increase the limit, pass parameter "
                         "max_n_classes to the scoring function")

    for score_func in score_funcs:
        assert_raise_message(ValueError, too_many_classes, score_func,
                             labels_true, labels_pred,
                             max_n_classes=50)
        assert_raise_message(ValueError, too_many_clusters, score_func,
                             labels_zero, labels_pred,
                             max_n_classes=50)
| bsd-3-clause |
TiedNets/TiedNets | plot_result_bars.py | 1 | 4139 | __author__ = 'Agostino Sturaro'
import os
import csv
import numpy as np
import matplotlib.pyplot as plt
# BEGIN user defined variables
input_fpath = os.path.normpath('../Simulations/MN_nets/MN_rnd_atk_reasons.tsv')
output_fpath = os.path.normpath('../Simulations/MN_nets/MN_rnd_atk_reasons.pdf')
# we need a single table row as input, identifying a set of experiments on the same instance type, done with the same
# value of the independent variable
instance_type = 'HINT'
indep_var_val = str(40)
val_key_name_suff = '_avg'
err_key_name_suff = '_std'
# END user defined variables

# adjustments to ensure relative paths will work
if not os.path.isabs(input_fpath) or not os.path.isabs(output_fpath):
    this_dir = os.path.normpath(os.path.dirname(__file__))
    os.chdir(this_dir)

# ensure that the specified input file exists
if os.path.isfile(input_fpath) is not True:
    raise ValueError('Invalid value for parameter input_fpath')

# read values from file, by column; keep only the row matching the requested
# instance type and independent variable value
desired_line = None
with open(input_fpath) as input_file:
    values = csv.DictReader(input_file, delimiter='\t', quoting=csv.QUOTE_MINIMAL)
    fieldnames = list(values.fieldnames)
    fieldnames.remove('Instance_type')
    fieldnames.remove('Indep_var_val')
    for line in values:
        if line['Instance_type'] == instance_type and line['Indep_var_val'] == indep_var_val:
            desired_line = line
            desired_line.pop('Instance_type')
            desired_line.pop('Indep_var_val')
            break

# fail fast with a clear message instead of a NameError further down when no
# row in the table matches the requested (instance_type, indep_var_val) pair
if desired_line is None:
    raise ValueError('No row found for Instance_type={} and Indep_var_val={} '
                     'in {}'.format(instance_type, indep_var_val, input_fpath))

bar_heights = list()
bar_names = list()
bar_errs = list()

# create lists of values and errors, iterate over field names to ensure dictionaries are read in the correct order
for key_name in fieldnames:
    if key_name.endswith(val_key_name_suff):
        bar_heights.append(float(desired_line[key_name]))  # cast to float
        bar_name = key_name[:-len(val_key_name_suff)]  # remove suffix from label names (optional)
        bar_names.append(bar_name)
    elif key_name.endswith(err_key_name_suff):
        bar_errs.append(float(desired_line[key_name]))  # cast to float

# if we want a bar with the independent variable (optional)
bar_heights.insert(0, float(indep_var_val))
bar_names.insert(0, 'Initial attack on A')
bar_errs.insert(0, 0.0)

# if we want to express values as percentage of a whole (optional)
whole_value_pos = bar_names.index('Total_dead')
whole_value = bar_heights[whole_value_pos]
del bar_heights[whole_value_pos]
del bar_names[whole_value_pos]
del bar_errs[whole_value_pos]
for i in range(len(bar_heights)):
    bar_heights[i] /= whole_value
    bar_errs[i] /= whole_value

# use a space to separate words (optional)
for i in range(len(bar_names)):
    bar_names[i] = bar_names[i].replace('_', ' ')

bar_cnt = len(bar_heights)  # the number of bars in the graph
bar_width = 0.35  # the width of the bars
bar_pos = np.arange(bar_width, bar_cnt + bar_width)  # the x locations for the groups
patterns = ('*', '+', 'x', '\\', '-', 'o', 'O', '.')

fig, ax = plt.subplots()

# draw one hatched white bar per quantity so the plot works in grayscale
for i in range(len(bar_heights)):
    ax.bar(bar_pos[i], bar_heights[i], bar_width, color='w',
           label=bar_names[i], hatch=patterns[i])

# add some text for labels, title and axes ticks
ax.set_ylabel('Percentage of final dead nodes')  # label of y axis
ax.set_xticks(bar_pos + bar_width / 2)

# turn off minor and major (both) tick marks on the x axis
plt.tick_params(axis='x', which='both', labelbottom='off')

# ax.set_xticklabels(bar_names, rotation=30, ha='right')  # draw a label below each bar (optional)

# get the labels of all the bars in the graph and place the legend beside it
handles, labels = ax.get_legend_handles_labels()
lgd = ax.legend(handles, labels, loc='center left', bbox_to_anchor=(0.99, 0.5), fontsize=16)

plt.ylim(0.0, plt.ylim()[1])  # cap y axis at zero
plt.savefig(output_fpath, bbox_inches='tight')
plt.show()
| gpl-3.0 |
UNR-AERIAL/scikit-learn | sklearn/datasets/svmlight_format.py | 114 | 15826 | """This module implements a loader and dumper for the svmlight format
This format is a text-based format, with one sample per line. It does
not store zero valued features hence is suitable for sparse dataset.
The first element of each line can be used to store a target variable to
predict.
This format is used as the default format for both svmlight and the
libsvm command line programs.
"""
# Authors: Mathieu Blondel <mathieu@mblondel.org>
# Lars Buitinck <L.J.Buitinck@uva.nl>
# Olivier Grisel <olivier.grisel@ensta.org>
# License: BSD 3 clause
from contextlib import closing
import io
import os.path
import numpy as np
import scipy.sparse as sp
from ._svmlight_format import _load_svmlight_file
from .. import __version__
from ..externals import six
from ..externals.six import u, b
from ..externals.six.moves import range, zip
from ..utils import check_array
from ..utils.fixes import frombuffer_empty
def load_svmlight_file(f, n_features=None, dtype=np.float64,
                       multilabel=False, zero_based="auto", query_id=False):
    """Load a dataset in svmlight / libsvm format into a sparse CSR matrix.

    The svmlight format is text based, one sample per line, and does not
    store zero-valued features, which makes it well suited to sparse
    datasets.  The first element of each line holds the target value.  It is
    the default format of the svmlight and libsvm command line programs.

    Parsing a text based source can be expensive; when loading the same file
    repeatedly, consider wrapping this loader with ``joblib.Memory.cache``
    to memmap the parsed CSR result.  A faster API-compatible loader also
    exists at https://github.com/mblondel/svmlight-loader.

    Parameters
    ----------
    f : {str, file-like, int}
        (Path to) the file to load.  Paths ending in ".gz" or ".bz2" are
        decompressed on the fly.  An integer is treated as a file
        descriptor.  File-likes and descriptors are not closed here, and
        file-like objects must be opened in binary mode.

    n_features : int or None
        Number of features to use.  If None it is inferred from the data;
        pass it explicitly when loading slices of a bigger dataset so all
        slices share a common shape.

    multilabel : boolean, optional, default False
        Whether samples may carry several labels each (see
        http://www.csie.ntu.edu.tw/~cjlin/libsvmtools/datasets/multilabel.html)

    zero_based : boolean or "auto", optional, default "auto"
        Whether column indices in f are zero-based (True) or one-based
        (False).  One-based indices are shifted to match Python/NumPy
        conventions.  "auto" applies a heuristic check; both kinds of files
        occur in the wild and are not self-identifying, so "auto" or True
        should always be safe.

    query_id : boolean, default False
        If True, also return the per-sample query_id array ("qid" pairwise
        preference constraints, used by pairwise loss functions in learning
        to rank); otherwise qids are ignored.

    dtype : numpy data type, default np.float64
        Data type of the returned arrays ``X`` and ``y``.

    Returns
    -------
    X : scipy.sparse matrix of shape (n_samples, n_features)

    y : ndarray of shape (n_samples,), or a list of tuples of length
        n_samples in the multilabel case.

    query_id : array of shape (n_samples,), only returned when the
        ``query_id`` parameter is True.

    See also
    --------
    load_svmlight_files : load several files at once, enforcing the same
        number of features/columns on all of them.
    """
    # Delegate to the multi-file loader with a single-element list and
    # flatten its [X, y(, query)] result into a tuple.
    return tuple(load_svmlight_files([f], n_features, dtype, multilabel,
                                     zero_based, query_id))
def _gen_open(f):
    """Open *f* for binary reading.

    *f* may be a file descriptor (int) or a path string; paths ending in
    ".gz" or ".bz2" are transparently decompressed.  Anything else raises
    TypeError (file-like objects are handled by the caller, not here).
    """
    if isinstance(f, int):  # file descriptor: do not close the caller's fd
        return io.open(f, "rb", closefd=False)
    if not isinstance(f, six.string_types):
        raise TypeError("expected {str, int, file-like}, got %s" % type(f))

    ext = os.path.splitext(f)[1]
    if ext == ".gz":
        import gzip
        return gzip.open(f, "rb")
    if ext == ".bz2":
        from bz2 import BZ2File
        return BZ2File(f, "rb")
    return open(f, "rb")
def _open_and_load(f, dtype, multilabel, zero_based, query_id):
    """Parse *f* (file-like or path/descriptor) into raw CSR components."""
    if hasattr(f, "read"):
        actual_dtype, data, ind, indptr, labels, query = \
            _load_svmlight_file(f, dtype, multilabel, zero_based, query_id)
    else:
        # XXX remove closing when Python 2.7+/3.1+ required
        with closing(_gen_open(f)) as opened:
            actual_dtype, data, ind, indptr, labels, query = \
                _load_svmlight_file(opened, dtype, multilabel, zero_based,
                                    query_id)

    # The Cython parser returns array.array buffers; view them with the
    # right dtypes (frombuffer_empty also tolerates zero-length buffers).
    if not multilabel:
        labels = frombuffer_empty(labels, np.float64)
    data = frombuffer_empty(data, actual_dtype)
    indices = frombuffer_empty(ind, np.intc)
    indptr = np.frombuffer(indptr, dtype=np.intc)  # never empty
    query = frombuffer_empty(query, np.intc)

    data = np.asarray(data, dtype=dtype)  # no-op for float{32,64}
    return data, indices, indptr, labels, query
def load_svmlight_files(files, n_features=None, dtype=np.float64,
                        multilabel=False, zero_based="auto", query_id=False):
    """Load dataset from multiple files in SVMlight format

    This function is equivalent to mapping load_svmlight_file over a list of
    files, except that the results are concatenated into a single, flat list
    and the samples vectors are constrained to all have the same number of
    features.

    In case the file contains a pairwise preference constraint (known
    as "qid" in the svmlight format) these are ignored unless the
    query_id parameter is set to True. These pairwise preference
    constraints can be used to constraint the combination of samples
    when using pairwise loss functions (as is the case in some
    learning to rank problems) so that only pairs with the same
    query_id value are considered.

    Parameters
    ----------
    files : iterable over {str, file-like, int}
        (Paths of) files to load. If a path ends in ".gz" or ".bz2", it will
        be uncompressed on the fly. If an integer is passed, it is assumed to
        be a file descriptor. File-likes and file descriptors will not be
        closed by this function. File-like objects must be opened in binary
        mode.

    n_features: int or None
        The number of features to use. If None, it will be inferred from the
        maximum column index occurring in any of the files.

        This can be set to a higher value than the actual number of features
        in any of the input files, but setting it to a lower value will cause
        an exception to be raised.

    multilabel: boolean, optional
        Samples may have several labels each (see
        http://www.csie.ntu.edu.tw/~cjlin/libsvmtools/datasets/multilabel.html)

    zero_based: boolean or "auto", optional
        Whether column indices in f are zero-based (True) or one-based
        (False). If column indices are one-based, they are transformed to
        zero-based to match Python/NumPy conventions.
        If set to "auto", a heuristic check is applied to determine this from
        the file contents. Both kinds of files occur "in the wild", but they
        are unfortunately not self-identifying. Using "auto" or True should
        always be safe.

    query_id: boolean, defaults to False
        If True, will return the query_id array for each file.

    dtype : numpy data type, default np.float64
        Data type of dataset to be loaded. This will be the data type of the
        output numpy arrays ``X`` and ``y``.

    Returns
    -------
    [X1, y1, ..., Xn, yn]
    where each (Xi, yi) pair is the result from load_svmlight_file(files[i]).

    If query_id is set to True, this will return instead [X1, y1, q1,
    ..., Xn, yn, qn] where (Xi, yi, qi) is the result from
    load_svmlight_file(files[i])

    Notes
    -----
    When fitting a model to a matrix X_train and evaluating it against a
    matrix X_test, it is essential that X_train and X_test have the same
    number of features (X_train.shape[1] == X_test.shape[1]). This may not
    be the case if you load the files individually with load_svmlight_file.

    See also
    --------
    load_svmlight_file
    """
    # Parse every file first; each entry of r is the raw
    # (data, indices, indptr, labels, query) tuple for one file.
    r = [_open_and_load(f, dtype, multilabel, bool(zero_based), bool(query_id))
         for f in files]

    # One-based indexing was either requested explicitly or detected by the
    # "auto" heuristic (smallest column index in *every* file is > 0):
    # shift all column indices down to zero-based, in place.
    if (zero_based is False
            or zero_based == "auto" and all(np.min(tmp[1]) > 0 for tmp in r)):
        for ind in r:
            indices = ind[1]
            indices -= 1  # in-place: mutates the parsed index buffer

    # Smallest feature count consistent with all files.
    n_f = max(ind[1].max() for ind in r) + 1

    if n_features is None:
        n_features = n_f
    elif n_features < n_f:
        raise ValueError("n_features was set to {},"
                         " but input file contains {} features"
                         .format(n_features, n_f))

    # Assemble the flat [X1, y1(, q1), X2, y2(, q2), ...] result list.
    result = []
    for data, indices, indptr, y, query_values in r:
        shape = (indptr.shape[0] - 1, n_features)
        X = sp.csr_matrix((data, indices, indptr), shape)
        X.sort_indices()
        result += X, y
        if query_id:
            result.append(query_values)

    return result
def _dump_svmlight(X, y, f, multilabel, one_based, comment, query_id):
    """Write X, y (and optional query ids) to ``f`` in svmlight format.

    ``f`` must be a file-like opened in binary mode; lines are encoded as
    ASCII.  ``one_based`` selects the column index base used on output.
    """
    # True (1) when X is a scipy sparse matrix (anything exposing tocsr).
    is_sp = int(hasattr(X, "tocsr"))
    # %.16g round-trips IEEE double precision values exactly.
    if X.dtype.kind == 'i':
        value_pattern = u("%d:%d")
    else:
        value_pattern = u("%d:%.16g")

    if y.dtype.kind == 'i':
        label_pattern = u("%d")
    else:
        label_pattern = u("%.16g")

    # Line layout: "<label>[ qid:<id>] <idx:val idx:val ...>\n".
    line_pattern = u("%s")
    if query_id is not None:
        line_pattern += u(" qid:%d")
    line_pattern += u(" %s\n")

    if comment:
        f.write(b("# Generated by dump_svmlight_file from scikit-learn %s\n"
                  % __version__))
        f.write(b("# Column indices are %s-based\n"
                  % ["zero", "one"][one_based]))

        f.write(b("#\n"))
        f.writelines(b("# %s\n" % line) for line in comment.splitlines())

    for i in range(X.shape[0]):
        if is_sp:
            # CSR row slice: nonzeros of sample i without densifying.
            span = slice(X.indptr[i], X.indptr[i + 1])
            row = zip(X.indices[span], X.data[span])
        else:
            nz = X[i] != 0
            row = zip(np.where(nz)[0], X[i, nz])

        s = " ".join(value_pattern % (j + one_based, x) for j, x in row)

        if multilabel:
            # Multilabel targets are written as a comma-separated list of
            # the indices of the nonzero labels.
            nz_labels = np.where(y[i] != 0)[0]
            labels_str = ",".join(label_pattern % j for j in nz_labels)
        else:
            labels_str = label_pattern % y[i]

        if query_id is not None:
            feat = (labels_str, query_id[i], s)
        else:
            feat = (labels_str, s)
        f.write((line_pattern % feat).encode('ascii'))
def dump_svmlight_file(X, y, f, zero_based=True, comment=None, query_id=None,
                       multilabel=False):
    """Dump the dataset in svmlight / libsvm file format.

    This format is a text-based format, with one sample per line. It does
    not store zero valued features hence is suitable for sparse dataset.

    The first element of each line can be used to store a target variable
    to predict.

    Parameters
    ----------
    X : {array-like, sparse matrix}, shape = [n_samples, n_features]
        Training vectors, where n_samples is the number of samples and
        n_features is the number of features.

    y : array-like, shape = [n_samples]
        Target values.

    f : string or file-like in binary mode
        If string, specifies the path that will contain the data.
        If file-like, data will be written to f. f should be opened in binary
        mode.

    zero_based : boolean, optional
        Whether column indices should be written zero-based (True) or one-based
        (False).

    comment : string, optional
        Comment to insert at the top of the file. This should be either a
        Unicode string, which will be encoded as UTF-8, or an ASCII byte
        string.
        If a comment is given, then it will be preceded by one that identifies
        the file as having been dumped by scikit-learn. Note that not all
        tools grok comments in SVMlight files.

    query_id : array-like, shape = [n_samples]
        Array containing pairwise preference constraints (qid in svmlight
        format).

    multilabel: boolean, optional
        Samples may have several labels each (see
        http://www.csie.ntu.edu.tw/~cjlin/libsvmtools/datasets/multilabel.html)
    """
    if comment is not None:
        # Convert comment string to list of lines in UTF-8.
        # If a byte string is passed, then check whether it's ASCII;
        # if a user wants to get fancy, they'll have to decode themselves.
        # Avoid mention of str and unicode types for Python 3.x compat.
        if isinstance(comment, bytes):
            comment.decode("ascii")  # just for the exception
        else:
            comment = comment.encode("utf-8")
        if six.b("\0") in comment:
            raise ValueError("comment string contains NUL byte")

    y = np.asarray(y)
    if y.ndim != 1 and not multilabel:
        raise ValueError("expected y of shape (n_samples,), got %r"
                         % (y.shape,))

    Xval = check_array(X, accept_sparse='csr')
    if Xval.shape[0] != y.shape[0]:
        raise ValueError("X.shape[0] and y.shape[0] should be the same, got"
                         " %r and %r instead." % (Xval.shape[0], y.shape[0]))

    # We had some issues with CSR matrices with unsorted indices (e.g. #1501),
    # so sort them here, but first make sure we don't modify the user's X.
    # TODO We can do this cheaper; sorted_indices copies the whole matrix.
    if Xval is X and hasattr(Xval, "sorted_indices"):
        # check_array returned the caller's own CSR matrix: take a sorted
        # copy so the caller's object stays untouched.
        X = Xval.sorted_indices()
    else:
        # Xval is already a fresh copy/conversion, safe to sort in place.
        X = Xval
        if hasattr(X, "sort_indices"):
            X.sort_indices()

    if query_id is not None:
        query_id = np.asarray(query_id)
        if query_id.shape[0] != y.shape[0]:
            raise ValueError("expected query_id of shape (n_samples,), got %r"
                             % (query_id.shape,))

    one_based = not zero_based

    if hasattr(f, "write"):
        _dump_svmlight(X, y, f, multilabel, one_based, comment, query_id)
    else:
        with open(f, "wb") as f:
            _dump_svmlight(X, y, f, multilabel, one_based, comment, query_id)
| bsd-3-clause |
wanglei828/apollo | modules/tools/plot_planning/imu_speed_jerk.py | 1 | 3753 | #!/usr/bin/env python
###############################################################################
# Copyright 2019 The Apollo Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###############################################################################
import math
from record_reader import RecordItemReader
from imu_speed_acc import ImuSpeedAcc
class ImuSpeedJerk:
    """Derive jerk (time derivative of acceleration) from localization poses.

    Acceleration samples are accumulated in an internal ImuSpeedAcc; each
    new pose yields one jerk sample computed as the finite difference of
    acceleration over a window of at least 0.5 seconds, when available.
    """

    def __init__(self):
        self.timestamp_list = []
        self.jerk_list = []
        self.imu_speed_acc = ImuSpeedAcc()

    def add(self, location_est):
        """Feed one localization estimate; append a jerk sample when an
        acceleration history at least 0.5 s long exists."""
        self.imu_speed_acc.add(location_est)
        timestamps = self.imu_speed_acc.get_timestamp_list()
        if not timestamps:
            return

        newest = timestamps[-1]
        # Walk backwards to the most recent sample at least 0.5 s older
        # than the newest one.
        window_start = None
        for idx in range(len(timestamps) - 1, -1, -1):
            if newest - timestamps[idx] >= 0.5:
                window_start = idx
                break

        if window_start is not None:
            accs = self.imu_speed_acc.get_acc_list()
            dt = newest - timestamps[window_start]
            self.jerk_list.append((accs[-1] - accs[window_start]) / dt)
            self.timestamp_list.append(newest)

    def get_jerk_list(self):
        return self.jerk_list

    def get_timestamp_list(self):
        return self.timestamp_list

    def get_lastest_jerk(self):
        return self.jerk_list[-1] if self.jerk_list else None

    def get_lastest_timestamp(self):
        return self.timestamp_list[-1] if self.timestamp_list else None
if __name__ == "__main__":
    import sys
    import matplotlib.pyplot as plt
    import numpy as np
    from os import listdir
    from os.path import isfile, join

    # Each command line argument is a folder of record files; every folder
    # is rendered with its own color/marker in a single scatter plot of
    # jerk versus timestamp.
    folders = sys.argv[1:]
    fig, ax = plt.subplots(1, 1)
    colors = ["g", "b", "r", "m", "y"]
    markers = ["o", "o", "o", "o"]
    for i in range(len(folders)):
        x = []
        y = []
        folder = folders[i]
        color = colors[i % len(colors)]
        marker = markers[i % len(markers)]
        # Process the folder's record files in sorted (chronological) order.
        fns = [f for f in listdir(folder) if isfile(join(folder, f))]
        fns.sort()
        for fn in fns:
            reader = RecordItemReader(folder + "/" + fn)
            processor = ImuSpeedJerk()
            last_pose_data = None
            last_chassis_data = None  # NOTE(review): never assigned below
            topics = ["/apollo/localization/pose"]
            for data in reader.read(topics):
                if "pose" in data:
                    last_pose_data = data["pose"]
                    processor.add(last_pose_data)

            data_x = processor.get_timestamp_list()
            data_y = processor.get_jerk_list()
            x.extend(data_x)
            y.extend(data_y)

        # Skip folders that produced no jerk samples at all.
        if len(x) <= 0:
            continue
        ax.scatter(x, y, c=color, marker=marker, alpha=0.4)
        #ax.plot(x, y, c=color, alpha=0.4)

    ax.set_xlabel('Timestamp')
    ax.set_ylabel('Jerk')
    plt.show()
| apache-2.0 |
harisbal/pandas | pandas/tests/io/sas/test_sas7bdat.py | 4 | 8326 | import pandas as pd
from pandas.compat import PY2
import pandas.util.testing as tm
import pandas.util._test_decorators as td
from pandas.errors import EmptyDataError
import os
import io
import numpy as np
import pytest
# https://github.com/cython/cython/issues/1720
@pytest.mark.filterwarnings("ignore:can't resolve package:ImportWarning")
class TestSAS7BDAT(object):
    """Round-trip tests: SAS7BDAT files must match their CSV references."""

    @pytest.fixture(autouse=True)
    def setup_method(self, datapath):
        # Load the two CSV reference frames once per test:
        # test_sas7bdat_1.csv pairs with test1..test15.sas7bdat and
        # test_sas7bdat_2.csv pairs with test16.sas7bdat (see self.test_ix).
        self.dirpath = datapath("io", "sas", "data")
        self.data = []
        self.test_ix = [list(range(1, 16)), [16]]
        for j in 1, 2:
            fname = os.path.join(
                self.dirpath, "test_sas7bdat_{j}.csv".format(j=j))
            df = pd.read_csv(fname)
            # SAS stores dates as day offsets from the 1960-01-01 epoch.
            epoch = pd.datetime(1960, 1, 1)
            t1 = pd.to_timedelta(df["Column4"], unit='d')
            df["Column4"] = epoch + t1
            t2 = pd.to_timedelta(df["Column12"], unit='d')
            df["Column12"] = epoch + t2
            # Normalize dtypes to what read_sas produces: all-numeric
            # columns become float64; under Python 2 object columns are
            # decoded from UTF-8 bytes to text.
            for k in range(df.shape[1]):
                col = df.iloc[:, k]
                if col.dtype == np.int64:
                    df.iloc[:, k] = df.iloc[:, k].astype(np.float64)
                elif col.dtype == np.dtype('O'):
                    if PY2:
                        f = lambda x: (x.decode('utf-8') if
                                       isinstance(x, str) else x)
                        df.iloc[:, k] = df.iloc[:, k].apply(f)
            self.data.append(df)

    def test_from_file(self):
        # Plain path-based reads must match the CSV references.
        for j in 0, 1:
            df0 = self.data[j]
            for k in self.test_ix[j]:
                fname = os.path.join(
                    self.dirpath, "test{k}.sas7bdat".format(k=k))
                df = pd.read_sas(fname, encoding='utf-8')
                tm.assert_frame_equal(df, df0)

    def test_from_buffer(self):
        # Reading from an in-memory BytesIO buffer must give the same
        # result as reading from the file path.
        for j in 0, 1:
            df0 = self.data[j]
            for k in self.test_ix[j]:
                fname = os.path.join(
                    self.dirpath, "test{k}.sas7bdat".format(k=k))
                with open(fname, 'rb') as f:
                    byts = f.read()
                buf = io.BytesIO(byts)
                rdr = pd.read_sas(buf, format="sas7bdat",
                                  iterator=True, encoding='utf-8')
                df = rdr.read()
                tm.assert_frame_equal(df, df0, check_exact=False)
                rdr.close()

    def test_from_iterator(self):
        # Incremental reads of 2 then 3 rows must line up with the
        # corresponding slices of the reference frame.
        for j in 0, 1:
            df0 = self.data[j]
            for k in self.test_ix[j]:
                fname = os.path.join(
                    self.dirpath, "test{k}.sas7bdat".format(k=k))
                rdr = pd.read_sas(fname, iterator=True, encoding='utf-8')
                df = rdr.read(2)
                tm.assert_frame_equal(df, df0.iloc[0:2, :])
                df = rdr.read(3)
                tm.assert_frame_equal(df, df0.iloc[2:5, :])
                rdr.close()

    @td.skip_if_no('pathlib')
    def test_path_pathlib(self):
        # pathlib.Path inputs must be accepted like plain strings.
        from pathlib import Path
        for j in 0, 1:
            df0 = self.data[j]
            for k in self.test_ix[j]:
                fname = Path(os.path.join(
                    self.dirpath, "test{k}.sas7bdat".format(k=k)))
                df = pd.read_sas(fname, encoding='utf-8')
                tm.assert_frame_equal(df, df0)

    @td.skip_if_no('py.path')
    def test_path_localpath(self):
        # py.path.local inputs must be accepted like plain strings.
        from py.path import local as LocalPath
        for j in 0, 1:
            df0 = self.data[j]
            for k in self.test_ix[j]:
                fname = LocalPath(os.path.join(
                    self.dirpath, "test{k}.sas7bdat".format(k=k)))
                df = pd.read_sas(fname, encoding='utf-8')
                tm.assert_frame_equal(df, df0)

    def test_iterator_loop(self):
        # github #13654: iterating a chunked reader must yield all rows.
        # NOTE(review): the loop variable ``chunksize`` is unused -- every
        # read uses chunksize=10; confirm whether that was intended.
        for j in 0, 1:
            for k in self.test_ix[j]:
                for chunksize in 3, 5, 10, 11:
                    fname = os.path.join(
                        self.dirpath, "test{k}.sas7bdat".format(k=k))
                    rdr = pd.read_sas(fname, chunksize=10, encoding='utf-8')
                    y = 0
                    for x in rdr:
                        y += x.shape[0]
                    assert y == rdr.row_count
                    rdr.close()

    def test_iterator_read_too_much(self):
        # github #14734: asking for more rows than exist must not fail and
        # must return the same frame as an exact-size read.
        k = self.test_ix[0][0]
        fname = os.path.join(self.dirpath, "test{k}.sas7bdat".format(k=k))
        rdr = pd.read_sas(fname, format="sas7bdat",
                          iterator=True, encoding='utf-8')
        d1 = rdr.read(rdr.row_count + 20)
        rdr.close()

        rdr = pd.read_sas(fname, iterator=True, encoding="utf-8")
        d2 = rdr.read(rdr.row_count + 20)
        tm.assert_frame_equal(d1, d2)
        rdr.close()
def test_encoding_options(datapath):
    """Explicit utf-8 decoding must match manually decoded byte columns."""
    fname = datapath("io", "sas", "data", "test1.sas7bdat")
    raw = pd.read_sas(fname)
    decoded = pd.read_sas(fname, encoding='utf-8')
    for col in raw.columns:
        try:
            raw[col] = raw[col].str.decode('utf-8')
        except AttributeError:
            pass
    tm.assert_frame_equal(raw, decoded)

    # With convert_header_text=False the column names stay as raw bytes.
    from pandas.io.sas.sas7bdat import SAS7BDATReader
    rdr = SAS7BDATReader(fname, convert_header_text=False)
    bytes_header = rdr.read()
    rdr.close()
    for text_name, byte_name in zip(raw.columns, bytes_header.columns):
        assert text_name == byte_name.decode()
def test_productsales(datapath):
    """productsales.sas7bdat must round-trip against its CSV reference."""
    result = pd.read_sas(
        datapath("io", "sas", "data", "productsales.sas7bdat"),
        encoding='utf-8')
    expected = pd.read_csv(
        datapath("io", "sas", "data", "productsales.csv"),
        parse_dates=['MONTH'])
    # read_sas yields float64 for these numeric columns.
    float_cols = ["ACTUAL", "PREDICT", "QUARTER", "YEAR"]
    expected[float_cols] = expected[float_cols].astype(np.float64)
    tm.assert_frame_equal(result, expected)
def test_12659(datapath):
    """Regression test for GH 12659."""
    result = pd.read_sas(
        datapath("io", "sas", "data", "test_12659.sas7bdat"))
    expected = pd.read_csv(
        datapath("io", "sas", "data", "test_12659.csv")).astype(np.float64)
    tm.assert_frame_equal(result, expected)
def test_airline(datapath):
    """airline.sas7bdat must match its CSV reference (inexact floats OK)."""
    result = pd.read_sas(
        datapath("io", "sas", "data", "airline.sas7bdat"))
    expected = pd.read_csv(
        datapath("io", "sas", "data", "airline.csv")).astype(np.float64)
    tm.assert_frame_equal(result, expected, check_exact=False)
def test_date_time(datapath):
    """Support of different SAS date/datetime formats (PR #15871)."""
    result = pd.read_sas(
        datapath("io", "sas", "data", "datetime.sas7bdat"))
    expected = pd.read_csv(
        datapath("io", "sas", "data", "datetime.csv"),
        parse_dates=['Date1', 'Date2', 'DateTime', 'DateTimeHi', 'Taiw'])
    # GH 19732: Timestamps imported from sas will incur floating point errors
    result.iloc[:, 3] = result.iloc[:, 3].dt.round('us')
    tm.assert_frame_equal(result, expected)
def test_compact_numerical_values(datapath):
    """Regression test for #21616.

    The CYL and WGT columns in cars.sas7bdat have a storage width < 8
    bytes and hold only integral values; reading must not introduce
    spurious decimals.
    """
    df = pd.read_sas(datapath("io", "sas", "data", "cars.sas7bdat"),
                     encoding='latin-1')
    for col in ('WGT', 'CYL'):
        tm.assert_series_equal(df[col], df[col].round(), check_exact=True)
def test_many_columns(datapath):
    """Column metadata may live in more than one place (PR #22628)."""
    result = pd.read_sas(
        datapath("io", "sas", "data", "many_columns.sas7bdat"),
        encoding='latin-1')
    expected = pd.read_csv(
        datapath("io", "sas", "data", "many_columns.csv"),
        encoding='latin-1')
    tm.assert_frame_equal(result, expected)
def test_inconsistent_number_of_rows(datapath):
    """Regression test for issue #16615 (PR #22628)."""
    fname = datapath("io", "sas", "data", "load_log.sas7bdat")
    frame = pd.read_sas(fname, encoding='latin-1')
    assert len(frame) == 2097
def test_zero_variables(datapath):
    """A SAS file with zero variables must raise EmptyDataError (PR #18184)."""
    fname = datapath("io", "sas", "data", "zero_variables.sas7bdat")
    with pytest.raises(EmptyDataError):
        pd.read_sas(fname)
| bsd-3-clause |
q1ang/scikit-learn | examples/linear_model/plot_sgd_loss_functions.py | 249 | 1095 | """
==========================
SGD: convex loss functions
==========================
A plot that compares the various convex loss functions supported by
:class:`sklearn.linear_model.SGDClassifier` .
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
def modified_huber_loss(y_true, y_pred):
    """Modified Huber loss as a function of the margin z = y_true * y_pred.

    Piecewise: 0 for z >= 1, (1 - z)^2 for -1 <= z < 1, and the linear
    continuation -4 * z for z < -1 (the pieces join continuously at z = -1).
    """
    margin = y_pred * y_true
    quadratic = (1 - margin) ** 2
    return np.where(margin >= 1., 0.,
                    np.where(margin >= -1, quadratic, -4 * margin))
# Sample the margin y * f(x) on [-4, 4] and draw every loss on one figure.
xmin, xmax = -4, 4
xx = np.linspace(xmin, xmax, 100)
# Zero-one loss: the step function all the convex surrogates upper-bound.
plt.plot([xmin, 0, 0, xmax], [1, 1, 0, 0], 'k-',
         label="Zero-one loss")
plt.plot(xx, np.where(xx < 1, 1 - xx, 0), 'g-',
         label="Hinge loss")
plt.plot(xx, -np.minimum(xx, 0), 'm-',
         label="Perceptron loss")
plt.plot(xx, np.log2(1 + np.exp(-xx)), 'r-',
         label="Log loss")
plt.plot(xx, np.where(xx < 1, 1 - xx, 0) ** 2, 'b-',
         label="Squared hinge loss")
plt.plot(xx, modified_huber_loss(xx, 1), 'y--',
         label="Modified Huber loss")
plt.ylim((0, 8))
plt.legend(loc="upper right")
plt.xlabel(r"Decision function $f(x)$")
plt.ylabel("$L(y, f(x))$")
plt.show()
| bsd-3-clause |
endolith/scikit-image | doc/examples/edges/plot_canny.py | 4 | 1602 | """
===================
Canny edge detector
===================
The Canny filter is a multi-stage edge detector. It uses a filter based on the
derivative of a Gaussian in order to compute the intensity of the gradients.The
Gaussian reduces the effect of noise present in the image. Then, potential
edges are thinned down to 1-pixel curves by removing non-maximum pixels of the
gradient magnitude. Finally, edge pixels are kept or removed using hysteresis
thresholding on the gradient magnitude.
The Canny has three adjustable parameters: the width of the Gaussian (the
noisier the image, the greater the width), and the low and high threshold for
the hysteresis thresholding.
"""
import numpy as np
import matplotlib.pyplot as plt
from scipy import ndimage as ndi
from skimage import feature
# Generate noisy image of a square: a rotated, blurred block plus uniform
# noise, so the edge detector has something realistic to work on.
im = np.zeros((128, 128))
im[32:-32, 32:-32] = 1

im = ndi.rotate(im, 15, mode='constant')
im = ndi.gaussian_filter(im, 4)
im += 0.2 * np.random.random(im.shape)

# Compute the Canny filter for two values of sigma (wider Gaussian ->
# more smoothing, fewer spurious edges).
edges1 = feature.canny(im)
edges2 = feature.canny(im, sigma=3)

# display results
fig, (ax1, ax2, ax3) = plt.subplots(nrows=1, ncols=3, figsize=(8, 3),
                                    sharex=True, sharey=True)

ax1.imshow(im, cmap=plt.cm.jet)
ax1.axis('off')
ax1.set_title('noisy image', fontsize=20)

ax2.imshow(edges1, cmap=plt.cm.gray)
ax2.axis('off')
# BUGFIX: use raw strings for the LaTeX titles -- '\s' is an invalid escape
# sequence in a plain string literal (SyntaxWarning on recent Python, an
# error in future versions). The rendered text is unchanged.
ax2.set_title(r'Canny filter, $\sigma=1$', fontsize=20)

ax3.imshow(edges2, cmap=plt.cm.gray)
ax3.axis('off')
ax3.set_title(r'Canny filter, $\sigma=3$', fontsize=20)

fig.tight_layout()

plt.show()
| bsd-3-clause |
dyoung418/tensorflow | tensorflow/examples/learn/text_classification_character_rnn.py | 8 | 4104 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Example of recurrent neural networks over characters for DBpedia dataset.
This model is similar to one described in this paper:
"Character-level Convolutional Networks for Text Classification"
http://arxiv.org/abs/1509.01626
and is somewhat alternative to the Lua code from here:
https://github.com/zhangxiangxiao/Crepe
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import sys
import numpy as np
import pandas
import tensorflow as tf
FLAGS = None  # populated by argparse in the __main__ block below

# Documents are truncated/padded to this many bytes by the ByteProcessor.
MAX_DOCUMENT_LENGTH = 100
# Size of the GRU hidden state.
HIDDEN_SIZE = 20
# Size of the label one-hot encoding (number of target classes).
MAX_LABEL = 15
CHARS_FEATURE = 'chars'  # Name of the input character feature.
def char_rnn_model(features, labels, mode):
  """Character level recurrent neural network model to predict classes.

  Args:
    features: dict of input tensors; `features[CHARS_FEATURE]` holds the
      byte ids of each document (assumed shape (batch, MAX_DOCUMENT_LENGTH),
      as produced by the ByteProcessor in `main` -- confirm for other callers).
    labels: integer class-id tensor; may be None in PREDICT mode.
    mode: a `tf.estimator.ModeKeys` value selecting PREDICT/TRAIN/EVAL.

  Returns:
    A `tf.estimator.EstimatorSpec` appropriate for `mode`.
  """
  # One-hot encode each byte (256 possible values) and split the document
  # axis into a per-position list, as required by the static RNN API.
  byte_vectors = tf.one_hot(features[CHARS_FEATURE], 256, 1., 0.)
  byte_list = tf.unstack(byte_vectors, axis=1)

  # Run a GRU over the byte sequence; `encoding` is its final hidden state.
  cell = tf.nn.rnn_cell.GRUCell(HIDDEN_SIZE)
  _, encoding = tf.nn.static_rnn(cell, byte_list, dtype=tf.float32)

  # Linear projection of the final state onto the class logits.
  logits = tf.layers.dense(encoding, MAX_LABEL, activation=None)

  predicted_classes = tf.argmax(logits, 1)
  if mode == tf.estimator.ModeKeys.PREDICT:
    # Inference: no labels available, return class ids and probabilities.
    return tf.estimator.EstimatorSpec(
        mode=mode,
        predictions={
            'class': predicted_classes,
            'prob': tf.nn.softmax(logits)
        })

  # Cross-entropy loss against one-hot labels (used in TRAIN and EVAL).
  onehot_labels = tf.one_hot(labels, MAX_LABEL, 1, 0)
  loss = tf.losses.softmax_cross_entropy(
      onehot_labels=onehot_labels, logits=logits)
  if mode == tf.estimator.ModeKeys.TRAIN:
    optimizer = tf.train.AdamOptimizer(learning_rate=0.01)
    train_op = optimizer.minimize(loss, global_step=tf.train.get_global_step())
    return tf.estimator.EstimatorSpec(mode, loss=loss, train_op=train_op)

  # EVAL: report accuracy alongside the loss.
  eval_metric_ops = {
      'accuracy': tf.metrics.accuracy(
          labels=labels, predictions=predicted_classes)
  }
  return tf.estimator.EstimatorSpec(
      mode=mode, loss=loss, eval_metric_ops=eval_metric_ops)
def main(unused_argv):
  """Train and evaluate the character RNN on the DBpedia dataset."""
  # Prepare training and testing data. Column 1 of the dataset frame holds
  # the document text; `target` holds the integer class label.
  dbpedia = tf.contrib.learn.datasets.load_dataset(
      'dbpedia', test_with_fake_data=FLAGS.test_with_fake_data)
  x_train = pandas.DataFrame(dbpedia.train.data)[1]
  y_train = pandas.Series(dbpedia.train.target)
  x_test = pandas.DataFrame(dbpedia.test.data)[1]
  y_test = pandas.Series(dbpedia.test.target)

  # Process vocabulary: map every document to a fixed-length sequence of
  # byte ids (truncated/padded to MAX_DOCUMENT_LENGTH).
  char_processor = tf.contrib.learn.preprocessing.ByteProcessor(
      MAX_DOCUMENT_LENGTH)
  x_train = np.array(list(char_processor.fit_transform(x_train)))
  x_test = np.array(list(char_processor.transform(x_test)))

  # Build model
  classifier = tf.estimator.Estimator(model_fn=char_rnn_model)

  # Train.
  train_input_fn = tf.estimator.inputs.numpy_input_fn(
      x={CHARS_FEATURE: x_train},
      y=y_train,
      batch_size=128,
      num_epochs=None,  # repeat indefinitely; `steps` bounds training
      shuffle=True)
  classifier.train(input_fn=train_input_fn, steps=100)

  # Eval: a single pass over the test set, in order.
  test_input_fn = tf.estimator.inputs.numpy_input_fn(
      x={CHARS_FEATURE: x_test},
      y=y_test,
      num_epochs=1,
      shuffle=False)
  scores = classifier.evaluate(input_fn=test_input_fn)
  print('Accuracy: {0:f}'.format(scores['accuracy']))
if __name__ == '__main__':
  # Parse the flag this script defines; anything unrecognised is forwarded
  # to tf.app.run, which re-invokes `main` with it.
  parser = argparse.ArgumentParser()
  parser.add_argument(
      '--test_with_fake_data',
      default=False,
      help='Test the example code with fake data.',
      action='store_true')
  FLAGS, unparsed = parser.parse_known_args()
  tf.app.run(main=main, argv=[sys.argv[0]] + unparsed)
| apache-2.0 |
belltailjp/scikit-learn | examples/mixture/plot_gmm.py | 248 | 2817 | """
=================================
Gaussian Mixture Model Ellipsoids
=================================
Plot the confidence ellipsoids of a mixture of two Gaussians with EM
and variational Dirichlet process.
Both models have access to five components with which to fit the
data. Note that the EM model will necessarily use all five components
while the DP model will effectively only use as many as are needed for
a good fit. This is a property of the Dirichlet Process prior. Here we
can see that the EM model splits some components arbitrarily, because it
is trying to fit too many components, while the Dirichlet Process model
adapts it number of state automatically.
This example doesn't show it, as we're in a low-dimensional space, but
another advantage of the Dirichlet process model is that it can fit
full covariance matrices effectively even when there are less examples
per cluster than there are dimensions in the data, due to
regularization properties of the inference algorithm.
"""
import itertools
import numpy as np
from scipy import linalg
import matplotlib.pyplot as plt
import matplotlib as mpl
from sklearn import mixture
# Number of samples per component
n_samples = 500

# Generate random sample, two components: one correlated Gaussian (via the
# linear map C) and one isotropic Gaussian shifted to (-6, 3).
np.random.seed(0)
C = np.array([[0., -0.1], [1.7, .4]])
X = np.r_[np.dot(np.random.randn(n_samples, 2), C),
          .7 * np.random.randn(n_samples, 2) + np.array([-6, 3])]

# Fit a mixture of Gaussians with EM using five components
gmm = mixture.GMM(n_components=5, covariance_type='full')
gmm.fit(X)

# Fit a Dirichlet process mixture of Gaussians using five components
dpgmm = mixture.DPGMM(n_components=5, covariance_type='full')
dpgmm.fit(X)

color_iter = itertools.cycle(['r', 'g', 'b', 'c', 'm'])

# One subplot per fitted model.
for i, (clf, title) in enumerate([(gmm, 'GMM'),
                                  (dpgmm, 'Dirichlet Process GMM')]):
    splot = plt.subplot(2, 1, 1 + i)
    Y_ = clf.predict(X)
    # NOTE: this inner loop reuses `i`, shadowing the subplot index above;
    # harmless here because the outer `i` is not used again afterwards.
    for i, (mean, covar, color) in enumerate(zip(
            clf.means_, clf._get_covars(), color_iter)):
        # Eigendecomposition of the covariance gives the ellipse axes (v)
        # and orientation (w).
        v, w = linalg.eigh(covar)
        u = w[0] / linalg.norm(w[0])
        # as the DP will not use every component it has access to
        # unless it needs it, we shouldn't plot the redundant
        # components.
        if not np.any(Y_ == i):
            continue
        plt.scatter(X[Y_ == i, 0], X[Y_ == i, 1], .8, color=color)

        # Plot an ellipse to show the Gaussian component
        angle = np.arctan(u[1] / u[0])
        angle = 180 * angle / np.pi  # convert to degrees
        ell = mpl.patches.Ellipse(mean, v[0], v[1], 180 + angle, color=color)
        ell.set_clip_box(splot.bbox)
        ell.set_alpha(0.5)
        splot.add_artist(ell)

    plt.xlim(-10, 10)
    plt.ylim(-3, 6)
    plt.xticks(())
    plt.yticks(())
    plt.title(title)

plt.show()
| bsd-3-clause |
yuanagain/seniorthesis | venv/lib/python2.7/site-packages/scipy/stats/_stats_mstats_common.py | 12 | 8157 | from collections import namedtuple
import numpy as np
from . import distributions
__all__ = ['_find_repeats', 'linregress', 'theilslopes']
def linregress(x, y=None):
    """
    Calculate a linear least-squares regression for two sets of measurements.

    Parameters
    ----------
    x, y : array_like
        Two sets of measurements. Both arrays should have the same length.
        If only x is given (and y=None), then it must be a two-dimensional
        array where one dimension has length 2. The two sets of measurements
        are then found by splitting the array along the length-2 dimension.

    Returns
    -------
    slope : float
        slope of the regression line
    intercept : float
        intercept of the regression line
    rvalue : float
        correlation coefficient
    pvalue : float
        two-sided p-value for a hypothesis test whose null hypothesis is
        that the slope is zero.
    stderr : float
        Standard error of the estimated gradient.

    Raises
    ------
    ValueError
        If only `x` is given and it is not of shape (2, N) or (N, 2), or
        if either input is empty.

    See also
    --------
    optimize.curve_fit : Use non-linear least squares to fit a function to data.
    optimize.leastsq : Minimize the sum of squares of a set of equations.

    Examples
    --------
    >>> from scipy import stats
    >>> np.random.seed(12345678)
    >>> x = np.random.random(10)
    >>> y = np.random.random(10)
    >>> slope, intercept, r_value, p_value, std_err = stats.linregress(x,y)

    # To get coefficient of determination (r_squared)

    >>> print("r-squared:", r_value**2)
    ('r-squared:', 0.080402268539028335)

    """
    # Keeps the t-statistic denominator nonzero when |r| == 1.
    TINY = 1.0e-20
    if y is None:  # x is a (2, N) or (N, 2) shaped array_like
        x = np.asarray(x)
        # BUGFIX: check the dimensionality before indexing `x.shape[1]`.
        # Previously a 1-D input of length != 2 raised an opaque IndexError
        # (and a 1-D input of length 2 was silently unpacked into scalars)
        # instead of reaching the intended ValueError below.
        if x.ndim == 2 and x.shape[0] == 2:
            x, y = x
        elif x.ndim == 2 and x.shape[1] == 2:
            x, y = x.T
        else:
            msg = ("If only `x` is given as input, it has to be of shape "
                   "(2, N) or (N, 2), provided shape was %s" % str(x.shape))
            raise ValueError(msg)
    else:
        x = np.asarray(x)
        y = np.asarray(y)

    if x.size == 0 or y.size == 0:
        raise ValueError("Inputs must not be empty.")

    n = len(x)
    xmean = np.mean(x, None)
    ymean = np.mean(y, None)

    # Average sums of squares / cross-products (biased, i.e. divided by n);
    # the 1/n factors cancel in all the ratios below.
    ssxm, ssxym, ssyxm, ssym = np.cov(x, y, bias=1).flat
    r_num = ssxym
    r_den = np.sqrt(ssxm * ssym)
    if r_den == 0.0:
        # Zero variance in x or y: correlation is undefined; report 0.
        r = 0.0
    else:
        r = r_num / r_den
        # Clip numerical round-off so |r| never exceeds 1.
        if r > 1.0:
            r = 1.0
        elif r < -1.0:
            r = -1.0

    df = n - 2
    # t-statistic for H0: slope == 0.
    t = r * np.sqrt(df / ((1.0 - r + TINY)*(1.0 + r + TINY)))
    prob = 2 * distributions.t.sf(np.abs(t), df)
    slope = r_num / ssxm
    intercept = ymean - slope*xmean
    # Standard error of the estimated slope.
    sterrest = np.sqrt((1 - r**2) * ssym / ssxm / df)

    LinregressResult = namedtuple('LinregressResult', ('slope', 'intercept',
                                                       'rvalue', 'pvalue',
                                                       'stderr'))
    return LinregressResult(slope, intercept, r, prob, sterrest)
def theilslopes(y, x=None, alpha=0.95):
    r"""
    Computes the Theil-Sen estimator for a set of points (x, y).

    `theilslopes` implements a method for robust linear regression. It
    computes the slope as the median of all slopes between paired values.

    Parameters
    ----------
    y : array_like
        Dependent variable.
    x : array_like or None, optional
        Independent variable. If None, use ``arange(len(y))`` instead.
    alpha : float, optional
        Confidence degree between 0 and 1. Default is 95% confidence.
        Note that `alpha` is symmetric around 0.5, i.e. both 0.1 and 0.9 are
        interpreted as "find the 90% confidence interval".

    Returns
    -------
    medslope : float
        Theil slope.
    medintercept : float
        Intercept of the Theil line, as ``median(y) - medslope*median(x)``.
    lo_slope : float
        Lower bound of the confidence interval on `medslope`.
    up_slope : float
        Upper bound of the confidence interval on `medslope`.

    Notes
    -----
    The implementation of `theilslopes` follows [1]_. The intercept is
    not defined in [1]_, and here it is defined as ``median(y) -
    medslope*median(x)``, which is given in [3]_. Other definitions of
    the intercept exist in the literature. A confidence interval for
    the intercept is not given as this question is not addressed in
    [1]_.

    References
    ----------
    .. [1] P.K. Sen, "Estimates of the regression coefficient based on Kendall's tau",
           J. Am. Stat. Assoc., Vol. 63, pp. 1379-1389, 1968.
    .. [2] H. Theil, "A rank-invariant method of linear and polynomial
           regression analysis I, II and III", Nederl. Akad. Wetensch., Proc.
           53:, pp. 386-392, pp. 521-525, pp. 1397-1412, 1950.
    .. [3] W.L. Conover, "Practical nonparametric statistics", 2nd ed.,
           John Wiley and Sons, New York, pp. 493.

    Examples
    --------
    >>> from scipy import stats
    >>> import matplotlib.pyplot as plt

    >>> x = np.linspace(-5, 5, num=150)
    >>> y = x + np.random.normal(size=x.size)
    >>> y[11:15] += 10  # add outliers
    >>> y[-5:] -= 7

    Compute the slope, intercept and 90% confidence interval. For comparison,
    also compute the least-squares fit with `linregress`:

    >>> res = stats.theilslopes(y, x, 0.90)
    >>> lsq_res = stats.linregress(x, y)

    Plot the results. The Theil-Sen regression line is shown in red, with the
    dashed red lines illustrating the confidence interval of the slope (note
    that the dashed red lines are not the confidence interval of the regression
    as the confidence interval of the intercept is not included). The green
    line shows the least-squares fit for comparison.

    >>> fig = plt.figure()
    >>> ax = fig.add_subplot(111)
    >>> ax.plot(x, y, 'b.')
    >>> ax.plot(x, res[1] + res[0] * x, 'r-')
    >>> ax.plot(x, res[1] + res[2] * x, 'r--')
    >>> ax.plot(x, res[1] + res[3] * x, 'r--')
    >>> ax.plot(x, lsq_res[1] + lsq_res[0] * x, 'g-')
    >>> plt.show()
    """
    # We copy both x and y so we can use _find_repeats.
    y = np.array(y).flatten()
    if x is None:
        x = np.arange(len(y), dtype=float)
    else:
        x = np.array(x, dtype=float).flatten()
        if len(x) != len(y):
            raise ValueError("Incompatible lengths ! (%s<>%s)" % (len(y), len(x)))

    # Compute sorted slopes only when deltax > 0 (each unordered pair with
    # distinct x contributes exactly one slope).
    deltax = x[:, np.newaxis] - x
    deltay = y[:, np.newaxis] - y
    slopes = deltay[deltax > 0] / deltax[deltax > 0]
    slopes.sort()
    medslope = np.median(slopes)
    medinter = np.median(y) - medslope * np.median(x)

    # Now compute confidence intervals
    if alpha > 0.5:
        # `alpha` is symmetric around 0.5; normalise to the lower tail.
        alpha = 1. - alpha
    z = distributions.norm.ppf(alpha / 2.)

    # This implements (2.6) from Sen (1968): the variance of Kendall's
    # statistic with tie corrections for repeated x and y values.
    _, nxreps = _find_repeats(x)
    _, nyreps = _find_repeats(y)
    nt = len(slopes)  # N in Sen (1968)
    ny = len(y)  # n in Sen (1968)
    # BUGFIX: use the builtin `sum` for the tie-correction terms. Passing a
    # generator to `np.sum` is deprecated (NumPy silently delegates to the
    # builtin with a DeprecationWarning on recent releases).
    sigsq = 1/18. * (ny * (ny-1) * (2*ny+5) -
                     sum(k * (k-1) * (2*k + 5) for k in nxreps) -
                     sum(k * (k-1) * (2*k + 5) for k in nyreps))

    # Find the confidence interval indices in `slopes`, clamped to the
    # valid index range.
    sigma = np.sqrt(sigsq)
    Ru = min(int(np.round((nt - z*sigma)/2.)), len(slopes)-1)
    Rl = max(int(np.round((nt + z*sigma)/2.)) - 1, 0)
    delta = slopes[[Rl, Ru]]
    return medslope, medinter, delta[0], delta[1]
def _find_repeats(arr):
# This function assumes it may clobber its input.
if len(arr) == 0:
return np.array(0, np.float64), np.array(0, np.intp)
# XXX This cast was previously needed for the Fortran implementation,
# should we ditch it?
arr = np.asarray(arr, np.float64).ravel()
arr.sort()
# Taken from NumPy 1.9's np.unique.
change = np.concatenate(([True], arr[1:] != arr[:-1]))
unique = arr[change]
change_idx = np.concatenate(np.nonzero(change) + ([arr.size],))
freq = np.diff(change_idx)
atleast2 = freq > 1
return unique[atleast2], freq[atleast2]
| mit |
yunfeilu/scikit-learn | examples/ensemble/plot_gradient_boosting_regression.py | 227 | 2520 | """
============================
Gradient Boosting regression
============================
Demonstrate Gradient Boosting on the Boston housing dataset.
This example fits a Gradient Boosting model with least squares loss and
500 regression trees of depth 4.
"""
print(__doc__)
# Author: Peter Prettenhofer <peter.prettenhofer@gmail.com>
#
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn import ensemble
from sklearn import datasets
from sklearn.utils import shuffle
from sklearn.metrics import mean_squared_error
###############################################################################
# Load data
boston = datasets.load_boston()
X, y = shuffle(boston.data, boston.target, random_state=13)
X = X.astype(np.float32)

# 90% / 10% train/test split of the shuffled data.
offset = int(X.shape[0] * 0.9)
X_train, y_train = X[:offset], y[:offset]
X_test, y_test = X[offset:], y[offset:]

###############################################################################
# Fit regression model
# NOTE(review): min_samples_split=1 is accepted by the scikit-learn version
# this example targets; newer releases require a value >= 2 -- confirm
# before upgrading.
params = {'n_estimators': 500, 'max_depth': 4, 'min_samples_split': 1,
          'learning_rate': 0.01, 'loss': 'ls'}
clf = ensemble.GradientBoostingRegressor(**params)

clf.fit(X_train, y_train)
mse = mean_squared_error(y_test, clf.predict(X_test))
print("MSE: %.4f" % mse)

###############################################################################
# Plot training deviance

# compute test set deviance after each boosting iteration
test_score = np.zeros((params['n_estimators'],), dtype=np.float64)

for i, y_pred in enumerate(clf.staged_decision_function(X_test)):
    test_score[i] = clf.loss_(y_test, y_pred)

plt.figure(figsize=(12, 6))
plt.subplot(1, 2, 1)
plt.title('Deviance')
plt.plot(np.arange(params['n_estimators']) + 1, clf.train_score_, 'b-',
         label='Training Set Deviance')
plt.plot(np.arange(params['n_estimators']) + 1, test_score, 'r-',
         label='Test Set Deviance')
plt.legend(loc='upper right')
plt.xlabel('Boosting Iterations')
plt.ylabel('Deviance')

###############################################################################
# Plot feature importance
feature_importance = clf.feature_importances_
# make importances relative to max importance
feature_importance = 100.0 * (feature_importance / feature_importance.max())
sorted_idx = np.argsort(feature_importance)
pos = np.arange(sorted_idx.shape[0]) + .5
plt.subplot(1, 2, 2)
plt.barh(pos, feature_importance[sorted_idx], align='center')
plt.yticks(pos, boston.feature_names[sorted_idx])
plt.xlabel('Relative Importance')
plt.title('Variable Importance')
plt.show()
| bsd-3-clause |
qjcg/dotfiles | .ipython/profile_default/ipython_config.py | 1 | 18940 | # Configuration file for ipython.
# Get the running IPython application's configuration object; every option
# set in this file is attached to it.
c = get_config()
#------------------------------------------------------------------------------
# InteractiveShellApp configuration
#------------------------------------------------------------------------------
# A Mixin for applications that start InteractiveShell instances.
#
# Provides configurables for loading extensions and executing files as part of
# configuring a Shell environment.
#
# The following methods should be called by the :meth:`initialize` method of the
# subclass:
#
# - :meth:`init_path`
# - :meth:`init_shell` (to be implemented by the subclass)
# - :meth:`init_gui_pylab`
# - :meth:`init_extensions`
# - :meth:`init_code`
# dotted module name of an IPython extension to load.
# c.InteractiveShellApp.extra_extension = ''
# List of files to run at IPython startup.
# c.InteractiveShellApp.exec_files = []
# Pre-load matplotlib and numpy for interactive use, selecting a particular
# matplotlib backend and loop integration.
# c.InteractiveShellApp.pylab = None
# lines of code to run at IPython startup.
# c.InteractiveShellApp.exec_lines = []
# Enable GUI event loop integration with any of ('glut', 'gtk', 'gtk3', 'none',
# 'osx', 'pyglet', 'qt', 'qt4', 'tk', 'wx').
# c.InteractiveShellApp.gui = None
# Configure matplotlib for interactive use with the default matplotlib backend.
# c.InteractiveShellApp.matplotlib = None
# A file to be run
# c.InteractiveShellApp.file_to_run = ''
# Run the module as a script.
# c.InteractiveShellApp.module_to_run = ''
# Execute the given command string.
# c.InteractiveShellApp.code_to_run = ''
# A list of dotted module names of IPython extensions to load.
# c.InteractiveShellApp.extensions = []
# If true, IPython will populate the user namespace with numpy, pylab, etc. and
# an 'import *' is done from numpy and pylab, when using pylab mode.
#
# When False, pylab mode should not import any names into the user namespace.
# c.InteractiveShellApp.pylab_import_all = True
#------------------------------------------------------------------------------
# TerminalIPythonApp configuration
#------------------------------------------------------------------------------
# TerminalIPythonApp will inherit config from: BaseIPythonApplication,
# Application, InteractiveShellApp
# lines of code to run at IPython startup.
# c.TerminalIPythonApp.exec_lines = []
# Start IPython quickly by skipping the loading of config files.
# c.TerminalIPythonApp.quick = False
# A file to be run
# c.TerminalIPythonApp.file_to_run = ''
# Suppress warning messages about legacy config files
# c.TerminalIPythonApp.ignore_old_config = False
# Create a massive crash report when IPython encounters what may be an internal
# error. The default is to append a short message to the usual traceback
# c.TerminalIPythonApp.verbose_crash = False
# The Logging format template
# c.TerminalIPythonApp.log_format = '[%(name)s]%(highlevel)s %(message)s'
# Set the log level by value or name.
# c.TerminalIPythonApp.log_level = 30
# dotted module name of an IPython extension to load.
# c.TerminalIPythonApp.extra_extension = ''
# Path to an extra config file to load.
#
# If specified, load this config file in addition to any other IPython config.
# c.TerminalIPythonApp.extra_config_file = ''
# The date format used by logging formatters for %(asctime)s
# c.TerminalIPythonApp.log_datefmt = '%Y-%m-%d %H:%M:%S'
# Whether to overwrite existing config files when copying
# c.TerminalIPythonApp.overwrite = False
# If a command or file is given via the command-line, e.g. 'ipython foo.py
# c.TerminalIPythonApp.force_interact = False
# Execute the given command string.
# c.TerminalIPythonApp.code_to_run = ''
# Whether to display a banner upon starting IPython.
# c.TerminalIPythonApp.display_banner = True
# List of files to run at IPython startup.
# c.TerminalIPythonApp.exec_files = []
# Pre-load matplotlib and numpy for interactive use, selecting a particular
# matplotlib backend and loop integration.
# c.TerminalIPythonApp.pylab = None
# The name of the IPython directory. This directory is used for logging
# configuration (through profiles), history storage, etc. The default is usually
# $HOME/.ipython. This options can also be specified through the environment
# variable IPYTHONDIR.
# c.TerminalIPythonApp.ipython_dir = '/home/john/.config/ipython'
# Enable GUI event loop integration with any of ('glut', 'gtk', 'gtk3', 'none',
# 'osx', 'pyglet', 'qt', 'qt4', 'tk', 'wx').
# c.TerminalIPythonApp.gui = None
# Whether to install the default config files into the profile dir. If a new
# profile is being created, and IPython contains config files for that profile,
# then they will be staged into the new directory. Otherwise, default config
# files will be automatically generated.
# c.TerminalIPythonApp.copy_config_files = False
# Configure matplotlib for interactive use with the default matplotlib backend.
# c.TerminalIPythonApp.matplotlib = None
# Run the module as a script.
# c.TerminalIPythonApp.module_to_run = ''
# The IPython profile to use.
# c.TerminalIPythonApp.profile = 'default'
# A list of dotted module names of IPython extensions to load.
# c.TerminalIPythonApp.extensions = []
# If true, IPython will populate the user namespace with numpy, pylab, etc. and
# an 'import *' is done from numpy and pylab, when using pylab mode.
#
# When False, pylab mode should not import any names into the user namespace.
# c.TerminalIPythonApp.pylab_import_all = True
#------------------------------------------------------------------------------
# TerminalInteractiveShell configuration
#------------------------------------------------------------------------------
# TerminalInteractiveShell will inherit config from: InteractiveShell
# Autoindent IPython code entered interactively.
# c.TerminalInteractiveShell.autoindent = True
# Set the size of the output cache. The default is 1000, you can change it
# permanently in your config file. Setting it to 0 completely disables the
# caching system, and the minimum value accepted is 20 (if you provide a value
# less than 20, it is reset to 0 and a warning is issued). This limit is
# defined because otherwise you'll spend more time re-flushing a too small cache
# than working
# c.TerminalInteractiveShell.cache_size = 1000
# auto editing of files with syntax errors.
# c.TerminalInteractiveShell.autoedit_syntax = False
#
# c.TerminalInteractiveShell.separate_out2 = ''
# 'all', 'last', 'last_expr' or 'none', specifying which nodes should be run
# interactively (displaying output from expressions).
# c.TerminalInteractiveShell.ast_node_interactivity = 'last_expr'
# The part of the banner to be printed after the profile
# c.TerminalInteractiveShell.banner2 = ''
#
# c.TerminalInteractiveShell.readline_parse_and_bind = ['tab: complete', '"\\C-l": clear-screen', 'set show-all-if-ambiguous on', '"\\C-o": tab-insert', '"\\C-r": reverse-search-history', '"\\C-s": forward-search-history', '"\\C-p": history-search-backward', '"\\C-n": history-search-forward', '"\\e[A": history-search-backward', '"\\e[B": history-search-forward', '"\\C-k": kill-line', '"\\C-u": unix-line-discard']
#
# c.TerminalInteractiveShell.debug = False
#
# c.TerminalInteractiveShell.history_length = 10000
# Enable auto setting the terminal title.
# c.TerminalInteractiveShell.term_title = False
# The part of the banner to be printed before the profile
# c.TerminalInteractiveShell.banner1 = 'Python 3.3.3 (default, Nov 26 2013, 13:33:18) \nType "copyright", "credits" or "license" for more information.\n\nIPython 1.1.0 -- An enhanced Interactive Python.\n? -> Introduction and overview of IPython\'s features.\n%quickref -> Quick reference.\nhelp -> Python\'s own help system.\nobject? -> Details about \'object\', use \'object??\' for extra details.\n'
# Save multi-line entries as one entry in readline history
# c.TerminalInteractiveShell.multiline_history = True
#
# c.TerminalInteractiveShell.xmode = 'Context'
# Automatically call the pdb debugger after every exception.
# c.TerminalInteractiveShell.pdb = False
#
# c.TerminalInteractiveShell.readline_remove_delims = '-/~'
#
# c.TerminalInteractiveShell.wildcards_case_sensitive = True
# A list of ast.NodeTransformer subclass instances, which will be applied to
# user input before code is run.
# c.TerminalInteractiveShell.ast_transformers = []
# The shell program to be used for paging.
# c.TerminalInteractiveShell.pager = 'less'
# Start logging to the given file in append mode.
# c.TerminalInteractiveShell.logappend = ''
# Enable deep (recursive) reloading by default. IPython can use the deep_reload
# module which reloads changes in modules recursively (it replaces the reload()
# function, so you don't need to change anything to use it). deep_reload()
# forces a full reload of modules whose code may have changed, which the default
# reload() function does not. When deep_reload is off, IPython will use the
# normal reload(), but deep_reload will still be available as dreload().
# c.TerminalInteractiveShell.deep_reload = False
#
# c.TerminalInteractiveShell.readline_use = True
# Don't call post-execute functions that have failed in the past.
# c.TerminalInteractiveShell.disable_failing_post_execute = False
# Use colors for displaying information about objects. Because this information
# is passed through a pager (like 'less'), and some pagers get confused with
# color codes, this capability can be turned off.
# c.TerminalInteractiveShell.color_info = True
#
# c.TerminalInteractiveShell.separate_out = ''
# Deprecated, use PromptManager.justify
# c.TerminalInteractiveShell.prompts_pad_left = True
#
# c.TerminalInteractiveShell.separate_in = '\n'
#
# c.TerminalInteractiveShell.quiet = False
# Deprecated, use PromptManager.in2_template
# c.TerminalInteractiveShell.prompt_in2 = ' .\\D.: '
# Set the editor used by IPython (default to $EDITOR/vi/notepad).
# c.TerminalInteractiveShell.editor = 'vim'
# Enable magic commands to be called without the leading %.
# c.TerminalInteractiveShell.automagic = True
# Show rewritten input, e.g. for autocall.
# c.TerminalInteractiveShell.show_rewritten_input = True
#
# c.TerminalInteractiveShell.ipython_dir = ''
# The name of the logfile to use.
# c.TerminalInteractiveShell.logfile = ''
# Make IPython automatically call any callable object even if you didn't type
# explicit parentheses. For example, 'str 43' becomes 'str(43)' automatically.
# The value can be '0' to disable the feature, '1' for 'smart' autocall, where
# it is not applied if there are no more arguments on the line, and '2' for
# 'full' autocall, where all callable objects are automatically called (even if
# no arguments are present).
# c.TerminalInteractiveShell.autocall = 0
# Number of lines of your screen, used to control printing of very long strings.
# Strings longer than this number of lines will be sent through a pager instead
# of directly printed. The default value for this is 0, which means IPython
# will auto-detect your screen size every time it needs to print certain
# potentially long strings (this doesn't change the behavior of the 'print'
# keyword, it's only triggered internally). If for some reason this isn't
# working well (it needs curses support), specify it yourself. Otherwise don't
# change the default.
# c.TerminalInteractiveShell.screen_length = 0
#
# c.TerminalInteractiveShell.object_info_string_level = 0
# Set the color scheme (NoColor, Linux, or LightBG).
# c.TerminalInteractiveShell.colors = 'Linux'
# Deprecated, use PromptManager.out_template
# c.TerminalInteractiveShell.prompt_out = 'Out[\\#]: '
# Set to confirm when you try to exit IPython with an EOF (Control-D in Unix,
# Control-Z/Enter in Windows). By typing 'exit' or 'quit', you can force a
# direct exit without any confirmation.
# Disabled here: Ctrl-D exits immediately, without the confirmation prompt.
c.TerminalInteractiveShell.confirm_exit = False
# Deprecated, use PromptManager.in_template
# c.TerminalInteractiveShell.prompt_in1 = 'In [\\#]: '
# Start logging to the default log file.
# c.TerminalInteractiveShell.logstart = False
#------------------------------------------------------------------------------
# PromptManager configuration
#------------------------------------------------------------------------------
# This is the primary interface for producing IPython's prompts.
# Output prompt. '\#' will be transformed to the prompt number
# c.PromptManager.out_template = 'Out[\\#]: '
# If True (default), each prompt will be right-aligned with the preceding one.
# c.PromptManager.justify = True
#
# c.PromptManager.color_scheme = 'Linux'
# Input prompt. '\#' will be transformed to the prompt number
# c.PromptManager.in_template = 'In [\\#]: '
# Continuation prompt.
# c.PromptManager.in2_template = ' .\\D.: '
#------------------------------------------------------------------------------
# HistoryManager configuration
#------------------------------------------------------------------------------
# A class to organize all history-related functionality in one place.
# HistoryManager will inherit config from: HistoryAccessor
# Options for configuring the SQLite connection
#
# These options are passed as keyword args to sqlite3.connect when establishing
# database conenctions.
# c.HistoryManager.connection_options = {}
#
# c.HistoryManager.db_log_output = False
# enable the SQLite history
#
# set enabled=False to disable the SQLite history, in which case there will be
# no stored history, no SQLite connection, and no background saving thread.
# This may be necessary in some threaded environments where IPython is embedded.
# c.HistoryManager.enabled = True
#
# c.HistoryManager.db_cache_size = 0
# Path to file to use for SQLite history database.
#
# By default, IPython will put the history database in the IPython profile
# directory. If you would rather share one history among profiles, you can set
# this value in each, so that they are consistent.
#
# Due to an issue with fcntl, SQLite is known to misbehave on some NFS mounts.
# If you see IPython hanging, try setting this to something on a local disk,
# e.g::
#
# ipython --HistoryManager.hist_file=/tmp/ipython_hist.sqlite
# c.HistoryManager.hist_file = ''
#------------------------------------------------------------------------------
# ProfileDir configuration
#------------------------------------------------------------------------------
# An object to manage the profile directory and its resources.
#
# The profile directory is used by all IPython applications, to manage
# configuration, logging and security.
#
# This object knows how to find, create and manage these directories. This
# should be used by any code that wants to handle profiles.
# Set the profile location directly. This overrides the logic used by the
# `profile` option.
# c.ProfileDir.location = ''
#------------------------------------------------------------------------------
# PlainTextFormatter configuration
#------------------------------------------------------------------------------
# The default pretty-printer.
#
# This uses :mod:`IPython.lib.pretty` to compute the format data of the object.
# If the object cannot be pretty printed, :func:`repr` is used. See the
# documentation of :mod:`IPython.lib.pretty` for details on how to write pretty
# printers. Here is a simple example::
#
# def dtype_pprinter(obj, p, cycle):
# if cycle:
# return p.text('dtype(...)')
# if hasattr(obj, 'fields'):
# if obj.fields is None:
# p.text(repr(obj))
# else:
# p.begin_group(7, 'dtype([')
# for i, field in enumerate(obj.descr):
# if i > 0:
# p.text(',')
# p.breakable()
# p.pretty(field)
# p.end_group(7, '])')
# PlainTextFormatter will inherit config from: BaseFormatter
#
# c.PlainTextFormatter.type_printers = {}
#
# c.PlainTextFormatter.deferred_printers = {}
#
# c.PlainTextFormatter.verbose = False
#
# c.PlainTextFormatter.float_precision = ''
#
# c.PlainTextFormatter.newline = '\n'
#
# c.PlainTextFormatter.pprint = True
#
# c.PlainTextFormatter.max_width = 79
#
# c.PlainTextFormatter.singleton_printers = {}
#------------------------------------------------------------------------------
# IPCompleter configuration
#------------------------------------------------------------------------------
# Extension of the completer class with IPython-specific features
# IPCompleter will inherit config from: Completer
# Instruct the completer to use __all__ for the completion
#
# Specifically, when completing on ``object.<tab>``.
#
# When True: only those names in obj.__all__ will be included.
#
# When False [default]: the __all__ attribute is ignored
# c.IPCompleter.limit_to__all__ = False
# Whether to merge completion results into a single list
#
# If False, only the completion results from the first non-empty completer will
# be returned.
# c.IPCompleter.merge_completions = True
# Activate greedy completion
#
# This will enable completion on elements of lists, results of function calls,
# etc., but can be unsafe because the code is actually evaluated on TAB.
# c.IPCompleter.greedy = False
# Instruct the completer to omit private method names
#
# Specifically, when completing on ``object.<tab>``.
#
# When 2 [default]: all names that start with '_' will be excluded.
#
# When 1: all 'magic' names (``__foo__``) will be excluded.
#
# When 0: nothing will be excluded.
# c.IPCompleter.omit__names = 2
#------------------------------------------------------------------------------
# ScriptMagics configuration
#------------------------------------------------------------------------------
# Magics for talking to scripts
#
# This defines a base `%%script` cell magic for running a cell with a program in
# a subprocess, and registers a few top-level magics that call %%script with
# common interpreters.
# Dict mapping short 'ruby' names to full paths, such as '/opt/secret/bin/ruby'
#
# Only necessary for items in script_magics where the default path will not find
# the right interpreter.
# c.ScriptMagics.script_paths = {}
# Extra script cell magics to define
#
# This generates simple wrappers of `%%script foo` as `%%foo`.
#
# If you want to add script magics that aren't on your path, specify them in
# script_paths
# c.ScriptMagics.script_magics = []
#------------------------------------------------------------------------------
# StoreMagics configuration
#------------------------------------------------------------------------------
# Lightweight persistence for python variables.
#
# Provides the %store magic.
# If True, any %store-d variables will be automatically restored when IPython
# starts.
# c.StoreMagics.autorestore = False
| gpl-3.0 |
schreiberx/sweet | mule_local/python/mule_local/rexi/brexi/BREXI.py | 1 | 4254 | #! /usr/bin/env python3
import sys
import numpy
import mule_local.rexi.brexi.rk_co as rk_co
import mule_local.rexi.EFloat as ef
from mule_local.rexi.REXICoefficients import *
from mule_local.rexi.Functions import *
class BREXI:
    """Butcher-table based REXI.

    Turns an N-stage Runge-Kutta collocation scheme (chosen by quadrature
    method) into rational-approximation (REXI) coefficients for phi0.
    """

    def __init__(self, efloat_mode=None):
        """Store the extended-float backend used for coefficient output."""
        self.efloat = ef.EFloat(efloat_mode)

    def setup(self, N, quadrature_method="gauss_legendre", tanhsinh_h=None):
        """Compute REXI coefficients from an N-point collocation scheme.

        Parameters
        ----------
        N : number of collocation points / RK stages
        quadrature_method : name of the quadrature rule for rk_co
        tanhsinh_h : accepted for interface compatibility; unused here

        Returns
        -------
        REXICoefficients with alphas, betas, gamma for the phi0 function.
        """
        self.N = N
        self.quadrature_method = quadrature_method
        self.quadrature_method_short = quadrature_method[0].upper()

        tableau = rk_co.rk_co(N, quadrature_method)

        # Step 1) Diagonalization of the RK matrix A:
        # eigenvalues in `vals`, eigenvectors as columns of `vecs`.
        vals, vecs = numpy.linalg.eig(tableau.A)

        # Find the diagonal-only matrix W such that
        #     W * E^-1 * ones = ones.
        # With eta = E^-1 * ones (i.e. solve E*eta = ones), the diagonal of
        # W^-1 is simply eta.
        eta = numpy.linalg.solve(vecs, numpy.ones(N))
        W_inv = numpy.diag(eta)

        b_tilde = tableau.b.T.dot(vecs.dot(W_inv))

        # Step 2) Rewrite the diagonalized scheme in the unified REXI form.
        gamma = 1.0 - numpy.sum(b_tilde / vals)
        alphas = 1 / vals
        betas = -b_tilde / (vals * vals)

        coeffs = REXICoefficients()
        coeffs.function_name = "phi0"
        coeffs.efloat = self.efloat
        coeffs.alphas = alphas
        coeffs.betas = betas
        coeffs.gamma = gamma
        coeffs.unique_id_string = self.getUniqueId()
        return coeffs

    def getUniqueId(self):
        """Short identifier: BREXI_<method initial>_<N>."""
        return "_".join(("BREXI", self.quadrature_method_short, str(self.N)))
if __name__ == "__main__":
    # Demo / diagnostic driver: compute BREXI coefficients for several
    # quadrature rules, print them, and plot coefficient locations plus the
    # approximation error of phi0 along the imaginary axis.
    numpy.set_printoptions(precision=20)
    for method in ['gauss_legendre', 'gauss_chebyshev_u', 'gauss_chebyshev_t', 'gauss_hermite', 'gauss_lobatto', 'gauss_jacobi']:
        print("Method: "+method)
        brexi = BREXI()
        N=16
        coeffs = brexi.setup(N=N, quadrature_method=method)
        filepre = 'brexi_'+method
        # Convert to floating point
        coeffs = coeffs.toFloat()
        print("Alphas:")
        for i in coeffs.alphas:
            print(" + "+str(i))
        print("")
        print("Betas:")
        for i in coeffs.betas:
            print(" + "+str(i))
        print("")
        print("Gamma:")
        print(coeffs.gamma)
        print("")
        if True:
        #if False:
            import matplotlib
            # Headless backend: figures are only written to PDF files.
            matplotlib.use('Agg')
            import numpy as np
            import matplotlib.pyplot as plt
            # Scatter plot of the poles (alphas) in the complex plane.
            x = np.real(coeffs.alphas)
            y = np.imag(coeffs.alphas)
            plt.clf()
            plt.scatter(x, y)
            plt.savefig('output_'+filepre+'_alphas.pdf')
            # Same poles, marker size scaled by log-magnitude of the betas.
            plt.clf()
            plt.scatter(x, y, s=np.log(np.absolute(coeffs.betas))+1.0)
            plt.savefig('output_'+filepre+'_alphas_scaled.pdf')
            # Scatter plot of the weights (betas).
            x = np.real(coeffs.betas)
            y = np.imag(coeffs.betas)
            plt.clf()
            plt.scatter(x, y)
            plt.savefig('output_'+filepre+'_betas.pdf')
            #
            # Error plot: |phi0(i*x) - REXI(i*x)| sampled along the
            # imaginary axis over the oscillatory range [-N, N].
            #
            plt.clf()
            function = Functions(
                function_name = "phi0",
                efloat_mode = "float"
            )
            test_range = [-N, N]
            num_test_samples = 4096
            max_error = 0
            xpts = np.linspace(test_range[0], test_range[1], num_test_samples)
            yvals = np.zeros(num_test_samples)
            for i in range(num_test_samples):
                x = xpts[i]
                lam = 1j*x
                y = function.eval(lam)
                yn = coeffs.eval(lam)
                err = np.abs(y-yn)
                yvals[i] = err
            plt.plot(xpts, yvals, 'r-')
            plt.xlim(test_range[0], test_range[1])
            plt.ylim(1e-12, 10)
            plt.yscale("log")
            plt.savefig('output_'+filepre+'_error_oscillatory.pdf')
| mit |
226262/Neural-Network-Digit-Recognition | plotter.py | 1 | 3993 | import os
import numpy
import matplotlib.pyplot as plt
import pygame, random
class Plotter:
    """Pygame canvas on which the user draws a digit with the mouse; the
    drawing is then cropped to its bounding box and scaled down to a 28x28
    binary array (MNIST-style input for the network).
    """
    # NOTE(review): these are class attributes shared by all instances;
    # flush() later rebinds them as instance attributes. Fine for the single
    # Plotter this app uses, but surprising with multiple instances.
    width=300
    height=width
    # Binary bitmap of the drawing (1 = drawn pixel).
    array=numpy.full((width,height),0)
    # Bounding box of everything drawn so far; initialized inverted so the
    # first stroke always updates it.
    xMin=width
    xMax=0
    yMin=height
    yMax=0
    edge=0
    # Becomes True once the user actually draws a stroke.
    isAnythingDrew = False
    def write_rad(self,x,y,promien):
        # Recursively fill an approximate disc of radius `promien`
        # ("promien" is Polish for "radius") around (x, y) in self.array.
        if promien>0:
            # Only draw when the whole disc fits inside the canvas.
            if (x-promien)>0 and (x+promien)<self.width and (y-promien)>0 and (y+promien)<self.width:
                j=0
                # NOTE(review): the loop variable shadows the parameter `x`,
                # so the recursive call below uses the last visited column,
                # not the original centre — looks unintended; confirm.
                for x in range(x-promien,x+promien+1):
                    if j<=promien:
                        self.array[x][y+j]=1
                        self.array[x][y-j]=1
                        j=j+1
                    if j>promien:
                        j=j-1
                        # NOTE(review): the statement below has no effect
                        # (no assignment); probably meant `... = 1` to mirror
                        # the branch above — confirm intent.
                        self.array[x][y+j]
                self.write_rad(x,y,promien-1)
    def cut_and_scale_down(self):
        # Crop the drawing to its bounding box, centre it inside a square
        # buffer with a margin, then nearest-neighbour downscale to 28x28.
        if (self.yMax-self.yMin)>=(self.xMax-self.xMin):
            edge=self.yMax-self.yMin
        else:
            edge=self.xMax-self.xMin
        # Margin (in source pixels) added around the digit before scaling.
        frame=56
        sideFrame=(frame/2)
        tmp_array=numpy.full(((edge+frame),(edge+frame)),0)
        tmp_scaled_array=numpy.full((28,28),0)
        # Copy the cropped drawing into the centre of the square buffer.
        for j in range(int((((edge+frame)/2)-(self.xMax-self.xMin)/2)),int(((edge+frame)/2)+((self.xMax-self.xMin)/2))):
            for i in range(int(sideFrame),int(edge+sideFrame)):
                tmp_array[i][j]=self.array[self.yMin+i-int(sideFrame)][self.xMin+j-int(((edge+frame)/2)-((self.xMax-self.xMin)/2))]
        # Downscale: any set source pixel marks its corresponding 28x28 cell.
        for i in range(0,(edge+frame-1)):
            for j in range(0,(edge+frame-1)):
                if tmp_array[i][j]==1:
                    tmp_scaled_array[int((i*28)/(edge+frame))][int((j*28)/(edge+frame))]=1
        self.array=tmp_scaled_array
        # print(self.array)
    def input_stuff(self):
        # Open the drawing window and run the pygame event loop until the
        # user closes it; returns the 28x28 array of what was drawn.
        screen = pygame.display.set_mode((self.width,self.height))
        draw_on = False
        last_pos = (0, 0)
        color = (255, 255, 255)
        radius = 3
        def roundline(srf, color, start, end, radius=1):
            # Interpolate between two mouse positions so fast strokes leave a
            # continuous line; also track the drawing's bounding box and
            # rasterize each point into self.array.
            self.isAnythingDrew = True
            dx = end[0]-start[0]
            dy = end[1]-start[1]
            distance = max(abs(dx), abs(dy))
            for i in range(distance):
                x = int( start[0]+float(i)/distance*dx)
                y = int( start[1]+float(i)/distance*dy)
                if x<self.xMin:
                    self.xMin=x
                if x>self.xMax:
                    self.xMax=x
                if y<self.yMin:
                    self.yMin=y
                if y>self.yMax:
                    self.yMax=y
                # Note the swapped order: array is indexed [row][col].
                self.write_rad(y,x,2)
                pygame.draw.circle(srf, color, (x, y), radius)
        try:
            while True:
                e = pygame.event.wait()
                if e.type == pygame.QUIT:
                    # StopIteration is (ab)used to break out of the loop.
                    raise StopIteration
                if e.type == pygame.MOUSEBUTTONDOWN:
                    # color = (255, 255, 255)
                    # pygame.draw.circle(screen, color, e.pos, radius)
                    draw_on = True
                if e.type == pygame.MOUSEBUTTONUP:
                    draw_on = False
                if e.type == pygame.MOUSEMOTION:
                    if draw_on:
                        pygame.draw.circle(screen, color, e.pos, radius)
                        roundline(screen, color, e.pos, last_pos, radius)
                last_pos = e.pos
                pygame.display.flip()
        except StopIteration:
            pass
        pygame.quit()
        if(self.isAnythingDrew):
            self.cut_and_scale_down()
            return self.array
        else:
            print("You haven't drew anything :c")
            exit()
    def flush(self):
        # Reset the canvas and bounding box so a new digit can be drawn.
        self.array=numpy.full((self.width,self.height),0)
        self.xMin=self.width
        self.xMax=0
        self.yMin=self.height
        self.yMax=0
        self.edge=0
        self.isAnythingDrew = False
| gpl-3.0 |
Tong-Chen/scikit-learn | examples/cluster/plot_dict_face_patches.py | 12 | 2723 | """
Online learning of a dictionary of parts of faces
==================================================
This example uses a large dataset of faces to learn a set of 20 x 20
images patches that constitute faces.
From the programming standpoint, it is interesting because it shows how
to use the online API of the scikit-learn to process a very large
dataset by chunks. The way we proceed is that we load an image at a time
and extract randomly 15 patches from this image. Once we have accumulated
750 of these patches (using 50 images), we run the `partial_fit` method
of the online KMeans object, MiniBatchKMeans.
The verbose setting on the MiniBatchKMeans enables us to see that some
clusters are reassigned during the successive calls to
partial-fit. This is because the number of patches that they represent
has become too low, and it is better to choose a random new
cluster.
"""
print(__doc__)

import time

import pylab as pl
import numpy as np

from sklearn import datasets
from sklearn.cluster import MiniBatchKMeans
from sklearn.feature_extraction.image import extract_patches_2d

faces = datasets.fetch_olivetti_faces()

###############################################################################
# Learn the dictionary of images

print('Learning the dictionary... ')
rng = np.random.RandomState(0)
kmeans = MiniBatchKMeans(n_clusters=81, random_state=rng, verbose=True)
patch_size = (20, 20)

buffer = []
t0 = time.time()

# The online learning part: cycle over the whole dataset 6 times
# (matches range(6) below; total patch count is 6 * len(faces.images)).
index = 0
for _ in range(6):
    for img in faces.images:
        data = extract_patches_2d(img, patch_size, max_patches=50,
                                  random_state=rng)
        data = np.reshape(data, (len(data), -1))
        buffer.append(data)
        index += 1
        if index % 10 == 0:
            # Every 10 images: normalize the accumulated patches and feed
            # them to the online KMeans, then start a fresh buffer.
            data = np.concatenate(buffer, axis=0)
            data -= np.mean(data, axis=0)
            data /= np.std(data, axis=0)
            kmeans.partial_fit(data)
            buffer = []
        if index % 100 == 0:
            print('Partial fit of %4i out of %i'
                  % (index, 6 * len(faces.images)))

dt = time.time() - t0
print('done in %.2fs.' % dt)

###############################################################################
# Plot the results
pl.figure(figsize=(4.2, 4))
for i, patch in enumerate(kmeans.cluster_centers_):
    pl.subplot(9, 9, i + 1)
    pl.imshow(patch.reshape(patch_size), cmap=pl.cm.gray,
              interpolation='nearest')
    pl.xticks(())
    pl.yticks(())

# BUG FIX: the title previously claimed 8 * len(faces.images) patches,
# but the training loop above runs 6 passes over the dataset.
pl.suptitle('Patches of faces\nTrain time %.1fs on %d patches' %
            (dt, 6 * len(faces.images)), fontsize=16)
pl.subplots_adjust(0.08, 0.02, 0.92, 0.85, 0.08, 0.23)
pl.show()
| bsd-3-clause |
joshloyal/scikit-learn | sklearn/utils/validation.py | 8 | 25965 | """Utilities for input validation"""
# Authors: Olivier Grisel
# Gael Varoquaux
# Andreas Mueller
# Lars Buitinck
# Alexandre Gramfort
# Nicolas Tresegnie
# License: BSD 3 clause
import warnings
import numbers
import numpy as np
import scipy.sparse as sp
from ..externals import six
from ..utils.fixes import signature
from ..exceptions import NonBLASDotWarning
from ..exceptions import NotFittedError
from ..exceptions import DataConversionWarning
# Floating point dtypes that the validation helpers treat as already-float
# (i.e. no conversion required).
FLOAT_DTYPES = (np.float64, np.float32, np.float16)

# Silenced by default to reduce verbosity. Turn on at runtime for
# performance profiling.
warnings.simplefilter('ignore', NonBLASDotWarning)
def _assert_all_finite(X):
"""Like assert_all_finite, but only for ndarray."""
X = np.asanyarray(X)
# First try an O(n) time, O(1) space solution for the common case that
# everything is finite; fall back to O(n) space np.isfinite to prevent
# false positives from overflow in sum method.
if (X.dtype.char in np.typecodes['AllFloat'] and not np.isfinite(X.sum())
and not np.isfinite(X).all()):
raise ValueError("Input contains NaN, infinity"
" or a value too large for %r." % X.dtype)
def assert_all_finite(X):
    """Throw a ValueError if X contains NaN or infinity.

    Input MUST be an np.ndarray instance or a scipy.sparse matrix.
    """
    if sp.issparse(X):
        # For sparse input only the stored (non-zero) values can be bad.
        _assert_all_finite(X.data)
    else:
        _assert_all_finite(X)
def as_float_array(X, copy=True, force_all_finite=True):
    """Convert an array-like to an array of floats.

    The resulting dtype is np.float32 or np.float64, depending on the
    original type. The function can create a copy or modify the argument
    depending on the argument copy.

    Parameters
    ----------
    X : {array-like, sparse matrix}

    copy : bool, optional
        If True, a copy of X will be created. If False, a copy may still be
        returned if X's dtype is not a floating point type.

    force_all_finite : boolean (default=True)
        Whether to raise an error on np.inf and np.nan in X.

    Returns
    -------
    XT : {array, sparse matrix}
        An array of type np.float
    """
    is_plain_ndarray = isinstance(X, np.ndarray) and \
        not isinstance(X, np.matrix)
    if not is_plain_ndarray and not sp.issparse(X):
        # Lists, tuples, np.matrix, ...: delegate conversion and validation.
        return check_array(X, ['csr', 'csc', 'coo'], dtype=np.float64,
                           copy=copy, force_all_finite=force_all_finite,
                           ensure_2d=False)
    if X.dtype in [np.float32, np.float64]:
        # Already float: at most a copy is needed.
        if sp.issparse(X):
            return X.copy() if copy else X
        if copy:
            # Preserve the memory layout of the original array.
            return X.copy('F' if X.flags['F_CONTIGUOUS'] else 'C')
        return X
    # Non-float dtype: int32 maps to float32, everything else to float64.
    return X.astype(np.float32 if X.dtype == np.int32 else np.float64)
def _is_arraylike(x):
"""Returns whether the input is array-like"""
return (hasattr(x, '__len__') or
hasattr(x, 'shape') or
hasattr(x, '__array__'))
def _num_samples(x):
"""Return number of samples in array-like x."""
if hasattr(x, 'fit') and callable(x.fit):
# Don't get num_samples from an ensembles length!
raise TypeError('Expected sequence or array-like, got '
'estimator %s' % x)
if not hasattr(x, '__len__') and not hasattr(x, 'shape'):
if hasattr(x, '__array__'):
x = np.asarray(x)
else:
raise TypeError("Expected sequence or array-like, got %s" %
type(x))
if hasattr(x, 'shape'):
if len(x.shape) == 0:
raise TypeError("Singleton array %r cannot be considered"
" a valid collection." % x)
return x.shape[0]
else:
return len(x)
def _shape_repr(shape):
"""Return a platform independent representation of an array shape
Under Python 2, the `long` type introduces an 'L' suffix when using the
default %r format for tuples of integers (typically used to store the shape
of an array).
Under Windows 64 bit (and Python 2), the `long` type is used by default
in numpy shapes even when the integer dimensions are well below 32 bit.
The platform specific type causes string messages or doctests to change
from one platform to another which is not desirable.
Under Python 3, there is no more `long` type so the `L` suffix is never
introduced in string representation.
>>> _shape_repr((1, 2))
'(1, 2)'
>>> one = 2 ** 64 / 2 ** 64 # force an upcast to `long` under Python 2
>>> _shape_repr((one, 2 * one))
'(1, 2)'
>>> _shape_repr((1,))
'(1,)'
>>> _shape_repr(())
'()'
"""
if len(shape) == 0:
return "()"
joined = ", ".join("%d" % e for e in shape)
if len(shape) == 1:
# special notation for singleton tuples
joined += ','
return "(%s)" % joined
def check_consistent_length(*arrays):
    """Check that all arrays have consistent first dimensions.

    Checks whether all objects in arrays have the same shape or length;
    None entries are ignored.

    Parameters
    ----------
    *arrays : list or tuple of input objects.
        Objects that will be checked for consistent length.
    """
    lengths = [_num_samples(X) for X in arrays if X is not None]
    if len(set(lengths)) > 1:
        raise ValueError("Found input variables with inconsistent numbers of"
                         " samples: %r" % [int(l) for l in lengths])
def indexable(*iterables):
    """Make arrays indexable for cross-validation.

    Checks consistent length, passes through None, and ensures that
    everything can be indexed by converting sparse matrices to csr and
    converting non-iterable objects to arrays.

    Parameters
    ----------
    *iterables : lists, dataframes, arrays, sparse matrices
        List of objects to ensure sliceability.
    """
    result = []
    for X in iterables:
        if sp.issparse(X):
            result.append(X.tocsr())
        elif X is None or hasattr(X, "__getitem__") or hasattr(X, "iloc"):
            # Already indexable (or a None placeholder): keep as-is.
            result.append(X)
        else:
            result.append(np.array(X))
    check_consistent_length(*result)
    return result
def _ensure_sparse_format(spmatrix, accept_sparse, dtype, copy,
                          force_all_finite):
    """Convert a sparse matrix to a given format.

    Checks the sparse format of spmatrix and converts if necessary.

    Parameters
    ----------
    spmatrix : scipy sparse matrix
        Input to validate and convert.

    accept_sparse : string, boolean or list/tuple of strings
        String[s] representing allowed sparse matrix formats ('csc',
        'csr', 'coo', 'dok', 'bsr', 'lil', 'dia'). If the input is sparse but
        not in the allowed format, it will be converted to the first listed
        format. True allows the input to be any format. False means
        that a sparse matrix input will raise an error.

    dtype : string, type or None
        Data type of result. If None, the dtype of the input is preserved.

    copy : boolean
        Whether a forced copy will be triggered. If copy=False, a copy might
        be triggered by a conversion.

    force_all_finite : boolean
        Whether to raise an error on np.inf and np.nan in X.

    Returns
    -------
    spmatrix_converted : scipy sparse matrix.
        Matrix that is ensured to have an allowed type.
    """
    if dtype is None:
        dtype = spmatrix.dtype

    # Track whether a format conversion already produced a new object, so
    # the copy step below does not duplicate the data a second time.
    changed_format = False

    if isinstance(accept_sparse, six.string_types):
        accept_sparse = [accept_sparse]

    if accept_sparse is False:
        raise TypeError('A sparse matrix was passed, but dense '
                        'data is required. Use X.toarray() to '
                        'convert to a dense numpy array.')
    elif isinstance(accept_sparse, (list, tuple)):
        if len(accept_sparse) == 0:
            raise ValueError("When providing 'accept_sparse' "
                             "as a tuple or list, it must contain at "
                             "least one string value.")
        # ensure correct sparse format
        if spmatrix.format not in accept_sparse:
            # create new with correct sparse
            spmatrix = spmatrix.asformat(accept_sparse[0])
            changed_format = True
    elif accept_sparse is not True:
        # any other type
        raise ValueError("Parameter 'accept_sparse' should be a string, "
                         "boolean or list of strings. You provided "
                         "'accept_sparse={}'.".format(accept_sparse))

    if dtype != spmatrix.dtype:
        # convert dtype (astype always allocates, which also satisfies copy)
        spmatrix = spmatrix.astype(dtype)
    elif copy and not changed_format:
        # force copy
        spmatrix = spmatrix.copy()

    if force_all_finite:
        if not hasattr(spmatrix, "data"):
            # Formats such as DOK expose no flat data array to scan.
            warnings.warn("Can't check %s sparse matrix for nan or inf."
                          % spmatrix.format)
        else:
            _assert_all_finite(spmatrix.data)
    return spmatrix
def check_array(array, accept_sparse=False, dtype="numeric", order=None,
                copy=False, force_all_finite=True, ensure_2d=True,
                allow_nd=False, ensure_min_samples=1, ensure_min_features=1,
                warn_on_dtype=False, estimator=None):
    """Input validation on an array, list, sparse matrix or similar.

    By default, the input is converted to an at least 2D numpy array.
    If the dtype of the array is object, attempt converting to float,
    raising on failure.

    Parameters
    ----------
    array : object
        Input object to check / convert.

    accept_sparse : string, boolean or list/tuple of strings (default=False)
        String[s] representing allowed sparse matrix formats, such as 'csc',
        'csr', etc. If the input is sparse but not in the allowed format,
        it will be converted to the first listed format. True allows the input
        to be any format. False means that a sparse matrix input will
        raise an error.

    dtype : string, type, list of types or None (default="numeric")
        Data type of result. If None, the dtype of the input is preserved.
        If "numeric", dtype is preserved unless array.dtype is object.
        If dtype is a list of types, conversion on the first type is only
        performed if the dtype of the input is not in the list.

    order : 'F', 'C' or None (default=None)
        Whether an array will be forced to be fortran or c-style.
        When order is None (default), then if copy=False, nothing is ensured
        about the memory layout of the output array; otherwise (copy=True)
        the memory layout of the returned array is kept as close as possible
        to the original array.

    copy : boolean (default=False)
        Whether a forced copy will be triggered. If copy=False, a copy might
        be triggered by a conversion.

    force_all_finite : boolean (default=True)
        Whether to raise an error on np.inf and np.nan in X.

    ensure_2d : boolean (default=True)
        Whether to raise a value error if X is not 2d.

    allow_nd : boolean (default=False)
        Whether to allow X.ndim > 2.

    ensure_min_samples : int (default=1)
        Make sure that the array has a minimum number of samples in its first
        axis (rows for a 2D array). Setting to 0 disables this check.

    ensure_min_features : int (default=1)
        Make sure that the 2D array has some minimum number of features
        (columns). The default value of 1 rejects empty datasets.
        This check is only enforced when the input data has effectively 2
        dimensions or is originally 1D and ``ensure_2d`` is True. Setting to 0
        disables this check.

    warn_on_dtype : boolean (default=False)
        Raise DataConversionWarning if the dtype of the input data structure
        does not match the requested dtype, causing a memory copy.

    estimator : str or estimator instance (default=None)
        If passed, include the name of the estimator in warning messages.

    Returns
    -------
    X_converted : object
        The converted and validated X.
    """
    # accept_sparse 'None' deprecation check
    if accept_sparse is None:
        warnings.warn(
            "Passing 'None' to parameter 'accept_sparse' in methods "
            "check_array and check_X_y is deprecated in version 0.19 "
            "and will be removed in 0.21. Use 'accept_sparse=False' "
            " instead.", DeprecationWarning)
        accept_sparse = False

    # store whether originally we wanted numeric dtype
    dtype_numeric = dtype == "numeric"

    dtype_orig = getattr(array, "dtype", None)
    if not hasattr(dtype_orig, 'kind'):
        # not a data type (e.g. a column named dtype in a pandas DataFrame)
        dtype_orig = None

    if dtype_numeric:
        if dtype_orig is not None and dtype_orig.kind == "O":
            # if input is object, convert to float.
            dtype = np.float64
        else:
            dtype = None

    if isinstance(dtype, (list, tuple)):
        if dtype_orig is not None and dtype_orig in dtype:
            # no dtype conversion required
            dtype = None
        else:
            # dtype conversion required. Let's select the first element of the
            # list of accepted types.
            dtype = dtype[0]

    # Resolve the estimator name used to contextualize error messages.
    if estimator is not None:
        if isinstance(estimator, six.string_types):
            estimator_name = estimator
        else:
            estimator_name = estimator.__class__.__name__
    else:
        estimator_name = "Estimator"
    context = " by %s" % estimator_name if estimator is not None else ""

    if sp.issparse(array):
        array = _ensure_sparse_format(array, accept_sparse, dtype, copy,
                                      force_all_finite)
    else:
        array = np.array(array, dtype=dtype, order=order, copy=copy)

        if ensure_2d:
            if array.ndim == 1:
                raise ValueError(
                    "Got X with X.ndim=1. Reshape your data either using "
                    "X.reshape(-1, 1) if your data has a single feature or "
                    "X.reshape(1, -1) if it contains a single sample.")
            array = np.atleast_2d(array)
            # To ensure that array flags are maintained
            array = np.array(array, dtype=dtype, order=order, copy=copy)

        # make sure we actually converted to numeric:
        if dtype_numeric and array.dtype.kind == "O":
            array = array.astype(np.float64)
        if not allow_nd and array.ndim >= 3:
            raise ValueError("Found array with dim %d. %s expected <= 2."
                             % (array.ndim, estimator_name))
        if force_all_finite:
            _assert_all_finite(array)

    shape_repr = _shape_repr(array.shape)
    if ensure_min_samples > 0:
        n_samples = _num_samples(array)
        if n_samples < ensure_min_samples:
            raise ValueError("Found array with %d sample(s) (shape=%s) while a"
                             " minimum of %d is required%s."
                             % (n_samples, shape_repr, ensure_min_samples,
                                context))

    if ensure_min_features > 0 and array.ndim == 2:
        n_features = array.shape[1]
        if n_features < ensure_min_features:
            raise ValueError("Found array with %d feature(s) (shape=%s) while"
                             " a minimum of %d is required%s."
                             % (n_features, shape_repr, ensure_min_features,
                                context))

    if warn_on_dtype and dtype_orig is not None and array.dtype != dtype_orig:
        msg = ("Data with input dtype %s was converted to %s%s."
               % (dtype_orig, array.dtype, context))
        warnings.warn(msg, DataConversionWarning)
    return array
def check_X_y(X, y, accept_sparse=False, dtype="numeric", order=None,
              copy=False, force_all_finite=True, ensure_2d=True,
              allow_nd=False, multi_output=False, ensure_min_samples=1,
              ensure_min_features=1, y_numeric=False,
              warn_on_dtype=False, estimator=None):
    """Input validation for standard estimators.

    Checks X and y for consistent length, enforces X 2d and y 1d.
    Standard input checks are only applied to y, such as checking that y
    does not have np.nan or np.inf targets. For multi-label y, set
    multi_output=True to allow 2d and sparse y. If the dtype of X is
    object, attempt converting to float, raising on failure.

    Parameters
    ----------
    X : nd-array, list or sparse matrix
        Input data.

    y : nd-array, list or sparse matrix
        Labels.

    accept_sparse : string, boolean or list of string (default=False)
        String[s] representing allowed sparse matrix formats, such as 'csc',
        'csr', etc. If the input is sparse but not in the allowed format,
        it will be converted to the first listed format. True allows the input
        to be any format. False means that a sparse matrix input will
        raise an error.

    dtype : string, type, list of types or None (default="numeric")
        Data type of result. If None, the dtype of the input is preserved.
        If "numeric", dtype is preserved unless array.dtype is object.
        If dtype is a list of types, conversion on the first type is only
        performed if the dtype of the input is not in the list.

    order : 'F', 'C' or None (default=None)
        Whether an array will be forced to be fortran or c-style.

    copy : boolean (default=False)
        Whether a forced copy will be triggered. If copy=False, a copy might
        be triggered by a conversion.

    force_all_finite : boolean (default=True)
        Whether to raise an error on np.inf and np.nan in X. This parameter
        does not influence whether y can have np.inf or np.nan values.

    ensure_2d : boolean (default=True)
        Whether to make X at least 2d.

    allow_nd : boolean (default=False)
        Whether to allow X.ndim > 2.

    multi_output : boolean (default=False)
        Whether to allow 2-d y (array or sparse matrix). If false, y will be
        validated as a vector. y cannot have np.nan or np.inf values if
        multi_output=True.

    ensure_min_samples : int (default=1)
        Make sure that X has a minimum number of samples in its first
        axis (rows for a 2D array).

    ensure_min_features : int (default=1)
        Make sure that the 2D array has some minimum number of features
        (columns). The default value of 1 rejects empty datasets.
        This check is only enforced when X has effectively 2 dimensions or
        is originally 1D and ``ensure_2d`` is True. Setting to 0 disables
        this check.

    y_numeric : boolean (default=False)
        Whether to ensure that y has a numeric type. If dtype of y is object,
        it is converted to float64. Should only be used for regression
        algorithms.

    warn_on_dtype : boolean (default=False)
        Raise DataConversionWarning if the dtype of the input data structure
        does not match the requested dtype, causing a memory copy.

    estimator : str or estimator instance (default=None)
        If passed, include the name of the estimator in warning messages.

    Returns
    -------
    X_converted : object
        The converted and validated X.

    y_converted : object
        The converted and validated y.
    """
    # X gets the full validation treatment with the caller's options.
    X = check_array(X, accept_sparse, dtype, order, copy, force_all_finite,
                    ensure_2d, allow_nd, ensure_min_samples,
                    ensure_min_features, warn_on_dtype, estimator)
    if multi_output:
        # 2-d / sparse targets are allowed; dtype is left untouched.
        y = check_array(y, 'csr', force_all_finite=True, ensure_2d=False,
                        dtype=None)
    else:
        # Plain (single-output) targets must ravel to a finite 1-d vector.
        y = column_or_1d(y, warn=True)
        _assert_all_finite(y)
    if y_numeric and y.dtype.kind == 'O':
        y = y.astype(np.float64)

    check_consistent_length(X, y)

    return X, y
def column_or_1d(y, warn=False):
    """Ravel a 1d array or a single-column 2d array; raise otherwise.

    Parameters
    ----------
    y : array-like

    warn : boolean, default False
        To control display of warnings.

    Returns
    -------
    y : array
    """
    shape = np.shape(y)
    ndim = len(shape)
    if ndim == 1:
        return np.ravel(y)
    if ndim == 2 and shape[1] == 1:
        if warn:
            warnings.warn("A column-vector y was passed when a 1d array was"
                          " expected. Please change the shape of y to "
                          "(n_samples, ), for example using ravel().",
                          DataConversionWarning, stacklevel=2)
        return np.ravel(y)
    raise ValueError("bad input shape {0}".format(shape))
def check_random_state(seed):
    """Turn *seed* into a np.random.RandomState instance.

    If seed is None, return the RandomState singleton used by np.random.
    If seed is an int, return a new RandomState instance seeded with seed.
    If seed is already a RandomState instance, return it.
    Otherwise raise ValueError.
    """
    if seed is None or seed is np.random:
        # Share the global singleton behind the np.random module functions.
        return np.random.mtrand._rand
    if isinstance(seed, np.random.RandomState):
        return seed
    if isinstance(seed, (numbers.Integral, np.integer)):
        return np.random.RandomState(seed)
    raise ValueError('%r cannot be used to seed a numpy.random.RandomState'
                     ' instance' % seed)
def has_fit_parameter(estimator, parameter):
    """Check whether the estimator's fit method supports the given parameter.

    Examples
    --------
    >>> from sklearn.svm import SVC
    >>> has_fit_parameter(SVC(), "sample_weight")
    True
    """
    fit_parameters = signature(estimator.fit).parameters
    return parameter in fit_parameters
def check_symmetric(array, tol=1E-10, raise_warning=True,
                    raise_exception=False):
    """Make sure that array is 2D, square and symmetric.

    If the array is not symmetric, then a symmetrized version is returned.
    Optionally, a warning or exception is raised if the matrix is not
    symmetric.

    Parameters
    ----------
    array : nd-array or sparse matrix
        Input object to check / convert. Must be two-dimensional and square,
        otherwise a ValueError will be raised.

    tol : float
        Absolute tolerance for equivalence of arrays. Default = 1E-10.

    raise_warning : boolean (default=True)
        If True then raise a warning if conversion is required.

    raise_exception : boolean (default=False)
        If True then raise an exception if array is not symmetric.

    Returns
    -------
    array_sym : ndarray or sparse matrix
        Symmetrized version of the input array, i.e. the average of array
        and array.transpose(). If sparse, then duplicate entries are first
        summed and zeros are eliminated.
    """
    if array.ndim != 2 or array.shape[0] != array.shape[1]:
        raise ValueError("array must be 2-dimensional and square. "
                         "shape = {0}".format(array.shape))

    if sp.issparse(array):
        diff = array - array.T
        # only csr, csc, and coo have `data` attribute
        if diff.format not in ['csr', 'csc', 'coo']:
            diff = diff.tocsr()
        symmetric = np.all(abs(diff.data) < tol)
    else:
        symmetric = np.allclose(array, array.T, atol=tol)

    if symmetric:
        return array

    if raise_exception:
        raise ValueError("Array must be symmetric")
    if raise_warning:
        warnings.warn("Array is not symmetric, and will be converted "
                      "to symmetric by average with its transpose.")
    if sp.issparse(array):
        # Convert the symmetrized average back to the input's format.
        conversion = 'to' + array.format
        return getattr(0.5 * (array + array.T), conversion)()
    return 0.5 * (array + array.T)
def check_is_fitted(estimator, attributes, msg=None, all_or_any=all):
    """Perform is_fitted validation for estimator.

    Checks if the estimator is fitted by verifying the presence of
    "all_or_any" of the passed attributes and raises a NotFittedError with
    the given message.

    Parameters
    ----------
    estimator : estimator instance.
        estimator instance for which the check is performed.

    attributes : attribute name(s) given as string or a list/tuple of strings
        Eg. : ["coef_", "estimator_", ...], "coef_"

    msg : string
        The default error message is, "This %(name)s instance is not fitted
        yet. Call 'fit' with appropriate arguments before using this method."
        For custom messages if "%(name)s" is present in the message string,
        it is substituted for the estimator name.
        Eg. : "Estimator, %(name)s, must be fitted before sparsifying".

    all_or_any : callable, {all, any}, default all
        Specify whether all or any of the given attributes must exist.
    """
    if msg is None:
        msg = ("This %(name)s instance is not fitted yet. Call 'fit' with "
               "appropriate arguments before using this method.")

    if not hasattr(estimator, 'fit'):
        raise TypeError("%s is not an estimator instance." % (estimator))

    # Normalize to a sequence so a single attribute name also works.
    attrs = attributes if isinstance(attributes, (list, tuple)) \
        else [attributes]
    found = [hasattr(estimator, attr) for attr in attrs]
    if not all_or_any(found):
        raise NotFittedError(msg % {'name': type(estimator).__name__})
def check_non_negative(X, whom):
    """Raise ValueError if the array (dense or sparse) has a negative entry.

    Parameters
    ----------
    X : array-like or sparse matrix
        Input data.

    whom : string
        Who passed X to this function.
    """
    # For sparse input only the explicitly stored values can be negative.
    values = X.data if sp.issparse(X) else X
    if (values < 0).any():
        raise ValueError("Negative values in data passed to %s" % whom)
| bsd-3-clause |
gfrubi/GR | figuras-editables/fig-Killing-S2.py | 2 | 2263 | from mpl_toolkits.mplot3d import axes3d
from matplotlib.pyplot import * # requiere version 1.4 !!
from matplotlib.patches import FancyArrowPatch
from mpl_toolkits.mplot3d import proj3d
from numpy import *
class Arrow3D(FancyArrowPatch):
    """2D arrow patch whose endpoints are projected from 3D at draw time,
    so it tracks the current view of a 3D axes."""
    def __init__(self, xs, ys, zs, *args, **kwargs):
        # Placeholder 2D positions; the real ones are computed in draw().
        FancyArrowPatch.__init__(self, (0,0), (0,0), *args, **kwargs)
        self._verts3d = xs, ys, zs
    def draw(self, renderer):
        xs3d, ys3d, zs3d = self._verts3d
        # Project the stored 3D endpoints using the axes' current view
        # transform (renderer.M), then draw as a normal 2D arrow.
        xs, ys, zs = proj3d.proj_transform(xs3d, ys3d, zs3d, renderer.M)
        self.set_positions((xs[0],ys[0]),(xs[1],ys[1]))
        FancyArrowPatch.draw(self, renderer)
# NOTE(review): `cosine` appears unused below — confirm before removing.
from scipy.stats import cosine

# Coarse grid of anchor points for the quiver arrows. Taking arccos of a
# uniform grid in [1, -1] spaces theta uniformly in cos(theta), i.e.
# equal-area bands on the sphere.
#theta,phi=meshgrid(linspace(0,pi,20),linspace(0,2*pi,30))
theta,phi=meshgrid(arccos(linspace(1,-1,20)),linspace(0,2*pi,30))
# Cartesian coordinates of the unit-sphere anchor points.
x=sin(theta)*cos(phi)
y=sin(theta)*sin(phi)
z=cos(theta)
# Fine grid used to render the sphere surface itself.
thetas,phis=meshgrid(linspace(0,pi,100),linspace(0,2*pi,100))
xs=sin(thetas)*cos(phis)
ys=sin(thetas)*sin(phis)
zs=cos(thetas)
def decorado():
    # Hide the default 3D axes and draw labelled x/y/z coordinate arrows.
    # NOTE(review): relies on the module-global `ax` being bound to the
    # current figure's 3D axes before each call — confirm call order.
    ax.set_axis_off()
    a = Arrow3D([0, 1.3], [0,0], [0,0], mutation_scale=15, lw=2, arrowstyle="->", color="k")
    ax.add_artist(a)
    a = Arrow3D([0, 0], [0,1.3], [0,0], mutation_scale=15, lw=2, arrowstyle="->", color="k")
    ax.add_artist(a)
    a = Arrow3D([0, 0], [0,0], [0,1.3], mutation_scale=15, lw=2, arrowstyle="->", color="k")
    ax.add_artist(a)
    ax.text(1.35,0,0,'$x$',size=20)
    ax.text(0,1.35,0,'$y$',size=20)
    ax.text(0,0,1.35,'$z$',size=20)
# Figure 1: vector field (0, -z, y) on the sphere — the field plotted as
# arrows over the rendered sphere surface.
f1 = figure(figsize=(10,10))
ax = f1.gca(projection='3d')
u = 0
v = -z
w = y
ax.plot_surface(xs, ys, zs, rstride=1, cstride=1, color='y',linewidth=0)
ax.quiver(x, y, z, u, v, w,length=.09,arrow_length_ratio=0.5)
decorado()

# In[38]:

# Figure 2: vector field (z, 0, -x).
f2 = figure(figsize=(10,10))
ax = f2.gca(projection='3d')
u = z
v = 0
w = -x
ax.plot_surface(xs, ys, zs, rstride=1, cstride=1, color='y',linewidth=0)
ax.quiver(x, y, z, u, v, w,length=.09,arrow_length_ratio=0.5)
decorado()

# In[39]:

# Figure 3: vector field (-y, x, 0).
f3 = figure(figsize=(10,10))
ax = f3.gca(projection='3d')
u = -y
v = x
w = 0
ax.plot_surface(xs, ys, zs, rstride=1, cstride=1, color='y',linewidth=0)
ax.quiver(x, y, z, u, v, w,length=.09,arrow_length_ratio=0.5)
decorado()

# In[44]:

# Save all three figures as SVG.
f1.savefig('KS2x.svg')
f2.savefig('KS2y.svg')
f3.savefig('KS2z.svg')
| gpl-3.0 |
and2egg/philharmonic | philharmonic/explorer.py | 2 | 4700 | import itertools
import numpy as np
import pandas as pd
from philharmonic import conf
from philharmonic.simulator.simulator import run
from philharmonic.logger import info
from philharmonic.utils import loc
def _generate_range(min_value, max_value, resolution):
return np.arange(min_value, max_value + resolution, resolution)
def eq(a, b, eps=0.0001):
    """Approximate float equality: True iff |a - b| <= eps."""
    difference = a - b
    if difference < 0:
        difference = -difference
    return difference <= eps
# different domain-specific combination-generating methods
class ParameterSpace(object):
    """Represents a space of parameter combinations to explore.

    Subclasses populate ``self.combinations`` (a DataFrame, one row per
    combination) in ``__init__`` and push a single combination into the
    global ``conf`` in ``apply``.
    """
    def __init__(self):
        """Generate all the combinations."""
        raise NotImplementedError
    def apply(self, combination):
        """Apply a single combination.

        Fix: the original signature lacked ``self`` even though both
        subclasses (GAWeights, TimeOffsets) override this as an instance
        method, so calling it on an instance raised TypeError instead of
        the intended NotImplementedError.
        """
        raise NotImplementedError
class GAWeights(ParameterSpace):
    """Vary the weights of the genetic algorithm's fitness function
    components (utilisation, cost, SLA, constraint).
    """
    def __init__(self):
        step = conf.resolution
        weight_ranges = [
            _generate_range(conf.w_util_min, conf.w_util_max, step),
            _generate_range(conf.w_cost_min, conf.w_cost_max, step),
            _generate_range(conf.w_sla_min, conf.w_sla_max, step),
            _generate_range(conf.w_constraint_min, conf.w_constraint_max,
                            step),
        ]
        # Keep only weight tuples that (approximately) sum to 1.
        valid_combos = [combo
                        for combo in itertools.product(*weight_ranges)
                        if eq(np.sum(combo), 1.)]
        self.combinations = pd.DataFrame(
            valid_combos,
            columns=['w_util', 'w_cost', 'w_sla', 'w_constraint'])

    def apply(self, combination):
        """Update the config with the current combination of parameters."""
        w_util, w_cost, w_sla, w_constraint = combination
        print(w_util, w_cost, w_sla, w_constraint)
        conf.gaconf['w_util'] = w_util
        conf.gaconf['w_cost'] = w_cost
        conf.gaconf['w_sla'] = w_sla
        conf.gaconf['w_constraint'] = w_constraint
class TimeOffsets(ParameterSpace):
    """Start the simulation with a time offset (also shifting VM requests)."""
    def __init__(self):
        # Remember the unshifted schedule so apply() can rebase from it.
        self.original_start = conf.start
        self.original_times = conf.times
        end_of_range = self.original_start + conf.time_offsets_max
        offset_list, start_list = [], []
        current_offset = conf.time_offsets_start
        current_start = self.original_start + current_offset
        # Walk the start times in fixed steps until the latest allowed start.
        while current_start < end_of_range:
            start_list.append(current_start)
            offset_list.append(current_offset)
            current_start = current_start + conf.time_offsets_step
            current_offset = current_start - self.original_start
        self.combinations = pd.DataFrame({'offset': offset_list,
                                          'starts': start_list})

    def apply(self, combination):
        """Shift conf.times/start/end and the request factory by the
        combination's offset."""
        offset, start = combination
        conf.times = self.original_times + offset
        conf.start = conf.times[0]
        conf.end = conf.times[-1]
        conf.factory['requests_offset'] = offset
def generate_combinations():
    """Generate a search space of all the possible combinations.

    The ParameterSpace subclass is selected by name from this module via
    conf.parameter_space.
    """
    space_class = globals()[conf.parameter_space]
    return space_class()
def _run_simulation():
    """Run a single simulation and return its results."""
    return run()
def _process_results(all_results, new_results):
cost = new_results['Total cost ($)']
all_results['cost'].append(cost)
def _serialise_results(results):
    """Persist the exploration results DataFrame as a pickle in the
    project output location."""
    target_path = loc('exploration_results.pkl')
    results.to_pickle(target_path)
# TODO: maybe this function should be a method of ParameterSpace
def _iterate_run(parameter_space):
    """Iterate over all the combinations and run the simulation.

    For each row of ``parameter_space.combinations``: apply the combination
    to the global conf, run one simulation, and collect its cost.  The
    parameters merged with the collected costs are pickled and returned as
    a DataFrame.
    """
    combinations = parameter_space.combinations
    all_results = {'cost': []}
    for i in combinations.index:
        info('\n' + '#' * 30 +
             '\nExploration iteration ' +
             '{}/{}\n'.format(i + 1,
                              len(combinations.index)) +
             '#' * 30 + '\n')
        # .loc replaces the long-deprecated DataFrame.ix (removed in
        # pandas 1.0); equivalent here since `combinations` is built with
        # a default integer index.
        parameter_space.apply(combinations.loc[i])
        new_results = _run_simulation()
        info(new_results)
        _process_results(all_results, new_results)
    results = pd.merge(combinations, pd.DataFrame(all_results),
                       left_index=True, right_index=True)
    _serialise_results(results)
    info('\nResults\n--------\n{}'.format(results))
    return results
def explore():
    """Explore different parameters (module entry point).

    Builds the configured ParameterSpace and runs the simulation once per
    combination; results are pickled by _iterate_run as a side effect.
    """
    space = generate_combinations()
    _iterate_run(space)
| gpl-3.0 |
hsiaoyi0504/scikit-learn | examples/cluster/plot_kmeans_assumptions.py | 270 | 2040 | """
====================================
Demonstration of k-means assumptions
====================================
This example is meant to illustrate situations where k-means will produce
unintuitive and possibly unexpected clusters. In the first three plots, the
input data does not conform to some implicit assumption that k-means makes and
undesirable clusters are produced as a result. In the last plot, k-means
returns intuitive clusters despite unevenly sized blobs.
"""
print(__doc__)
# Author: Phil Roth <mr.phil.roth@gmail.com>
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn.cluster import KMeans
from sklearn.datasets import make_blobs
# One 2x2 figure holding the four demonstration scenarios.
plt.figure(figsize=(12, 12))
n_samples = 1500
random_state = 170
# Baseline: three isotropic, equally sized Gaussian blobs.
X, y = make_blobs(n_samples=n_samples, random_state=random_state)
# Incorrect number of clusters
y_pred = KMeans(n_clusters=2, random_state=random_state).fit_predict(X)
plt.subplot(221)
plt.scatter(X[:, 0], X[:, 1], c=y_pred)
plt.title("Incorrect Number of Blobs")
# Anisotropicly distributed data
# A linear transformation stretches/rotates the blobs, breaking k-means'
# implicit isotropy assumption.
transformation = [[ 0.60834549, -0.63667341], [-0.40887718, 0.85253229]]
X_aniso = np.dot(X, transformation)
y_pred = KMeans(n_clusters=3, random_state=random_state).fit_predict(X_aniso)
plt.subplot(222)
plt.scatter(X_aniso[:, 0], X_aniso[:, 1], c=y_pred)
plt.title("Anisotropicly Distributed Blobs")
# Different variance
X_varied, y_varied = make_blobs(n_samples=n_samples,
                                cluster_std=[1.0, 2.5, 0.5],
                                random_state=random_state)
y_pred = KMeans(n_clusters=3, random_state=random_state).fit_predict(X_varied)
plt.subplot(223)
plt.scatter(X_varied[:, 0], X_varied[:, 1], c=y_pred)
plt.title("Unequal Variance")
# Unevenly sized blobs
# Subsample the blobs to 500/100/10 points; k-means still recovers them.
X_filtered = np.vstack((X[y == 0][:500], X[y == 1][:100], X[y == 2][:10]))
y_pred = KMeans(n_clusters=3, random_state=random_state).fit_predict(X_filtered)
plt.subplot(224)
plt.scatter(X_filtered[:, 0], X_filtered[:, 1], c=y_pred)
plt.title("Unevenly Sized Blobs")
plt.show()
| bsd-3-clause |
jjx02230808/project0223 | examples/applications/wikipedia_principal_eigenvector.py | 233 | 7819 | """
===============================
Wikipedia principal eigenvector
===============================
A classical way to assert the relative importance of vertices in a
graph is to compute the principal eigenvector of the adjacency matrix
so as to assign to each vertex the values of the components of the first
eigenvector as a centrality score:
http://en.wikipedia.org/wiki/Eigenvector_centrality
On the graph of webpages and links those values are called the PageRank
scores by Google.
The goal of this example is to analyze the graph of links inside
wikipedia articles to rank articles by relative importance according to
this eigenvector centrality.
The traditional way to compute the principal eigenvector is to use the
power iteration method:
http://en.wikipedia.org/wiki/Power_iteration
Here the computation is achieved thanks to Martinsson's Randomized SVD
algorithm implemented in the scikit.
The graph data is fetched from the DBpedia dumps. DBpedia is an extraction
of the latent structured data of the Wikipedia content.
"""
# Author: Olivier Grisel <olivier.grisel@ensta.org>
# License: BSD 3 clause
from __future__ import print_function
from bz2 import BZ2File
import os
from datetime import datetime
from pprint import pprint
from time import time
import numpy as np
from scipy import sparse
from sklearn.decomposition import randomized_svd
from sklearn.externals.joblib import Memory
from sklearn.externals.six.moves.urllib.request import urlopen
from sklearn.externals.six import iteritems
print(__doc__)
###############################################################################
# Where to download the data, if not already on disk
redirects_url = "http://downloads.dbpedia.org/3.5.1/en/redirects_en.nt.bz2"
redirects_filename = redirects_url.rsplit("/", 1)[1]
page_links_url = "http://downloads.dbpedia.org/3.5.1/en/page_links_en.nt.bz2"
page_links_filename = page_links_url.rsplit("/", 1)[1]
resources = [
    (redirects_url, redirects_filename),
    (page_links_url, page_links_filename),
]
# Download each archive into the working directory unless already present.
# NOTE(review): these dumps are multi-GB; the downloads can take a long time.
for url, filename in resources:
    if not os.path.exists(filename):
        print("Downloading data from '%s', please wait..." % url)
        opener = urlopen(url)
        open(filename, 'wb').write(opener.read())
        print()
###############################################################################
# Loading the redirect files
# joblib memory cache rooted in the current directory (see the note above
# get_adjacency_matrix: caching is currently disabled there).
memory = Memory(cachedir=".")
def index(redirects, index_map, k):
    """Find the index of an article name after redirect resolution.

    Resolves `k` through the redirects map (identity if absent), then
    returns its integer id from `index_map`, assigning the next free id
    and recording it when the name is new.
    """
    resolved = redirects.get(k, k)
    if resolved not in index_map:
        index_map[resolved] = len(index_map)
    return index_map[resolved]
# Length of the common DBpedia resource URI prefix; +1 below skips the
# leading '<' marker and -1 drops the trailing '>'.
DBPEDIA_RESOURCE_PREFIX_LEN = len("http://dbpedia.org/resource/")
SHORTNAME_SLICE = slice(DBPEDIA_RESOURCE_PREFIX_LEN + 1, -1)
def short_name(nt_uri):
    """Remove the < and > URI markers and the common URI prefix."""
    # Same as nt_uri[SHORTNAME_SLICE], spelled out with explicit bounds.
    return nt_uri[DBPEDIA_RESOURCE_PREFIX_LEN + 1:-1]
def get_redirects(redirects_filename):
    """Parse the redirections and build a transitively closed map out of it.

    Reads the bz2-compressed NT redirects dump line by line and returns a
    dict mapping each short article name to its final redirect target.
    NOTE(review): written for Python 2 -- under Python 3, BZ2File yields
    bytes, so the string concatenation in the malformed-line message would
    raise; verify the target interpreter.
    """
    redirects = {}
    print("Parsing the NT redirect file")
    for l, line in enumerate(BZ2File(redirects_filename)):
        # NT triples: subject predicate object '.' -> exactly 4 tokens.
        split = line.split()
        if len(split) != 4:
            print("ignoring malformed line: " + line)
            continue
        redirects[short_name(split[0])] = short_name(split[2])
        # Progress heartbeat every million lines.
        if l % 1000000 == 0:
            print("[%s] line: %08d" % (datetime.now().isoformat(), l))
    # compute the transitive closure
    print("Computing the transitive closure of the redirect relation")
    for l, source in enumerate(redirects.keys()):
        transitive_target = None
        target = redirects[source]
        # Follow the redirect chain; `seen` guards against cycles.
        seen = set([source])
        while True:
            transitive_target = target
            target = redirects.get(target)
            if target is None or target in seen:
                break
            seen.add(target)
        # Point the source directly at the end of its chain.
        redirects[source] = transitive_target
        if l % 1000000 == 0:
            print("[%s] line: %08d" % (datetime.now().isoformat(), l))
    return redirects
# disabling joblib as the pickling of large dicts seems much too slow
#@memory.cache
def get_adjacency_matrix(redirects_filename, page_links_filename, limit=None):
    """Extract the adjacency graph as a scipy sparse matrix

    Redirects are resolved first.

    Returns X, the scipy sparse adjacency matrix, redirects as python
    dict from article names to article names and index_map a python dict
    from article names to python int (article indexes).

    `limit` caps the number of page-link lines parsed (None = all).
    """
    print("Computing the redirect map")
    redirects = get_redirects(redirects_filename)
    print("Computing the integer index map")
    index_map = dict()
    links = list()
    for l, line in enumerate(BZ2File(page_links_filename)):
        # NT triples: subject predicate object '.' -> exactly 4 tokens.
        split = line.split()
        if len(split) != 4:
            print("ignoring malformed line: " + line)
            continue
        # Resolve both endpoints through the redirects and intern them
        # as integer ids.
        i = index(redirects, index_map, short_name(split[0]))
        j = index(redirects, index_map, short_name(split[2]))
        links.append((i, j))
        if l % 1000000 == 0:
            print("[%s] line: %08d" % (datetime.now().isoformat(), l))
        if limit is not None and l >= limit - 1:
            break
    print("Computing the adjacency matrix")
    # LIL is efficient for incremental construction; converted to CSR below.
    X = sparse.lil_matrix((len(index_map), len(index_map)), dtype=np.float32)
    for i, j in links:
        X[i, j] = 1.0
    # Free the edge list before conversion to reduce peak memory.
    del links
    print("Converting to CSR representation")
    X = X.tocsr()
    print("CSR conversion done")
    return X, redirects, index_map
# stop after 5M links to make it possible to work in RAM
X, redirects, index_map = get_adjacency_matrix(
    redirects_filename, page_links_filename, limit=5000000)
# Invert the name -> id map so results can be reported by article name.
names = dict((i, name) for name, i in iteritems(index_map))
print("Computing the principal singular vectors using randomized_svd")
t0 = time()
U, s, V = randomized_svd(X, 5, n_iter=3)
print("done in %0.3fs" % (time() - t0))
# print the names of the wikipedia related strongest components of the
# principal singular vector which should be similar to the highest eigenvector
print("Top wikipedia pages according to principal singular vectors")
pprint([names[i] for i in np.abs(U.T[0]).argsort()[-10:]])
pprint([names[i] for i in np.abs(V[0]).argsort()[-10:]])
def centrality_scores(X, alpha=0.85, max_iter=100, tol=1e-10):
    """Power iteration computation of the principal eigenvector

    This method is also known as Google PageRank and the implementation
    is based on the one from the NetworkX project (BSD licensed too)
    with copyrights by:
      Aric Hagberg <hagberg@lanl.gov>
      Dan Schult <dschult@colgate.edu>
      Pieter Swart <swart@lanl.gov>
    """
    n_nodes = X.shape[0]
    # Work on a copy: rows are rescaled in place below.
    X = X.copy()
    row_sums = np.asarray(X.sum(axis=1)).ravel()
    print("Normalizing the graph")
    # Row-normalize the adjacency matrix by scaling each non-empty
    # CSR row's data segment.
    for row in row_sums.nonzero()[0]:
        X.data[X.indptr[row]:X.indptr[row + 1]] *= 1.0 / row_sums[row]
    # Dangling nodes (empty rows) redistribute uniformly.
    dangle = np.asarray(np.where(X.sum(axis=1) == 0, 1.0 / n_nodes,
                                 0)).ravel()
    scores = np.ones(n_nodes, dtype=np.float32) / n_nodes  # initial guess
    for iteration in range(max_iter):
        print("power iteration #%d" % iteration)
        prev_scores = scores
        scores = (alpha * (scores * X + np.dot(dangle, prev_scores))
                  + (1 - alpha) * prev_scores.sum() / n_nodes)
        # check convergence: normalized l_inf norm
        scores_max = np.abs(scores).max()
        if scores_max == 0.0:
            scores_max = 1.0
        err = np.abs(scores - prev_scores).max() / scores_max
        print("error: %0.6f" % err)
        if err < n_nodes * tol:
            return scores
    return scores
# Same ranking computed via PageRank-style power iteration, for comparison
# with the randomized-SVD result above.
print("Computing principal eigenvector score using a power iteration method")
t0 = time()
scores = centrality_scores(X, max_iter=100, tol=1e-10)
print("done in %0.3fs" % (time() - t0))
pprint([names[i] for i in np.abs(scores).argsort()[-10:]])
| bsd-3-clause |
valexandersaulys/prudential_insurance_kaggle | venv/lib/python2.7/site-packages/sklearn/utils/arpack.py | 265 | 64837 | """
This contains a copy of the future version of
scipy.sparse.linalg.eigen.arpack.eigsh
It's an upgraded wrapper of the ARPACK library which
allows the use of shift-invert mode for symmetric matrices.
Find a few eigenvectors and eigenvalues of a matrix.
Uses ARPACK: http://www.caam.rice.edu/software/ARPACK/
"""
# Wrapper implementation notes
#
# ARPACK Entry Points
# -------------------
# The entry points to ARPACK are
# - (s,d)seupd : single and double precision symmetric matrix
# - (s,d,c,z)neupd: single,double,complex,double complex general matrix
# This wrapper puts the *neupd (general matrix) interfaces in eigs()
# and the *seupd (symmetric matrix) in eigsh().
# There is no Hermitian complex/double complex interface.
# To find eigenvalues of a Hermitian matrix you
# must use eigs() and not eigsh()
# It might be desirable to handle the Hermitian case differently
# and, for example, return real eigenvalues.
# Number of eigenvalues returned and complex eigenvalues
# ------------------------------------------------------
# The ARPACK nonsymmetric real and double interface (s,d)naupd return
# eigenvalues and eigenvectors in real (float,double) arrays.
# Since the eigenvalues and eigenvectors are, in general, complex
# ARPACK puts the real and imaginary parts in consecutive entries
# in real-valued arrays. This wrapper puts the real entries
# into complex data types and attempts to return the requested eigenvalues
# and eigenvectors.
# Solver modes
# ------------
# ARPACK and handle shifted and shift-inverse computations
# for eigenvalues by providing a shift (sigma) and a solver.
__docformat__ = "restructuredtext en"
__all__ = ['eigs', 'eigsh', 'svds', 'ArpackError', 'ArpackNoConvergence']
import warnings
from scipy.sparse.linalg.eigen.arpack import _arpack
import numpy as np
from scipy.sparse.linalg.interface import aslinearoperator, LinearOperator
from scipy.sparse import identity, isspmatrix, isspmatrix_csr
from scipy.linalg import lu_factor, lu_solve
from scipy.sparse.sputils import isdense
from scipy.sparse.linalg import gmres, splu
import scipy
from distutils.version import LooseVersion
# Map numpy dtype character codes (f/d/F/D) to the single-letter prefix of
# the matching ARPACK routine family (s/d/c/z), e.g. 'd' -> dsaupd/dseupd.
_type_conv = {'f': 's', 'd': 'd', 'F': 'c', 'D': 'z'}
# Per-precision digit counts; presumably significant decimal digits for each
# dtype -- not referenced in this chunk, confirm against the full file.
_ndigits = {'f': 5, 'd': 12, 'F': 5, 'D': 12}
# ---------------------------------------------------------------------------
# Human-readable messages for ARPACK info/ierr return codes, keyed by code.
# One table per Fortran routine family: *naupd/*neupd (general matrices) and
# *saupd/*seupd (symmetric matrices), in the four precisions s/d/c/z.
# Several tables are shared or derived via .copy() plus overrides where a
# precision's message text differs.
# ---------------------------------------------------------------------------
DNAUPD_ERRORS = {
    0: "Normal exit.",
    1: "Maximum number of iterations taken. "
       "All possible eigenvalues of OP has been found. IPARAM(5) "
       "returns the number of wanted converged Ritz values.",
    2: "No longer an informational error. Deprecated starting "
       "with release 2 of ARPACK.",
    3: "No shifts could be applied during a cycle of the "
       "Implicitly restarted Arnoldi iteration. One possibility "
       "is to increase the size of NCV relative to NEV. ",
    -1: "N must be positive.",
    -2: "NEV must be positive.",
    -3: "NCV-NEV >= 2 and less than or equal to N.",
    -4: "The maximum number of Arnoldi update iterations allowed "
        "must be greater than zero.",
    -5: " WHICH must be one of 'LM', 'SM', 'LR', 'SR', 'LI', 'SI'",
    -6: "BMAT must be one of 'I' or 'G'.",
    -7: "Length of private work array WORKL is not sufficient.",
    -8: "Error return from LAPACK eigenvalue calculation;",
    -9: "Starting vector is zero.",
    -10: "IPARAM(7) must be 1,2,3,4.",
    -11: "IPARAM(7) = 1 and BMAT = 'G' are incompatible.",
    -12: "IPARAM(1) must be equal to 0 or 1.",
    -13: "NEV and WHICH = 'BE' are incompatible.",
    -9999: "Could not build an Arnoldi factorization. "
           "IPARAM(5) returns the size of the current Arnoldi "
           "factorization. The user is advised to check that "
           "enough workspace and array storage has been allocated."
}
# Single precision shares the double-precision messages verbatim.
SNAUPD_ERRORS = DNAUPD_ERRORS
# Complex variants differ only in the valid IPARAM(7) modes.
ZNAUPD_ERRORS = DNAUPD_ERRORS.copy()
ZNAUPD_ERRORS[-10] = "IPARAM(7) must be 1,2,3."
CNAUPD_ERRORS = ZNAUPD_ERRORS
DSAUPD_ERRORS = {
    0: "Normal exit.",
    1: "Maximum number of iterations taken. "
       "All possible eigenvalues of OP has been found.",
    2: "No longer an informational error. Deprecated starting with "
       "release 2 of ARPACK.",
    3: "No shifts could be applied during a cycle of the Implicitly "
       "restarted Arnoldi iteration. One possibility is to increase "
       "the size of NCV relative to NEV. ",
    -1: "N must be positive.",
    -2: "NEV must be positive.",
    -3: "NCV must be greater than NEV and less than or equal to N.",
    -4: "The maximum number of Arnoldi update iterations allowed "
        "must be greater than zero.",
    -5: "WHICH must be one of 'LM', 'SM', 'LA', 'SA' or 'BE'.",
    -6: "BMAT must be one of 'I' or 'G'.",
    -7: "Length of private work array WORKL is not sufficient.",
    -8: "Error return from trid. eigenvalue calculation; "
        "Informational error from LAPACK routine dsteqr .",
    -9: "Starting vector is zero.",
    -10: "IPARAM(7) must be 1,2,3,4,5.",
    -11: "IPARAM(7) = 1 and BMAT = 'G' are incompatible.",
    -12: "IPARAM(1) must be equal to 0 or 1.",
    -13: "NEV and WHICH = 'BE' are incompatible. ",
    -9999: "Could not build an Arnoldi factorization. "
           "IPARAM(5) returns the size of the current Arnoldi "
           "factorization. The user is advised to check that "
           "enough workspace and array storage has been allocated.",
}
SSAUPD_ERRORS = DSAUPD_ERRORS
DNEUPD_ERRORS = {
    0: "Normal exit.",
    1: "The Schur form computed by LAPACK routine dlahqr "
       "could not be reordered by LAPACK routine dtrsen. "
       "Re-enter subroutine dneupd with IPARAM(5)NCV and "
       "increase the size of the arrays DR and DI to have "
       "dimension at least dimension NCV and allocate at least NCV "
       "columns for Z. NOTE: Not necessary if Z and V share "
       "the same space. Please notify the authors if this error "
       "occurs.",
    -1: "N must be positive.",
    -2: "NEV must be positive.",
    -3: "NCV-NEV >= 2 and less than or equal to N.",
    -5: "WHICH must be one of 'LM', 'SM', 'LR', 'SR', 'LI', 'SI'",
    -6: "BMAT must be one of 'I' or 'G'.",
    -7: "Length of private work WORKL array is not sufficient.",
    -8: "Error return from calculation of a real Schur form. "
        "Informational error from LAPACK routine dlahqr .",
    -9: "Error return from calculation of eigenvectors. "
        "Informational error from LAPACK routine dtrevc.",
    -10: "IPARAM(7) must be 1,2,3,4.",
    -11: "IPARAM(7) = 1 and BMAT = 'G' are incompatible.",
    -12: "HOWMNY = 'S' not yet implemented",
    -13: "HOWMNY must be one of 'A' or 'P' if RVEC = .true.",
    -14: "DNAUPD did not find any eigenvalues to sufficient "
         "accuracy.",
    -15: "DNEUPD got a different count of the number of converged "
         "Ritz values than DNAUPD got. This indicates the user "
         "probably made an error in passing data from DNAUPD to "
         "DNEUPD or that the data was modified before entering "
         "DNEUPD",
}
SNEUPD_ERRORS = DNEUPD_ERRORS.copy()
SNEUPD_ERRORS[1] = ("The Schur form computed by LAPACK routine slahqr "
                    "could not be reordered by LAPACK routine strsen . "
                    "Re-enter subroutine dneupd with IPARAM(5)=NCV and "
                    "increase the size of the arrays DR and DI to have "
                    "dimension at least dimension NCV and allocate at least "
                    "NCV columns for Z. NOTE: Not necessary if Z and V share "
                    "the same space. Please notify the authors if this error "
                    "occurs.")
SNEUPD_ERRORS[-14] = ("SNAUPD did not find any eigenvalues to sufficient "
                      "accuracy.")
SNEUPD_ERRORS[-15] = ("SNEUPD got a different count of the number of "
                      "converged Ritz values than SNAUPD got. This indicates "
                      "the user probably made an error in passing data from "
                      "SNAUPD to SNEUPD or that the data was modified before "
                      "entering SNEUPD")
ZNEUPD_ERRORS = {0: "Normal exit.",
                 1: "The Schur form computed by LAPACK routine csheqr "
                    "could not be reordered by LAPACK routine ztrsen. "
                    "Re-enter subroutine zneupd with IPARAM(5)=NCV and "
                    "increase the size of the array D to have "
                    "dimension at least dimension NCV and allocate at least "
                    "NCV columns for Z. NOTE: Not necessary if Z and V share "
                    "the same space. Please notify the authors if this error "
                    "occurs.",
                 -1: "N must be positive.",
                 -2: "NEV must be positive.",
                 -3: "NCV-NEV >= 1 and less than or equal to N.",
                 -5: "WHICH must be one of 'LM', 'SM', 'LR', 'SR', 'LI', 'SI'",
                 -6: "BMAT must be one of 'I' or 'G'.",
                 -7: "Length of private work WORKL array is not sufficient.",
                 -8: "Error return from LAPACK eigenvalue calculation. "
                     "This should never happened.",
                 -9: "Error return from calculation of eigenvectors. "
                     "Informational error from LAPACK routine ztrevc.",
                 -10: "IPARAM(7) must be 1,2,3",
                 -11: "IPARAM(7) = 1 and BMAT = 'G' are incompatible.",
                 -12: "HOWMNY = 'S' not yet implemented",
                 -13: "HOWMNY must be one of 'A' or 'P' if RVEC = .true.",
                 -14: "ZNAUPD did not find any eigenvalues to sufficient "
                      "accuracy.",
                 -15: "ZNEUPD got a different count of the number of "
                      "converged Ritz values than ZNAUPD got. This "
                      "indicates the user probably made an error in passing "
                      "data from ZNAUPD to ZNEUPD or that the data was "
                      "modified before entering ZNEUPD"}
CNEUPD_ERRORS = ZNEUPD_ERRORS.copy()
CNEUPD_ERRORS[-14] = ("CNAUPD did not find any eigenvalues to sufficient "
                      "accuracy.")
CNEUPD_ERRORS[-15] = ("CNEUPD got a different count of the number of "
                      "converged Ritz values than CNAUPD got. This indicates "
                      "the user probably made an error in passing data from "
                      "CNAUPD to CNEUPD or that the data was modified before "
                      "entering CNEUPD")
DSEUPD_ERRORS = {
    0: "Normal exit.",
    -1: "N must be positive.",
    -2: "NEV must be positive.",
    -3: "NCV must be greater than NEV and less than or equal to N.",
    -5: "WHICH must be one of 'LM', 'SM', 'LA', 'SA' or 'BE'.",
    -6: "BMAT must be one of 'I' or 'G'.",
    -7: "Length of private work WORKL array is not sufficient.",
    -8: ("Error return from trid. eigenvalue calculation; "
         "Information error from LAPACK routine dsteqr."),
    -9: "Starting vector is zero.",
    -10: "IPARAM(7) must be 1,2,3,4,5.",
    -11: "IPARAM(7) = 1 and BMAT = 'G' are incompatible.",
    -12: "NEV and WHICH = 'BE' are incompatible.",
    -14: "DSAUPD did not find any eigenvalues to sufficient accuracy.",
    -15: "HOWMNY must be one of 'A' or 'S' if RVEC = .true.",
    -16: "HOWMNY = 'S' not yet implemented",
    -17: ("DSEUPD got a different count of the number of converged "
          "Ritz values than DSAUPD got. This indicates the user "
          "probably made an error in passing data from DSAUPD to "
          "DSEUPD or that the data was modified before entering "
          "DSEUPD.")
}
SSEUPD_ERRORS = DSEUPD_ERRORS.copy()
SSEUPD_ERRORS[-14] = ("SSAUPD did not find any eigenvalues "
                      "to sufficient accuracy.")
SSEUPD_ERRORS[-17] = ("SSEUPD got a different count of the number of "
                      "converged "
                      "Ritz values than SSAUPD got. This indicates the user "
                      "probably made an error in passing data from SSAUPD to "
                      "SSEUPD or that the data was modified before entering "
                      "SSEUPD.")
# Dispatch tables: ARPACK routine-prefix letter -> message table, used by
# the solver classes below to pick the right infodict per precision.
_SAUPD_ERRORS = {'d': DSAUPD_ERRORS,
                 's': SSAUPD_ERRORS}
_NAUPD_ERRORS = {'d': DNAUPD_ERRORS,
                 's': SNAUPD_ERRORS,
                 'z': ZNAUPD_ERRORS,
                 'c': CNAUPD_ERRORS}
_SEUPD_ERRORS = {'d': DSEUPD_ERRORS,
                 's': SSEUPD_ERRORS}
_NEUPD_ERRORS = {'d': DNEUPD_ERRORS,
                 's': SNEUPD_ERRORS,
                 'z': ZNEUPD_ERRORS,
                 'c': CNEUPD_ERRORS}
# accepted values of parameter WHICH in _SEUPD
_SEUPD_WHICH = ['LM', 'SM', 'LA', 'SA', 'BE']
# accepted values of parameter WHICH in _NAUPD
_NEUPD_WHICH = ['LM', 'SM', 'LR', 'SR', 'LI', 'SI']
class ArpackError(RuntimeError):
    """
    ARPACK error
    """
    def __init__(self, info, infodict=_NAUPD_ERRORS):
        # Translate the numeric ARPACK info/ierr code into the matching
        # human-readable message from the given routine-family table.
        msg = infodict.get(info, "Unknown error")
        RuntimeError.__init__(self, "ARPACK error %d: %s" % (info, msg))
class ArpackNoConvergence(ArpackError):
    """
    ARPACK iteration did not converge

    Attributes
    ----------
    eigenvalues : ndarray
        Partial result. Converged eigenvalues.
    eigenvectors : ndarray
        Partial result. Converged eigenvectors.
    """
    def __init__(self, msg, eigenvalues, eigenvectors):
        # Code -1 with a custom one-entry infodict routes `msg` through the
        # ArpackError formatting; partial results stay accessible to callers.
        ArpackError.__init__(self, -1, {-1: msg})
        self.eigenvalues = eigenvalues
        self.eigenvectors = eigenvectors
class _ArpackParams(object):
    # Shared parameter/state holder for the ARPACK reverse-communication
    # loop; subclasses add the symmetric/unsymmetric specific work arrays
    # and the iterate()/extract() drivers.
    def __init__(self, n, k, tp, mode=1, sigma=None,
                 ncv=None, v0=None, maxiter=None, which="LM", tol=0):
        """Validate the common ARPACK arguments and allocate shared state.

        n : problem size; k : number of eigenvalues; tp : dtype char in
        'fdFD'; mode : ARPACK solver mode; sigma : shift (0 when unused);
        ncv : number of Lanczos/Arnoldi vectors; v0 : optional start vector;
        maxiter : iteration cap (default 10*n); which : Ritz-value selection;
        tol : relative accuracy (0 = machine precision).
        """
        if k <= 0:
            raise ValueError("k must be positive, k=%d" % k)
        if maxiter is None:
            maxiter = n * 10
        if maxiter <= 0:
            raise ValueError("maxiter must be positive, maxiter=%d" % maxiter)
        if tp not in 'fdFD':
            raise ValueError("matrix type must be 'f', 'd', 'F', or 'D'")
        if v0 is not None:
            # ARPACK overwrites its initial resid, make a copy
            self.resid = np.array(v0, copy=True)
            info = 1
        else:
            self.resid = np.zeros(n, tp)
            info = 0
        if sigma is None:
            #sigma not used
            self.sigma = 0
        else:
            self.sigma = sigma
        if ncv is None:
            ncv = 2 * k + 1
        ncv = min(ncv, n)
        self.v = np.zeros((n, ncv), tp)  # holds Ritz vectors
        self.iparam = np.zeros(11, "int")
        # set solver mode and parameters
        ishfts = 1
        self.mode = mode
        self.iparam[0] = ishfts
        self.iparam[2] = maxiter
        self.iparam[3] = 1
        self.iparam[6] = mode
        self.n = n
        self.tol = tol
        self.k = k
        self.maxiter = maxiter
        self.ncv = ncv
        self.which = which
        self.tp = tp
        self.info = info
        # Reverse-communication loop state: `converged` stops the caller's
        # while-loop; `ido` is ARPACK's action request flag (0 = first call).
        self.converged = False
        self.ido = 0
    def _raise_no_convergence(self):
        """Raise ArpackNoConvergence carrying whatever partial results
        (converged Ritz values/vectors) could still be extracted."""
        msg = "No convergence (%d iterations, %d/%d eigenvectors converged)"
        k_ok = self.iparam[4]
        num_iter = self.iparam[2]
        try:
            ev, vec = self.extract(True)
        except ArpackError as err:
            # Extraction itself failed: report empty partial results.
            msg = "%s [%s]" % (msg, err)
            ev = np.zeros((0,))
            vec = np.zeros((self.n, 0))
            k_ok = 0
        raise ArpackNoConvergence(msg % (num_iter, k_ok, self.k), ev, vec)
class _SymmetricArpackParams(_ArpackParams):
def __init__(self, n, k, tp, matvec, mode=1, M_matvec=None,
Minv_matvec=None, sigma=None,
ncv=None, v0=None, maxiter=None, which="LM", tol=0):
# The following modes are supported:
# mode = 1:
# Solve the standard eigenvalue problem:
# A*x = lambda*x :
# A - symmetric
# Arguments should be
# matvec = left multiplication by A
# M_matvec = None [not used]
# Minv_matvec = None [not used]
#
# mode = 2:
# Solve the general eigenvalue problem:
# A*x = lambda*M*x
# A - symmetric
# M - symmetric positive definite
# Arguments should be
# matvec = left multiplication by A
# M_matvec = left multiplication by M
# Minv_matvec = left multiplication by M^-1
#
# mode = 3:
# Solve the general eigenvalue problem in shift-invert mode:
# A*x = lambda*M*x
# A - symmetric
# M - symmetric positive semi-definite
# Arguments should be
# matvec = None [not used]
# M_matvec = left multiplication by M
# or None, if M is the identity
# Minv_matvec = left multiplication by [A-sigma*M]^-1
#
# mode = 4:
# Solve the general eigenvalue problem in Buckling mode:
# A*x = lambda*AG*x
# A - symmetric positive semi-definite
# AG - symmetric indefinite
# Arguments should be
# matvec = left multiplication by A
# M_matvec = None [not used]
# Minv_matvec = left multiplication by [A-sigma*AG]^-1
#
# mode = 5:
# Solve the general eigenvalue problem in Cayley-transformed mode:
# A*x = lambda*M*x
# A - symmetric
# M - symmetric positive semi-definite
# Arguments should be
# matvec = left multiplication by A
# M_matvec = left multiplication by M
# or None, if M is the identity
# Minv_matvec = left multiplication by [A-sigma*M]^-1
if mode == 1:
if matvec is None:
raise ValueError("matvec must be specified for mode=1")
if M_matvec is not None:
raise ValueError("M_matvec cannot be specified for mode=1")
if Minv_matvec is not None:
raise ValueError("Minv_matvec cannot be specified for mode=1")
self.OP = matvec
self.B = lambda x: x
self.bmat = 'I'
elif mode == 2:
if matvec is None:
raise ValueError("matvec must be specified for mode=2")
if M_matvec is None:
raise ValueError("M_matvec must be specified for mode=2")
if Minv_matvec is None:
raise ValueError("Minv_matvec must be specified for mode=2")
self.OP = lambda x: Minv_matvec(matvec(x))
self.OPa = Minv_matvec
self.OPb = matvec
self.B = M_matvec
self.bmat = 'G'
elif mode == 3:
if matvec is not None:
raise ValueError("matvec must not be specified for mode=3")
if Minv_matvec is None:
raise ValueError("Minv_matvec must be specified for mode=3")
if M_matvec is None:
self.OP = Minv_matvec
self.OPa = Minv_matvec
self.B = lambda x: x
self.bmat = 'I'
else:
self.OP = lambda x: Minv_matvec(M_matvec(x))
self.OPa = Minv_matvec
self.B = M_matvec
self.bmat = 'G'
elif mode == 4:
if matvec is None:
raise ValueError("matvec must be specified for mode=4")
if M_matvec is not None:
raise ValueError("M_matvec must not be specified for mode=4")
if Minv_matvec is None:
raise ValueError("Minv_matvec must be specified for mode=4")
self.OPa = Minv_matvec
self.OP = lambda x: self.OPa(matvec(x))
self.B = matvec
self.bmat = 'G'
elif mode == 5:
if matvec is None:
raise ValueError("matvec must be specified for mode=5")
if Minv_matvec is None:
raise ValueError("Minv_matvec must be specified for mode=5")
self.OPa = Minv_matvec
self.A_matvec = matvec
if M_matvec is None:
self.OP = lambda x: Minv_matvec(matvec(x) + sigma * x)
self.B = lambda x: x
self.bmat = 'I'
else:
self.OP = lambda x: Minv_matvec(matvec(x)
+ sigma * M_matvec(x))
self.B = M_matvec
self.bmat = 'G'
else:
raise ValueError("mode=%i not implemented" % mode)
if which not in _SEUPD_WHICH:
raise ValueError("which must be one of %s"
% ' '.join(_SEUPD_WHICH))
if k >= n:
raise ValueError("k must be less than rank(A), k=%d" % k)
_ArpackParams.__init__(self, n, k, tp, mode, sigma,
ncv, v0, maxiter, which, tol)
if self.ncv > n or self.ncv <= k:
raise ValueError("ncv must be k<ncv<=n, ncv=%s" % self.ncv)
self.workd = np.zeros(3 * n, self.tp)
self.workl = np.zeros(self.ncv * (self.ncv + 8), self.tp)
ltr = _type_conv[self.tp]
if ltr not in ["s", "d"]:
raise ValueError("Input matrix is not real-valued.")
self._arpack_solver = _arpack.__dict__[ltr + 'saupd']
self._arpack_extract = _arpack.__dict__[ltr + 'seupd']
self.iterate_infodict = _SAUPD_ERRORS[ltr]
self.extract_infodict = _SEUPD_ERRORS[ltr]
self.ipntr = np.zeros(11, "int")
def iterate(self):
self.ido, self.resid, self.v, self.iparam, self.ipntr, self.info = \
self._arpack_solver(self.ido, self.bmat, self.which, self.k,
self.tol, self.resid, self.v, self.iparam,
self.ipntr, self.workd, self.workl, self.info)
xslice = slice(self.ipntr[0] - 1, self.ipntr[0] - 1 + self.n)
yslice = slice(self.ipntr[1] - 1, self.ipntr[1] - 1 + self.n)
if self.ido == -1:
# initialization
self.workd[yslice] = self.OP(self.workd[xslice])
elif self.ido == 1:
# compute y = Op*x
if self.mode == 1:
self.workd[yslice] = self.OP(self.workd[xslice])
elif self.mode == 2:
self.workd[xslice] = self.OPb(self.workd[xslice])
self.workd[yslice] = self.OPa(self.workd[xslice])
elif self.mode == 5:
Bxslice = slice(self.ipntr[2] - 1, self.ipntr[2] - 1 + self.n)
Ax = self.A_matvec(self.workd[xslice])
self.workd[yslice] = self.OPa(Ax + (self.sigma *
self.workd[Bxslice]))
else:
Bxslice = slice(self.ipntr[2] - 1, self.ipntr[2] - 1 + self.n)
self.workd[yslice] = self.OPa(self.workd[Bxslice])
elif self.ido == 2:
self.workd[yslice] = self.B(self.workd[xslice])
elif self.ido == 3:
raise ValueError("ARPACK requested user shifts. Assure ISHIFT==0")
else:
self.converged = True
if self.info == 0:
pass
elif self.info == 1:
self._raise_no_convergence()
else:
raise ArpackError(self.info, infodict=self.iterate_infodict)
    def extract(self, return_eigenvectors):
        """Extract results after convergence via ``sseupd``/``dseupd``.

        Parameters
        ----------
        return_eigenvectors : bool
            If True, return ``(d, z)`` (eigenvalues and eigenvectors);
            otherwise return only the eigenvalues ``d``.
        """
        rvec = return_eigenvectors
        ierr = 0
        howmny = 'A'  # return all eigenvectors
        sselect = np.zeros(self.ncv, 'int')  # unused
        d, z, ierr = self._arpack_extract(rvec, howmny, sselect, self.sigma,
                                          self.bmat, self.which, self.k,
                                          self.tol, self.resid, self.v,
                                          self.iparam[0:7], self.ipntr,
                                          self.workd[0:2 * self.n],
                                          self.workl, ierr)
        if ierr != 0:
            raise ArpackError(ierr, infodict=self.extract_infodict)
        # iparam[4] is the number of eigenvalues that actually converged;
        # trim the outputs to that count.
        k_ok = self.iparam[4]
        d = d[:k_ok]
        z = z[:, :k_ok]
        if return_eigenvectors:
            return d, z
        else:
            return d
class _UnsymmetricArpackParams(_ArpackParams):
    """Parameter/state holder driving ARPACK's non-symmetric solvers
    (``snaupd``/``dnaupd``/``cnaupd``/``znaupd`` and the matching
    ``*neupd`` extraction routines).

    Usage: call :meth:`iterate` repeatedly until ``self.converged`` is
    set, then call :meth:`extract` to obtain eigenvalues/eigenvectors.
    """

    def __init__(self, n, k, tp, matvec, mode=1, M_matvec=None,
                 Minv_matvec=None, sigma=None,
                 ncv=None, v0=None, maxiter=None, which="LM", tol=0):
        # The following modes are supported:
        #
        #  mode = 1:
        #    Solve the standard eigenvalue problem:
        #      A*x = lambda*x
        #      A - square matrix
        #    Arguments should be
        #      matvec      = left multiplication by A
        #      M_matvec    = None [not used]
        #      Minv_matvec = None [not used]
        #
        #  mode = 2:
        #    Solve the generalized eigenvalue problem:
        #      A*x = lambda*M*x
        #      A - square matrix
        #      M - symmetric, positive semi-definite
        #    Arguments should be
        #      matvec      = left multiplication by A
        #      M_matvec    = left multiplication by M
        #      Minv_matvec = left multiplication by M^-1
        #
        #  mode = 3,4:
        #    Solve the general eigenvalue problem in shift-invert mode:
        #      A*x = lambda*M*x
        #      A - square matrix
        #      M - symmetric, positive semi-definite
        #    Arguments should be
        #      matvec      = None [not used]
        #      M_matvec    = left multiplication by M
        #                    or None, if M is the identity
        #      Minv_matvec = left multiplication by [A-sigma*M]^-1
        #    if A is real and mode==3, use the real part of Minv_matvec
        #    if A is real and mode==4, use the imag part of Minv_matvec
        #    if A is complex and mode==3,
        #       use real and imag parts of Minv_matvec
        if mode == 1:
            if matvec is None:
                raise ValueError("matvec must be specified for mode=1")
            if M_matvec is not None:
                raise ValueError("M_matvec cannot be specified for mode=1")
            if Minv_matvec is not None:
                raise ValueError("Minv_matvec cannot be specified for mode=1")

            self.OP = matvec
            self.B = lambda x: x
            self.bmat = 'I'
        elif mode == 2:
            if matvec is None:
                raise ValueError("matvec must be specified for mode=2")
            if M_matvec is None:
                raise ValueError("M_matvec must be specified for mode=2")
            if Minv_matvec is None:
                raise ValueError("Minv_matvec must be specified for mode=2")

            self.OP = lambda x: Minv_matvec(matvec(x))
            self.OPa = Minv_matvec
            self.OPb = matvec
            self.B = M_matvec
            self.bmat = 'G'
        elif mode in (3, 4):
            if matvec is None:
                raise ValueError("matvec must be specified "
                                 "for mode in (3,4)")
            if Minv_matvec is None:
                raise ValueError("Minv_matvec must be specified "
                                 "for mode in (3,4)")

            self.matvec = matvec
            if tp in 'DF':  # complex type
                if mode == 3:
                    self.OPa = Minv_matvec
                else:
                    raise ValueError("mode=4 invalid for complex A")
            else:  # real type
                if mode == 3:
                    self.OPa = lambda x: np.real(Minv_matvec(x))
                else:
                    self.OPa = lambda x: np.imag(Minv_matvec(x))
            if M_matvec is None:
                self.B = lambda x: x
                self.bmat = 'I'
                self.OP = self.OPa
            else:
                self.B = M_matvec
                self.bmat = 'G'
                self.OP = lambda x: self.OPa(M_matvec(x))
        else:
            raise ValueError("mode=%i not implemented" % mode)

        if which not in _NEUPD_WHICH:
            raise ValueError("Parameter which must be one of %s"
                             % ' '.join(_NEUPD_WHICH))
        if k >= n - 1:
            raise ValueError("k must be less than rank(A)-1, k=%d" % k)

        _ArpackParams.__init__(self, n, k, tp, mode, sigma,
                               ncv, v0, maxiter, which, tol)

        if self.ncv > n or self.ncv <= k + 1:
            raise ValueError("ncv must be k+1<ncv<=n, ncv=%s" % self.ncv)

        # Work arrays sized per the *naupd documentation.
        self.workd = np.zeros(3 * n, self.tp)
        self.workl = np.zeros(3 * self.ncv * (self.ncv + 2), self.tp)

        ltr = _type_conv[self.tp]
        self._arpack_solver = _arpack.__dict__[ltr + 'naupd']
        self._arpack_extract = _arpack.__dict__[ltr + 'neupd']

        self.iterate_infodict = _NAUPD_ERRORS[ltr]
        self.extract_infodict = _NEUPD_ERRORS[ltr]

        self.ipntr = np.zeros(14, "int")

        # Complex solvers require an additional real-valued work array.
        if self.tp in 'FD':
            self.rwork = np.zeros(self.ncv, self.tp.lower())
        else:
            self.rwork = None

    def iterate(self):
        """Perform one reverse-communication step of ``*naupd``.

        See the symmetric counterpart for the ido protocol; the
        difference is the extra ``rwork`` argument for complex types.
        """
        if self.tp in 'fd':
            # Real-typed solvers take no rwork argument.
            self.ido, self.resid, self.v, self.iparam, self.ipntr, self.info =\
                self._arpack_solver(self.ido, self.bmat, self.which, self.k,
                                    self.tol, self.resid, self.v, self.iparam,
                                    self.ipntr, self.workd, self.workl,
                                    self.info)
        else:
            self.ido, self.resid, self.v, self.iparam, self.ipntr, self.info =\
                self._arpack_solver(self.ido, self.bmat, self.which, self.k,
                                    self.tol, self.resid, self.v, self.iparam,
                                    self.ipntr, self.workd, self.workl,
                                    self.rwork, self.info)

        # 1-based Fortran offsets into workd for the x and y vectors.
        xslice = slice(self.ipntr[0] - 1, self.ipntr[0] - 1 + self.n)
        yslice = slice(self.ipntr[1] - 1, self.ipntr[1] - 1 + self.n)
        if self.ido == -1:
            # initialization
            self.workd[yslice] = self.OP(self.workd[xslice])
        elif self.ido == 1:
            # compute y = Op*x
            if self.mode in (1, 2):
                self.workd[yslice] = self.OP(self.workd[xslice])
            else:
                # Shift-invert: B*x is already available at ipntr[2].
                Bxslice = slice(self.ipntr[2] - 1, self.ipntr[2] - 1 + self.n)
                self.workd[yslice] = self.OPa(self.workd[Bxslice])
        elif self.ido == 2:
            self.workd[yslice] = self.B(self.workd[xslice])
        elif self.ido == 3:
            raise ValueError("ARPACK requested user shifts. Assure ISHIFT==0")
        else:
            self.converged = True

            if self.info == 0:
                pass
            elif self.info == 1:
                self._raise_no_convergence()
            else:
                raise ArpackError(self.info, infodict=self.iterate_infodict)

    def extract(self, return_eigenvectors):
        """Extract eigenvalues/eigenvectors after convergence via
        ``*neupd``.

        For the real solvers (types 'f'/'d'), eigenvalues come back as
        separate real/imaginary arrays and complex conjugate pairs of
        eigenvectors are stored as (real, imaginary) in consecutive
        columns; this method reassembles them into complex arrays and
        selects the k values requested by ``self.which``.
        """
        k, n = self.k, self.n

        ierr = 0
        howmny = 'A'  # return all eigenvectors
        sselect = np.zeros(self.ncv, 'int')  # unused
        sigmar = np.real(self.sigma)
        sigmai = np.imag(self.sigma)
        workev = np.zeros(3 * self.ncv, self.tp)

        if self.tp in 'fd':
            dr = np.zeros(k + 1, self.tp)
            di = np.zeros(k + 1, self.tp)
            zr = np.zeros((n, k + 1), self.tp)
            dr, di, zr, ierr = \
                self._arpack_extract(
                    return_eigenvectors, howmny, sselect, sigmar, sigmai,
                    workev, self.bmat, self.which, k, self.tol, self.resid,
                    self.v, self.iparam, self.ipntr, self.workd, self.workl,
                    self.info)
            if ierr != 0:
                raise ArpackError(ierr, infodict=self.extract_infodict)
            nreturned = self.iparam[4]  # number of good eigenvalues returned

            # Build complex eigenvalues from real and imaginary parts
            d = dr + 1.0j * di

            # Arrange the eigenvectors: complex eigenvectors are stored as
            # real,imaginary in consecutive columns
            z = zr.astype(self.tp.upper())

            # The ARPACK nonsymmetric real and double interface (s,d)naupd
            # return eigenvalues and eigenvectors in real (float,double)
            # arrays.
            # Efficiency: this should check that return_eigenvectors == True
            # before going through this construction.
            if sigmai == 0:
                i = 0
                while i <= k:
                    # check if complex
                    if abs(d[i].imag) != 0:
                        # this is a complex conjugate pair with eigenvalues
                        # in consecutive columns
                        if i < k:
                            z[:, i] = zr[:, i] + 1.0j * zr[:, i + 1]
                            z[:, i + 1] = z[:, i].conjugate()
                            i += 1
                        else:
                            #last eigenvalue is complex: the imaginary part of
                            # the eigenvector has not been returned
                            #this can only happen if nreturned > k, so we'll
                            # throw out this case.
                            nreturned -= 1
                    i += 1
            else:
                # real matrix, mode 3 or 4, imag(sigma) is nonzero:
                # see remark 3 in <s,d>neupd.f
                # Build complex eigenvalues from real and imaginary parts
                i = 0
                while i <= k:
                    if abs(d[i].imag) == 0:
                        # Real Ritz value: recover the eigenvalue of A
                        # via the Rayleigh quotient x'*A*x.
                        d[i] = np.dot(zr[:, i], self.matvec(zr[:, i]))
                    else:
                        if i < k:
                            z[:, i] = zr[:, i] + 1.0j * zr[:, i + 1]
                            z[:, i + 1] = z[:, i].conjugate()
                            # Complex Rayleigh quotient assembled from the
                            # real/imaginary eigenvector columns.
                            d[i] = ((np.dot(zr[:, i],
                                            self.matvec(zr[:, i]))
                                     + np.dot(zr[:, i + 1],
                                              self.matvec(zr[:, i + 1])))
                                    + 1j * (np.dot(zr[:, i],
                                                   self.matvec(zr[:, i + 1]))
                                            - np.dot(zr[:, i + 1],
                                                     self.matvec(zr[:, i]))))
                            d[i + 1] = d[i].conj()
                            i += 1
                        else:
                            #last eigenvalue is complex: the imaginary part of
                            # the eigenvector has not been returned
                            #this can only happen if nreturned > k, so we'll
                            # throw out this case.
                            nreturned -= 1
                    i += 1

            # Now we have k+1 possible eigenvalues and eigenvectors
            # Return the ones specified by the keyword "which"
            if nreturned <= k:
                # we got less or equal as many eigenvalues we wanted
                d = d[:nreturned]
                z = z[:, :nreturned]
            else:
                # we got one extra eigenvalue (likely a cc pair, but which?)
                # cut at approx precision for sorting
                rd = np.round(d, decimals=_ndigits[self.tp])
                if self.which in ['LR', 'SR']:
                    ind = np.argsort(rd.real)
                elif self.which in ['LI', 'SI']:
                    # for LI,SI ARPACK returns largest,smallest
                    # abs(imaginary) why?
                    ind = np.argsort(abs(rd.imag))
                else:
                    ind = np.argsort(abs(rd))
                if self.which in ['LR', 'LM', 'LI']:
                    d = d[ind[-k:]]
                    z = z[:, ind[-k:]]
                if self.which in ['SR', 'SM', 'SI']:
                    d = d[ind[:k]]
                    z = z[:, ind[:k]]
        else:
            # complex is so much simpler...
            d, z, ierr =\
                self._arpack_extract(
                    return_eigenvectors, howmny, sselect, self.sigma, workev,
                    self.bmat, self.which, k, self.tol, self.resid, self.v,
                    self.iparam, self.ipntr, self.workd, self.workl,
                    self.rwork, ierr)

            if ierr != 0:
                raise ArpackError(ierr, infodict=self.extract_infodict)

            k_ok = self.iparam[4]
            d = d[:k_ok]
            z = z[:, :k_ok]

        if return_eigenvectors:
            return d, z
        else:
            return d
def _aslinearoperator_with_dtype(m):
m = aslinearoperator(m)
if not hasattr(m, 'dtype'):
x = np.zeros(m.shape[1])
m.dtype = (m * x).dtype
return m
class SpLuInv(LinearOperator):
    """
    SpLuInv:
       helper class to repeatedly solve M*x=b
       using a sparse LU-decomposition of M
    """
    def __init__(self, M):
        # Factor M once; every subsequent solve reuses the factorization.
        self.M_lu = splu(M)
        # NOTE(review): legacy LinearOperator(shape, matvec, dtype=...)
        # calling convention -- verify against the scipy version in use.
        LinearOperator.__init__(self, M.shape, self._matvec, dtype=M.dtype)
        self.isreal = not np.issubdtype(self.dtype, np.complexfloating)

    def _matvec(self, x):
        # careful here: splu.solve will throw away imaginary
        # part of x if M is real
        if self.isreal and np.issubdtype(x.dtype, np.complexfloating):
            # Solve the real and imaginary parts separately and recombine.
            return (self.M_lu.solve(np.real(x))
                    + 1j * self.M_lu.solve(np.imag(x)))
        else:
            return self.M_lu.solve(x)
class LuInv(LinearOperator):
    """
    LuInv:
       helper class to repeatedly solve M*x=b
       using an LU-decomposition of M
    """
    def __init__(self, M):
        # Factor the dense matrix once with LAPACK; reuse for each solve.
        self.M_lu = lu_factor(M)
        LinearOperator.__init__(self, M.shape, self._matvec, dtype=M.dtype)

    def _matvec(self, x):
        # Back/forward substitution against the stored factorization.
        return lu_solve(self.M_lu, x)
class IterInv(LinearOperator):
    """
    IterInv:
       helper class to repeatedly solve M*x=b
       using an iterative method.
    """
    def __init__(self, M, ifunc=gmres, tol=0):
        if tol <= 0:
            # when tol=0, ARPACK uses machine tolerance as calculated
            # by LAPACK's _LAMCH function.  We should match this.
            # NOTE(review): assumes M exposes a dtype here even though the
            # probe below allows operators without one -- verify callers.
            tol = np.finfo(M.dtype).eps
        self.M = M
        self.ifunc = ifunc
        self.tol = tol
        # Discover the operator dtype, probing with a zero vector when the
        # object does not expose one directly.
        if hasattr(M, 'dtype'):
            dtype = M.dtype
        else:
            x = np.zeros(M.shape[1])
            dtype = (M * x).dtype
        LinearOperator.__init__(self, M.shape, self._matvec, dtype=dtype)

    def _matvec(self, x):
        # Solve M b = x with the configured iterative solver (gmres by
        # default) and raise if it fails to converge.
        b, info = self.ifunc(self.M, x, tol=self.tol)
        if info != 0:
            raise ValueError("Error in inverting M: function "
                             "%s did not converge (info = %i)."
                             % (self.ifunc.__name__, info))
        return b
class IterOpInv(LinearOperator):
    """
    IterOpInv:
       helper class to repeatedly solve [A-sigma*M]*x = b
       using an iterative method
    """
    def __init__(self, A, M, sigma, ifunc=gmres, tol=0):
        if tol <= 0:
            # when tol=0, ARPACK uses machine tolerance as calculated
            # by LAPACK's _LAMCH function.  We should match this
            tol = np.finfo(A.dtype).eps
        self.A = A
        self.M = M
        self.sigma = sigma
        self.ifunc = ifunc
        self.tol = tol

        # Probe with a zero vector to determine the operator's dtype;
        # M=None means the identity is used in place of M.
        x = np.zeros(A.shape[1])
        if M is None:
            dtype = self.mult_func_M_None(x).dtype
            self.OP = LinearOperator(self.A.shape,
                                     self.mult_func_M_None,
                                     dtype=dtype)
        else:
            dtype = self.mult_func(x).dtype
            self.OP = LinearOperator(self.A.shape,
                                     self.mult_func,
                                     dtype=dtype)
        LinearOperator.__init__(self, A.shape, self._matvec, dtype=dtype)

    def mult_func(self, x):
        # y = (A - sigma*M) @ x
        return self.A.matvec(x) - self.sigma * self.M.matvec(x)

    def mult_func_M_None(self, x):
        # y = (A - sigma*I) @ x
        return self.A.matvec(x) - self.sigma * x

    def _matvec(self, x):
        # Iteratively solve [A - sigma*M] b = x; raise on non-convergence.
        b, info = self.ifunc(self.OP, x, tol=self.tol)
        if info != 0:
            raise ValueError("Error in inverting [A-sigma*M]: function "
                             "%s did not converge (info = %i)."
                             % (self.ifunc.__name__, info))
        return b
def get_inv_matvec(M, symmetric=False, tol=0):
    """Return a callable computing ``M^-1 @ x``, picking a solver by type.

    Dense matrices use a direct LU factorization, sparse matrices a
    sparse LU factorization, and anything else (general linear
    operators) an iterative solver with tolerance *tol*.
    """
    if isdense(M):
        # Dense: direct LAPACK LU factorization.
        return LuInv(M).matvec
    if not isspmatrix(M):
        # General linear operator: fall back to an iterative solver.
        return IterInv(M, tol=tol).matvec
    # Sparse matrix.  For a symmetric CSR matrix, factoring the transpose
    # is numerically identical but more convenient for the solver.
    if symmetric and isspmatrix_csr(M):
        M = M.T
    return SpLuInv(M).matvec
def get_OPinv_matvec(A, M, sigma, symmetric=False, tol=0):
    """Return a callable computing ``[A - sigma*M]^-1 @ x``.

    ``M=None`` means the identity matrix.  Direct (LU-based) solvers are
    used when the shifted operator can be formed explicitly; otherwise an
    iterative solver with tolerance *tol* is used.
    """
    if sigma == 0:
        # No shift: this is simply the inverse of A.
        return get_inv_matvec(A, symmetric=symmetric, tol=tol)

    if M is None:
        # Shift against the identity matrix.
        if isdense(A):
            # Promote to complex when a real A meets a complex shift,
            # otherwise work on a private copy; then subtract sigma from
            # the diagonal in place.
            if (np.issubdtype(A.dtype, np.complexfloating)
                    or np.imag(sigma) == 0):
                shifted = np.copy(A)
            else:
                shifted = A + 0j
            shifted.flat[::shifted.shape[1] + 1] -= sigma
            return LuInv(shifted).matvec
        if isspmatrix(A):
            shifted = A - sigma * identity(A.shape[0])
            if symmetric and isspmatrix_csr(shifted):
                shifted = shifted.T
            return SpLuInv(shifted.tocsc()).matvec
        # Abstract operator: solve iteratively.
        return IterOpInv(_aslinearoperator_with_dtype(A), M, sigma,
                         tol=tol).matvec

    # Generalized problem: shift against M.
    if ((not isdense(A) and not isspmatrix(A)) or
            (not isdense(M) and not isspmatrix(M))):
        # At least one operand is an abstract operator: iterate.
        return IterOpInv(_aslinearoperator_with_dtype(A),
                         _aslinearoperator_with_dtype(M), sigma,
                         tol=tol).matvec
    if isdense(A) or isdense(M):
        # Mixed or dense operands densify; use the dense LU path.
        return LuInv(A - sigma * M).matvec
    # Both sparse: form the shifted operator explicitly.
    OP = A - sigma * M
    if symmetric and isspmatrix_csr(OP):
        OP = OP.T
    return SpLuInv(OP.tocsc()).matvec
def _eigs(A, k=6, M=None, sigma=None, which='LM', v0=None, ncv=None,
          maxiter=None, tol=0, return_eigenvectors=True, Minv=None, OPinv=None,
          OPpart=None):
    """
    Find k eigenvalues and eigenvectors of the square matrix A.

    Solves ``A * x[i] = w[i] * x[i]``, the standard eigenvalue problem
    for w[i] eigenvalues with corresponding eigenvectors x[i].

    If M is specified, solves ``A * x[i] = w[i] * M * x[i]``, the
    generalized eigenvalue problem for w[i] eigenvalues
    with corresponding eigenvectors x[i]

    Parameters
    ----------
    A : An N x N matrix, array, sparse matrix, or LinearOperator representing \
        the operation A * x, where A is a real or complex square matrix.
    k : int, default 6
        The number of eigenvalues and eigenvectors desired.
        `k` must be smaller than N. It is not possible to compute all
        eigenvectors of a matrix.
    return_eigenvectors : boolean, default True
        Whether to return the eigenvectors along with the eigenvalues.
    M : An N x N matrix, array, sparse matrix, or LinearOperator representing
        the operation M*x for the generalized eigenvalue problem
        ``A * x = w * M * x``
        M must represent a real symmetric matrix. For best results, M should
        be of the same type as A. Additionally:
         * If sigma==None, M is positive definite
         * If sigma is specified, M is positive semi-definite
        If sigma==None, eigs requires an operator to compute the solution
        of the linear equation `M * x = b`. This is done internally via a
        (sparse) LU decomposition for an explicit matrix M, or via an
        iterative solver for a general linear operator.  Alternatively,
        the user can supply the matrix or operator Minv, which gives
        x = Minv * b = M^-1 * b
    sigma : real or complex
        Find eigenvalues near sigma using shift-invert mode.  This requires
        an operator to compute the solution of the linear system
        `[A - sigma * M] * x = b`, where M is the identity matrix if
        unspecified. This is computed internally via a (sparse) LU
        decomposition for explicit matrices A & M, or via an iterative
        solver if either A or M is a general linear operator.
        Alternatively, the user can supply the matrix or operator OPinv,
        which gives x = OPinv * b = [A - sigma * M]^-1 * b.
        For a real matrix A, shift-invert can either be done in imaginary
        mode or real mode, specified by the parameter OPpart ('r' or 'i').
        Note that when sigma is specified, the keyword 'which' (below)
        refers to the shifted eigenvalues w'[i] where:
         * If A is real and OPpart == 'r' (default),
           w'[i] = 1/2 * [ 1/(w[i]-sigma) + 1/(w[i]-conj(sigma)) ]
         * If A is real and OPpart == 'i',
           w'[i] = 1/2i * [ 1/(w[i]-sigma) - 1/(w[i]-conj(sigma)) ]
         * If A is complex,
           w'[i] = 1/(w[i]-sigma)
    v0 : array
        Starting vector for iteration.
    ncv : integer
        The number of Lanczos vectors generated
        `ncv` must be greater than `k`; it is recommended that ``ncv > 2*k``.
    which : string ['LM' | 'SM' | 'LR' | 'SR' | 'LI' | 'SI']
        Which `k` eigenvectors and eigenvalues to find:
         - 'LM' : largest magnitude
         - 'SM' : smallest magnitude
         - 'LR' : largest real part
         - 'SR' : smallest real part
         - 'LI' : largest imaginary part
         - 'SI' : smallest imaginary part
        When sigma != None, 'which' refers to the shifted eigenvalues w'[i]
        (see discussion in 'sigma', above).  ARPACK is generally better
        at finding large values than small values.  If small eigenvalues are
        desired, consider using shift-invert mode for better performance.
    maxiter : integer
        Maximum number of Arnoldi update iterations allowed
    tol : float
        Relative accuracy for eigenvalues (stopping criterion)
        The default value of 0 implies machine precision.
    return_eigenvectors : boolean
        Return eigenvectors (True) in addition to eigenvalues
    Minv : N x N matrix, array, sparse matrix, or linear operator
        See notes in M, above.
    OPinv : N x N matrix, array, sparse matrix, or linear operator
        See notes in sigma, above.
    OPpart : 'r' or 'i'.
        See notes in sigma, above

    Returns
    -------
    w : array
        Array of k eigenvalues.
    v : array
        An array of `k` eigenvectors.
        ``v[:, i]`` is the eigenvector corresponding to the eigenvalue w[i].

    Raises
    ------
    ArpackNoConvergence
        When the requested convergence is not obtained.
        The currently converged eigenvalues and eigenvectors can be found
        as ``eigenvalues`` and ``eigenvectors`` attributes of the exception
        object.

    See Also
    --------
    eigsh : eigenvalues and eigenvectors for symmetric matrix A
    svds : singular value decomposition for a matrix A

    Examples
    --------
    Find 6 eigenvectors of the identity matrix:

    >>> from sklearn.utils.arpack import eigs
    >>> id = np.identity(13)
    >>> vals, vecs = eigs(id, k=6)
    >>> vals
    array([ 1.+0.j,  1.+0.j,  1.+0.j,  1.+0.j,  1.+0.j,  1.+0.j])
    >>> vecs.shape
    (13, 6)

    Notes
    -----
    This function is a wrapper to the ARPACK [1]_ SNEUPD, DNEUPD, CNEUPD,
    ZNEUPD, functions which use the Implicitly Restarted Arnoldi Method to
    find the eigenvalues and eigenvectors [2]_.

    References
    ----------
    .. [1] ARPACK Software, http://www.caam.rice.edu/software/ARPACK/
    .. [2] R. B. Lehoucq, D. C. Sorensen, and C. Yang,  ARPACK USERS GUIDE:
       Solution of Large Scale Eigenvalue Problems by Implicitly Restarted
       Arnoldi Methods. SIAM, Philadelphia, PA, 1998.
    """
    # Validate shapes and warn on dtype-precision mismatch between A and M.
    if A.shape[0] != A.shape[1]:
        raise ValueError('expected square matrix (shape=%s)' % (A.shape,))
    if M is not None:
        if M.shape != A.shape:
            raise ValueError('wrong M dimensions %s, should be %s'
                             % (M.shape, A.shape))
        if np.dtype(M.dtype).char.lower() != np.dtype(A.dtype).char.lower():
            warnings.warn('M does not have the same type precision as A. '
                          'This may adversely affect ARPACK convergence')
    n = A.shape[0]

    if k <= 0 or k >= n:
        raise ValueError("k must be between 1 and rank(A)-1")

    if sigma is None:
        matvec = _aslinearoperator_with_dtype(A).matvec

        if OPinv is not None:
            raise ValueError("OPinv should not be specified "
                             "with sigma = None.")
        if OPpart is not None:
            raise ValueError("OPpart should not be specified with "
                             "sigma = None or complex A")

        if M is None:
            #standard eigenvalue problem
            mode = 1
            M_matvec = None
            Minv_matvec = None
            if Minv is not None:
                raise ValueError("Minv should not be "
                                 "specified with M = None.")
        else:
            #general eigenvalue problem
            mode = 2
            if Minv is None:
                Minv_matvec = get_inv_matvec(M, symmetric=True, tol=tol)
            else:
                Minv = _aslinearoperator_with_dtype(Minv)
                Minv_matvec = Minv.matvec
            M_matvec = _aslinearoperator_with_dtype(M).matvec
    else:
        #sigma is not None: shift-invert mode
        # Map (dtype of A, OPpart) to the appropriate ARPACK mode:
        # 3 = real part / complex shift-invert, 4 = imaginary part.
        if np.issubdtype(A.dtype, np.complexfloating):
            if OPpart is not None:
                raise ValueError("OPpart should not be specified "
                                 "with sigma=None or complex A")
            mode = 3
        elif OPpart is None or OPpart.lower() == 'r':
            mode = 3
        elif OPpart.lower() == 'i':
            if np.imag(sigma) == 0:
                raise ValueError("OPpart cannot be 'i' if sigma is real")
            mode = 4
        else:
            raise ValueError("OPpart must be one of ('r','i')")

        matvec = _aslinearoperator_with_dtype(A).matvec
        if Minv is not None:
            raise ValueError("Minv should not be specified when sigma is")
        if OPinv is None:
            Minv_matvec = get_OPinv_matvec(A, M, sigma,
                                           symmetric=False, tol=tol)
        else:
            OPinv = _aslinearoperator_with_dtype(OPinv)
            Minv_matvec = OPinv.matvec
        if M is None:
            M_matvec = None
        else:
            M_matvec = _aslinearoperator_with_dtype(M).matvec

    # Drive the reverse-communication loop to convergence, then extract.
    params = _UnsymmetricArpackParams(n, k, A.dtype.char, matvec, mode,
                                      M_matvec, Minv_matvec, sigma,
                                      ncv, v0, maxiter, which, tol)

    while not params.converged:
        params.iterate()

    return params.extract(return_eigenvectors)
def _eigsh(A, k=6, M=None, sigma=None, which='LM', v0=None, ncv=None,
           maxiter=None, tol=0, return_eigenvectors=True, Minv=None,
           OPinv=None, mode='normal'):
    """
    Find k eigenvalues and eigenvectors of the real symmetric square matrix
    or complex hermitian matrix A.

    Solves ``A * x[i] = w[i] * x[i]``, the standard eigenvalue problem for
    w[i] eigenvalues with corresponding eigenvectors x[i].

    If M is specified, solves ``A * x[i] = w[i] * M * x[i]``, the
    generalized eigenvalue problem for w[i] eigenvalues
    with corresponding eigenvectors x[i]

    Parameters
    ----------
    A : An N x N matrix, array, sparse matrix, or LinearOperator representing
        the operation A * x, where A is a real symmetric matrix
        For buckling mode (see below) A must additionally be positive-definite
    k : integer
        The number of eigenvalues and eigenvectors desired.
        `k` must be smaller than N. It is not possible to compute all
        eigenvectors of a matrix.
    M : An N x N matrix, array, sparse matrix, or linear operator representing
        the operation M * x for the generalized eigenvalue problem
        ``A * x = w * M * x``.
        M must represent a real, symmetric matrix. For best results, M should
        be of the same type as A. Additionally:
         * If sigma == None, M is symmetric positive definite
         * If sigma is specified, M is symmetric positive semi-definite
         * In buckling mode, M is symmetric indefinite.
        If sigma == None, eigsh requires an operator to compute the solution
        of the linear equation `M * x = b`. This is done internally via a
        (sparse) LU decomposition for an explicit matrix M, or via an
        iterative solver for a general linear operator.  Alternatively,
        the user can supply the matrix or operator Minv, which gives
        x = Minv * b = M^-1 * b
    sigma : real
        Find eigenvalues near sigma using shift-invert mode.  This requires
        an operator to compute the solution of the linear system
        `[A - sigma * M] x = b`, where M is the identity matrix if
        unspecified.  This is computed internally via a (sparse) LU
        decomposition for explicit matrices A & M, or via an iterative
        solver if either A or M is a general linear operator.
        Alternatively, the user can supply the matrix or operator OPinv,
        which gives x = OPinv * b = [A - sigma * M]^-1 * b.
        Note that when sigma is specified, the keyword 'which' refers to
        the shifted eigenvalues w'[i] where:
         - if mode == 'normal',
           w'[i] = 1 / (w[i] - sigma)
         - if mode == 'cayley',
           w'[i] = (w[i] + sigma) / (w[i] - sigma)
         - if mode == 'buckling',
           w'[i] = w[i] / (w[i] - sigma)
        (see further discussion in 'mode' below)
    v0 : array
        Starting vector for iteration.
    ncv : integer
        The number of Lanczos vectors generated
        ncv must be greater than k and smaller than n;
        it is recommended that ncv > 2*k
    which : string ['LM' | 'SM' | 'LA' | 'SA' | 'BE']
        If A is a complex hermitian matrix, 'BE' is invalid.
        Which `k` eigenvectors and eigenvalues to find

        - 'LM' : Largest (in magnitude) eigenvalues
        - 'SM' : Smallest (in magnitude) eigenvalues
        - 'LA' : Largest (algebraic) eigenvalues
        - 'SA' : Smallest (algebraic) eigenvalues
        - 'BE' : Half (k/2) from each end of the spectrum
                 When k is odd, return one more (k/2+1) from the high end

        When sigma != None, 'which' refers to the shifted eigenvalues w'[i]
        (see discussion in 'sigma', above).  ARPACK is generally better
        at finding large values than small values.  If small eigenvalues are
        desired, consider using shift-invert mode for better performance.
    maxiter : integer
        Maximum number of Arnoldi update iterations allowed
    tol : float
        Relative accuracy for eigenvalues (stopping criterion).
        The default value of 0 implies machine precision.
    Minv : N x N matrix, array, sparse matrix, or LinearOperator
        See notes in M, above
    OPinv : N x N matrix, array, sparse matrix, or LinearOperator
        See notes in sigma, above.
    return_eigenvectors : boolean
        Return eigenvectors (True) in addition to eigenvalues
    mode : string ['normal' | 'buckling' | 'cayley']
        Specify strategy to use for shift-invert mode.  This argument applies
        only for real-valued A and sigma != None.  For shift-invert mode,
        ARPACK internally solves the eigenvalue problem
        ``OP * x'[i] = w'[i] * B * x'[i]``
        and transforms the resulting Ritz vectors x'[i] and Ritz values w'[i]
        into the desired eigenvectors and eigenvalues of the problem
        ``A * x[i] = w[i] * M * x[i]``.
        The modes are as follows:

        - 'normal' : OP = [A - sigma * M]^-1 * M
                     B = M
                     w'[i] = 1 / (w[i] - sigma)
        - 'buckling' : OP = [A - sigma * M]^-1 * A
                       B = A
                       w'[i] = w[i] / (w[i] - sigma)
        - 'cayley' : OP = [A - sigma * M]^-1 * [A + sigma * M]
                     B = M
                     w'[i] = (w[i] + sigma) / (w[i] - sigma)

        The choice of mode will affect which eigenvalues are selected by
        the keyword 'which', and can also impact the stability of
        convergence (see [2] for a discussion)

    Returns
    -------
    w : array
        Array of k eigenvalues
    v : array
        An array of k eigenvectors
        The v[i] is the eigenvector corresponding to the eigenvector w[i]

    Raises
    ------
    ArpackNoConvergence
        When the requested convergence is not obtained.
        The currently converged eigenvalues and eigenvectors can be found
        as ``eigenvalues`` and ``eigenvectors`` attributes of the exception
        object.

    See Also
    --------
    eigs : eigenvalues and eigenvectors for a general (nonsymmetric) matrix A
    svds : singular value decomposition for a matrix A

    Notes
    -----
    This function is a wrapper to the ARPACK [1]_ SSEUPD and DSEUPD
    functions which use the Implicitly Restarted Lanczos Method to
    find the eigenvalues and eigenvectors [2]_.

    Examples
    --------
    >>> from sklearn.utils.arpack import eigsh
    >>> id = np.identity(13)
    >>> vals, vecs = eigsh(id, k=6)
    >>> vals # doctest: +SKIP
    array([ 1.+0.j,  1.+0.j,  1.+0.j,  1.+0.j,  1.+0.j,  1.+0.j])
    >>> print(vecs.shape)
    (13, 6)

    References
    ----------
    .. [1] ARPACK Software, http://www.caam.rice.edu/software/ARPACK/
    .. [2] R. B. Lehoucq, D. C. Sorensen, and C. Yang,  ARPACK USERS GUIDE:
       Solution of Large Scale Eigenvalue Problems by Implicitly Restarted
       Arnoldi Methods. SIAM, Philadelphia, PA, 1998.
    """
    # complex hermitian matrices should be solved with eigs
    if np.issubdtype(A.dtype, np.complexfloating):
        if mode != 'normal':
            raise ValueError("mode=%s cannot be used with "
                             "complex matrix A" % mode)
        if which == 'BE':
            raise ValueError("which='BE' cannot be used with complex matrix A")
        elif which == 'LA':
            # Hermitian eigenvalues are real, so the algebraic orderings
            # map onto the nonsymmetric solver's real-part orderings.
            which = 'LR'
        elif which == 'SA':
            which = 'SR'
        ret = eigs(A, k, M=M, sigma=sigma, which=which, v0=v0,
                   ncv=ncv, maxiter=maxiter, tol=tol,
                   return_eigenvectors=return_eigenvectors, Minv=Minv,
                   OPinv=OPinv)
        if return_eigenvectors:
            return ret[0].real, ret[1]
        else:
            return ret.real

    if A.shape[0] != A.shape[1]:
        raise ValueError('expected square matrix (shape=%s)' % (A.shape,))
    if M is not None:
        if M.shape != A.shape:
            raise ValueError('wrong M dimensions %s, should be %s'
                             % (M.shape, A.shape))
        if np.dtype(M.dtype).char.lower() != np.dtype(A.dtype).char.lower():
            warnings.warn('M does not have the same type precision as A. '
                          'This may adversely affect ARPACK convergence')
    n = A.shape[0]

    if k <= 0 or k >= n:
        raise ValueError("k must be between 1 and rank(A)-1")

    if sigma is None:
        A = _aslinearoperator_with_dtype(A)
        matvec = A.matvec

        if OPinv is not None:
            raise ValueError("OPinv should not be specified "
                             "with sigma = None.")
        if M is None:
            #standard eigenvalue problem
            mode = 1
            M_matvec = None
            Minv_matvec = None
            if Minv is not None:
                raise ValueError("Minv should not be "
                                 "specified with M = None.")
        else:
            #general eigenvalue problem
            mode = 2
            if Minv is None:
                Minv_matvec = get_inv_matvec(M, symmetric=True, tol=tol)
            else:
                Minv = _aslinearoperator_with_dtype(Minv)
                Minv_matvec = Minv.matvec
            M_matvec = _aslinearoperator_with_dtype(M).matvec
    else:
        # sigma is not None: shift-invert mode
        if Minv is not None:
            raise ValueError("Minv should not be specified when sigma is")

        # Translate the user-facing mode string into ARPACK's numeric
        # mode (3 = normal shift-invert, 4 = buckling, 5 = cayley).
        # normal mode
        if mode == 'normal':
            mode = 3
            matvec = None
            if OPinv is None:
                Minv_matvec = get_OPinv_matvec(A, M, sigma,
                                               symmetric=True, tol=tol)
            else:
                OPinv = _aslinearoperator_with_dtype(OPinv)
                Minv_matvec = OPinv.matvec
            if M is None:
                M_matvec = None
            else:
                M = _aslinearoperator_with_dtype(M)
                M_matvec = M.matvec

        # buckling mode
        elif mode == 'buckling':
            mode = 4
            if OPinv is None:
                Minv_matvec = get_OPinv_matvec(A, M, sigma,
                                               symmetric=True, tol=tol)
            else:
                Minv_matvec = _aslinearoperator_with_dtype(OPinv).matvec
            matvec = _aslinearoperator_with_dtype(A).matvec
            M_matvec = None

        # cayley-transform mode
        elif mode == 'cayley':
            mode = 5
            matvec = _aslinearoperator_with_dtype(A).matvec
            if OPinv is None:
                Minv_matvec = get_OPinv_matvec(A, M, sigma,
                                               symmetric=True, tol=tol)
            else:
                Minv_matvec = _aslinearoperator_with_dtype(OPinv).matvec
            if M is None:
                M_matvec = None
            else:
                M_matvec = _aslinearoperator_with_dtype(M).matvec

        # unrecognized mode
        else:
            raise ValueError("unrecognized mode '%s'" % mode)

    # Drive the reverse-communication loop to convergence, then extract.
    params = _SymmetricArpackParams(n, k, A.dtype.char, matvec, mode,
                                    M_matvec, Minv_matvec, sigma,
                                    ncv, v0, maxiter, which, tol)

    while not params.converged:
        params.iterate()

    return params.extract(return_eigenvectors)
def _svds(A, k=6, ncv=None, tol=0):
"""Compute k singular values/vectors for a sparse matrix using ARPACK.
Parameters
----------
A : sparse matrix
Array to compute the SVD on
k : int, optional
Number of singular values and vectors to compute.
ncv : integer
The number of Lanczos vectors generated
ncv must be greater than k+1 and smaller than n;
it is recommended that ncv > 2*k
tol : float, optional
Tolerance for singular values. Zero (default) means machine precision.
Notes
-----
This is a naive implementation using an eigensolver on A.H * A or
A * A.H, depending on which one is more efficient.
"""
if not (isinstance(A, np.ndarray) or isspmatrix(A)):
A = np.asarray(A)
n, m = A.shape
if np.issubdtype(A.dtype, np.complexfloating):
herm = lambda x: x.T.conjugate()
eigensolver = eigs
else:
herm = lambda x: x.T
eigensolver = eigsh
if n > m:
X = A
XH = herm(A)
else:
XH = A
X = herm(A)
if hasattr(XH, 'dot'):
def matvec_XH_X(x):
return XH.dot(X.dot(x))
else:
def matvec_XH_X(x):
return np.dot(XH, np.dot(X, x))
XH_X = LinearOperator(matvec=matvec_XH_X, dtype=X.dtype,
shape=(X.shape[1], X.shape[1]))
# Ignore deprecation warnings here: dot on matrices is deprecated,
# but this code is a backport anyhow
with warnings.catch_warnings():
warnings.simplefilter('ignore', DeprecationWarning)
eigvals, eigvec = eigensolver(XH_X, k=k, tol=tol ** 2)
s = np.sqrt(eigvals)
if n > m:
v = eigvec
if hasattr(X, 'dot'):
u = X.dot(v) / s
else:
u = np.dot(X, v) / s
vh = herm(v)
else:
u = eigvec
if hasattr(X, 'dot'):
vh = herm(X.dot(u) / s)
else:
vh = herm(np.dot(X, u) / s)
return u, s, vh
# check if backport is actually needed:
# Compare parsed versions, not raw strings -- a plain string comparison
# would rank '0.9' above '0.10', and relying on LooseVersion's reflected
# comparison against a bare str is fragile.
if LooseVersion(scipy.version.version) >= LooseVersion('0.10'):
    from scipy.sparse.linalg import eigs, eigsh, svds
else:
    eigs, eigsh, svds = _eigs, _eigsh, _svds
| gpl-2.0 |
Erotemic/ibeis | ibeis/algo/graph/tests/mst_debug.py | 1 | 5614 | import networkx as nx
import utool as ut
import pandas as pd
# Hard-coded dump of a small annotation graph used to debug MST
# augmentation: `edges` maps aid -> {aid -> review attributes},
# `nodes` maps aid -> name-label attributes.
edges = {
    2234: {5383: {'decision': 'match', 'reviewed_tags': ['needswork']}},
    2265: {},
    2280: {},
    2654: {5334: {'decision': 'match',
                  'reviewed_tags': ['needswork', 'viewpoint', 'correctable', 'orientation']}},
    5334: {2654: {'decision': 'match',
                  'reviewed_tags': ['needswork', 'viewpoint', 'correctable', 'orientation']},
           5344: {'decision': 'match', 'reviewed_tags': []},
           5383: {'decision': 'match', 'reviewed_tags': []}},
    5338: {5344: {'decision': 'match', 'reviewed_tags': []},
           5383: {'decision': 'match', 'reviewed_tags': []}},
    5344: {5334: {'decision': 'match', 'reviewed_tags': []},
           5338: {'decision': 'match', 'reviewed_tags': []},
           5349: {'decision': 'match', 'reviewed_tags': []},
           5383: {'decision': 'match', 'reviewed_tags': []},
           5430: {'decision': 'match', 'reviewed_tags': []}},
    5349: {5344: {'decision': 'match', 'reviewed_tags': []},
           5399: {'decision': 'match', 'reviewed_tags': []}},
    5383: {2234: {'decision': 'match', 'reviewed_tags': ['needswork']},
           5334: {'decision': 'match', 'reviewed_tags': []},
           5338: {'decision': 'match', 'reviewed_tags': []},
           5344: {'decision': 'match', 'reviewed_tags': []},
           5430: {'decision': 'match', 'reviewed_tags': []}},
    5399: {5349: {'decision': 'match', 'reviewed_tags': []}},
    5430: {5344: {'decision': 'match', 'reviewed_tags': []},
           5383: {'decision': 'match', 'reviewed_tags': []}}
}
nodes = {2234: {'aid': 2234, 'name_label': 5977, 'orig_name_label': 5977},
         2265: {'aid': 2265, 'name_label': 5977, 'orig_name_label': 5977},
         2280: {'aid': 2280, 'name_label': 5977, 'orig_name_label': 5977},
         2654: {'aid': 2654, 'name_label': 5977, 'orig_name_label': 5977},
         5334: {'aid': 5334, 'name_label': 5977, 'orig_name_label': 5977},
         5338: {'aid': 5338, 'name_label': 5977, 'orig_name_label': 5977},
         5344: {'aid': 5344, 'name_label': 5977, 'orig_name_label': 5977},
         5349: {'aid': 5349, 'name_label': 5977, 'orig_name_label': 5977},
         5383: {'aid': 5383, 'name_label': 5977, 'orig_name_label': 5977},
         5399: {'aid': 5399, 'name_label': 5977, 'orig_name_label': 5977},
         5430: {'aid': 5430, 'name_label': 5977, 'orig_name_label': 5977}}
# rebuild the graph and copy node attributes onto it
graph = nx.Graph(edges)
graph.add_nodes_from(nodes.keys())
df = pd.DataFrame.from_dict(nodes, orient='index')
nx.set_node_attributes(graph, name='orig_name_label', values=ut.dzip(df['aid'], df['orig_name_label']))
nx.set_node_attributes(graph, name='name_label', values=ut.dzip(df['aid'], df['name_label']))
aug_graph = graph
node_to_label = nx.get_node_attributes(graph, 'name_label')
aid1, aid2 = 2265, 2280
label_to_nodes = ut.group_items(node_to_label.keys(),
                                node_to_label.values())
aug_graph = graph.copy()
# remove cut edges from augmented graph
edge_to_iscut = nx.get_edge_attributes(aug_graph, 'is_cut')
# NOTE(review): this first cut_edges computation is dead code -- it is
# immediately overwritten by the comprehension over edge_to_iscut below.
cut_edges = [
    (u, v)
    for (u, v, d) in aug_graph.edges(data=True)
    if not (
        d.get('is_cut') or
        d.get('decision', 'unreviewed') in ['nomatch']
    )
]
cut_edges = [edge for edge, flag in edge_to_iscut.items() if flag]
aug_graph.remove_edges_from(cut_edges)
# Enumerate cliques inside labels
unflat_edges = [list(ut.itertwo(nodes))
                for nodes in label_to_nodes.values()]
node_pairs = [tup for tup in ut.iflatten(unflat_edges)
              if tup[0] != tup[1]]
# Remove candidate MST edges that exist in the original graph
orig_edges = list(aug_graph.edges())
candidate_mst_edges = [edge for edge in node_pairs
                       if not aug_graph.has_edge(*edge)]
# randomness prevents chains and visually looks better
# NOTE(review): `np` (numpy) is never imported in this script -- this line
# raises NameError when run standalone; confirm intended environment.
rng = np.random.RandomState(42)
def _randint():
return 0
return rng.randint(0, 100)
aug_graph.add_edges_from(candidate_mst_edges)
# Weight edges in aug_graph such that existing edges are chosen
# to be part of the MST first before supplementary edges.
nx.set_edge_attributes(aug_graph, name='weight', values={edge: 0.1 for edge in orig_edges})
# NOTE(review): `infr` and `vt` below are not defined in this standalone
# script -- the try block always fails here and the except branch runs.
try:
    # Try linking by time for lynx data
    nodes = list(set(ut.iflatten(candidate_mst_edges)))
    aids = ut.take(infr.node_to_aid, nodes)
    times = infr.ibs.annots(aids).time
    node_to_time = ut.dzip(nodes, times)
    time_deltas = np.array([
        abs(node_to_time[u] - node_to_time[v])
        for u, v in candidate_mst_edges
    ])
    # print('time_deltas = %r' % (time_deltas,))
    maxweight = vt.safe_max(time_deltas, nans=False, fill=0) + 1
    time_deltas[np.isnan(time_deltas)] = maxweight
    time_delta_weight = 10 * time_deltas / (time_deltas.max() + 1)
    is_comp = infr.guess_if_comparable(candidate_mst_edges)
    comp_weight = 10 * (1 - is_comp)
    extra_weight = comp_weight + time_delta_weight
    # print('time_deltas = %r' % (time_deltas,))
    nx.set_edge_attributes(aug_graph, name='weight', values={edge: 10.0 + extra for edge, extra in zip(candidate_mst_edges, extra_weight)})
except Exception:
    print('FAILED WEIGHTING USING TIME')
    nx.set_edge_attributes(aug_graph, name='weight', values={edge: 10.0 + _randint() for edge in candidate_mst_edges})
new_edges = []
# NOTE(review): nx.connected_component_subgraphs was removed in
# networkx >= 2.4, and `e_` is not defined in this script -- confirm the
# intended networkx version before running.
for cc_sub_graph in nx.connected_component_subgraphs(aug_graph):
    mst_sub_graph = nx.minimum_spanning_tree(cc_sub_graph)
    # Only add edges not in the original graph
    for edge in mst_sub_graph.edges():
        if not graph.has_edge(*edge):
            new_edges.append(e_(*edge))
| apache-2.0 |
zhuango/python | pandasLearning/featureProcessing.py | 2 | 4334 | import numpy as np
import pandas as pd
from math import *
from sklearn import preprocessing
# def cut(tasks):
# count = 4
# tasks['任务gps经度'] = pd.factorize(pd.qcut(tasks['任务gps经度'], count))[0]
# tasks['任务gps纬度'] = pd.factorize(pd.qcut(tasks['任务gps纬度'], count))[0]
# attribute = 'vip_count_around_33934.34627910165'
# tasks[attribute] = pd.factorize(pd.qcut(tasks[attribute], count))[0]
# attribute = 'averaged_begining_time_33934.34627910165'
# tasks[attribute] = pd.factorize(pd.qcut(tasks[attribute], count))[0]
# attribute = 'averaged_credit_33934.34627910165'
# tasks[attribute] = pd.factorize(pd.qcut(tasks[attribute], count))[0]
# attribute = 'vip_count_around_16967.173139550825'
# #tasks[attribute][tasks.where(tasks[attribute]==0)[0]] = tasks[attribute][tasks[attribute].nonzero()[0]].min() / 10
# tasks[attribute] = pd.factorize(pd.qcut(tasks[attribute], count))[0]
# attribute = 'averaged_begining_time_16967.173139550825'
# #tasks[attribute][tasks.where(tasks[attribute]==0)[0]] = tasks[attribute][tasks[attribute].nonzero()[0]].min() / 10
# tasks[attribute] = pd.factorize(pd.qcut(tasks[attribute], count))[0]
# attribute = 'averaged_credit_16967.173139550825'
# #tasks[attribute][tasks.where(tasks[attribute]==0)[0]] = tasks[attribute][tasks[attribute].nonzero()[0]].min() / 10
# tasks[attribute] = pd.factorize(pd.qcut(tasks[attribute], count))[0]
# attribute = '【24653.4159638米内】任务数'
# tasks[attribute] = pd.factorize(pd.qcut(tasks[attribute], count))[0]
# attribute = '【12326.7079819米内】任务数'
# #tasks[attribute][tasks.where(tasks[attribute]==0)[0]] = tasks[attribute][tasks[attribute].nonzero()[0]].min() / 10
# tasks[attribute] = pd.factorize(pd.qcut(tasks[attribute], count))[0]
def standard(tasks, attri):
    """Standardize column `attri` of DataFrame `tasks` in place
    (zero mean, unit variance).

    StandardScaler requires a 2-D input; passing the 1-D Series
    ``tasks[attri]`` raises ValueError in scikit-learn, so select a
    one-column frame and flatten the scaled result back into the column.
    """
    scaler = preprocessing.StandardScaler()
    tasks[attri] = scaler.fit_transform(tasks[[attri]]).ravel()
# load the pre-computed task features and vip records
tasks = pd.read_csv('/home/laboratory/Desktop/math/featured_tasks.csv', header=0)
vips = pd.read_csv('/home/laboratory/Desktop/math/vips.csv', header=0)
# tasks = tasks[(tasks['任务gps纬度'] % 0.000001 > 1e-8 )]
# tasks = tasks[(tasks['任务gps经度'] % 0.00001 > 1e-7 )]
# keep only tasks whose listed price is between 65 and 75 (inclusive)
tasks = tasks[(tasks['任务标价'] >= 65) & (tasks['任务标价'] <= 75)]
# tasks = tasks[(tasks['任务标价'] >= 66.3) & (tasks['任务标价'] <= 69.3)]
# vips = vips[(vips['会员gps纬度'] % 0.00001 > 1e-7)]
# vips = vips[(vips['会员gps经度'] % 0.00001 > 1e-7)]
# standard(tasks, '任务gps经度')
# standard(tasks, '任务gps纬度')
# standard(tasks, 'vip_count_around_33934.34627910165')
# standard(tasks, 'averaged_begining_time_33934.34627910165')
# standard(tasks, 'averaged_credit_33934.34627910165')
# standard(tasks, 'vip_count_around_16967.173139550825')
# standard(tasks, 'averaged_begining_time_16967.173139550825')
# standard(tasks, 'averaged_credit_16967.173139550825')
# standard(tasks, 'averaged_limit16967.173139550825')
# standard(tasks, 'averaged_limit33934.34627910165')
# standard(tasks, '位置_factorized')
# standard(tasks, '【24653.4159638米内】任务数')
# standard(tasks, '【12326.7079819米内】任务数')
# standard(tasks, '聚类特征')
# standard(tasks, '任务到会员的最小距离')
# standard(tasks, '任务到会员的平均值')
# select and order the feature columns kept for model input
tasks = tasks[['任务号码',
               '任务gps经度',
               '任务gps纬度',
               'vip_count_around_33934.34627910165',
               'averaged_begining_time_33934.34627910165',
               'averaged_credit_33934.34627910165',
               'vip_count_around_16967.173139550825',
               'averaged_begining_time_16967.173139550825',
               'averaged_credit_16967.173139550825',
               '位置_factorized',
               '【24653.4159638米内】任务数',
               '【12326.7079819米内】任务数',
               'averaged_limit16967.173139550825',
               'averaged_limit33934.34627910165',
               '聚类特征',
               '任务标价',
               '任务执行情况',
               '任务到会员的最小距离',
               '任务到会员的平均值'
               ]]
# write out the (un-standardized) feature table and the vip table
tasks.to_csv("final_feature_linear_notStandard.csv", index=False)
vips.to_csv('vips_linear.csv', index=False)
#tasks.to_csv("final_feature_tree.csv") | gpl-2.0 |
ericdill/bokeh | bokeh/cli/core.py | 42 | 16025 | from __future__ import absolute_import, print_function
import sys, os
from six.moves.urllib import request as urllib2
from six.moves import cStringIO as StringIO
import pandas as pd
try:
import click
is_click = True
except ImportError:
is_click = False
from . import help_messages as hm
from .utils import (get_chart_params, get_charts_mapping,
get_data_series, keep_source_input_sync, get_data_from_url)
from .. import charts as bc
from ..charts import utils as bc_utils
from bokeh.models.widgets import Button
# Define a mapping to connect chart types supported arguments and chart classes
CHARTS_MAP = get_charts_mapping()
if is_click:
    @click.command()
    @click.option('--input', 'input_source', default=None,help=hm.HELP_INPUT)
    @click.option('--output', default='file://cli_output.html', help=hm.HELP_OUTPUT)
    @click.option('--title', default='Bokeh CLI')
    @click.option('--chart_type', default='Line')
    @click.option('--index', default='', help=hm.HELP_INDEX)
    @click.option('--series', default='', help=hm.HELP_SERIES)
    @click.option('--palette')
    @click.option('--buffer', default='f', help=hm.HELP_BUFFER)
    @click.option('--sync_with_source', default=False)
    @click.option('--update_ranges', 'update_ranges', flag_value=True,
                  default=False)
    @click.option('--legend', 'show_legend', flag_value=True,
                  default=False)
    @click.option('--window_size', default='0', help=hm.HELP_WIN_SIZE)
    @click.option('--map', 'map_', default=None)
    @click.option('--map_zoom', 'map_zoom', default=12)
    @click.option('--map_layer', 'map_layer', default="hybrid")
    @click.option('--smart_filters', 'smart_filters', flag_value=True,
                  default=False)
    def cli(input_source, output, title, chart_type, series, palette, index,
            buffer, sync_with_source, update_ranges, show_legend, window_size,
            map_, smart_filters, map_zoom, map_layer):
        """Bokeh Command Line Tool is a minimal client to access high level plotting
        functionality provided by bokeh.charts API.
        Examples:
        >> python bokeh-cli.py --title "My Nice Plot" --series "High,Low,Close"
        --chart_type "Line" --palette Reds --input sample_data/stocks_data.csv
        >> cat sample_data/stocks_data.csv | python bokeh-cli.py --buffer t
        >> python bokeh-cli.py --help
        """
        # thin adapter: all real work is delegated to the CLI class below
        cli = CLI(
            input_source, output, title, chart_type, series, palette, index, buffer,
            sync_with_source, update_ranges, show_legend, window_size, map_,
            smart_filters, map_zoom, map_layer
        )
        cli.run()
else:
    # click is unavailable: provide a stub that explains the missing dependency
    def cli():
        print("The CLI tool requires click to be installed")
class CLI(object):
    """This is the Bokeh Command Line Interface class and it is in
    charge of providing a very high level access to bokeh charts and
    extends it with functionality.
    """
    def __init__(self, input_source, output, title, chart_type, series, palette,
                 index, buffer, sync_with_source, update_ranges, show_legend,
                 window_size, map_, smart_filters, map_zoom, map_layer):
        """Args:
            input_source (str): path to the series data file (i.e.:
                /source/to/my/data.csv)
                NOTE: this can be either a path to a local file or an url
            output (str, optional): Selects the plotting output, which
                could either be sent to an html file or a bokeh server
                instance. Syntax convention for this option is as follows:
                <output_type>://<type_arg>
                where:
                  - output_type: 'file' or 'server'
                  - 'file' type options: path_to_output_file
                  - 'server' type options syntax: docname[@url][@name]
                Defaults to: --output file://cli_output.html
                Examples:
                    --output file://cli_output.html
                    --output file:///home/someuser/bokeh_rocks/cli_output.html
                    --output server://clidemo
                Default: file://cli_output.html.
            title (str, optional): the title of your chart.
                Default: None.
            chart_type (str, optional): charts classes to use to consume and
                render the input data.
                Default: Line.
            series (str, optional): Name of the series from the input source
                to include in the plot. If not specified all source series
                will be included.
                Defaults to None.
            palette (str, optional): name of the colors palette to use.
                Default: None.
            index (str, optional): Name of the data series to be used as the
                index when plotting. By default the first series found on the
                input file is taken
                Default: None
            buffer (str, optional): if is `t` reads data source as string from
                input buffer using StringIO(sys.stdin.read()) instead of
                reading from a file or an url.
                Default: "f"
            sync_with_source (bool, optional): if True keep the charts source
                created on bokeh-server sync'ed with the source acting like
                `tail -f`.
                Default: False
            window_size (int, optional): show up to N values then start dropping
                off older ones
                Default: '0'
        Attributes:
            source (obj): datasource object for the created chart.
            chart (obj): created chart object.
        """
        self.input = input_source
        self.series = series
        self.index = index
        # last byte read from the input; used by the tail -f style sync
        self.last_byte = -1
        self.sync_with_source = sync_with_source
        self.update_ranges = update_ranges
        self.show_legend = show_legend
        self.window_size = int(window_size)
        self.smart_filters = smart_filters
        self.map_options = {}
        self.current_selection = []
        self.source = self.get_input(input_source, buffer)
        # get the charts specified by the user
        self.factories = create_chart_factories(chart_type)
        if palette:
            print ("Sorry, custom palettes not supported yet, coming soon!")
        # define charts init parameters specified from cmd line and create chart
        self.chart_args = get_chart_params(
            title, output, show_legend=self.show_legend
        )
        if self.smart_filters:
            self.chart_args['tools'] = "pan,wheel_zoom,box_zoom,reset,save," \
                                       "box_select,lasso_select"
        if map_:
            # map_ is a "lat,lng" string; split it into float coordinates
            self.map_options['lat'], self.map_options['lng'] = \
                [float(x) for x in map_.strip().split(',')]
            self.map_options['zoom'] = int(map_zoom)
            # Yeah, unfortunate namings.. :-)
            self.map_options['map_type'] = map_layer
    def on_selection_changed(self, obj, attrname, old, new):
        """Callback wired to each chart source 'selected' attribute;
        remembers the latest selection for on_copy."""
        self.current_selection = new
    def limit_source(self, source):
        """ Limit source to cli.window_size, if set.
        Args:
            source (mapping): dict-like object
        """
        if self.window_size:
            for key in source.keys():
                source[key] = source[key][-self.window_size:]
    def run(self):
        """ Start the CLI logic creating the input source, data conversions,
        chart instances to show and all other niceties provided by CLI
        """
        try:
            self.limit_source(self.source)
            children = []
            if self.smart_filters:
                copy_selection = Button(label="copy current selection")
                copy_selection.on_click(self.on_copy)
                children.append(copy_selection)
            self.chart = create_chart(
                self.series, self.source, self.index, self.factories,
                self.map_options, children=children, **self.chart_args
            )
            self.chart.show()
            self.has_ranged_x_axis = 'ranged_x_axis' in self.source.columns
            self.columns = [c for c in self.source.columns if c != 'ranged_x_axis']
            if self.smart_filters:
                for chart in self.chart.charts:
                    chart.source.on_change('selected', self, 'on_selection_changed')
                self.chart.session.poll_document(self.chart.doc)
        except TypeError:
            # a TypeError with no series typically means the user must name
            # which series to plot; show them the available ones
            if not self.series:
                series_list = ', '.join(self.chart.values.keys())
                print(hm.ERR_MSG_TEMPL % series_list)
            raise
        if self.sync_with_source:
            keep_source_input_sync(self.input, self.update_source, self.last_byte)
    def on_copy(self, *args, **kws):
        """Copy the currently selected rows to the clipboard (macOS pbcopy)."""
        print("COPYING CONTENT!")
        # TODO: EXPERIMENTAL!!! THIS EXPOSE MANY SECURITY ISSUES AND SHOULD
        # BE REMOVED ASAP!
        txt = ''
        for rowind in self.current_selection:
            row = self.source.iloc[rowind]
            txt += u"%s\n" % (u",".join(str(row[c]) for c in self.columns))
        os.system("echo '%s' | pbcopy" % txt)
    def update_source(self, new_source):
        """ Update self.chart source with the new data retrieved from
        new_source. It is done by parsing the new source line,
        transforming it to data to be appended to self.chart source
        updating it on chart.session and actually updating chart.session
        objects.
        Args:
            new_source (str): string that contains the new source row to
                read to the current chart source.
        """
        ns = pd.read_csv(StringIO(new_source), names=self.columns)
        len_source = len(self.source)
        if self.has_ranged_x_axis:
            ns['ranged_x_axis'] = [len_source]
            self.index = 'ranged_x_axis'
        ns.index = [len_source]
        self.source = pd.concat([self.source, ns])
        # TODO: This should be replaced with something that just computes
        # the new data and source
        fig = create_chart(self.series, ns, self.index, self.factories,
                           self.map_options, **self.chart_args)
        for i, _c in enumerate(fig.charts):
            if not isinstance(_c, bc.GMap):
                # TODO: nested charts are getting ridiculous. Need a better
                # better interface for charts :-)
                scc = self.chart.charts[i]
                # append the new row's data to the existing chart source
                for k, v in _c.source.data.items():
                    scc.source.data[k] = list(scc.source.data[k]) + list(v)
                self.limit_source(scc.source.data)
                chart = scc.chart
                chart.session.store_objects(scc.source)
                if self.update_ranges:
                    # grow the plot ranges so the new data stays visible
                    plot = chart.plot
                    plot.y_range.start = min(
                        plot.y_range.start, _c.chart.plot.y_range.start
                    )
                    plot.y_range.end = max(
                        plot.y_range.end, _c.chart.plot.y_range.end
                    )
                    plot.x_range.start = min(
                        plot.x_range.start, _c.chart.plot.x_range.start
                    )
                    plot.x_range.end = max(
                        plot.x_range.end, _c.chart.plot.x_range.end
                    )
                    chart.session.store_objects(plot)
    def get_input(self, filepath, buffer):
        """Parse received input options. If buffer is not false (=='f') if
        gets input data from input buffer otherwise opens file specified in
        sourcefilename,
        Args:
            filepath (str): path to the file to read from to retrieve data
            buffer (str): if == 't' reads data from input buffer
        Returns:
            string read from filepath/buffer
        """
        if buffer != 'f':
            filepath = StringIO(sys.stdin.read())
        elif filepath is None:
            msg = "No Input! Please specify --source_filename or --buffer t"
            raise IOError(msg)
        else:
            if filepath.lower().startswith('http'):
                # Create a request for the given URL.
                request = urllib2.Request(filepath)
                data = get_data_from_url(request)
                self.last_byte = len(data)
            else:
                filepath = open(filepath, 'r').read()
                self.last_byte = len(filepath)
                filepath = StringIO(filepath)
        source = pd.read_csv(filepath)
        return source
def create_chart(series, source, index, factories, map_options=None, children=None, **args):
    """Create charts instances from types specified in factories using
    data series names, source, index and args
    Args:
        series (list(str)): list of strings specifying the names of the
            series to keep from source
        source (DataFrame): pandas DataFrame with the data series to be
            plotted
        index (str): name of the series of source to be used as index.
        factories (list(ChartObject)): list of chart classes to be used
            to create the charts to be plotted
        **args: arguments to pass to the charts when creating them.
    """
    if not index:
        # if no index was specified as for x axis
        # we take a default "range"
        index = 'ranged_x_axis'
        # add the new x range data to the source dataframe
        source[index] = range(len(source[source.columns[0]]))
    indexes = [x for x in index.split(',') if x]
    data_series = get_data_series(series, source, indexes)
    # parse queries to create the charts..
    charts = []
    for chart_type in factories:
        if chart_type == bc.GMap:
            if not map_options or \
                    not all([x in map_options for x in ['lat', 'lng']]):
                raise ValueError("GMap Charts need lat and lon coordinates!")
            # NOTE(review): the GMap chart built here is never appended to
            # `charts` (the append below sits in the else branch) -- confirm
            # whether that is intended.
            all_args = dict(map_options)
            all_args.update(args)
            chart = chart_type(**all_args)
        else:
            if chart_type == bc.TimeSeries:
                # in case the x axis type is datetime that column must be converted to
                # datetime
                data_series[index] = pd.to_datetime(source[index])
            elif chart_type == bc.Scatter:
                if len(indexes) == 1:
                    # single index: reuse it for every plotted series
                    scatter_ind = [x for x in data_series.pop(indexes[0]).values]
                    scatter_ind = [scatter_ind] * len(data_series)
                else:
                    # one index per series; counts must match
                    scatter_ind = []
                    for key in indexes:
                        scatter_ind.append([x for x in data_series.pop(key).values])
                    if len(scatter_ind) != len(data_series):
                        err_msg = "Number of multiple indexes must be equals" \
                                  " to the number of series"
                        raise ValueError(err_msg)
                for ind, key in enumerate(data_series):
                    values = data_series[key].values
                    # NOTE(review): under Python 3 zip() is a lazy iterator;
                    # this module appears to target Python 2 -- confirm
                    # before porting (list(zip(...)) would be needed).
                    data_series[key] = zip(scatter_ind[ind], values)
            chart = chart_type(data_series, **args)
            if hasattr(chart, 'index'):
                chart.index = index
            charts.append(chart)
    fig = bc_utils.Figure(*charts, children=children, **args)
    return fig
def create_chart_factories(chart_types):
    """Build the list of chart classes named in a comma-separated string.

    Args:
        chart_types (str): comma-separated chart class names, e.g.
            'Line,step'. Empty items (from stray commas) are skipped.

    Returns:
        list: chart classes resolved through ``get_chart``.
    """
    factories = []
    for name in chart_types.split(','):
        if name:
            factories.append(get_chart(name))
    return factories
def get_chart(class_name):
    """Look up a bokeh chart class by name (case-insensitive, trimmed).

    Args:
        class_name (str): name of the chart class to return (i.e.: Line|step)
    """
    key = class_name.strip().lower()
    return CHARTS_MAP[key]
if __name__ == '__main__':
cli()
| bsd-3-clause |
nhoffman/bioy | bioy_pkg/subcommands/classifier.py | 2 | 37986 | # This file is part of Bioy
#
# Bioy is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Bioy is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Bioy. If not, see <http://www.gnu.org/licenses/>.
"""Classify sequences by grouping blast output by matching taxonomic names
Optional grouping by specimen and query sequences
Running the program
-------------------
::
positional arguments:
blast_file CSV tabular blast file of query and subject hits.
seq_info File mapping reference seq name to tax_id
taxonomy Table defining the taxonomy for each tax_id
optional arguments:
-h, --help show this help message and exit
--threads NUM Number of threads (CPUs). Can also specify with
environment variable THREADS_ALLOC. [32]
--copy-numbers CSV Estimated 16s rRNA gene copy number for each
tax_ids (CSV file with columns: tax_id, median)
--rank-thresholds CSV
Columns [tax_id,ranks...]
--specimen-map CSV CSV file with columns (name, specimen) assigning
sequences to groups. The default behavior is to
treat all query sequences as
belonging to one specimen.
-w CSV, --weights CSV
Optional headless csv file with columns 'seqname',
'count' providing weights for each query sequence
described in the blast input (used, for example, to
describe cluster sizes for corresponding cluster
centroids).
-o FILE, --out FILE Classification results.
-O FILE, --details-out FILE
Optional details of taxonomic assignments.
  --details-full        do not limit out_details to only the largest cluster per
assignment
--group-def INTEGER define a group threshold for a particular rank
overriding --max-group-size. example:
genus:2 (NOT IMPLEMENTED)
--has-header specify this if blast data has a header
--min-identity PERCENT
minimum identity threshold for accepting matches
[>= 0.0]
--max-identity PERCENT
maximum identity threshold for accepting matches
[<= 100.0]
--min-cluster-size INTEGER
minimum cluster size to include in classification
output [1]
--min-coverage PERCENT
percent of alignment coverage of blast result [0.0]
--specimen LABEL Single group label for reads
--starred PERCENT Names of organisms for which at least one reference
sequence has pairwise identity with a query
sequence of at least PERCENT will be marked with an
asterisk[100.0]
--max-group-size INTEGER
                        group multiple target-rank assignments that exceed
a threshold to a higher rank [3]
Positional arguments
++++++++++++++++++++
blast_file
==========
A csv file with columns **qseqid**, **sseqid**, **pident**,
**qstart**, **qend**, **qlen** and **qcovs**.
.. note:: The actual header is optional if using default blast out format but
if present make sure to use the --has-header switch
seq_info
========
A csv file with minimum columns **seqname** and **tax_id**. Additional
columns will be included in the details output.
taxonomy
========
A csv file with columns **tax_id**, **rank** and **tax_name**, plus at least
one additional rank column(s) creating a taxonomic tree such as **species**,
**genus**, **family**, **class**, **phylum**, **kingdom** and/or **root**.
The rank columns also give an order of specificity from right to left,
least specific to most specific respectively.
Optional input
++++++++++++++
rank-thresholds
===============
TODO
copy-numbers
============
Below is an *example* copy numbers csv with the required columns:
====== ==================== ======
tax_id tax_name median
====== ==================== ======
155977 Acaryochloris 2.00
155978 Acaryochloris marina 2.00
434 Acetobacter 5.00
433 Acetobacteraceae 3.60
====== ==================== ======
weights
=======
Headerless file containing two columns specifying the seqname (clustername) and
weight (or number of sequences in the cluster).
Output
++++++
out
===
A csv with columns and headers as in the example below:
=========== =============== ======================================
specimen assignment_id assignment
=========== =============== ======================================
039_3 0 Pseudomonas mendocina;Pseudonocardia
039_3 1 Rhizobiales
039_3 2 Alcaligenes faecalis*
039_3 3 [no blast result]
=========== =============== ======================================
======= ============= =============
low max_percent min_percent
======= ============= =============
95.00 99.02 95.74
95.00 98.91 95.31
99.00 100.00 99.00
======= ============= =============
================= ======= =========== ===========
condensed_rank reads pct_reads clusters
================= ======= =========== ===========
species 6 35.29 1
genus 5 29.41 1
species 5 29.41 1
1 5.88 1
================= ======= =========== ===========
details-out
===========
A csv that is basically a blast results breakdown of the `out`_ output.
Internal functions
------------------
Known bugs
----------
Tax_ids of valid Blast hits (hits that meet their rank thresholds) may be
assigned tax_ids of a higher threshold that *could* represent invalid tax_ids
(tax_ids that may *not* have passed the rank threshold).
"""
import os
import sys
import logging
import pandas as pd
import math
from bioy_pkg import sequtils, _data as datadir
log = logging.getLogger(__name__)
ASSIGNMENT_TAX_ID = 'assignment_tax_id'
def raw_filtering(blast_results, min_coverage=None,
                  max_identity=None, min_identity=None):
    """Run raw hi, low and coverage filters and output log information.

    Args:
        blast_results (DataFrame): must contain 'qcovs' and 'pident' columns.
        min_coverage (float, optional): drop rows with qcovs below this value.
        max_identity (float, optional): drop rows with pident above this value.
        min_identity (float, optional): drop rows with pident below this value.

    Returns:
        DataFrame of the surviving rows. Each filter that drops rows is
        reported via log.warning.
    """
    def _apply(df, mask, reason):
        # keep rows matching `mask`, logging how many were dropped and why
        kept = df[mask]
        dropped = len(df) - len(kept)
        if dropped:
            log.warning('dropping %s sequences %s', dropped, reason)
        return kept

    if min_coverage:
        blast_results = _apply(
            blast_results,
            blast_results['qcovs'] >= min_coverage,
            'below coverage threshold')
    if max_identity:
        blast_results = _apply(
            blast_results,
            blast_results['pident'] <= max_identity,
            'above max_identity')
    if min_identity:
        blast_results = _apply(
            blast_results,
            blast_results['pident'] >= min_identity,
            'below min_identity')
    return blast_results
def round_up(x):
    """Clamp x to a floor of 0.01: any value below 0.01 becomes 0.01."""
    floor = 0.01
    return x if x > floor else floor
def star(df, starred):
    """Set a boolean 'starred' column on df: True for every row when any
    pident in the frame meets or exceeds the `starred` threshold.
    """
    any_above_threshold = (df['pident'] >= starred).any()
    df['starred'] = any_above_threshold
    return df
def condense_ids(
        df, tax_dict, ranks, max_group_size, threshold_assignments=False):
    """
    Create mapping from tax_id to its condensed id. Also creates the
    assignment hash on either the condensed_id or assignment_tax_id decided
    by the --split-condensed-assignments switch.
    By taking a hash of the set (frozenset) of ids the qseqid is given a
    unique identifier (the hash). Later, we will use this hash and
    assign an assignment name based on either the set of condensed_ids or
    assignment_tax_ids. The motivation for using a hash rather than
    the actual assignment text for grouping is that the assignment text
    can contain extra annotations that are independent of which
    assignment group a qseqid belongs to such as a 100% id star.
    """
    condensed = sequtils.condense_ids(
        df[ASSIGNMENT_TAX_ID].unique(),
        tax_dict,
        ranks=ranks,
        max_size=max_group_size)
    # build a tax_id -> condensed_id lookup frame, indexed by tax_id
    condensed = pd.DataFrame(
        condensed.items(),
        columns=[ASSIGNMENT_TAX_ID, 'condensed_id'])
    condensed = condensed.set_index(ASSIGNMENT_TAX_ID)
    if threshold_assignments:
        # group on the raw assignment tax_ids
        assignment_hash = hash(frozenset(condensed.index.unique()))
    else:
        # group on the condensed ids
        assignment_hash = hash(frozenset(condensed['condensed_id'].unique()))
    condensed['assignment_hash'] = assignment_hash
    return df.join(condensed, on=ASSIGNMENT_TAX_ID)
def assign(df, tax_dict):
    """Create str assignment based on tax_ids str and starred boolean.
    """
    # group keys are (condensed_id, starred) pairs
    ids_stars = df.groupby(by=['condensed_id', 'starred']).groups.keys()
    df['assignment'] = sequtils.compound_assignment(ids_stars, tax_dict)
    return df
def assignment_id(df):
    """Replace df's index with a string-typed 'assignment_id' index.

    The current index is dropped (specimen is retained in the group key)
    and replaced by a 0-based positional index rendered as strings, which
    also accommodates details rows with no assignment or assignment_id.
    """
    reindexed = df.reset_index(drop=True)
    reindexed.index = reindexed.index.astype(str)
    reindexed.index.name = 'assignment_id'
    return reindexed
def best_rank(s, ranks):
    """Return the most frequent rank in s, breaking ties by specificity.

    `ranks` are sorted with less specific first, for example:
    ['root', 'kingdom', 'phylum', 'order', 'family',
     'genus', 'species_group', 'species']
    so a larger index in `ranks` means a more specific rank.

    Returns None for an empty series ([no blast result]).
    """
    value_counts = s.value_counts()
    if len(value_counts) == 0:
        # [no blast result]
        return None

    def specificity(rank):
        # ranks not found in `ranks` sort before (less specific than) all others
        return ranks.index(rank) if rank in ranks else -1

    # Among the most frequent ranks, choose the most specific one.
    # NOTE: specificity must be computed on the rank *names* (the
    # value_counts index); the previous implementation applied it to the
    # count values themselves, which always yielded -1 and left the
    # tie-break to value_counts' arbitrary ordering.
    majority = value_counts[value_counts == value_counts.max()]
    return max(majority.index, key=specificity)
def find_tax_id(series, valids, r, ranks):
    """Return the most taxonomic specific tax_id available for the given
    Series. If a tax_id is already present in valids[r] then return None.
    """
    # restrict to rank r and everything less specific, then take the first
    # rank that actually has a tax_id
    tail_ranks = ranks[ranks.index(r):]
    candidates = series[tail_ranks].dropna()
    rank_name = candidates.index[0]
    tax_id = candidates.iloc[0]
    if tax_id in valids[rank_name].unique():
        return None
    return tax_id
def select_valid_hits(df, ranks):
    """Return valid hits of the most specific rank that passed their
    corresponding rank thresholds. Hits that pass their rank thresholds
    but do not have a tax_id at that rank will be bumped to a less specific
    rank id and verified as a unique tax_id.
    """
    # NOTE(review): given the 'Move to higher rank' comment below, `ranks`
    # appears to be iterated most-specific first here -- confirm callers
    # pass it in that order (the module-level docs list ranks least
    # specific first).
    for r in ranks:
        thresholds = df['{}_threshold'.format(r)]
        pidents = df['pident']
        valid = df[thresholds < pidents]
        if valid.empty:
            # Move to higher rank
            continue
        tax_ids = valid[r]
        na_ids = tax_ids.isnull()
        # Occasionally tax_ids will be missing at a certain rank.
        # If so use the next less specific tax_id available
        if na_ids.all():
            continue
        if na_ids.any():
            # bump up missing taxids
            have_ids = valid[tax_ids.notnull()]
            found_ids = valid[na_ids].apply(
                find_tax_id, args=(have_ids, r, ranks), axis=1)
            # NOTE(review): Series.append was removed in pandas 2.0
            # (pd.concat is the replacement) -- confirm the pinned pandas
            # version before upgrading.
            tax_ids = have_ids[r].append(found_ids)
        # NOTE(review): assigning into `valid` (a filtered view of df) can
        # trigger pandas' SettingWithCopy warning -- confirm intended.
        valid[ASSIGNMENT_TAX_ID] = tax_ids
        valid['assignment_threshold'] = thresholds
        # return notnull() assignment_threshold valid values
        return valid[valid[ASSIGNMENT_TAX_ID].notnull()]
    # nothing passed
    df[ASSIGNMENT_TAX_ID] = None
    df['assignment_threshold'] = None
    return pd.DataFrame(columns=df.columns)
def calculate_pct_references(df, pct_reference):
    """Annotate *df* with the ratio of distinct subject hits (sseqid) to
    the total number of reference sequences available for the tax_ids
    present in *df* (looked up in *pct_reference*'s ``count`` column).
    """
    unique_taxa = df[['tax_id']].drop_duplicates()
    total_refs = unique_taxa.join(pct_reference, on='tax_id')['count'].sum()
    distinct_sseqids = float(len(df['sseqid'].drop_duplicates()))
    df['pct_reference'] = distinct_sseqids / total_refs
    return df
def pct(s):
    """Express each element of series *s* as a percentage of the series sum."""
    total = s.sum()
    return (s / total) * 100
def load_rank_thresholds(path=None, usecols=None):
    """Load a rank-thresholds file. If no argument is specified the default
    rank_threshold_defaults.csv file will be loaded.

    :param path: csv file of rank thresholds; defaults to
        ``rank_thresholds.csv`` under the package ``datadir``
    :param usecols: rank columns to load in addition to ``tax_id``
    :returns: DataFrame indexed by tax_id (dtype str)
    """
    if path is None:
        # resolved lazily so defining this function does not require the
        # module-level `datadir` to exist at import time
        path = os.path.join(datadir, 'rank_thresholds.csv')
    if usecols is None:
        # bug fix: the previous default raised TypeError (['tax_id'] + None)
        usecols = []
    return pd.read_csv(
        path,
        comment='#',
        usecols=['tax_id'] + list(usecols),
        dtype=dict(tax_id=str)).set_index('tax_id')
def copy_corrections(copy_numbers, blast_results, user_file=None):
    """Return the mean 16S copy-number correction per
    (specimen, assignment_hash) for the assignments in *blast_results*.

    :param copy_numbers: csv file with columns tax_id, median
    :param blast_results: DataFrame containing ASSIGNMENT_TAX_ID,
        'specimen' and 'assignment_hash' columns
    :param user_file: unused; kept for interface compatibility
    :returns: Series of mean medians indexed by (specimen, assignment_hash)
    """
    copy_numbers = pd.read_csv(
        copy_numbers,
        dtype=dict(tax_id=str, median=float),
        usecols=['tax_id', 'median']).set_index('tax_id')
    # get root out (taxid: 1) and set it as the default correction value
    # set index nana (no blast result) to the defaul value
    # .at replaces DataFrame.get_value, which was removed in pandas 1.0
    default = copy_numbers.at['1', 'median']
    default_entry = pd.DataFrame(default, index=[None], columns=['median'])
    # pd.concat replaces DataFrame.append, which was removed in pandas 2.0
    copy_numbers = pd.concat([copy_numbers, default_entry])
    # do our copy number correction math
    corrections = blast_results[
        [ASSIGNMENT_TAX_ID, 'specimen', 'assignment_hash']]
    corrections = corrections.drop_duplicates()
    corrections = corrections.set_index(ASSIGNMENT_TAX_ID)
    corrections = corrections.join(copy_numbers)
    # any tax_id not present will receive default tax_id
    corrections['median'] = corrections['median'].fillna(default)
    corrections = corrections.groupby(
        by=['specimen', 'assignment_hash'], sort=False)
    corrections = corrections['median'].mean()
    return corrections
def join_thresholds(df, thresholds, ranks):
    """Thresholds are matched to thresholds by rank id.
    If a rank id is not present in the thresholds then the next specific
    rank id is used all the way up to `root'. If the root id still does
    not match then a warning is issued with the taxname and the blast hit
    is dropped.
    """
    with_thresholds = pd.DataFrame(columns=df.columns)  # temp DFrame
    for r in ranks:
        # pd.concat replaces DataFrame.append (removed in pandas 2.0)
        with_thresholds = pd.concat(
            [with_thresholds, df.join(thresholds, on=r, how='inner')])
        # keep only the rows that have not joined yet for the next rank
        df = df[~df.index.isin(with_thresholds.index)]
    # issue warning messages for everything that did not join
    if len(df) > 0:
        tax_names = df['tax_name'].drop_duplicates()
        msg = ('dropping blast hit `{}\', no valid '
               'taxonomic threshold information found')
        tax_names.apply(lambda x: log.warn(msg.format(x)))
    return with_thresholds
def get_compression(io):
    """Infer the pandas ``compression`` argument for an output target.

    Returns 'gzip' or 'bz2' based on the file extension, or None for
    stdout and unrecognized extensions.
    """
    if io is sys.stdout:
        return None
    extension = os.path.splitext(io)[-1]
    return {'.gz': 'gzip', '.bz2': 'bz2'}.get(extension)
def build_parser(parser):
    """Add the classifier's command-line arguments to *parser*.

    Arguments are grouped as: required inputs, optional inputs, common
    outputs, and switches/options.
    """
    # required inputs
    parser.add_argument(
        'blast_file',
        help="""CSV tabular blast file of
        query and subject hits, containing
        at least {}.""".format(sequtils.BLAST_FORMAT_DEFAULT))
    parser.add_argument(
        'seq_info',
        help='File mapping reference seq name to tax_id')
    parser.add_argument(
        'taxonomy',
        help="""Table defining the taxonomy for each tax_id""")
    # optional inputs
    parser.add_argument(
        '--copy-numbers', metavar='CSV',
        help="""Estimated 16s rRNA gene copy number for each tax_ids
        (CSV file with columns: tax_id, median)""")
    parser.add_argument(
        '--rank-thresholds', metavar='CSV',
        help="""Columns [tax_id,ranks...]""")
    parser.add_argument(
        '--specimen-map', metavar='CSV',
        help="""CSV file with columns (name, specimen) assigning sequences to
        groups. The default behavior is to treat all query sequences
        as belonging to one specimen.""")
    parser.add_argument(
        '-w', '--weights', metavar='CSV',
        help="""Optional headless csv file with columns 'seqname',
        'count' providing weights for each query sequence described in
        the blast input (used, for example, to describe cluster sizes
        for corresponding cluster centroids).""")
    # common outputs
    parser.add_argument(
        '-o', '--out',
        default=sys.stdout,
        metavar='FILE',
        help="Classification results.")
    parser.add_argument(
        '-O', '--details-out',
        metavar='FILE',
        help="""Optional details of taxonomic assignments.""")
    # switches and options
    parser.add_argument(
        '--details-full', action='store_true',
        help='do not limit out_details to only larget cluster per assignment')
    parser.add_argument(
        '--hits-below-threshold',
        action='store_true',
        help=('Hits that were below the best-rank threshold '
              'will be included in the details'))
    parser.add_argument(
        '--include-ref-rank',
        action='append',
        default=[],
        help=("Given a single rank (species,genus,etc), "
              "include each reference "
              "sequence's tax_id as $\{rank\}_id and its taxonomic name as "
              "$\{rank\}_name in details output "))
    parser.add_argument(
        '--group-def', metavar='INTEGER', action='append',
        default=[], help="""define a group threshold for a
        particular rank overriding --max-group-size. example:
        genus:2 (NOT IMPLEMENTED)""")
    parser.add_argument(
        '--has-header', action='store_true',
        help='specify this if blast data has a header')
    parser.add_argument(
        '--min-identity', metavar='PERCENT', type=float,
        help="""minimum identity threshold
        for accepting matches""")
    parser.add_argument(
        '--max-identity', metavar='PERCENT', type=float,
        help="""maximum identity threshold for
        accepting matches""")
    parser.add_argument(
        '--min-cluster-size', default=1, metavar='INTEGER', type=int,
        help="""minimum cluster size to include in
        classification output [%(default)s]""")
    parser.add_argument(
        '--min-coverage', type=float, metavar='PERCENT',
        help='percent of alignment coverage of blast result')
    parser.add_argument(
        '--specimen', metavar='LABEL',
        help="""Single group label for reads""")
    parser.add_argument(
        '--starred', default=100.0, metavar='PERCENT', type=float,
        help="""Names of organisms for which at least one reference
        sequence has pairwise identity with a query sequence of at
        least PERCENT will be marked with an asterisk [%(default)s]""")
    parser.add_argument(
        '--max-group-size', metavar='INTEGER', default=3, type=int,
        help="""group multiple target-rank assignments that excede a
        threshold to a higher rank [%(default)s]""")
    parser.add_argument(
        '--pct-reference', action='store_true',
        help="""include column with percent sseqids per assignment_id
        (NOT IMPLEMENTED)""")
    parser.add_argument(
        '--split-condensed-assignments',
        action='store_true',
        dest='threshold_assignments',
        help=('Do not combine common condensed assignments'))
    parser.add_argument(
        '--limit', type=int, help='limit number of blast results')
    parser.add_argument(
        '--best-n-hits', type=int,
        help="""for each query sequence, filter out all but the best N hits,
        based on the number of mismatches. NOTE: If using this option, blast
        results MUST contain "mismatch" column and column headers
        """)
def action(args):
    """Run the classifier end to end: load blast hits, join sequence info,
    taxonomy and rank thresholds, select and condense valid assignments,
    then write the summary csv (``--out``) and, optionally, the per-hit
    details csv (``--details-out``).
    """
    # for debugging:
    # pd.set_option('display.max_columns', None)
    # pd.set_option('display.max_rows', None)
    # format blast data and add additional available information
    names = None if args.has_header else sequtils.BLAST_HEADER_DEFAULT
    header = 0 if args.has_header else None
    usecols = ['qseqid', 'sseqid', 'pident', 'qcovs']
    if args.best_n_hits:
        usecols.append('mismatch')
    log.info('loading blast results')
    blast_results = pd.read_csv(
        args.blast_file,
        dtype=dict(qseqid=str, sseqid=str, pident=float, coverage=float),
        names=names,
        na_filter=True,  # False is faster
        header=header,
        usecols=usecols,
        nrows=args.limit)
    if blast_results.empty:
        log.info('blast results empty, exiting.')
        return
    # load specimen-map
    if args.specimen_map:
        # if a specimen_map is defined and a qseqid is not included in the map
        # hits to that qseqid will be dropped (inner join)
        spec_map = pd.read_csv(
            args.specimen_map,
            names=['qseqid', 'specimen'],
            usecols=['qseqid', 'specimen'],
            dtype=str)
        spec_map = spec_map.drop_duplicates()
        spec_map = spec_map.set_index('qseqid')
        blast_results = blast_results.join(spec_map, on='qseqid', how='inner')
    elif args.specimen:
        blast_results['specimen'] = args.specimen
    else:
        blast_results['specimen'] = blast_results['qseqid']  # by qseqid
    # get a set of qseqids for identifying [no blast hits] after filtering
    qseqids = blast_results[['specimen', 'qseqid']].drop_duplicates()
    blast_results_len = len(blast_results)
    log.info('successfully loaded {} blast results for {} query '
             'sequences'.format(blast_results_len, len(qseqids)))
    blast_results = raw_filtering(blast_results)
    # remove no blast hits
    # no_blast_results will be added back later but we do not
    # want to confuse these with blast results filter by joins
    log.info('identifying no_blast_hits')
    blast_results = blast_results[blast_results['sseqid'].notnull()]
    # load seq_info as a bridge to the sequence taxonomy. Additional
    # columns can be specified to be included in the details-out file
    # such as accession number
    seq_info = pd.read_csv(
        args.seq_info,
        usecols=['seqname', 'tax_id', 'accession'],
        dtype=dict(seqname=str, tax_id=str, accession=str))
    seq_info = seq_info.set_index('seqname')
    # rename index to match blast results column name
    seq_info.index.name = 'sseqid'
    # merge blast results with seq_info - do this early so that
    # refseqs not represented in the blast results are discarded in
    # the merge.
    blast_results_len = len(blast_results)
    log.info('joining seq_info file')
    blast_results = blast_results.join(seq_info, on='sseqid', how='inner')
    len_diff = blast_results_len - len(blast_results)
    if len_diff:
        log.warn('{} subject sequences dropped without '
                 'records in seq_info file'.format(len_diff))
    # load the full taxonomy table. Rank specificity as ordered from
    # left (less specific) to right (more specific)
    taxonomy = pd.read_csv(args.taxonomy, dtype=str).set_index('tax_id')
    # get the a list of rank columns ordered by specificity (see above)
    # NOTE: we are assuming the rank columns
    ranks = taxonomy.columns.tolist()
    ranks = ranks[ranks.index('root'):]
    # now combine just the rank columns to the blast results
    blast_results_len = len(blast_results)
    log.info('joining taxonomy file')
    blast_results = blast_results.join(
        taxonomy[['tax_name', 'rank'] + ranks], on='tax_id', how='inner')
    len_diff = blast_results_len - len(blast_results)
    if len_diff:
        msg = '{} subject sequences dropped without records in taxonomy file'
        log.warn(msg.format(len_diff))
    # load the default rank thresholds
    rank_thresholds = load_rank_thresholds(usecols=ranks)
    # and any additional thresholds specified by the user
    if args.rank_thresholds:
        rank_thresholds = rank_thresholds.append(
            load_rank_thresholds(path=args.rank_thresholds, usecols=ranks))
        # overwrite with user defined tax_id threshold
        rank_thresholds = rank_thresholds.groupby(level=0, sort=False).last()
    rank_thresholds_cols = ['{}_threshold'.format(c) if c in ranks else c
                            for c in rank_thresholds.columns]
    rank_thresholds.columns = rank_thresholds_cols
    log.info('joining thresholds file')
    blast_results = join_thresholds(
        blast_results, rank_thresholds, ranks[::-1])
    # save the blast_results.columns in case groupby drops all columns
    blast_results_columns = blast_results.columns
    # assign assignment tax ids based on pident and thresholds
    log.info('selecting valid hits')
    blast_results_len = float(len(blast_results))
    valid_hits = blast_results.groupby(
        by=['specimen', 'qseqid'], group_keys=False)
    # ranks reversed: most specific rank is tried first
    valid_hits = valid_hits.apply(select_valid_hits, ranks[::-1])
    if args.hits_below_threshold:
        """
        Store all the hits to append to blast_results details later
        """
        hits_below_threshold = blast_results[
            ~blast_results.index.isin(valid_hits.index)]
    blast_results = valid_hits
    if blast_results.empty:
        log.info('all blast results filtered, returning [no blast results]')
        assignment_columns = ['assignment_rank', 'assignment_threshold',
                              'assignment_tax_name', 'condensed_id', 'starred',
                              'assignment', 'assignment_hash',
                              'condensed_rank', ASSIGNMENT_TAX_ID]
        assignment_columns += blast_results_columns.tolist()
        blast_results = pd.DataFrame(columns=assignment_columns)
    else:
        blast_results_post_len = len(blast_results)
        log.info('{} ({:.0%}) valid hits selected'.format(
            blast_results_post_len,
            blast_results_post_len / blast_results_len))
        if args.best_n_hits:
            blast_results_len = len(blast_results)

            def filter_mismatches(df, best_n):
                """
                Filter all hits with more mismatches than the Nth best hit
                """
                threshold = df['mismatch'].nsmallest(best_n).iloc[-1]
                return df[df['mismatch'] <= threshold]

            # Filter hits for each query
            blast_results = blast_results.groupby(
                by=['specimen', 'qseqid'],
                group_keys=False).apply(filter_mismatches, args.best_n_hits)
            blast_results_post_len = len(blast_results)
            log.info('{} ({:.0%}) hits remain after filtering '
                     'on mismatches (--best_n_hits)'.format(
                         blast_results_post_len,
                         blast_results_post_len / blast_results_len))
        # drop unneeded tax and threshold columns to free memory
        for c in ranks + rank_thresholds_cols:
            blast_results = blast_results.drop(c, axis=1)
        # join with taxonomy for tax_name and rank
        blast_results = blast_results.join(
            taxonomy[['tax_name', 'rank']],
            rsuffix='_assignment',
            on=ASSIGNMENT_TAX_ID)
        # no join(rprefix) (yet)
        blast_results = blast_results.rename(
            columns={'tax_name_assignment': 'assignment_tax_name',
                     'rank_assignment': 'assignment_rank'})
        # TODO: this is relatively slow, need to integrate
        # pandas into sequtils.condense_ids
        tax_dict = {i: t.to_dict() for i, t in taxonomy.fillna('').iterrows()}
        # create condensed assignment hashes by qseqid
        msg = 'condensing group tax_ids to size {}'.format(args.max_group_size)
        log.info(msg)
        blast_results = blast_results.groupby(
            by=['specimen', 'qseqid'], sort=False, group_keys=False)
        blast_results = blast_results.apply(
            condense_ids,
            tax_dict,
            ranks,
            args.max_group_size,
            threshold_assignments=args.threshold_assignments)
        blast_results = blast_results.join(
            taxonomy[['rank']], on='condensed_id', rsuffix='_condensed')
        blast_results = blast_results.rename(
            columns={'rank_condensed': 'condensed_rank'})
        # star condensed ids if one hit meets star threshold
        by = ['specimen', 'assignment_hash', 'condensed_id']
        blast_results = blast_results.groupby(
            by=by, sort=False, group_keys=False)
        blast_results = blast_results.apply(star, args.starred)
        # assign names to assignment_hashes
        blast_results = blast_results.sort_values(by='assignment_hash')
        log.info('creating compound assignments')
        blast_results = blast_results.groupby(
            by=['specimen', 'assignment_hash'], sort=False, group_keys=False)
        blast_results = blast_results.apply(assign, tax_dict)
        # Foreach ref rank:
        # - merge with taxonomy, extract rank_id, rank_name
        for rank in args.include_ref_rank:
            blast_results[rank + '_id'] = blast_results.merge(
                taxonomy, left_on='tax_id',
                right_index=True,
                how='left')[rank].fillna(0)
            blast_results[rank + '_name'] = blast_results.merge(
                taxonomy,
                left_on=rank + '_id',
                right_index=True,
                how='left')['tax_name_y']
    # merge qseqids that have no hits back into blast_results
    blast_results = blast_results.merge(qseqids, how='outer')
    # assign seqs that had no results to [no blast_result]
    no_hits = blast_results['sseqid'].isnull()
    blast_results.loc[no_hits, 'assignment'] = '[no blast result]'
    blast_results.loc[no_hits, 'assignment_hash'] = 0
    # concludes our blast details, on to output summary
    log.info('summarizing output')
    # index by specimen and assignment_hash and add assignment column
    index = ['specimen', 'assignment_hash']
    output = blast_results[index + ['assignment']].drop_duplicates()
    output = output.set_index(index)
    # assignment level stats
    assignment_stats = blast_results.groupby(by=index, sort=False)
    output['max_percent'] = assignment_stats['pident'].max()
    output['min_percent'] = assignment_stats['pident'].min()
    output['min_threshold'] = assignment_stats['assignment_threshold'].min()
    output['best_rank'] = assignment_stats['condensed_rank'].apply(
        best_rank, ranks)
    # qseqid cluster stats
    weights = blast_results[
        ['qseqid', 'specimen', 'assignment_hash', 'assignment_threshold']]
    weights = weights.drop_duplicates().set_index('qseqid')
    if args.weights:
        weights_file = pd.read_csv(
            args.weights,
            names=['qseqid', 'weight'],
            dtype=dict(qseqid=str, weight=float),
            index_col='qseqid')
        weights = weights.join(weights_file)
        # enforce weight dtype as float and unlisted qseq's to weight of 1.0
        weights['weight'] = weights['weight'].fillna(1.0).astype(float)
    else:
        weights['weight'] = 1.0
    cluster_stats = weights[['specimen', 'assignment_hash', 'weight']]
    cluster_stats = cluster_stats.reset_index().drop_duplicates()
    cluster_stats = cluster_stats.groupby(
        by=['specimen', 'assignment_hash'], sort=False)
    output['reads'] = cluster_stats['weight'].sum()
    output['clusters'] = cluster_stats.size()
    # specimen level stats
    specimen_stats = output.groupby(level='specimen', sort=False)
    output['pct_reads'] = specimen_stats['reads'].apply(pct)
    # copy number corrections
    if args.copy_numbers:
        corrections = copy_corrections(args.copy_numbers, blast_results)
        output['corrected'] = output['reads'] / corrections
        # reset corrected counts to int before calculating pct_corrected
        output['corrected'] = output['corrected'].apply(math.ceil)
        output['corrected'] = output['corrected'].fillna(1).astype(int)
        # create pct_corrected column
        output['pct_corrected'] = specimen_stats['corrected'].apply(pct)
        output['pct_corrected'] = output['pct_corrected'].map(round_up)
    # round reads for output
    output['reads'] = output['reads'].apply(round).astype(int)
    output['pct_reads'] = output['pct_reads'].map(round_up)
    # sort output by:
    # 1) specimen -- Data Frame is already grouped by specimen
    # 2) read/corrected count
    # 3) cluster count
    # 4) alpha assignment
    columns = ['corrected'] if args.copy_numbers else ['reads']
    columns += ['clusters', 'assignment']
    output = output.sort_values(by=columns, ascending=False)
    output = output.reset_index(level='assignment_hash')
    # Sort index (specimen) in preparation for groupby.
    # Use stable sort (mergesort) to preserve sortings (1-4);
    # default algorithm is not stable
    output = output.sort_index(kind='mergesort')
    # one last grouping on the sorted output plus assignment ids by specimen
    output = output.groupby(level="specimen", sort=False).apply(assignment_id)
    # output to details.csv.bz2
    if args.details_out:
        # Annotate details with classification columns
        blast_results = blast_results.merge(output.reset_index(), how='left')
        if not args.details_full:
            """
            by using the assignment_threshold we will get multiple 'largest'
            centroids for --max-group-size combined assignments
            """
            # groupby will drop NA values so we must fill them with 0
            weights['assignment_threshold'] = weights[
                'assignment_threshold'].fillna(0)
            largest = weights.groupby(
                by=['specimen', 'assignment_hash', 'assignment_threshold'],
                sort=False)
            largest = largest.apply(lambda x: x['weight'].nlargest(1))
            largest = largest.reset_index()
            # assignment_threshold will conflict with blast_results NA values
            largest = largest.drop('assignment_threshold', axis=1)
            blast_results = blast_results.merge(largest)
        details_columns = ['specimen', 'assignment_id', 'tax_name', 'rank',
                           'assignment_tax_name', 'assignment_rank', 'pident',
                           'tax_id', ASSIGNMENT_TAX_ID, 'condensed_id',
                           'accession', 'qseqid', 'sseqid', 'starred',
                           'assignment_threshold']
        ref_rank_columns = [rank + '_id' for rank in args.include_ref_rank]
        ref_rank_columns += [rank + '_name' for rank in args.include_ref_rank]
        details_columns += ref_rank_columns
        if args.hits_below_threshold:
            """
            append assignment_thresholds and append to --details-out
            """
            deets_cols = hits_below_threshold.columns
            deets_cols &= set(details_columns)
            hits_below_threshold = hits_below_threshold[list(deets_cols)]
            threshold_cols = ['specimen', 'qseqid', 'assignment_threshold']
            assignment_thresholds = blast_results[threshold_cols]
            assignment_thresholds = assignment_thresholds.drop_duplicates()
            hits_below_threshold = hits_below_threshold.merge(
                assignment_thresholds, how='left')
            blast_results = pd.concat(
                [blast_results, hits_below_threshold], ignore_index=True)
        # sort details for consistency and ease of viewing
        blast_results = blast_results.sort_values(by=details_columns)
        blast_results.to_csv(
            args.details_out,
            compression=get_compression(args.details_out),
            columns=details_columns,
            header=True,
            index=False,
            float_format='%.2f')
    # was required to merge with details above but not needed now
    output = output.drop('assignment_hash', axis=1)
    # output results
    output.to_csv(
        args.out,
        index=True,
        float_format='%.2f',
        compression=get_compression(args.out))
| gpl-3.0 |
danmergens/mi-instrument | mi/common/zpls_plot.py | 3 | 7240 | """
@package mi.common
@file mi/common/zpls_plot.py
@author Rene Gelinas
@brief ZPLSC Echogram generation for the ooicore
Release notes:
This class supports the generation of ZPLS echograms. It needs matplotlib version 1.3.1 (or higher) for the code
to display the colorbar on the right side of the figure. If matplotlib version 1.1.1 is used, the colorbar is
plotted over the figure instead of on the right side of it.
"""
from datetime import datetime
import numpy as np
import matplotlib
from matplotlib.dates import date2num, num2date
from modest_image import imshow
matplotlib.use("Agg")
import matplotlib.pyplot as plt
__author__ = 'Rene Gelinas'
REF_TIME = date2num(datetime(1900, 1, 1, 0, 0, 0))
class ZPLSPlot(object):
    """Render ZPLS echograms: one time/depth subplot per transducer
    channel, colored by volume backscatter (Sv, dB), each with its own
    colorbar.
    """
    # styling/layout constants shared by all instances
    font_size_small = 14
    font_size_large = 18
    num_xticks = 25
    num_yticks = 7
    interplot_spacing = 0.1
    lower_percentile = 5
    upper_percentile = 95

    def __init__(self, data_times, channel_data_dict, frequency_dict, min_y, max_y, _min_db=None, _max_db=None):
        # data_times: NTP timestamps (seconds since 1900-01-01 00:00:00)
        # channel_data_dict: per-channel 2D power arrays
        # frequency_dict: channel number -> transducer frequency (kHz)
        # min_y/max_y: depth axis range in meters
        # _min_db/_max_db: optional fixed color range per channel; when
        # either is omitted the range is derived from the data percentiles
        self.fig = None
        self.power_data_dict = self._transpose_and_flip(channel_data_dict)
        if (_min_db is None) or (_max_db is None):
            self.min_db, self.max_db = self._get_power_range(channel_data_dict)
        else:
            self.min_db = _min_db
            self.max_db = _max_db
        self.frequency_dict = frequency_dict
        # convert ntp time, i.e. seconds since 1900-01-01 00:00:00 to matplotlib time
        self.data_times = (data_times / (60 * 60 * 24)) + REF_TIME
        bin_size, _ = self.power_data_dict[1].shape
        self._setup_plot(min_y, max_y, bin_size)

    def generate_plots(self):
        """
        Generate plots for all channels in data set
        """
        # plot channels in ascending frequency order
        freq_to_channel = {v: k for k, v in self.frequency_dict.iteritems()}
        data_axes = []
        for index, frequency in enumerate(sorted(freq_to_channel)):
            channel = freq_to_channel[frequency]
            td_f = self.frequency_dict[channel]
            title = 'Volume Backscatter (Sv) :Channel #%d: Frequency: %.1f kHz' % (channel, td_f)
            data_axes.append(self._generate_plot(self.ax[index], self.power_data_dict[channel], title,
                                                 self.min_db[channel], self.max_db[channel]))
        if data_axes:
            # time labels only on the bottom subplot (x axis is shared)
            self._display_x_labels(self.ax[-1], self.data_times)
            self.fig.tight_layout(rect=[0, 0.0, 0.97, 1.0])
            for index in range(len(data_axes)):
                self._display_colorbar(self.fig, data_axes[index], index)

    def write_image(self, filename):
        """Save the current figure to *filename* and release it."""
        self.fig.savefig(filename)
        plt.close(self.fig)
        self.fig = None

    def _setup_plot(self, min_y, max_y, bin_size):
        """Create the figure and one axes per channel with shared x/y."""
        # subset the yticks so that we don't plot every one
        yticks = np.linspace(0, bin_size, self.num_yticks)
        # create range vector (depth in meters)
        yticklabels = np.round(np.linspace(min_y, max_y, self.num_yticks)).astype(int)
        self.fig, self.ax = plt.subplots(len(self.frequency_dict), sharex='all', sharey='all')
        self.fig.subplots_adjust(hspace=self.interplot_spacing)
        self.fig.set_size_inches(40, 19)
        if not isinstance(self.ax, np.ndarray):
            # single-channel case: subplots() returns a bare Axes object
            self.ax = [self.ax]
        for axes in self.ax:
            axes.grid(False)
            axes.set_ylabel('depth (m)', fontsize=self.font_size_small)
            axes.set_yticks(yticks)
            axes.set_yticklabels(yticklabels, fontsize=self.font_size_small)
            axes.tick_params(axis="both", labelcolor="k", pad=4, direction='out', length=5, width=2)
            axes.spines['top'].set_visible(False)
            axes.spines['right'].set_visible(False)
            axes.spines['bottom'].set_visible(False)
            axes.spines['left'].set_visible(False)

    def _display_colorbar(self, fig, data_axes, order):
        # Add a colorbar to the specified figure using the data from the given axes
        num_freqs = len(self.frequency_dict)
        plot_bottom = 0.086
        verticle_space = 0.03
        height_factor = 0.0525
        # Calculate the position of the colorbar
        width = 0.01
        height = (1.0/num_freqs) - height_factor
        left = 0.965
        bottom = plot_bottom + ((num_freqs-order-1) * (verticle_space+height))
        ax = fig.add_axes([left, bottom, width, height])
        cb = fig.colorbar(data_axes, cax=ax, use_gridspec=True)
        cb.set_label('dB', fontsize=ZPLSPlot.font_size_large)
        cb.ax.tick_params(labelsize=ZPLSPlot.font_size_small)

    @staticmethod
    def _get_power_range(power_dict):
        # Calculate the power data range across each channel
        max_db = {}
        min_db = {}
        for channel, channel_data in power_dict.iteritems():
            all_power_data = np.concatenate(channel_data)
            max_db[channel] = np.nanpercentile(all_power_data, ZPLSPlot.upper_percentile)
            min_db[channel] = np.nanpercentile(all_power_data, ZPLSPlot.lower_percentile)
        return min_db, max_db

    @staticmethod
    def _transpose_and_flip(power_dict):
        # NOTE: mutates power_dict in place as well as returning it
        for channel in power_dict:
            # Transpose array data so we have time on the x-axis and depth on the y-axis
            power_dict[channel] = power_dict[channel].transpose()
            # reverse the Y axis (so depth is measured from the surface (at the top) to the ZPLS (at the bottom)
            power_dict[channel] = power_dict[channel][::-1]
        return power_dict

    @staticmethod
    def _generate_plot(ax, power_data, title, min_db, max_db):
        """
        Generate a ZPLS plot for an individual channel
        :param ax: matplotlib axis to receive the plot image
        :param power_data: Transducer data array
        :param title: plot title
        :param min_db: minimum power level
        :param max_db: maximum power level
        """
        # only generate plots for the transducers that have data
        if power_data.size <= 0:
            return
        ax.set_title(title, fontsize=ZPLSPlot.font_size_large)
        return imshow(ax, power_data, interpolation='none', aspect='auto', cmap='jet', vmin=min_db, vmax=max_db)

    @staticmethod
    def _display_x_labels(ax, data_times):
        time_format = '%Y-%m-%d\n%H:%M:%S'
        time_length = data_times.size
        # X axis label
        # subset the xticks so that we don't plot every one
        if time_length < ZPLSPlot.num_xticks:
            # NOTE(review): this mutates the class attribute, so a short
            # dataset lowers num_xticks for all later plots -- confirm intended
            ZPLSPlot.num_xticks = time_length
        xticks = np.linspace(0, time_length, ZPLSPlot.num_xticks)
        xstep = int(round(xticks[1]))
        # format trans_array_time array so that it can be used to label the x-axis
        xticklabels = [i for i in num2date(data_times[::xstep])] + [num2date(data_times[-1])]
        xticklabels = [i.strftime(time_format) for i in xticklabels]
        # rotates and right aligns the x labels, and moves the bottom of the
        # axes up to make room for them
        ax.set_xlabel('time (UTC)', fontsize=ZPLSPlot.font_size_small)
        ax.set_xticks(xticks)
        ax.set_xticklabels(xticklabels, rotation=45, horizontalalignment='center', fontsize=ZPLSPlot.font_size_small)
        ax.set_xlim(0, time_length)
| bsd-2-clause |
AlertaDengue/InfoDenguePredict | infodenguepredict/models/cross_prediction_RQF.py | 1 | 3663 | """
This scripts implements cross disease predicitons using RQF model trained on dengue
"""
from infodenguepredict.models.quantile_forest import build_model, build_lagged_features, calculate_metrics
from infodenguepredict.data.infodengue import get_cluster_data, get_city_names
from infodenguepredict.predict_settings import *
import joblib
import pickle
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import os
from skgarden import RandomForestQuantileRegressor
def plot_prediction(pred, pred25, pred975, ydata, horizon, title, path='quantile_forest', save=True, doenca='chik'):
    """Plot the observed series together with the median cross-prediction
    and its 95% prediction interval.

    :param pred: median predictions
    :param pred25: 2.5% quantile predictions (lower bound)
    :param pred975: 97.5% quantile predictions (upper bound)
    :param ydata: observed series (index shifted by `horizon` weeks for x)
    :param horizon: prediction horizon in weeks
    :param title: city name used in the plot title and file name
    :param path: subdirectory of saved_models/ for the saved figure
    :param save: if True, save the figure under saved_models/<path>/<STATE>/
    :param doenca: disease name used in the title and file name
    :returns: None
    """
    plt.clf()
    plt.plot(ydata, 'k-', label='data')
    x = ydata.index.shift(horizon, freq='W')
    plt.plot(x, pred, 'r-', alpha=0.5, label='median prediction')
    plt.fill_between(x, pred25, pred975, color='b', alpha=0.3)
    plt.grid()
    plt.ylabel('Weekly cases')
    plt.title('{} cross-predictions for {}'.format(doenca, title))
    plt.xticks(rotation=70)
    plt.legend(loc=0)
    if save:
        # makedirs(exist_ok=True) also creates missing parents; the previous
        # exists()/mkdir() pair failed when 'saved_models/<path>' was absent
        # and was racy between check and create
        os.makedirs('saved_models/' + path + '/' + STATE, exist_ok=True)
        plt.savefig('saved_models/{}/{}/qf_{}_cross_{}_.png'.format(path, STATE, doenca, title), dpi=300)
    plt.show()
    return None
def qf_prediction(city, state, horizon, lookback, doenca='chik'):
    """Apply the dengue-trained quantile-forest model of *city* to data of
    disease *doenca* (cross-disease prediction) and plot the result.

    :param city: city geocode
    :param state: two-letter state code used to locate cluster/model files
    :param horizon: prediction horizon in weeks
    :param lookback: number of lagged weeks used to build features
    :param doenca: disease whose case data is fed into the dengue model
    :returns: (model, pred, pred25, pred975, X_data, targets, data_lag)
    """
    # clusters of correlated cities, precomputed per state
    with open('../analysis/clusters_{}.pkl'.format(state), 'rb') as fp:
        clusters = pickle.load(fp)
    data, group = get_cluster_data(city, clusters=clusters, data_types=DATA_TYPES, cols=PREDICTORS, doenca=doenca)
    target = 'casos_est_{}'.format(city)
    casos_est_columns = ['casos_est_{}'.format(i) for i in group]
    # casos_columns = ['casos_{}'.format(i) for i in group]
    # data = data_full.drop(casos_columns, axis=1)
    data_lag = build_lagged_features(data, lookback)
    # NOTE(review): the dropna() result is discarded -- likely intended
    # data_lag = data_lag.dropna(); confirm
    data_lag.dropna()
    data_lag = data_lag['2016-01-01':]
    # one shifted target series per week of the prediction horizon
    targets = {}
    for d in range(1, horizon + 1):
        if d == 1:
            targets[d] = data_lag[target].shift(-(d - 1))
        else:
            targets[d] = data_lag[target].shift(-(d - 1))[:-(d - 1)]
    X_data = data_lag.drop(casos_est_columns, axis=1)
    city_name = get_city_names([city, 0])[0][1]
    # Load dengue model
    model = joblib.load('saved_models/quantile_forest/{}/{}_city_model_{}W.joblib'.format(state, city, horizon))
    # median and 95% interval predictions from the quantile forest
    pred25 = model.predict(X_data, quantile=2.5)
    pred = model.predict(X_data, quantile=50)
    pred975 = model.predict(X_data, quantile=97.5)
    # metrics.to_pickle('{}/{}/qf_metrics_{}.pkl'.format('saved_models/quantile_forest', state, city))
    plot_prediction(pred, pred25, pred975, targets[1], horizon, city_name, save=True, doenca=doenca)
    return model, pred, pred25, pred975, X_data, targets, data_lag
if __name__ == "__main__":
    # disease whose data is fed through the dengue-trained models
    doença = 'chik'
    STATE = 'RJ'
    # geocodes of the cities to cross-predict, per state
    if STATE == 'RJ':
        cities = [3304557, 3303500, 3301009, 3304904]
    elif STATE == 'CE':
        cities = [2304400, 2307650]
    for CITY in cities:
        model, preds, preds25, preds975, X_data, targets, data_lag = qf_prediction(CITY, STATE,
                                                                                  horizon=PREDICTION_WINDOW,
                                                                                  lookback=LOOK_BACK, doenca=doença)
        # Save cross-predictions
        with open(f'saved_models/quantile_forest/{STATE}/{CITY}_cross_{doença}_preditions.pkl', 'wb') as f:
            pickle.dump({'xdata': X_data, 'target': targets, 'pred': preds, 'ub': preds975, 'lb': preds25}, f)
apache/spark | python/pyspark/sql/tests/test_arrow.py | 15 | 27974 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import datetime
import os
import threading
import time
import unittest
import warnings
from distutils.version import LooseVersion
from pyspark import SparkContext, SparkConf
from pyspark.sql import Row, SparkSession
from pyspark.sql.functions import rand, udf
from pyspark.sql.types import StructType, StringType, IntegerType, LongType, \
FloatType, DoubleType, DecimalType, DateType, TimestampType, BinaryType, StructField, \
ArrayType, NullType
from pyspark.testing.sqlutils import ReusedSQLTestCase, have_pandas, have_pyarrow, \
pandas_requirement_message, pyarrow_requirement_message
from pyspark.testing.utils import QuietTest
if have_pandas:
import pandas as pd
from pandas.testing import assert_frame_equal
if have_pyarrow:
import pyarrow as pa # noqa: F401
@unittest.skipIf(
    not have_pandas or not have_pyarrow,
    pandas_requirement_message or pyarrow_requirement_message)  # type: ignore
class ArrowTests(ReusedSQLTestCase):
    """End-to-end tests for the Arrow-accelerated pandas interchange paths:
    ``DataFrame.toPandas()`` and ``SparkSession.createDataFrame(pandas_df)``.

    Most tests run both the Arrow and the non-Arrow code path (via the
    ``_toPandas_arrow_toggle`` / ``_createDataFrame_toggle`` helpers) and
    assert that the two produce identical results.  Arrow fallback is
    disabled in ``setUpClass`` so unsupported-type failures surface as
    errors instead of silently taking the slow path.
    """

    @classmethod
    def setUpClass(cls):
        """Pin the timezone, verify legacy conf aliases, enable Arrow."""
        from datetime import date, datetime
        from decimal import Decimal
        super(ArrowTests, cls).setUpClass()
        # Serializes warnings-capturing tests; warnings filters are global state.
        cls.warnings_lock = threading.Lock()
        # Synchronize default timezone between Python and Java
        cls.tz_prev = os.environ.get("TZ", None)  # save current tz if set
        tz = "America/Los_Angeles"
        os.environ["TZ"] = tz
        time.tzset()
        cls.spark.conf.set("spark.sql.session.timeZone", tz)
        # Test fallback: setting the legacy "spark.sql.execution.arrow.*" conf
        # names must be reflected by the new "...arrow.pyspark.*" names.
        cls.spark.conf.set("spark.sql.execution.arrow.enabled", "false")
        assert cls.spark.conf.get("spark.sql.execution.arrow.pyspark.enabled") == "false"
        cls.spark.conf.set("spark.sql.execution.arrow.enabled", "true")
        assert cls.spark.conf.get("spark.sql.execution.arrow.pyspark.enabled") == "true"
        cls.spark.conf.set("spark.sql.execution.arrow.fallback.enabled", "true")
        assert cls.spark.conf.get("spark.sql.execution.arrow.pyspark.fallback.enabled") == "true"
        cls.spark.conf.set("spark.sql.execution.arrow.fallback.enabled", "false")
        assert cls.spark.conf.get("spark.sql.execution.arrow.pyspark.fallback.enabled") == "false"
        # Enable Arrow optimization in this tests.
        cls.spark.conf.set("spark.sql.execution.arrow.pyspark.enabled", "true")
        # Disable fallback by default to easily detect the failures.
        cls.spark.conf.set("spark.sql.execution.arrow.pyspark.fallback.enabled", "false")
        # One column per primitive type exercised by the Arrow conversion.
        cls.schema_wo_null = StructType([
            StructField("1_str_t", StringType(), True),
            StructField("2_int_t", IntegerType(), True),
            StructField("3_long_t", LongType(), True),
            StructField("4_float_t", FloatType(), True),
            StructField("5_double_t", DoubleType(), True),
            StructField("6_decimal_t", DecimalType(38, 18), True),
            StructField("7_date_t", DateType(), True),
            StructField("8_timestamp_t", TimestampType(), True),
            StructField("9_binary_t", BinaryType(), True)])
        cls.schema = cls.schema_wo_null.add("10_null_t", NullType(), True)
        cls.data_wo_null = [
            (u"a", 1, 10, 0.2, 2.0, Decimal("2.0"),
             date(1969, 1, 1), datetime(1969, 1, 1, 1, 1, 1), bytearray(b"a")),
            (u"b", 2, 20, 0.4, 4.0, Decimal("4.0"),
             date(2012, 2, 2), datetime(2012, 2, 2, 2, 2, 2), bytearray(b"bb")),
            (u"c", 3, 30, 0.8, 6.0, Decimal("6.0"),
             date(2100, 3, 3), datetime(2100, 3, 3, 3, 3, 3), bytearray(b"ccc")),
            (u"d", 4, 40, 1.0, 8.0, Decimal("8.0"),
             date(2262, 4, 12), datetime(2262, 3, 3, 3, 3, 3), bytearray(b"dddd")),
        ]
        # Same rows plus a trailing None for the "10_null_t" column.
        cls.data = [tuple(list(d) + [None]) for d in cls.data_wo_null]

    @classmethod
    def tearDownClass(cls):
        # Restore the process timezone that setUpClass overrode.
        del os.environ["TZ"]
        if cls.tz_prev is not None:
            os.environ["TZ"] = cls.tz_prev
        time.tzset()
        super(ArrowTests, cls).tearDownClass()

    def create_pandas_data_frame(self):
        """Build the pandas equivalent of ``cls.data`` under ``cls.schema``."""
        import numpy as np
        data_dict = {}
        for j, name in enumerate(self.schema.names):
            data_dict[name] = [self.data[i][j] for i in range(len(self.data))]
        # need to convert these to numpy types first
        data_dict["2_int_t"] = np.int32(data_dict["2_int_t"])
        data_dict["4_float_t"] = np.float32(data_dict["4_float_t"])
        return pd.DataFrame(data=data_dict)

    def test_toPandas_fallback_enabled(self):
        # ArrayType(TimestampType) is unsupported by the Arrow path here, so
        # with fallback enabled toPandas() must warn and still succeed.
        ts = datetime.datetime(2015, 11, 1, 0, 30)
        with self.sql_conf({"spark.sql.execution.arrow.pyspark.fallback.enabled": True}):
            schema = StructType([StructField("a", ArrayType(TimestampType()), True)])
            df = self.spark.createDataFrame([([ts],)], schema=schema)
            with QuietTest(self.sc):
                with self.warnings_lock:
                    with warnings.catch_warnings(record=True) as warns:
                        # we want the warnings to appear even if this test is run from a subclass
                        warnings.simplefilter("always")
                        pdf = df.toPandas()
                        # Catch and check the last UserWarning.
                        user_warns = [
                            warn.message for warn in warns if isinstance(warn.message, UserWarning)]
                        self.assertTrue(len(user_warns) > 0)
                        self.assertTrue(
                            "Attempting non-optimization" in str(user_warns[-1]))
                        assert_frame_equal(pdf, pd.DataFrame({"a": [[ts]]}))

    def test_toPandas_fallback_disabled(self):
        # With fallback disabled (setUpClass), the unsupported type must raise.
        schema = StructType([StructField("a", ArrayType(TimestampType()), True)])
        df = self.spark.createDataFrame([(None,)], schema=schema)
        with QuietTest(self.sc):
            with self.warnings_lock:
                with self.assertRaisesRegex(Exception, 'Unsupported type'):
                    df.toPandas()

    def test_null_conversion(self):
        # Prepend an all-None row; every column must report exactly one null.
        df_null = self.spark.createDataFrame(
            [tuple([None for _ in range(len(self.data_wo_null[0]))])] + self.data_wo_null)
        pdf = df_null.toPandas()
        null_counts = pdf.isnull().sum().tolist()
        self.assertTrue(all([c == 1 for c in null_counts]))

    def _toPandas_arrow_toggle(self, df):
        """Return (non-Arrow pdf, Arrow pdf) for the same DataFrame."""
        with self.sql_conf({"spark.sql.execution.arrow.pyspark.enabled": False}):
            pdf = df.toPandas()
        pdf_arrow = df.toPandas()  # Arrow is enabled outside the conf block
        return pdf, pdf_arrow

    def test_toPandas_arrow_toggle(self):
        df = self.spark.createDataFrame(self.data, schema=self.schema)
        pdf, pdf_arrow = self._toPandas_arrow_toggle(df)
        expected = self.create_pandas_data_frame()
        assert_frame_equal(expected, pdf)
        assert_frame_equal(expected, pdf_arrow)

    def test_toPandas_respect_session_timezone(self):
        df = self.spark.createDataFrame(self.data, schema=self.schema)
        timezone = "America/Los_Angeles"
        with self.sql_conf({"spark.sql.session.timeZone": timezone}):
            pdf_la, pdf_arrow_la = self._toPandas_arrow_toggle(df)
            assert_frame_equal(pdf_arrow_la, pdf_la)
        timezone = "America/New_York"
        with self.sql_conf({"spark.sql.session.timeZone": timezone}):
            pdf_ny, pdf_arrow_ny = self._toPandas_arrow_toggle(df)
            assert_frame_equal(pdf_arrow_ny, pdf_ny)
            # The two timezones must yield different frames; after converting
            # the LA timestamps to the NY session timezone they must match.
            self.assertFalse(pdf_ny.equals(pdf_la))
            from pyspark.sql.pandas.types import _check_series_convert_timestamps_local_tz
            pdf_la_corrected = pdf_la.copy()
            for field in self.schema:
                if isinstance(field.dataType, TimestampType):
                    pdf_la_corrected[field.name] = _check_series_convert_timestamps_local_tz(
                        pdf_la_corrected[field.name], timezone)
            assert_frame_equal(pdf_ny, pdf_la_corrected)

    def test_pandas_round_trip(self):
        # pandas -> Spark -> pandas must be lossless.
        pdf = self.create_pandas_data_frame()
        df = self.spark.createDataFrame(self.data, schema=self.schema)
        pdf_arrow = df.toPandas()
        assert_frame_equal(pdf_arrow, pdf)

    def test_pandas_self_destruct(self):
        import pyarrow as pa
        rows = 2 ** 10
        cols = 4
        expected_bytes = rows * cols * 8  # doubles are 8 bytes each
        df = self.spark.range(0, rows).select(*[rand() for _ in range(cols)])
        # Test the self_destruct behavior by testing _collect_as_arrow directly
        allocation_before = pa.total_allocated_bytes()
        batches = df._collect_as_arrow(split_batches=True)
        table = pa.Table.from_batches(batches)
        del batches
        pdf_split = table.to_pandas(self_destruct=True, split_blocks=True, use_threads=False)
        allocation_after = pa.total_allocated_bytes()
        difference = allocation_after - allocation_before
        # Should be around 1x the data size (table should not hold on to any memory)
        self.assertGreaterEqual(difference, 0.9 * expected_bytes)
        self.assertLessEqual(difference, 1.1 * expected_bytes)
        with self.sql_conf({"spark.sql.execution.arrow.pyspark.selfDestruct.enabled": False}):
            no_self_destruct_pdf = df.toPandas()
            # Note while memory usage is 2x data size here (both table and pdf hold on to
            # memory), in this case Arrow still only tracks 1x worth of memory (since the
            # batches are not allocated by Arrow in this case), so we can't make any
            # assertions here
        with self.sql_conf({"spark.sql.execution.arrow.pyspark.selfDestruct.enabled": True}):
            self_destruct_pdf = df.toPandas()
        assert_frame_equal(pdf_split, no_self_destruct_pdf)
        assert_frame_equal(pdf_split, self_destruct_pdf)

    def test_filtered_frame(self):
        # An empty (fully filtered) result must still carry the column.
        df = self.spark.range(3).toDF("i")
        pdf = df.filter("i < 0").toPandas()
        self.assertEqual(len(pdf.columns), 1)
        self.assertEqual(pdf.columns[0], "i")
        self.assertTrue(pdf.empty)

    def test_no_partition_frame(self):
        # A DataFrame backed by an empty RDD must convert cleanly.
        schema = StructType([StructField("field1", StringType(), True)])
        df = self.spark.createDataFrame(self.sc.emptyRDD(), schema)
        pdf = df.toPandas()
        self.assertEqual(len(pdf.columns), 1)
        self.assertEqual(pdf.columns[0], "field1")
        self.assertTrue(pdf.empty)

    def test_propagates_spark_exception(self):
        # A UDF failure on an executor must surface through toPandas().
        df = self.spark.range(3).toDF("i")

        def raise_exception():
            raise RuntimeError("My error")
        exception_udf = udf(raise_exception, IntegerType())
        df = df.withColumn("error", exception_udf())
        with QuietTest(self.sc):
            with self.assertRaisesRegex(Exception, 'My error'):
                df.toPandas()

    def _createDataFrame_toggle(self, pdf, schema=None):
        """Return (non-Arrow df, Arrow df) created from the same pandas frame."""
        with self.sql_conf({"spark.sql.execution.arrow.pyspark.enabled": False}):
            df_no_arrow = self.spark.createDataFrame(pdf, schema=schema)
        df_arrow = self.spark.createDataFrame(pdf, schema=schema)
        return df_no_arrow, df_arrow

    def test_createDataFrame_toggle(self):
        pdf = self.create_pandas_data_frame()
        df_no_arrow, df_arrow = self._createDataFrame_toggle(pdf, schema=self.schema)
        self.assertEqual(df_no_arrow.collect(), df_arrow.collect())

    def test_createDataFrame_respect_session_timezone(self):
        from datetime import timedelta
        pdf = self.create_pandas_data_frame()
        timezone = "America/Los_Angeles"
        with self.sql_conf({"spark.sql.session.timeZone": timezone}):
            df_no_arrow_la, df_arrow_la = self._createDataFrame_toggle(pdf, schema=self.schema)
            result_la = df_no_arrow_la.collect()
            result_arrow_la = df_arrow_la.collect()
            self.assertEqual(result_la, result_arrow_la)
        timezone = "America/New_York"
        with self.sql_conf({"spark.sql.session.timeZone": timezone}):
            df_no_arrow_ny, df_arrow_ny = self._createDataFrame_toggle(pdf, schema=self.schema)
            result_ny = df_no_arrow_ny.collect()
            result_arrow_ny = df_arrow_ny.collect()
            self.assertEqual(result_ny, result_arrow_ny)
            self.assertNotEqual(result_ny, result_la)
            # Correct result_la by adjusting 3 hours difference between Los Angeles and New York
            result_la_corrected = [Row(**{k: v - timedelta(hours=3) if k == '8_timestamp_t' else v
                                          for k, v in row.asDict().items()})
                                   for row in result_la]
            self.assertEqual(result_ny, result_la_corrected)

    def test_createDataFrame_with_schema(self):
        pdf = self.create_pandas_data_frame()
        df = self.spark.createDataFrame(pdf, schema=self.schema)
        self.assertEqual(self.schema, df.schema)
        pdf_arrow = df.toPandas()
        assert_frame_equal(pdf_arrow, pdf)

    def test_createDataFrame_with_incorrect_schema(self):
        pdf = self.create_pandas_data_frame()
        fields = list(self.schema)
        fields[5], fields[6] = fields[6], fields[5]  # swap decimal with date
        wrong_schema = StructType(fields)
        with self.sql_conf({"spark.sql.execution.pandas.convertToArrowArraySafely": False}):
            with QuietTest(self.sc):
                with self.assertRaisesRegex(Exception, "[D|d]ecimal.*got.*date"):
                    self.spark.createDataFrame(pdf, schema=wrong_schema)

    def test_createDataFrame_with_names(self):
        pdf = self.create_pandas_data_frame()
        new_names = list(map(str, range(len(self.schema.fieldNames()))))
        # Test that schema as a list of column names gets applied
        df = self.spark.createDataFrame(pdf, schema=list(new_names))
        self.assertEqual(df.schema.fieldNames(), new_names)
        # Test that schema as tuple of column names gets applied
        df = self.spark.createDataFrame(pdf, schema=tuple(new_names))
        self.assertEqual(df.schema.fieldNames(), new_names)

    def test_createDataFrame_column_name_encoding(self):
        pdf = pd.DataFrame({u'a': [1]})
        columns = self.spark.createDataFrame(pdf).columns
        self.assertTrue(isinstance(columns[0], str))
        self.assertEqual(columns[0], 'a')
        columns = self.spark.createDataFrame(pdf, [u'b']).columns
        self.assertTrue(isinstance(columns[0], str))
        self.assertEqual(columns[0], 'b')

    def test_createDataFrame_with_single_data_type(self):
        # A scalar schema string is not valid for a pandas input.
        with QuietTest(self.sc):
            with self.assertRaisesRegex(ValueError, ".*IntegerType.*not supported.*"):
                self.spark.createDataFrame(pd.DataFrame({"a": [1]}), schema="int")

    def test_createDataFrame_does_not_modify_input(self):
        # Some series get converted for Spark to consume, this makes sure input is unchanged
        pdf = self.create_pandas_data_frame()
        # Use a nanosecond value to make sure it is not truncated
        pdf.iloc[0, 7] = pd.Timestamp(1)
        # Integers with nulls will get NaNs filled with 0 and will be casted
        pdf.iloc[1, 1] = None
        pdf_copy = pdf.copy(deep=True)
        self.spark.createDataFrame(pdf, schema=self.schema)
        self.assertTrue(pdf.equals(pdf_copy))

    def test_schema_conversion_roundtrip(self):
        # Spark schema -> Arrow schema -> Spark schema must be the identity.
        from pyspark.sql.pandas.types import from_arrow_schema, to_arrow_schema
        arrow_schema = to_arrow_schema(self.schema)
        schema_rt = from_arrow_schema(arrow_schema)
        self.assertEqual(self.schema, schema_rt)

    def test_createDataFrame_with_array_type(self):
        pdf = pd.DataFrame({"a": [[1, 2], [3, 4]], "b": [[u"x", u"y"], [u"y", u"z"]]})
        df, df_arrow = self._createDataFrame_toggle(pdf)
        result = df.collect()
        result_arrow = df_arrow.collect()
        expected = [tuple(list(e) for e in rec) for rec in pdf.to_records(index=False)]
        for r in range(len(expected)):
            for e in range(len(expected[r])):
                self.assertTrue(expected[r][e] == result_arrow[r][e] and
                                result[r][e] == result_arrow[r][e])

    def test_toPandas_with_array_type(self):
        expected = [([1, 2], [u"x", u"y"]), ([3, 4], [u"y", u"z"])]
        array_schema = StructType([StructField("a", ArrayType(IntegerType())),
                                   StructField("b", ArrayType(StringType()))])
        df = self.spark.createDataFrame(expected, schema=array_schema)
        pdf, pdf_arrow = self._toPandas_arrow_toggle(df)
        result = [tuple(list(e) for e in rec) for rec in pdf.to_records(index=False)]
        result_arrow = [tuple(list(e) for e in rec) for rec in pdf_arrow.to_records(index=False)]
        for r in range(len(expected)):
            for e in range(len(expected[r])):
                self.assertTrue(expected[r][e] == result_arrow[r][e] and
                                result[r][e] == result_arrow[r][e])

    def test_createDataFrame_with_map_type(self):
        # MapType over Arrow requires pyarrow >= 2.0.0; older versions must raise.
        map_data = [{"a": 1}, {"b": 2, "c": 3}, {}, None, {"d": None}]
        pdf = pd.DataFrame({"id": [0, 1, 2, 3, 4], "m": map_data})
        schema = "id long, m map<string, long>"
        with self.sql_conf({"spark.sql.execution.arrow.pyspark.enabled": False}):
            df = self.spark.createDataFrame(pdf, schema=schema)
        if LooseVersion(pa.__version__) < LooseVersion("2.0.0"):
            with QuietTest(self.sc):
                with self.assertRaisesRegex(Exception, "MapType.*only.*pyarrow 2.0.0"):
                    self.spark.createDataFrame(pdf, schema=schema)
        else:
            df_arrow = self.spark.createDataFrame(pdf, schema=schema)
            result = df.collect()
            result_arrow = df_arrow.collect()
            self.assertEqual(len(result), len(result_arrow))
            for row, row_arrow in zip(result, result_arrow):
                i, m = row
                _, m_arrow = row_arrow
                self.assertEqual(m, map_data[i])
                self.assertEqual(m_arrow, map_data[i])

    def test_toPandas_with_map_type(self):
        pdf = pd.DataFrame({"id": [0, 1, 2, 3],
                            "m": [{}, {"a": 1}, {"a": 1, "b": 2}, {"a": 1, "b": 2, "c": 3}]})
        with self.sql_conf({"spark.sql.execution.arrow.pyspark.enabled": False}):
            df = self.spark.createDataFrame(pdf, schema="id long, m map<string, long>")
        if LooseVersion(pa.__version__) < LooseVersion("2.0.0"):
            with QuietTest(self.sc):
                with self.assertRaisesRegex(Exception, "MapType.*only.*pyarrow 2.0.0"):
                    df.toPandas()
        else:
            pdf_non, pdf_arrow = self._toPandas_arrow_toggle(df)
            assert_frame_equal(pdf_arrow, pdf_non)

    def test_toPandas_with_map_type_nulls(self):
        pdf = pd.DataFrame({"id": [0, 1, 2, 3, 4],
                            "m": [{"a": 1}, {"b": 2, "c": 3}, {}, None, {"d": None}]})
        with self.sql_conf({"spark.sql.execution.arrow.pyspark.enabled": False}):
            df = self.spark.createDataFrame(pdf, schema="id long, m map<string, long>")
        if LooseVersion(pa.__version__) < LooseVersion("2.0.0"):
            with QuietTest(self.sc):
                with self.assertRaisesRegex(Exception, "MapType.*only.*pyarrow 2.0.0"):
                    df.toPandas()
        else:
            pdf_non, pdf_arrow = self._toPandas_arrow_toggle(df)
            assert_frame_equal(pdf_arrow, pdf_non)

    def test_createDataFrame_with_int_col_names(self):
        # Integer pandas column labels must become string Spark column names.
        import numpy as np
        pdf = pd.DataFrame(np.random.rand(4, 2))
        df, df_arrow = self._createDataFrame_toggle(pdf)
        pdf_col_names = [str(c) for c in pdf.columns]
        self.assertEqual(pdf_col_names, df.columns)
        self.assertEqual(pdf_col_names, df_arrow.columns)

    def test_createDataFrame_fallback_enabled(self):
        ts = datetime.datetime(2015, 11, 1, 0, 30)
        with QuietTest(self.sc):
            with self.sql_conf({"spark.sql.execution.arrow.pyspark.fallback.enabled": True}):
                with warnings.catch_warnings(record=True) as warns:
                    # we want the warnings to appear even if this test is run from a subclass
                    warnings.simplefilter("always")
                    df = self.spark.createDataFrame(
                        pd.DataFrame({"a": [[ts]]}), "a: array<timestamp>")
                    # Catch and check the last UserWarning.
                    user_warns = [
                        warn.message for warn in warns if isinstance(warn.message, UserWarning)]
                    self.assertTrue(len(user_warns) > 0)
                    self.assertTrue(
                        "Attempting non-optimization" in str(user_warns[-1]))
                    self.assertEqual(df.collect(), [Row(a=[ts])])

    def test_createDataFrame_fallback_disabled(self):
        with QuietTest(self.sc):
            with self.assertRaisesRegex(TypeError, 'Unsupported type'):
                self.spark.createDataFrame(
                    pd.DataFrame({"a": [[datetime.datetime(2015, 11, 1, 0, 30)]]}),
                    "a: array<timestamp>")

    # Regression test for SPARK-23314
    def test_timestamp_dst(self):
        # Daylight saving time for Los Angeles for 2015 is Sun, Nov 1 at 2:00 am
        dt = [datetime.datetime(2015, 11, 1, 0, 30),
              datetime.datetime(2015, 11, 1, 1, 30),
              datetime.datetime(2015, 11, 1, 2, 30)]
        pdf = pd.DataFrame({'time': dt})
        df_from_python = self.spark.createDataFrame(dt, 'timestamp').toDF('time')
        df_from_pandas = self.spark.createDataFrame(pdf)
        assert_frame_equal(pdf, df_from_python.toPandas())
        assert_frame_equal(pdf, df_from_pandas.toPandas())

    # Regression test for SPARK-28003
    def test_timestamp_nat(self):
        dt = [pd.NaT, pd.Timestamp('2019-06-11'), None] * 100
        pdf = pd.DataFrame({'time': dt})
        df_no_arrow, df_arrow = self._createDataFrame_toggle(pdf)
        assert_frame_equal(pdf, df_no_arrow.toPandas())
        assert_frame_equal(pdf, df_arrow.toPandas())

    def test_toPandas_batch_order(self):
        """toPandas must reassemble rows in order even when Arrow batches
        arrive at the driver out of order."""

        def delay_first_part(partition_index, iterator):
            if partition_index == 0:
                time.sleep(0.1)
            return iterator

        # Collects Arrow RecordBatches out of order in driver JVM then re-orders in Python
        def run_test(num_records, num_parts, max_records, use_delay=False):
            df = self.spark.range(num_records, numPartitions=num_parts).toDF("a")
            if use_delay:
                df = df.rdd.mapPartitionsWithIndex(delay_first_part).toDF()
            with self.sql_conf({"spark.sql.execution.arrow.maxRecordsPerBatch": max_records}):
                pdf, pdf_arrow = self._toPandas_arrow_toggle(df)
                assert_frame_equal(pdf, pdf_arrow)

        cases = [
            (1024, 512, 2),    # Use large num partitions for more likely collecting out of order
            (64, 8, 2, True),  # Use delay in first partition to force collecting out of order
            (64, 64, 1),       # Test single batch per partition
            (64, 1, 64),       # Test single partition, single batch
            (64, 1, 8),        # Test single partition, multiple batches
            (30, 7, 2),        # Test different sized partitions
        ]
        for case in cases:
            run_test(*case)

    # NOTE(review): "DateFrame" is a typo for "DataFrame", kept because the
    # method name is the public test identifier.
    def test_createDateFrame_with_category_type(self):
        pdf = pd.DataFrame({"A": [u"a", u"b", u"c", u"a"]})
        pdf["B"] = pdf["A"].astype('category')
        category_first_element = dict(enumerate(pdf['B'].cat.categories))[0]
        with self.sql_conf({"spark.sql.execution.arrow.pyspark.enabled": True}):
            arrow_df = self.spark.createDataFrame(pdf)
            arrow_type = arrow_df.dtypes[1][1]
            result_arrow = arrow_df.toPandas()
            arrow_first_category_element = result_arrow["B"][0]
        with self.sql_conf({"spark.sql.execution.arrow.pyspark.enabled": False}):
            df = self.spark.createDataFrame(pdf)
            spark_type = df.dtypes[1][1]
            result_spark = df.toPandas()
            spark_first_category_element = result_spark["B"][0]
        assert_frame_equal(result_spark, result_arrow)
        # ensure original category elements are string
        self.assertIsInstance(category_first_element, str)
        # spark data frame and arrow execution mode enabled data frame type must match pandas
        self.assertEqual(spark_type, 'string')
        self.assertEqual(arrow_type, 'string')
        self.assertIsInstance(arrow_first_category_element, str)
        self.assertIsInstance(spark_first_category_element, str)

    def test_createDataFrame_with_float_index(self):
        # SPARK-32098: float index should not produce duplicated or truncated Spark DataFrame
        self.assertEqual(
            self.spark.createDataFrame(
                pd.DataFrame({'a': [1, 2, 3]}, index=[2., 3., 4.])).distinct().count(), 3)

    def test_no_partition_toPandas(self):
        # SPARK-32301: toPandas should work from a Spark DataFrame with no partitions
        # Forward-ported from SPARK-32300.
        pdf = self.spark.sparkContext.emptyRDD().toDF("col1 int").toPandas()
        self.assertEqual(len(pdf), 0)
        self.assertEqual(list(pdf.columns), ["col1"])

    def test_createDataFrame_empty_partition(self):
        # A one-row frame still distributes over defaultParallelism partitions,
        # so most partitions are empty; conversion must still work.
        pdf = pd.DataFrame({"c1": [1], "c2": ["string"]})
        df = self.spark.createDataFrame(pdf)
        self.assertEqual([Row(c1=1, c2='string')], df.collect())
        self.assertGreater(self.spark.sparkContext.defaultParallelism, len(pdf))
@unittest.skipIf(
    not have_pandas or not have_pyarrow,
    pandas_requirement_message or pyarrow_requirement_message)  # type: ignore
class MaxResultArrowTests(unittest.TestCase):
    # These tests are separate as 'spark.driver.maxResultSize' configuration
    # is a static configuration to Spark context.

    @classmethod
    def setUpClass(cls):
        # Tiny 10k result-size limit so collecting a modest range overflows it.
        cls.spark = SparkSession(SparkContext(
            'local[4]', cls.__name__, conf=SparkConf().set("spark.driver.maxResultSize", "10k")))
        # Explicitly enable Arrow and disable fallback.
        cls.spark.conf.set("spark.sql.execution.arrow.pyspark.enabled", "true")
        cls.spark.conf.set("spark.sql.execution.arrow.pyspark.fallback.enabled", "false")

    @classmethod
    def tearDownClass(cls):
        # setUpClass may have failed before cls.spark was assigned.
        if hasattr(cls, "spark"):
            cls.spark.stop()

    def test_exception_by_max_results(self):
        # Collecting 10000 rows must exceed maxResultSize and raise on the driver.
        with self.assertRaisesRegex(Exception, "is bigger than"):
            self.spark.range(0, 10000, 1, 100).toPandas()
class EncryptionArrowTests(ArrowTests):
    """Re-run the entire ArrowTests suite with Spark I/O encryption enabled."""

    @classmethod
    def conf(cls):
        # Inherit the parent conf and switch on shuffle/I/O encryption.
        return super(EncryptionArrowTests, cls).conf().set("spark.io.encryption.enabled", "true")
if __name__ == "__main__":
    # Re-export the test classes so unittest discovers them on __main__.
    from pyspark.sql.tests.test_arrow import *  # noqa: F401

    # Prefer the XML-report runner when available; otherwise fall back to
    # unittest's default text runner.
    try:
        import xmlrunner  # type: ignore
    except ImportError:
        testRunner = None
    else:
        testRunner = xmlrunner.XMLTestRunner(output='target/test-reports', verbosity=2)
    unittest.main(testRunner=testRunner, verbosity=2)
| apache-2.0 |
ryanjmccall/nupic | external/linux32/lib/python2.6/site-packages/matplotlib/backends/backend_macosx.py | 69 | 15397 | from __future__ import division
import os
import numpy
from matplotlib._pylab_helpers import Gcf
from matplotlib.backend_bases import RendererBase, GraphicsContextBase,\
FigureManagerBase, FigureCanvasBase, NavigationToolbar2
from matplotlib.cbook import maxdict
from matplotlib.figure import Figure
from matplotlib.path import Path
from matplotlib.mathtext import MathTextParser
from matplotlib.colors import colorConverter
from matplotlib.widgets import SubplotTool
import matplotlib
from matplotlib.backends import _macosx
def show():
    """Show all the figures and enter the Cocoa mainloop.

    This function will not return until all windows are closed or
    the interpreter exits."""
    # Having a Python-level function "show" wrapping the built-in
    # function "show" in the _macosx extension module allows us
    # to add attributes to "show". This is something ipython does.
    _macosx.show()
class RendererMac(RendererBase):
    """
    The renderer handles drawing/rendering operations. Most of the renderer's
    methods forward the command to the renderer's graphics context. The
    renderer does not wrap a C object and is written in pure Python.
    """

    # Bounded cache of rendered TeX rasters, keyed by
    # (string, size, dpi, angle, font config); shared by all instances.
    texd = maxdict(50)

    def __init__(self, dpi, width, height):
        RendererBase.__init__(self)
        self.dpi = dpi
        self.width = width
        self.height = height
        self.gc = GraphicsContextMac()
        self.mathtext_parser = MathTextParser('MacOSX')

    def set_width_height (self, width, height):
        self.width, self.height = width, height

    def _sync_gc(self, gc):
        """Make *gc* the active graphics context.

        The C-level context keeps a save/restore stack; pop our stack down
        to the incoming context's level before switching so that saved
        state does not leak between artists.
        """
        if gc != self.gc:
            n = self.gc.level() - gc.level()
            for i in range(n):
                self.gc.restore()
            self.gc = gc

    def draw_path(self, gc, path, transform, rgbFace=None):
        if rgbFace is not None:
            rgbFace = tuple(rgbFace)
        self._sync_gc(gc)
        gc.draw_path(path, transform, rgbFace)

    def draw_markers(self, gc, marker_path, marker_trans, path, trans, rgbFace=None):
        if rgbFace is not None:
            rgbFace = tuple(rgbFace)
        self._sync_gc(gc)
        gc.draw_markers(marker_path, marker_trans, path, trans, rgbFace)

    def draw_path_collection(self, *args):
        # Only the first 13 positional arguments are forwarded to the C
        # implementation; any extras are dropped.
        gc = self.gc
        args = args[:13]
        gc.draw_path_collection(*args)

    def draw_quad_mesh(self, *args):
        gc = self.gc
        gc.draw_quad_mesh(*args)

    def new_gc(self):
        # A single GraphicsContextMac instance is recycled for every artist.
        self.gc.reset()
        return self.gc

    def draw_image(self, x, y, im, bbox, clippath=None, clippath_trans=None):
        # flipud_out mutates the image in place; flip it back afterwards so
        # the caller's image object is left unchanged.
        im.flipud_out()
        nrows, ncols, data = im.as_rgba_str()
        self.gc.draw_image(x, y, nrows, ncols, data, bbox, clippath, clippath_trans)
        im.flipud_out()

    def draw_tex(self, gc, x, y, s, prop, angle):
        self._sync_gc(gc)
        # todo, handle props, origins
        size = prop.get_size_in_points()
        texmanager = self.get_texmanager()
        key = s, size, self.dpi, angle, texmanager.get_font_config()
        Z = self.texd.get(key)
        if Z is None:
            grey = texmanager.get_grey(s, size, self.dpi)
            # Invert the greyscale bitmap (0..1) to 0..255 uint8.
            Z = numpy.array(255.0 - grey * 255.0, numpy.uint8)
            # BUGFIX: the raster was never stored, so the cache always
            # missed — and a hypothetical hit would have referenced an
            # undefined local ``Z``.  Store it so repeated draws reuse it.
            self.texd[key] = Z
        gc.draw_mathtext(x, y, angle, Z)

    def _draw_mathtext(self, gc, x, y, s, prop, angle):
        self._sync_gc(gc)
        ox, oy, width, height, descent, image, used_characters = \
            self.mathtext_parser.parse(s, self.dpi, prop)
        gc.draw_mathtext(x, y, angle, 255 - image.as_array())

    def draw_text(self, gc, x, y, s, prop, angle, ismath=False):
        self._sync_gc(gc)
        if ismath:
            self._draw_mathtext(gc, x, y, s, prop, angle)
        else:
            family = prop.get_family()
            size = prop.get_size_in_points()
            weight = prop.get_weight()
            style = prop.get_style()
            gc.draw_text(x, y, unicode(s), family, size, weight, style, angle)

    def get_text_width_height_descent(self, s, prop, ismath):
        """Return (width, height, descent) of *s* when rendered with *prop*."""
        if ismath=='TeX':
            # TODO: handle props
            size = prop.get_size_in_points()
            texmanager = self.get_texmanager()
            Z = texmanager.get_grey(s, size, self.dpi)
            m,n = Z.shape
            # TODO: handle descent; This is based on backend_agg.py
            return n, m, 0
        if ismath:
            ox, oy, width, height, descent, fonts, used_characters = \
                self.mathtext_parser.parse(s, self.dpi, prop)
            return width, height, descent
        family = prop.get_family()
        size = prop.get_size_in_points()
        weight = prop.get_weight()
        style = prop.get_style()
        return self.gc.get_text_width_height_descent(unicode(s), family, size, weight, style)

    def flipy(self):
        # Quartz's origin matches matplotlib's (no y-axis flip required).
        return False

    def points_to_pixels(self, points):
        return points/72.0 * self.dpi

    def option_image_nocomposite(self):
        return True
class GraphicsContextMac(_macosx.GraphicsContext, GraphicsContextBase):
    """
    The GraphicsContext wraps a Quartz graphics context. All methods
    are implemented at the C-level in macosx.GraphicsContext. These
    methods set drawing properties such as the line style, fill color,
    etc. The actual drawing is done by the Renderer, which draws into
    the GraphicsContext.
    """

    def __init__(self):
        # Initialize both bases explicitly (multiple inheritance, no super()).
        GraphicsContextBase.__init__(self)
        _macosx.GraphicsContext.__init__(self)

    def set_foreground(self, fg, isRGB=False):
        # Convert named/hex colors to an (r, g, b) tuple before handing the
        # value to the C-level implementation.
        if not isRGB:
            fg = colorConverter.to_rgb(fg)
        _macosx.GraphicsContext.set_foreground(self, fg)

    def set_clip_rectangle(self, box):
        GraphicsContextBase.set_clip_rectangle(self, box)
        # No clip box set: nothing to forward to the C layer.
        if not box: return
        _macosx.GraphicsContext.set_clip_rectangle(self, box.bounds)

    def set_clip_path(self, path):
        GraphicsContextBase.set_clip_path(self, path)
        if not path: return
        # Resolve the TransformedPath to a concrete Path before passing down.
        path = path.get_fully_transformed_path()
        _macosx.GraphicsContext.set_clip_path(self, path)
########################################################################
#
# The following functions and classes are for pylab and implement
# window/figure managers, etc...
#
########################################################################
def draw_if_interactive():
    """
    For performance reasons, we don't want to redraw the figure after
    each draw command. Instead, we mark the figure as invalid, so that
    it will be redrawn as soon as the event loop resumes via PyOS_InputHook.

    This function should be called after each draw event, even if
    matplotlib is not running interactively.
    """
    manager = Gcf.get_active()
    if manager is None:
        return
    manager.canvas.invalidate()
def new_figure_manager(num, *args, **kwargs):
    """
    Create a new figure manager instance.

    Any 'FigureClass' keyword selects the Figure subclass to instantiate;
    all remaining arguments are forwarded to its constructor.
    """
    figure_class = kwargs.pop('FigureClass', Figure)
    fig = figure_class(*args, **kwargs)
    return FigureManagerMac(FigureCanvasMac(fig), num)
class FigureCanvasMac(_macosx.FigureCanvas, FigureCanvasBase):
    """
    The canvas the figure renders into. Calls the draw and print fig
    methods, creates the renderers, etc...

    Public attribute

      figure - A Figure instance

    Events such as button presses, mouse movements, and key presses
    are handled in the C code and the base class methods
    button_press_event, button_release_event, motion_notify_event,
    key_press_event, and key_release_event are called from there.
    """

    def __init__(self, figure):
        FigureCanvasBase.__init__(self, figure)
        width, height = self.get_width_height()
        self.renderer = RendererMac(figure.dpi, width, height)
        _macosx.FigureCanvas.__init__(self, width, height)

    def resize(self, width, height):
        # *width*/*height* are in pixels; the renderer keeps pixel units,
        # while Figure.set_size_inches expects inches (true division — the
        # module imports __future__ division).
        self.renderer.set_width_height(width, height)
        dpi = self.figure.dpi
        width /= dpi
        height /= dpi
        self.figure.set_size_inches(width, height)

    def print_figure(self, filename, dpi=None, facecolor='w', edgecolor='w',
                     orientation='portrait', **kwargs):
        """Save the figure to *filename*; the format is chosen from the
        file extension (defaulting to png when there is none).

        Bitmap formats are written natively; ps/eps/svg are delegated to
        the corresponding backends by temporarily switching canvases.
        """
        if dpi is None: dpi = matplotlib.rcParams['savefig.dpi']
        filename = unicode(filename)
        root, ext = os.path.splitext(filename)
        ext = ext[1:].lower()
        if not ext:
            ext = "png"
            filename = root + "." + ext
        if ext=="jpg": ext = "jpeg"
        # save the figure settings
        origfacecolor = self.figure.get_facecolor()
        origedgecolor = self.figure.get_edgecolor()
        # set the new parameters
        self.figure.set_facecolor(facecolor)
        self.figure.set_edgecolor(edgecolor)
        if ext in ('jpeg', 'png', 'tiff', 'gif', 'bmp'):
            width, height = self.figure.get_size_inches()
            width, height = width*dpi, height*dpi
            self.write_bitmap(filename, width, height)
        elif ext == 'pdf':
            self.write_pdf(filename)
        elif ext in ('ps', 'eps'):
            from backend_ps import FigureCanvasPS
            # Postscript backend changes figure.dpi, but doesn't change it back
            origDPI = self.figure.dpi
            fc = self.switch_backends(FigureCanvasPS)
            fc.print_figure(filename, dpi, facecolor, edgecolor,
                            orientation, **kwargs)
            self.figure.dpi = origDPI
            # switch_backends re-parented the figure; point it back at us.
            self.figure.set_canvas(self)
        elif ext=='svg':
            from backend_svg import FigureCanvasSVG
            fc = self.switch_backends(FigureCanvasSVG)
            fc.print_figure(filename, dpi, facecolor, edgecolor,
                            orientation, **kwargs)
            self.figure.set_canvas(self)
        else:
            raise ValueError("Figure format not available (extension %s)" % ext)
        # restore original figure settings
        self.figure.set_facecolor(origfacecolor)
        self.figure.set_edgecolor(origedgecolor)
class FigureManagerMac(_macosx.FigureManager, FigureManagerBase):
    """
    Wrap everything up into a window for the pylab interface
    """

    def __init__(self, canvas, num):
        FigureManagerBase.__init__(self, canvas, num)
        title = "Figure %d" % num
        _macosx.FigureManager.__init__(self, canvas, title)
        # Pick the toolbar implementation requested by rcParams.
        if matplotlib.rcParams['toolbar']=='classic':
            self.toolbar = NavigationToolbarMac(canvas)
        elif matplotlib.rcParams['toolbar']=='toolbar2':
            self.toolbar = NavigationToolbar2Mac(canvas)
        else:
            self.toolbar = None
        if self.toolbar is not None:
            self.toolbar.update()

        def notify_axes_change(fig):
            'this will be called whenever the current axes is changed'
            if self.toolbar != None: self.toolbar.update()
        self.canvas.figure.add_axobserver(notify_axes_change)

        # This is ugly, but this is what tkagg and gtk are doing.
        # It is needed to get ginput() working.
        self.canvas.figure.show = lambda *args: self.show()

    def show(self):
        self.canvas.draw()

    def close(self):
        Gcf.destroy(self.num)
class NavigationToolbarMac(_macosx.NavigationToolbar):
    """Classic-style toolbar: pan/zoom buttons act on the axes selected
    in the C-level toolbar widget."""

    def __init__(self, canvas):
        self.canvas = canvas
        basedir = os.path.join(matplotlib.rcParams['datapath'], "images")
        # Load the button icons as (width, height, rgb-bytes) tuples.
        images = {}
        for imagename in ("stock_left",
                          "stock_right",
                          "stock_up",
                          "stock_down",
                          "stock_zoom-in",
                          "stock_zoom-out",
                          "stock_save_as"):
            filename = os.path.join(basedir, imagename+".ppm")
            images[imagename] = self._read_ppm_image(filename)
        _macosx.NavigationToolbar.__init__(self, images)
        self.message = None

    def _read_ppm_image(self, filename):
        """Read a binary PPM (P6) file, stripping '#' comments.

        Returns a (width, height, rgb_data) tuple.
        NOTE(review): the file is opened in text mode; on Python 2/POSIX
        this is byte-identical to binary mode — confirm if ported.
        """
        data = ""
        imagefile = open(filename)
        for line in imagefile:
            # Everything after '#' on a line is a PPM comment.
            if "#" in line:
                i = line.index("#")
                line = line[:i] + "\n"
            data += line
        imagefile.close()
        magic, width, height, maxcolor, imagedata = data.split(None, 4)
        width, height = int(width), int(height)
        assert magic=="P6"
        assert len(imagedata)==width*height*3 # 3 colors in RGB
        return (width, height, imagedata)

    def panx(self, direction):
        # Pan the x-axis of every axes selected in the toolbar.
        axes = self.canvas.figure.axes
        selected = self.get_active()
        for i in selected:
            axes[i].xaxis.pan(direction)
        self.canvas.invalidate()

    def pany(self, direction):
        axes = self.canvas.figure.axes
        selected = self.get_active()
        for i in selected:
            axes[i].yaxis.pan(direction)
        self.canvas.invalidate()

    def zoomx(self, direction):
        axes = self.canvas.figure.axes
        selected = self.get_active()
        for i in selected:
            axes[i].xaxis.zoom(direction)
        self.canvas.invalidate()

    def zoomy(self, direction):
        axes = self.canvas.figure.axes
        selected = self.get_active()
        for i in selected:
            axes[i].yaxis.zoom(direction)
        self.canvas.invalidate()

    def save_figure(self):
        # Native save dialog; returns None when the user cancels.
        filename = _macosx.choose_save_file('Save the figure')
        if filename is None: # Cancel
            return
        self.canvas.print_figure(filename)
class NavigationToolbar2Mac(_macosx.NavigationToolbar2, NavigationToolbar2):
    # Second-generation toolbar: matplotlib's generic NavigationToolbar2
    # logic on top of the native _macosx extension widget.
    def __init__(self, canvas):
        NavigationToolbar2.__init__(self, canvas)
    def _init_toolbar(self):
        # Icons are loaded by the C extension from matplotlib's image dir.
        basedir = os.path.join(matplotlib.rcParams['datapath'], "images")
        _macosx.NavigationToolbar2.__init__(self, basedir)
    def draw_rubberband(self, event, x0, y0, x1, y1):
        # Delegate zoom-rectangle drawing to the native canvas.
        self.canvas.set_rubberband(x0, y0, x1, y1)
    def release(self, event):
        self.canvas.remove_rubberband()
    def set_cursor(self, cursor):
        # Cursor change is handled entirely by the ObjC layer.
        _macosx.set_cursor(cursor)
    def save_figure(self):
        # Native save dialog; returns None when the user cancels.
        filename = _macosx.choose_save_file('Save the figure')
        if filename is None: # Cancel
            return
        self.canvas.print_figure(filename)
    def prepare_configure_subplots(self):
        # Build a small auxiliary figure hosting the SubplotTool widget.
        toolfig = Figure(figsize=(6,3))
        canvas = FigureCanvasMac(toolfig)
        toolfig.subplots_adjust(top=0.9)
        # NOTE(review): `tool` is kept alive only via toolfig's callbacks;
        # presumably intentional — confirm against other backends.
        tool = SubplotTool(self.canvas.figure, toolfig)
        return canvas
    def set_message(self, message):
        # The ObjC layer expects encoded bytes (Python 2 era API).
        _macosx.NavigationToolbar2.set_message(self, message.encode('utf-8'))
########################################################################
#
# Now just provide the standard names that backend.__init__ is expecting
#
########################################################################
# Alias used by matplotlib's backend machinery to instantiate managers.
FigureManager = FigureManagerMac
| gpl-3.0 |
Srisai85/scikit-learn | sklearn/ensemble/weight_boosting.py | 97 | 40773 | """Weight Boosting
This module contains weight boosting estimators for both classification and
regression.
The module structure is the following:
- The ``BaseWeightBoosting`` base class implements a common ``fit`` method
for all the estimators in the module. Regression and classification
only differ from each other in the loss function that is optimized.
- ``AdaBoostClassifier`` implements adaptive boosting (AdaBoost-SAMME) for
classification problems.
- ``AdaBoostRegressor`` implements adaptive boosting (AdaBoost.R2) for
regression problems.
"""
# Authors: Noel Dawe <noel@dawe.me>
# Gilles Louppe <g.louppe@gmail.com>
# Hamzeh Alsalhi <ha258@cornell.edu>
# Arnaud Joly <arnaud.v.joly@gmail.com>
#
# Licence: BSD 3 clause
from abc import ABCMeta, abstractmethod
import numpy as np
from numpy.core.umath_tests import inner1d
from .base import BaseEnsemble
from ..base import ClassifierMixin, RegressorMixin
from ..externals import six
from ..externals.six.moves import zip
from ..externals.six.moves import xrange as range
from .forest import BaseForest
from ..tree import DecisionTreeClassifier, DecisionTreeRegressor
from ..tree.tree import BaseDecisionTree
from ..tree._tree import DTYPE
from ..utils import check_array, check_X_y, check_random_state
from ..metrics import accuracy_score, r2_score
from sklearn.utils.validation import has_fit_parameter, check_is_fitted
__all__ = [
'AdaBoostClassifier',
'AdaBoostRegressor',
]
class BaseWeightBoosting(six.with_metaclass(ABCMeta, BaseEnsemble)):
    """Base class for AdaBoost estimators.

    Warning: This class should not be used directly. Use derived classes
    instead.
    """

    @abstractmethod
    def __init__(self,
                 base_estimator=None,
                 n_estimators=50,
                 estimator_params=tuple(),
                 learning_rate=1.,
                 random_state=None):

        super(BaseWeightBoosting, self).__init__(
            base_estimator=base_estimator,
            n_estimators=n_estimators,
            estimator_params=estimator_params)

        self.learning_rate = learning_rate
        self.random_state = random_state

    def fit(self, X, y, sample_weight=None):
        """Build a boosted classifier/regressor from the training set (X, y).

        Parameters
        ----------
        X : {array-like, sparse matrix} of shape = [n_samples, n_features]
            The training input samples. Sparse matrix can be CSC, CSR, COO,
            DOK, or LIL. COO, DOK, and LIL are converted to CSR. The dtype is
            forced to DTYPE from tree._tree if the base classifier of this
            ensemble weighted boosting classifier is a tree or forest.

        y : array-like of shape = [n_samples]
            The target values (class labels in classification, real numbers in
            regression).

        sample_weight : array-like of shape = [n_samples], optional
            Sample weights. If None, the sample weights are initialized to
            1 / n_samples.

        Returns
        -------
        self : object
            Returns self.
        """
        # Check parameters
        if self.learning_rate <= 0:
            raise ValueError("learning_rate must be greater than zero")

        # Trees/forests get the fast C dtype and CSC input; everything else
        # keeps its native dtype.
        if (self.base_estimator is None or
                isinstance(self.base_estimator, (BaseDecisionTree,
                                                 BaseForest))):
            dtype = DTYPE
            accept_sparse = 'csc'
        else:
            dtype = None
            accept_sparse = ['csr', 'csc']

        X, y = check_X_y(X, y, accept_sparse=accept_sparse, dtype=dtype)

        if sample_weight is None:
            # Initialize weights to 1 / n_samples.
            # Fixed: ``np.float`` is a deprecated alias for the builtin float
            # (removed in NumPy 1.20); use the equivalent ``np.float64``.
            sample_weight = np.empty(X.shape[0], dtype=np.float64)
            sample_weight[:] = 1. / X.shape[0]
        else:
            # Normalize existing weights
            sample_weight = sample_weight / sample_weight.sum(dtype=np.float64)

            # Check that the sample weights sum is positive
            if sample_weight.sum() <= 0:
                raise ValueError(
                    "Attempting to fit with a non-positive "
                    "weighted number of samples.")

        # Check parameters
        self._validate_estimator()

        # Clear any previous fit results
        self.estimators_ = []
        self.estimator_weights_ = np.zeros(self.n_estimators, dtype=np.float64)
        self.estimator_errors_ = np.ones(self.n_estimators, dtype=np.float64)

        for iboost in range(self.n_estimators):
            # Boosting step
            sample_weight, estimator_weight, estimator_error = self._boost(
                iboost,
                X, y,
                sample_weight)

            # Early termination: _boost signals failure by returning None.
            if sample_weight is None:
                break

            self.estimator_weights_[iboost] = estimator_weight
            self.estimator_errors_[iboost] = estimator_error

            # Stop if error is zero
            if estimator_error == 0:
                break

            sample_weight_sum = np.sum(sample_weight)

            # Stop if the sum of sample weights has become non-positive
            if sample_weight_sum <= 0:
                break

            if iboost < self.n_estimators - 1:
                # Normalize so weights remain a distribution for next round.
                sample_weight /= sample_weight_sum

        return self

    @abstractmethod
    def _boost(self, iboost, X, y, sample_weight):
        """Implement a single boost.

        Warning: This method needs to be overriden by subclasses.

        Parameters
        ----------
        iboost : int
            The index of the current boost iteration.

        X : {array-like, sparse matrix} of shape = [n_samples, n_features]
            The training input samples. Sparse matrix can be CSC, CSR, COO,
            DOK, or LIL. COO, DOK, and LIL are converted to CSR.

        y : array-like of shape = [n_samples]
            The target values (class labels).

        sample_weight : array-like of shape = [n_samples]
            The current sample weights.

        Returns
        -------
        sample_weight : array-like of shape = [n_samples] or None
            The reweighted sample weights.
            If None then boosting has terminated early.

        estimator_weight : float
            The weight for the current boost.
            If None then boosting has terminated early.

        error : float
            The classification error for the current boost.
            If None then boosting has terminated early.
        """
        pass

    def staged_score(self, X, y, sample_weight=None):
        """Return staged scores for X, y.

        This generator method yields the ensemble score after each iteration of
        boosting and therefore allows monitoring, such as to determine the
        score on a test set after each boost.

        Parameters
        ----------
        X : {array-like, sparse matrix} of shape = [n_samples, n_features]
            The training input samples. Sparse matrix can be CSC, CSR, COO,
            DOK, or LIL. DOK and LIL are converted to CSR.

        y : array-like, shape = [n_samples]
            Labels for X.

        sample_weight : array-like, shape = [n_samples], optional
            Sample weights.

        Returns
        -------
        z : float
        """
        for y_pred in self.staged_predict(X):
            if isinstance(self, ClassifierMixin):
                yield accuracy_score(y, y_pred, sample_weight=sample_weight)
            else:
                yield r2_score(y, y_pred, sample_weight=sample_weight)

    @property
    def feature_importances_(self):
        """Return the feature importances (the higher, the more important the
           feature).

        Returns
        -------
        feature_importances_ : array, shape = [n_features]
        """
        if self.estimators_ is None or len(self.estimators_) == 0:
            raise ValueError("Estimator not fitted, "
                             "call `fit` before `feature_importances_`.")

        try:
            # Weighted average of the sub-estimators' importances.
            norm = self.estimator_weights_.sum()
            return (sum(weight * clf.feature_importances_ for weight, clf
                    in zip(self.estimator_weights_, self.estimators_))
                    / norm)

        except AttributeError:
            raise AttributeError(
                "Unable to compute feature importances "
                "since base_estimator does not have a "
                "feature_importances_ attribute")

    def _check_sample_weight(self):
        # Boosting is impossible without per-sample weighting support.
        if not has_fit_parameter(self.base_estimator_, "sample_weight"):
            raise ValueError("%s doesn't support sample_weight."
                             % self.base_estimator_.__class__.__name__)

    def _validate_X_predict(self, X):
        """Ensure that X is in the proper format"""
        if (self.base_estimator is None or
                isinstance(self.base_estimator,
                           (BaseDecisionTree, BaseForest))):
            X = check_array(X, accept_sparse='csr', dtype=DTYPE)
        else:
            X = check_array(X, accept_sparse=['csr', 'csc', 'coo'])

        return X
def _samme_proba(estimator, n_classes, X):
"""Calculate algorithm 4, step 2, equation c) of Zhu et al [1].
References
----------
.. [1] J. Zhu, H. Zou, S. Rosset, T. Hastie, "Multi-class AdaBoost", 2009.
"""
proba = estimator.predict_proba(X)
# Displace zero probabilities so the log is defined.
# Also fix negative elements which may occur with
# negative sample weights.
proba[proba < np.finfo(proba.dtype).eps] = np.finfo(proba.dtype).eps
log_proba = np.log(proba)
return (n_classes - 1) * (log_proba - (1. / n_classes)
* log_proba.sum(axis=1)[:, np.newaxis])
class AdaBoostClassifier(BaseWeightBoosting, ClassifierMixin):
    """An AdaBoost classifier.

    An AdaBoost [1] classifier is a meta-estimator that begins by fitting a
    classifier on the original dataset and then fits additional copies of the
    classifier on the same dataset but where the weights of incorrectly
    classified instances are adjusted such that subsequent classifiers focus
    more on difficult cases.

    This class implements the algorithm known as AdaBoost-SAMME [2].

    Read more in the :ref:`User Guide <adaboost>`.

    Parameters
    ----------
    base_estimator : object, optional (default=DecisionTreeClassifier)
        The base estimator from which the boosted ensemble is built.
        Support for sample weighting is required, as well as proper `classes_`
        and `n_classes_` attributes.

    n_estimators : integer, optional (default=50)
        The maximum number of estimators at which boosting is terminated.
        In case of perfect fit, the learning procedure is stopped early.

    learning_rate : float, optional (default=1.)
        Learning rate shrinks the contribution of each classifier by
        ``learning_rate``. There is a trade-off between ``learning_rate`` and
        ``n_estimators``.

    algorithm : {'SAMME', 'SAMME.R'}, optional (default='SAMME.R')
        If 'SAMME.R' then use the SAMME.R real boosting algorithm.
        ``base_estimator`` must support calculation of class probabilities.
        If 'SAMME' then use the SAMME discrete boosting algorithm.
        The SAMME.R algorithm typically converges faster than SAMME,
        achieving a lower test error with fewer boosting iterations.

    random_state : int, RandomState instance or None, optional (default=None)
        If int, random_state is the seed used by the random number generator;
        If RandomState instance, random_state is the random number generator;
        If None, the random number generator is the RandomState instance used
        by `np.random`.

    Attributes
    ----------
    estimators_ : list of classifiers
        The collection of fitted sub-estimators.

    classes_ : array of shape = [n_classes]
        The classes labels.

    n_classes_ : int
        The number of classes.

    estimator_weights_ : array of floats
        Weights for each estimator in the boosted ensemble.

    estimator_errors_ : array of floats
        Classification error for each estimator in the boosted
        ensemble.

    feature_importances_ : array of shape = [n_features]
        The feature importances if supported by the ``base_estimator``.

    See also
    --------
    AdaBoostRegressor, GradientBoostingClassifier, DecisionTreeClassifier

    References
    ----------
    .. [1] Y. Freund, R. Schapire, "A Decision-Theoretic Generalization of
           on-Line Learning and an Application to Boosting", 1995.

    .. [2] J. Zhu, H. Zou, S. Rosset, T. Hastie, "Multi-class AdaBoost", 2009.
    """
    def __init__(self,
                 base_estimator=None,
                 n_estimators=50,
                 learning_rate=1.,
                 algorithm='SAMME.R',
                 random_state=None):

        super(AdaBoostClassifier, self).__init__(
            base_estimator=base_estimator,
            n_estimators=n_estimators,
            learning_rate=learning_rate,
            random_state=random_state)

        self.algorithm = algorithm

    def fit(self, X, y, sample_weight=None):
        """Build a boosted classifier from the training set (X, y).

        Parameters
        ----------
        X : {array-like, sparse matrix} of shape = [n_samples, n_features]
            The training input samples. Sparse matrix can be CSC, CSR, COO,
            DOK, or LIL. DOK and LIL are converted to CSR.

        y : array-like of shape = [n_samples]
            The target values (class labels).

        sample_weight : array-like of shape = [n_samples], optional
            Sample weights. If None, the sample weights are initialized to
            ``1 / n_samples``.

        Returns
        -------
        self : object
            Returns self.
        """
        # Check that algorithm is supported
        if self.algorithm not in ('SAMME', 'SAMME.R'):
            raise ValueError("algorithm %s is not supported" % self.algorithm)

        # Fit: the shared boosting loop lives in BaseWeightBoosting.fit.
        return super(AdaBoostClassifier, self).fit(X, y, sample_weight)

    def _validate_estimator(self):
        """Check the estimator and set the base_estimator_ attribute."""
        super(AdaBoostClassifier, self)._validate_estimator(
            default=DecisionTreeClassifier(max_depth=1))

        #  SAMME-R requires predict_proba-enabled base estimators
        if self.algorithm == 'SAMME.R':
            if not hasattr(self.base_estimator_, 'predict_proba'):
                raise TypeError(
                    "AdaBoostClassifier with algorithm='SAMME.R' requires "
                    "that the weak learner supports the calculation of class "
                    "probabilities with a predict_proba method.\n"
                    "Please change the base estimator or set "
                    "algorithm='SAMME' instead.")
        self._check_sample_weight()

    def _boost(self, iboost, X, y, sample_weight):
        """Implement a single boost.

        Perform a single boost according to the real multi-class SAMME.R
        algorithm or to the discrete SAMME algorithm and return the updated
        sample weights.

        Parameters
        ----------
        iboost : int
            The index of the current boost iteration.

        X : {array-like, sparse matrix} of shape = [n_samples, n_features]
            The training input samples. Sparse matrix can be CSC, CSR, COO,
            DOK, or LIL. DOK and LIL are converted to CSR.

        y : array-like of shape = [n_samples]
            The target values (class labels).

        sample_weight : array-like of shape = [n_samples]
            The current sample weights.

        Returns
        -------
        sample_weight : array-like of shape = [n_samples] or None
            The reweighted sample weights.
            If None then boosting has terminated early.

        estimator_weight : float
            The weight for the current boost.
            If None then boosting has terminated early.

        estimator_error : float
            The classification error for the current boost.
            If None then boosting has terminated early.
        """
        if self.algorithm == 'SAMME.R':
            return self._boost_real(iboost, X, y, sample_weight)

        else:  # elif self.algorithm == "SAMME":
            return self._boost_discrete(iboost, X, y, sample_weight)

    def _boost_real(self, iboost, X, y, sample_weight):
        """Implement a single boost using the SAMME.R real algorithm."""
        estimator = self._make_estimator()

        # Some base estimators do not accept a random_state parameter.
        try:
            estimator.set_params(random_state=self.random_state)
        except ValueError:
            pass

        estimator.fit(X, y, sample_weight=sample_weight)

        y_predict_proba = estimator.predict_proba(X)

        if iboost == 0:
            self.classes_ = getattr(estimator, 'classes_', None)
            self.n_classes_ = len(self.classes_)

        y_predict = self.classes_.take(np.argmax(y_predict_proba, axis=1),
                                       axis=0)

        # Instances incorrectly classified
        incorrect = y_predict != y

        # Error fraction
        estimator_error = np.mean(
            np.average(incorrect, weights=sample_weight, axis=0))

        # Stop if classification is perfect
        if estimator_error <= 0:
            return sample_weight, 1., 0.

        # Construct y coding as described in Zhu et al [2]:
        #
        #    y_k = 1 if c == k else -1 / (K - 1)
        #
        # where K == n_classes_ and c, k in [0, K) are indices along the second
        # axis of the y coding with c being the index corresponding to the true
        # class label.
        n_classes = self.n_classes_
        classes = self.classes_
        y_codes = np.array([-1. / (n_classes - 1), 1.])
        y_coding = y_codes.take(classes == y[:, np.newaxis])

        # Displace zero probabilities so the log is defined.
        # Also fix negative elements which may occur with
        # negative sample weights.
        # NOTE: in-place clamp also affects y_predict_proba (same array).
        proba = y_predict_proba  # alias for readability
        proba[proba < np.finfo(proba.dtype).eps] = np.finfo(proba.dtype).eps

        # Boost weight using multi-class AdaBoost SAMME.R alg
        estimator_weight = (-1. * self.learning_rate
                                * (((n_classes - 1.) / n_classes) *
                                   inner1d(y_coding, np.log(y_predict_proba))))

        # Only boost the weights if it will fit again
        if not iboost == self.n_estimators - 1:
            # Only boost positive weights
            sample_weight *= np.exp(estimator_weight *
                                    ((sample_weight > 0) |
                                     (estimator_weight < 0)))

        return sample_weight, 1., estimator_error

    def _boost_discrete(self, iboost, X, y, sample_weight):
        """Implement a single boost using the SAMME discrete algorithm."""
        estimator = self._make_estimator()

        # Some base estimators do not accept a random_state parameter.
        try:
            estimator.set_params(random_state=self.random_state)
        except ValueError:
            pass

        estimator.fit(X, y, sample_weight=sample_weight)

        y_predict = estimator.predict(X)

        if iboost == 0:
            self.classes_ = getattr(estimator, 'classes_', None)
            self.n_classes_ = len(self.classes_)

        # Instances incorrectly classified
        incorrect = y_predict != y

        # Error fraction
        estimator_error = np.mean(
            np.average(incorrect, weights=sample_weight, axis=0))

        # Stop if classification is perfect
        if estimator_error <= 0:
            return sample_weight, 1., 0.

        n_classes = self.n_classes_

        # Stop if the error is at least as bad as random guessing
        if estimator_error >= 1. - (1. / n_classes):
            self.estimators_.pop(-1)
            if len(self.estimators_) == 0:
                raise ValueError('BaseClassifier in AdaBoostClassifier '
                                 'ensemble is worse than random, ensemble '
                                 'can not be fit.')
            return None, None, None

        # Boost weight using multi-class AdaBoost SAMME alg
        estimator_weight = self.learning_rate * (
            np.log((1. - estimator_error) / estimator_error) +
            np.log(n_classes - 1.))

        # Only boost the weights if I will fit again
        if not iboost == self.n_estimators - 1:
            # Only boost positive weights
            sample_weight *= np.exp(estimator_weight * incorrect *
                                    ((sample_weight > 0) |
                                     (estimator_weight < 0)))

        return sample_weight, estimator_weight, estimator_error

    def predict(self, X):
        """Predict classes for X.

        The predicted class of an input sample is computed as the weighted mean
        prediction of the classifiers in the ensemble.

        Parameters
        ----------
        X : {array-like, sparse matrix} of shape = [n_samples, n_features]
            The training input samples. Sparse matrix can be CSC, CSR, COO,
            DOK, or LIL. DOK and LIL are converted to CSR.

        Returns
        -------
        y : array of shape = [n_samples]
            The predicted classes.
        """
        pred = self.decision_function(X)

        # Binary case: the 1-D decision value's sign selects the class.
        if self.n_classes_ == 2:
            return self.classes_.take(pred > 0, axis=0)

        return self.classes_.take(np.argmax(pred, axis=1), axis=0)

    def staged_predict(self, X):
        """Return staged predictions for X.

        The predicted class of an input sample is computed as the weighted mean
        prediction of the classifiers in the ensemble.

        This generator method yields the ensemble prediction after each
        iteration of boosting and therefore allows monitoring, such as to
        determine the prediction on a test set after each boost.

        Parameters
        ----------
        X : array-like of shape = [n_samples, n_features]
            The input samples.

        Returns
        -------
        y : generator of array, shape = [n_samples]
            The predicted classes.
        """
        n_classes = self.n_classes_
        classes = self.classes_

        if n_classes == 2:
            for pred in self.staged_decision_function(X):
                yield np.array(classes.take(pred > 0, axis=0))

        else:
            for pred in self.staged_decision_function(X):
                yield np.array(classes.take(
                    np.argmax(pred, axis=1), axis=0))

    def decision_function(self, X):
        """Compute the decision function of ``X``.

        Parameters
        ----------
        X : {array-like, sparse matrix} of shape = [n_samples, n_features]
            The training input samples. Sparse matrix can be CSC, CSR, COO,
            DOK, or LIL. DOK and LIL are converted to CSR.

        Returns
        -------
        score : array, shape = [n_samples, k]
            The decision function of the input samples. The order of
            outputs is the same of that of the `classes_` attribute.
            Binary classification is a special cases with ``k == 1``,
            otherwise ``k==n_classes``. For binary classification,
            values closer to -1 or 1 mean more like the first or second
            class in ``classes_``, respectively.
        """
        check_is_fitted(self, "n_classes_")
        X = self._validate_X_predict(X)

        n_classes = self.n_classes_
        classes = self.classes_[:, np.newaxis]
        pred = None

        if self.algorithm == 'SAMME.R':
            # The weights are all 1. for SAMME.R
            pred = sum(_samme_proba(estimator, n_classes, X)
                       for estimator in self.estimators_)
        else:   # self.algorithm == "SAMME"
            # Weighted vote: one-hot (predict == class) scaled by weight.
            pred = sum((estimator.predict(X) == classes).T * w
                       for estimator, w in zip(self.estimators_,
                                               self.estimator_weights_))

        pred /= self.estimator_weights_.sum()
        if n_classes == 2:
            # Collapse to a single signed score for the binary case.
            pred[:, 0] *= -1
            return pred.sum(axis=1)
        return pred

    def staged_decision_function(self, X):
        """Compute decision function of ``X`` for each boosting iteration.

        This method allows monitoring (i.e. determine error on testing set)
        after each boosting iteration.

        Parameters
        ----------
        X : {array-like, sparse matrix} of shape = [n_samples, n_features]
            The training input samples. Sparse matrix can be CSC, CSR, COO,
            DOK, or LIL. DOK and LIL are converted to CSR.

        Returns
        -------
        score : generator of array, shape = [n_samples, k]
            The decision function of the input samples. The order of
            outputs is the same of that of the `classes_` attribute.
            Binary classification is a special cases with ``k == 1``,
            otherwise ``k==n_classes``. For binary classification,
            values closer to -1 or 1 mean more like the first or second
            class in ``classes_``, respectively.
        """
        check_is_fitted(self, "n_classes_")
        X = self._validate_X_predict(X)

        n_classes = self.n_classes_
        classes = self.classes_[:, np.newaxis]
        pred = None
        norm = 0.

        for weight, estimator in zip(self.estimator_weights_,
                                     self.estimators_):
            norm += weight

            if self.algorithm == 'SAMME.R':
                # The weights are all 1. for SAMME.R
                current_pred = _samme_proba(estimator, n_classes, X)
            else:  # elif self.algorithm == "SAMME":
                current_pred = estimator.predict(X)
                current_pred = (current_pred == classes).T * weight

            if pred is None:
                pred = current_pred
            else:
                pred += current_pred

            if n_classes == 2:
                # Copy before sign-flip so the running sum stays intact.
                tmp_pred = np.copy(pred)
                tmp_pred[:, 0] *= -1
                yield (tmp_pred / norm).sum(axis=1)
            else:
                yield pred / norm

    def predict_proba(self, X):
        """Predict class probabilities for X.

        The predicted class probabilities of an input sample is computed as
        the weighted mean predicted class probabilities of the classifiers
        in the ensemble.

        Parameters
        ----------
        X : {array-like, sparse matrix} of shape = [n_samples, n_features]
            The training input samples. Sparse matrix can be CSC, CSR, COO,
            DOK, or LIL. DOK and LIL are converted to CSR.

        Returns
        -------
        p : array of shape = [n_samples]
            The class probabilities of the input samples. The order of
            outputs is the same of that of the `classes_` attribute.
        """
        check_is_fitted(self, "n_classes_")

        n_classes = self.n_classes_
        X = self._validate_X_predict(X)

        if self.algorithm == 'SAMME.R':
            # The weights are all 1. for SAMME.R
            proba = sum(_samme_proba(estimator, n_classes, X)
                        for estimator in self.estimators_)
        else:   # self.algorithm == "SAMME"
            proba = sum(estimator.predict_proba(X) * w
                        for estimator, w in zip(self.estimators_,
                                                self.estimator_weights_))

        proba /= self.estimator_weights_.sum()
        # Map additive decision values back to the probability simplex
        # (inverse of the SAMME.R log-coding), then renormalize rows.
        proba = np.exp((1. / (n_classes - 1)) * proba)
        normalizer = proba.sum(axis=1)[:, np.newaxis]
        # Guard against all-zero rows (would divide by zero).
        normalizer[normalizer == 0.0] = 1.0
        proba /= normalizer

        return proba

    def staged_predict_proba(self, X):
        """Predict class probabilities for X.

        The predicted class probabilities of an input sample is computed as
        the weighted mean predicted class probabilities of the classifiers
        in the ensemble.

        This generator method yields the ensemble predicted class probabilities
        after each iteration of boosting and therefore allows monitoring, such
        as to determine the predicted class probabilities on a test set after
        each boost.

        Parameters
        ----------
        X : {array-like, sparse matrix} of shape = [n_samples, n_features]
            The training input samples. Sparse matrix can be CSC, CSR, COO,
            DOK, or LIL. DOK and LIL are converted to CSR.

        Returns
        -------
        p : generator of array, shape = [n_samples]
            The class probabilities of the input samples. The order of
            outputs is the same of that of the `classes_` attribute.
        """
        X = self._validate_X_predict(X)

        n_classes = self.n_classes_
        proba = None
        norm = 0.

        for weight, estimator in zip(self.estimator_weights_,
                                     self.estimators_):
            norm += weight

            if self.algorithm == 'SAMME.R':
                # The weights are all 1. for SAMME.R
                current_proba = _samme_proba(estimator, n_classes, X)
            else:  # elif self.algorithm == "SAMME":
                current_proba = estimator.predict_proba(X) * weight

            if proba is None:
                proba = current_proba
            else:
                proba += current_proba

            # Same inverse-coding transform as predict_proba, per stage.
            real_proba = np.exp((1. / (n_classes - 1)) * (proba / norm))
            normalizer = real_proba.sum(axis=1)[:, np.newaxis]
            normalizer[normalizer == 0.0] = 1.0
            real_proba /= normalizer

            yield real_proba

    def predict_log_proba(self, X):
        """Predict class log-probabilities for X.

        The predicted class log-probabilities of an input sample is computed as
        the weighted mean predicted class log-probabilities of the classifiers
        in the ensemble.

        Parameters
        ----------
        X : {array-like, sparse matrix} of shape = [n_samples, n_features]
            The training input samples. Sparse matrix can be CSC, CSR, COO,
            DOK, or LIL. DOK and LIL are converted to CSR.

        Returns
        -------
        p : array of shape = [n_samples]
            The class probabilities of the input samples. The order of
            outputs is the same of that of the `classes_` attribute.
        """
        return np.log(self.predict_proba(X))
class AdaBoostRegressor(BaseWeightBoosting, RegressorMixin):
"""An AdaBoost regressor.
An AdaBoost [1] regressor is a meta-estimator that begins by fitting a
regressor on the original dataset and then fits additional copies of the
regressor on the same dataset but where the weights of instances are
adjusted according to the error of the current prediction. As such,
subsequent regressors focus more on difficult cases.
This class implements the algorithm known as AdaBoost.R2 [2].
Read more in the :ref:`User Guide <adaboost>`.
Parameters
----------
base_estimator : object, optional (default=DecisionTreeRegressor)
The base estimator from which the boosted ensemble is built.
Support for sample weighting is required.
n_estimators : integer, optional (default=50)
The maximum number of estimators at which boosting is terminated.
In case of perfect fit, the learning procedure is stopped early.
learning_rate : float, optional (default=1.)
Learning rate shrinks the contribution of each regressor by
``learning_rate``. There is a trade-off between ``learning_rate`` and
``n_estimators``.
loss : {'linear', 'square', 'exponential'}, optional (default='linear')
The loss function to use when updating the weights after each
boosting iteration.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Attributes
----------
estimators_ : list of classifiers
The collection of fitted sub-estimators.
estimator_weights_ : array of floats
Weights for each estimator in the boosted ensemble.
estimator_errors_ : array of floats
Regression error for each estimator in the boosted ensemble.
feature_importances_ : array of shape = [n_features]
The feature importances if supported by the ``base_estimator``.
See also
--------
AdaBoostClassifier, GradientBoostingRegressor, DecisionTreeRegressor
References
----------
.. [1] Y. Freund, R. Schapire, "A Decision-Theoretic Generalization of
on-Line Learning and an Application to Boosting", 1995.
.. [2] H. Drucker, "Improving Regressors using Boosting Techniques", 1997.
"""
def __init__(self,
base_estimator=None,
n_estimators=50,
learning_rate=1.,
loss='linear',
random_state=None):
super(AdaBoostRegressor, self).__init__(
base_estimator=base_estimator,
n_estimators=n_estimators,
learning_rate=learning_rate,
random_state=random_state)
self.loss = loss
self.random_state = random_state
def fit(self, X, y, sample_weight=None):
"""Build a boosted regressor from the training set (X, y).
Parameters
----------
X : {array-like, sparse matrix} of shape = [n_samples, n_features]
The training input samples. Sparse matrix can be CSC, CSR, COO,
DOK, or LIL. DOK and LIL are converted to CSR.
y : array-like of shape = [n_samples]
The target values (real numbers).
sample_weight : array-like of shape = [n_samples], optional
Sample weights. If None, the sample weights are initialized to
1 / n_samples.
Returns
-------
self : object
Returns self.
"""
# Check loss
if self.loss not in ('linear', 'square', 'exponential'):
raise ValueError(
"loss must be 'linear', 'square', or 'exponential'")
# Fit
return super(AdaBoostRegressor, self).fit(X, y, sample_weight)
def _validate_estimator(self):
"""Check the estimator and set the base_estimator_ attribute."""
super(AdaBoostRegressor, self)._validate_estimator(
default=DecisionTreeRegressor(max_depth=3))
self._check_sample_weight()
def _boost(self, iboost, X, y, sample_weight):
"""Implement a single boost for regression
Perform a single boost according to the AdaBoost.R2 algorithm and
return the updated sample weights.
Parameters
----------
iboost : int
The index of the current boost iteration.
X : {array-like, sparse matrix} of shape = [n_samples, n_features]
The training input samples. Sparse matrix can be CSC, CSR, COO,
DOK, or LIL. DOK and LIL are converted to CSR.
y : array-like of shape = [n_samples]
The target values (class labels in classification, real numbers in
regression).
sample_weight : array-like of shape = [n_samples]
The current sample weights.
Returns
-------
sample_weight : array-like of shape = [n_samples] or None
The reweighted sample weights.
If None then boosting has terminated early.
estimator_weight : float
The weight for the current boost.
If None then boosting has terminated early.
estimator_error : float
The regression error for the current boost.
If None then boosting has terminated early.
"""
estimator = self._make_estimator()
try:
estimator.set_params(random_state=self.random_state)
except ValueError:
pass
generator = check_random_state(self.random_state)
# Weighted sampling of the training set with replacement
# For NumPy >= 1.7.0 use np.random.choice
cdf = sample_weight.cumsum()
cdf /= cdf[-1]
uniform_samples = generator.random_sample(X.shape[0])
bootstrap_idx = cdf.searchsorted(uniform_samples, side='right')
# searchsorted returns a scalar
bootstrap_idx = np.array(bootstrap_idx, copy=False)
# Fit on the bootstrapped sample and obtain a prediction
# for all samples in the training set
estimator.fit(X[bootstrap_idx], y[bootstrap_idx])
y_predict = estimator.predict(X)
error_vect = np.abs(y_predict - y)
error_max = error_vect.max()
if error_max != 0.:
error_vect /= error_max
if self.loss == 'square':
error_vect **= 2
elif self.loss == 'exponential':
error_vect = 1. - np.exp(- error_vect)
# Calculate the average loss
estimator_error = (sample_weight * error_vect).sum()
if estimator_error <= 0:
# Stop if fit is perfect
return sample_weight, 1., 0.
elif estimator_error >= 0.5:
# Discard current estimator only if it isn't the only one
if len(self.estimators_) > 1:
self.estimators_.pop(-1)
return None, None, None
beta = estimator_error / (1. - estimator_error)
# Boost weight using AdaBoost.R2 alg
estimator_weight = self.learning_rate * np.log(1. / beta)
if not iboost == self.n_estimators - 1:
sample_weight *= np.power(
beta,
(1. - error_vect) * self.learning_rate)
return sample_weight, estimator_weight, estimator_error
def _get_median_predict(self, X, limit):
    """Return the weighted median prediction of the first `limit` estimators.

    For each sample, estimator predictions are sorted by value and the
    prediction at which the cumulative estimator weight first reaches half
    of the total weight is selected.
    """
    # Per-estimator predictions as columns: shape (n_samples, limit).
    all_preds = np.array([
        est.predict(X) for est in self.estimators_[:limit]]).T

    # Order each row's predictions so weights can be accumulated
    # in ascending prediction order.
    order = np.argsort(all_preds, axis=1)

    # Cumulative estimator weight along the sorted-prediction axis.
    cum_weights = self.estimator_weights_[order].cumsum(axis=1)

    # First column where the cumulative weight reaches half the row total
    # marks the weighted median for that sample.
    reached_half = cum_weights >= 0.5 * cum_weights[:, -1][:, np.newaxis]
    median_pos = reached_half.argmax(axis=1)

    rows = np.arange(X.shape[0])
    median_cols = order[rows, median_pos]
    return all_preds[rows, median_cols]
def predict(self, X):
    """Predict regression value for X.

    The prediction for each input sample is the weighted median of the
    predictions of all fitted estimators in the ensemble.

    Parameters
    ----------
    X : {array-like, sparse matrix} of shape = [n_samples, n_features]
        The training input samples. Sparse matrix can be CSC, CSR, COO,
        DOK, or LIL. DOK and LIL are converted to CSR.

    Returns
    -------
    y : array of shape = [n_samples]
        The predicted regression values.
    """
    check_is_fitted(self, "estimator_weights_")
    X = self._validate_X_predict(X)

    # Use every fitted estimator for the final prediction.
    n_fitted = len(self.estimators_)
    return self._get_median_predict(X, n_fitted)
def staged_predict(self, X):
    """Return staged predictions for X.

    Yields the ensemble's weighted-median prediction after each boosting
    iteration, which allows monitoring (e.g. tracking test-set error as
    estimators are added).

    Parameters
    ----------
    X : {array-like, sparse matrix} of shape = [n_samples, n_features]
        The training input samples. Sparse matrix can be CSC, CSR, COO,
        DOK, or LIL. DOK and LIL are converted to CSR.

    Returns
    -------
    y : generator of array, shape = [n_samples]
        The predicted regression values.
    """
    check_is_fitted(self, "estimator_weights_")
    X = self._validate_X_predict(X)

    # Prediction after stage i uses only the first i estimators.
    n_stages = len(self.estimators_)
    for stage in range(1, n_stages + 1):
        yield self._get_median_predict(X, limit=stage)
| bsd-3-clause |
sangwook236/SWDT | sw_dev/python/rnd/test/machine_learning/keras/keras_class_activation_map.py | 2 | 2826 | #!/usr/bin/env python
# coding: UTF-8
import numpy as np
from tensorflow.keras.models import Model
from tensorflow.keras.layers import UpSampling2D, Conv2D
import tensorflow.keras.applications.resnet50 as resnet
import cv2
import matplotlib.pyplot as plt
def load_img(fname, input_size, preprocess_fn):
    """Read an image file and prepare it as a single-sample network input.

    Returns a tuple of (preprocessed batch of shape (1, input_size,
    input_size, 3), the original RGB image, and its (width, height)).
    """
    # OpenCV loads images as BGR; reverse the channel axis to get RGB.
    rgb = cv2.imread(fname)[:, :, ::-1]
    orig_size = (rgb.shape[1], rgb.shape[0])  # (width, height)

    resized = cv2.resize(rgb, (input_size, input_size))
    batch = np.expand_dims(preprocess_fn(resized), axis=0)
    return batch, rgb, orig_size
def get_cam_model(model_class, num_classes, input_size=224, last_conv_layer='activation_49', pred_layer='fc1000'):
    """Build a model emitting both class scores and class activation maps.

    The final dense layer's weights are reused as a 1x1 convolution applied
    to the (upsampled) last convolutional feature map, producing one
    activation map per class alongside the normal predictions.
    """
    base = model_class(input_shape=(input_size, input_size, 3))
    base.summary()

    # Dense weights reshaped into a 1x1 conv kernel; bias kept as-is.
    dense_w, dense_b = base.get_layer(pred_layer).get_weights()
    conv_params = (dense_w.reshape(1, 1, -1, num_classes), dense_b)

    features = base.get_layer(last_conv_layer).output
    upsampled = UpSampling2D(size=(32, 32), interpolation='bilinear')(features)
    cam_out = Conv2D(filters=num_classes, kernel_size=(1, 1), name='predictions_2')(upsampled)

    cam_model = Model(inputs=base.input, outputs=[base.output, cam_out])
    cam_model.get_layer('predictions_2').set_weights(conv_params)
    return cam_model
def postprocess(preds, cams, top_k=1):
    """Sum the activation maps of the top_k highest-scoring classes.

    `preds` is a (1, n_classes) score array and `cams` a
    (1, H, W, n_classes) activation-map array; returns an (H, W) map.
    """
    top_classes = np.argsort(preds[0])[-top_k:]
    combined = np.zeros_like(cams[0, :, :, 0])
    for cls in top_classes:
        combined += cams[0, :, :, cls]
    return combined
# REF [site] >> https://github.com/keras-team/keras/blob/master/examples/class_activation_maps.py
# REF [paper] >> "Learning Deep Features for Discriminative Localization", CVPR 2016.
def class_activation_map_example():
    """Compute a class activation map for one image and show the overlay."""
    # Set an appropriate image file.
    image_path = './image.png'

    # Model configuration. Any architecture using global average pooling
    # works here (e.g. InceptionResnetV2 / NASNetLarge).
    input_size = 224
    model_class = resnet.ResNet50
    preprocess_fn = resnet.preprocess_input
    last_conv_layer = 'activation_48'  #'activation_49'
    pred_layer = 'fc1000'
    num_classes = 1000  # Number of imagenet classes.

    #--------------------
    # Load image.
    imgs, original_img, original_size = load_img(
        image_path, input_size=input_size, preprocess_fn=preprocess_fn)

    # Predict class scores and per-class activation maps.
    cam_model = get_cam_model(model_class, num_classes, input_size, last_conv_layer, pred_layer)
    preds, cams = cam_model.predict(imgs)

    # Post processing: keep the map of the top-scoring class.
    heatmap = postprocess(preds, cams)

    # Overlay the CAM (resized back to the original resolution) on the image.
    plt.imshow(original_img, alpha=0.5)
    plt.imshow(cv2.resize(heatmap, original_size), cmap='jet', alpha=0.5)
    plt.show()
def main():
    # Entry point: run the single CAM demonstration.
    class_activation_map_example()

#--------------------------------------------------------------------
if '__main__' == __name__:
    main()
| gpl-3.0 |
upibhalla/moose-core | tests/python/test_rdesigneur_random_syn_input.py | 2 | 1402 | # -*- coding: utf-8 -*-
from __future__ import print_function, division
# This example demonstrates random (Poisson) synaptic input to a cell.
# Copyright (C) Upinder S. Bhalla NCBS 2018
# Released under the terms of the GNU Public License V3. No warranty.
# Changelog:
# Thursday 20 September 2018 09:53:27 AM IST
# - Turned into a test script. Dilawar Singh <dilawars@ncbs.res.in>
import moose
import numpy as np
moose.seed( 100 )
try:
import matplotlib
except Exception as e:
print( "[INFO ] matplotlib is not found. This test wont run." )
quit()
import rdesigneur as rd
rdes = rd.rdesigneur(
cellProto = [['somaProto', 'soma', 20e-6, 200e-6]],
chanProto = [['make_glu()', 'glu']],
chanDistrib = [['glu', 'soma', 'Gbar', '1' ]],
stimList = [['soma', '0.5', 'glu', 'randsyn', '50' ]],
# Deliver stimulus to glu synapse on soma, at mean 50 Hz Poisson.
plotList = [['soma', '1', '.', 'Vm', 'Soma membrane potential']]
)
rdes.buildModel()
moose.reinit()
moose.start( 0.3 )
tables = moose.wildcardFind( '/##[TYPE=Table]' )
res = [ ]
for t in tables:
y = t.vector
u, s = np.mean(y), np.std(y)
res.append( (u,s) )
# Got these values from version compiled on Sep 20, 2018 with moose.seed set to
# 100.
expected = [(-0.051218660048699974, 0.01028490481294165)]
assert np.isclose( expected, res, atol=1e-5).all(), "Expected %s, got %s" %(expected,res)
| gpl-3.0 |
camallen/aggregation | experimental/serengeti/IAAI/bySpecies.py | 2 | 1975 | #!/usr/bin/env python
__author__ = 'greg'
from nodes import setup
import numpy as np
import matplotlib.pyplot as plt
from scipy.stats.stats import pearsonr
# Candidate per-photo sample sizes; only 25 is exercised below.
numUser = [5,10,15,20,25]
algPercent = []
currPercent = []
# Every species tag that may appear in a classification.
speciesList = ['elephant','zebra','warthog','impala','buffalo','wildebeest','gazelleThomsons','dikDik','giraffe','gazelleGrants','lionFemale','baboon','hippopotamus','ostrich','human','otherBird','hartebeest','secretaryBird','hyenaSpotted','mongoose','reedbuck','topi','guineaFowl','eland','aardvark','lionMale','porcupine','koriBustard','bushbuck','hyenaStriped','jackal','cheetah','waterbuck','leopard','reptiles','serval','aardwolf','vervetMonkey','rodents','honeyBadger','batEaredFox','rhinoceros','civet','genet','zorilla','hare','caracal','wildcat']
# Per-species list of correct-classification counts (one entry per run)
# and per-species count of gold-standard photos containing the species.
correct = {"wildebeest":[],"zebra":[],"hartebeest":[],"gazelleThomsons":[],"buffalo":[],"impala":[],"warthog":[],"giraffe":[],"elephant":[],"human":[],"gazelleGrants":[],"guineaFowl":[],"hyenaSpotted":[],"otherBird":[],"hippopotamus":[],"reedbuck":[],"eland":[],"baboon":[],"lionFemale":[],"topi":[]}
total = {s:0. for s in correct.keys()}
print len(correct)  # Python 2 print statement (this file is Python 2)

for j in range(1):
    print j
    # Build the photo/user graph (setup comes from the project's `nodes`
    # module; `tau` presumably bounds the graph size -- TODO confirm).
    photos,users = setup(tau=50)
    # Subsample 25 classifications per photo, then drop pruned users' data.
    for p in photos.values():
        p.__sample__(25)
    for u in users.values():
        u.__prune__()

    #initialize things using majority voting
    for p in photos.values():
        p.__majorityVote__()

    #estimate the user's "correctness"
    for u in users.values():
        for s in speciesList:
            u.__speciesCorrect__(s,beta=0.01)

    # Re-vote, weighting each user by their estimated correctness.
    for p in photos.values():
        p.__weightedMajorityVote__()

    # Score the weighted vote against the gold standard, per species.
    for s in correct.keys():
        correctCount = 0
        for p in photos.values():
            if (s in p.goldStandard):
                total[s] += 1
            if (s in p.goldStandard) and (s in p.contains):
                correctCount += 1
        correct[s].append(correctCount)
for s,c in correct.items():
print s,np.mean(c)/total[s] | apache-2.0 |
joeyginorio/Action-Understanding-with-Rational-Rules | model_src/grid_world.py | 1 | 9591 | # Joey Velez-Ginorio
# Gridworld Implementation
# ---------------------------------
from mdp import MDP
from grid import Grid
from scipy.stats import uniform
from scipy.stats import beta
from scipy.stats import expon
import numpy as np
import random
import pyprind
import matplotlib.pyplot as plt
class GridWorld(MDP):
    """
    Defines a gridworld environment to be solved by an MDP!

    States are grid cells plus one absorbing "death" state, actions are the
    eight compass moves plus "stay", and rewards come from the goal objects
    stored in the grid. The MDP is solved immediately on construction.
    """

    def __init__(self, grid, goalVals, discount=.99, tau=.01, epsilon=.001):
        """
        Params:
            grid: Grid instance (rows/cols, walls, goal objects).
            goalVals: reward value for each goal object, in the same
                order as grid.objects.
            discount: MDP discount factor.
            tau: softmax temperature used by the solver.
            epsilon: value-iteration convergence threshold.
        """
        MDP.__init__(self, discount=discount, tau=tau, epsilon=epsilon)

        self.goalVals = goalVals
        self.grid = grid

        # Build the state/action/reward/transition model, then solve it.
        self.setGridWorld()
        self.valueIteration()
        self.extractPolicy()

    def isTerminal(self, state):
        """
        Specifies terminal conditions for gridworld: a state is terminal
        iff its coordinate coincides with one of the goal objects.
        """
        return tuple(self.scalarToCoord(state)) in self.grid.objects.values()

    def isObstacle(self, sCoord):
        """
        Checks if a coordinate is a wall or lies outside the grid bounds.
        """
        if tuple(sCoord) in self.grid.walls:
            return True

        # Off the top/bottom edge.
        if sCoord[0] > (self.grid.row - 1) or sCoord[0] < 0:
            return True

        # Off the left/right edge.
        if sCoord[1] > (self.grid.col - 1) or sCoord[1] < 0:
            return True

        return False

    def takeAction(self, sCoord, action):
        """
        Receives an action value, performs associated movement.

        Actions: 0=up, 1=down, 2=left, 3=right, 4=stay,
                 5=up-left, 6=up-right, 7=down-left, 8=down-right.
        """
        # Value comparison (==) instead of identity (`is`) on ints:
        # identity of small ints is a CPython implementation detail.
        if action == 0:
            return self.up(sCoord)
        if action == 1:
            return self.down(sCoord)
        if action == 2:
            return self.left(sCoord)
        if action == 3:
            return self.right(sCoord)
        if action == 4:
            return sCoord
        if action == 5:
            return self.upleft(sCoord)
        if action == 6:
            return self.upright(sCoord)
        if action == 7:
            return self.downleft(sCoord)
        if action == 8:
            return self.downright(sCoord)

    def _move(self, sCoord, dRow, dCol):
        """
        Return a copy of sCoord shifted by (dRow, dCol), or the original
        object unchanged if the move hits a wall / leaves the grid.

        Returning the *same object* on a blocked move is load-bearing:
        getPossibleActions detects blocked moves via identity (`is`).
        """
        newCoord = np.copy(sCoord)
        newCoord[0] += dRow
        newCoord[1] += dCol

        if not self.isObstacle(newCoord):
            return newCoord
        return sCoord

    def up(self, sCoord):
        """
        Move agent up, uses state coordinate.
        """
        return self._move(sCoord, -1, 0)

    def upright(self, sCoord):
        """
        Move agent up and right, uses state coordinate.
        """
        return self._move(sCoord, -1, 1)

    def upleft(self, sCoord):
        """
        Move agent up and left, uses state coordinate.
        """
        return self._move(sCoord, -1, -1)

    def down(self, sCoord):
        """
        Move agent down, uses state coordinate.
        """
        return self._move(sCoord, 1, 0)

    def downleft(self, sCoord):
        """
        Move agent down and left, uses state coordinate.
        """
        return self._move(sCoord, 1, -1)

    def downright(self, sCoord):
        """
        Move agent down and right, uses state coordinate.
        """
        return self._move(sCoord, 1, 1)

    def left(self, sCoord):
        """
        Move agent left, uses state coordinate.
        """
        return self._move(sCoord, 0, -1)

    def right(self, sCoord):
        """
        Move agent right, uses state coordinate.
        """
        return self._move(sCoord, 0, 1)

    def coordToScalar(self, sCoord):
        """
        Convert state coordinates to corresponding scalar state value.
        """
        return sCoord[0] * (self.grid.col) + sCoord[1]

    def scalarToCoord(self, scalar):
        """
        Convert scalar state value into [row, col] coordinates.
        """
        # Floor division keeps this correct under Python 3 as well
        # (plain `/` would produce a float there).
        return np.array([scalar // self.grid.col, scalar % self.grid.col])

    def getPossibleActions(self, sCoord):
        """
        Will return a list of all possible actions from a current state.

        A move is possible iff the movement method returned a *new* array;
        identity with sCoord means the move was blocked. Action 4 (stay)
        is intentionally never listed.
        """
        moves = [(self.up, 0), (self.down, 1), (self.left, 2),
                 (self.right, 3), (self.upleft, 5), (self.upright, 6),
                 (self.downleft, 7), (self.downright, 8)]
        return [action for move, action in moves
                if move(sCoord) is not sCoord]

    def setGridWorld(self):
        """
        Initializes states, actions, rewards, transition matrix.
        """
        # Grid cells plus one absorbing "death" state (index row*col).
        self.s = np.arange(self.grid.row * self.grid.col + 1)

        # 9 actions: 4 cardinal, stay, 4 diagonal.
        self.a = np.arange(9)

        # Per-state reward: goal cells get their goal value, others 0.
        # NOTE(review): assumes grid.objects iteration order matches
        # goalVals order; list() makes the indexing valid on Python 3 too.
        self.r = np.zeros(len(self.s))
        goalCoords = list(self.grid.objects.values())
        for i in range(len(self.grid.objects)):
            self.r[self.coordToScalar(goalCoords[i])] = self.goalVals[i]

        # Expand into state-action rewards with step costs: cardinal moves
        # and "stay" cost 1, diagonal moves cost sqrt(2).
        self.r_sa = np.zeros([len(self.s), len(self.a)])
        for i in range(len(self.s)):
            for j in range(len(self.a)):
                cost = 1.0 if j <= 4 else np.sqrt(2)
                nextCoord = self.takeAction(self.scalarToCoord(i), j)
                self.r_sa[i][j] = self.r[self.coordToScalar(nextCoord)] - cost

        self.r = self.r_sa

        # Deterministic transition tensor t[s][a][s'].
        self.t = np.zeros([len(self.s), len(self.a), len(self.s)])
        for state in range(len(self.s)):

            if self.isTerminal(state):
                # From a goal: "stay" self-loops; every other action falls
                # into the absorbing death state.
                for action in self.a:
                    if action == 4:
                        self.t[state][4][state] = 1.0
                    else:
                        self.t[state][action][len(self.s) - 1] = 1.0
                continue

            # All actions (including "stay", which returns the same
            # coordinate) move deterministically to takeAction's result.
            for action in self.a:
                nextState = self.takeAction(self.scalarToCoord(state), action)
                self.t[state][action][self.coordToScalar(nextState)] = 1.0

    def simulate(self, state):
        """
        Runs the solver for the MDP, conducts value iteration, extracts policy,
        then runs simulation of problem.

        Returns the accumulated path length: 1 per cardinal step,
        sqrt(2) per diagonal step.

        NOTE: Be sure to run value iteration (solve values for states) and to
        extract some policy (fill in policy vector) before running simulation
        """
        # Run simulation using policy until terminal condition met
        count = 0
        while not self.isTerminal(state):

            # Determine which policy to use (non-deterministic): normalize
            # the policy row into a probability distribution.
            policy = self.policy[np.where(self.s == state)[0][0]]
            p_policy = self.policy[np.where(self.s == state)[0][0]] / \
                self.policy[np.where(self.s == state)[0][0]].sum()

            # Sample a policy value, then pick uniformly among the actions
            # that achieve it.
            stateIndex = np.where(self.s == state)[0][0]
            policyChoice = np.random.choice(policy, p=p_policy)
            actionIndex = np.random.choice(np.array(np.where(self.policy[state][:] == policyChoice)).ravel())

            # Step cost: indices 0-3 are cardinal moves (cost 1); the rest
            # cost sqrt(2). NOTE(review): this charges sqrt(2) for action 4
            # ("stay"), unlike r_sa which charges 1 -- confirm intended.
            if actionIndex <= 3:
                count += 1
            else:
                count += np.sqrt(2)

            # Take an action, move to next state
            nextState = self.takeAction(self.scalarToCoord(int(stateIndex)), int(actionIndex))
            nextState = self.coordToScalar(nextState)

            state = int(nextState)

        return count
| mit |
kdaily/cloudbiolinux | installed_files/ipython_config.py | 15 | 14156 | # Configuration file for ipython.
c = get_config()
c.InteractiveShell.autoindent = True
c.InteractiveShell.colors = 'Linux'
c.InteractiveShell.confirm_exit = False
c.AliasManager.user_aliases = [
('ll', 'ls -l'),
('lt', 'ls -ltr'),
]
#------------------------------------------------------------------------------
# InteractiveShellApp configuration
#------------------------------------------------------------------------------
# A Mixin for applications that start InteractiveShell instances.
#
# Provides configurables for loading extensions and executing files as part of
# configuring a Shell environment.
#
# Provides init_extensions() and init_code() methods, to be called after
# init_shell(), which must be implemented by subclasses.
# Execute the given command string.
# c.InteractiveShellApp.code_to_run = ''
# lines of code to run at IPython startup.
# c.InteractiveShellApp.exec_lines = []
# If true, an 'import *' is done from numpy and pylab, when using pylab
# c.InteractiveShellApp.pylab_import_all = True
# A list of dotted module names of IPython extensions to load.
# c.InteractiveShellApp.extensions = []
# dotted module name of an IPython extension to load.
# c.InteractiveShellApp.extra_extension = ''
# List of files to run at IPython startup.
# c.InteractiveShellApp.exec_files = []
# A file to be run
# c.InteractiveShellApp.file_to_run = ''
#------------------------------------------------------------------------------
# TerminalIPythonApp configuration
#------------------------------------------------------------------------------
# TerminalIPythonApp will inherit config from: BaseIPythonApplication,
# Application, InteractiveShellApp
# Execute the given command string.
# c.TerminalIPythonApp.code_to_run = ''
# The IPython profile to use.
# c.TerminalIPythonApp.profile = u'default'
# Set the log level by value or name.
# c.TerminalIPythonApp.log_level = 30
# lines of code to run at IPython startup.
# c.TerminalIPythonApp.exec_lines = []
# Enable GUI event loop integration ('qt', 'wx', 'gtk', 'glut', 'pyglet').
# c.TerminalIPythonApp.gui = None
# Pre-load matplotlib and numpy for interactive use, selecting a particular
# matplotlib backend and loop integration.
# c.TerminalIPythonApp.pylab = None
# Suppress warning messages about legacy config files
# c.TerminalIPythonApp.ignore_old_config = False
# Create a massive crash report when IPython enconters what may be an internal
# error. The default is to append a short message to the usual traceback
# c.TerminalIPythonApp.verbose_crash = False
# If a command or file is given via the command-line, e.g. 'ipython foo.py
# c.TerminalIPythonApp.force_interact = False
# If true, an 'import *' is done from numpy and pylab, when using pylab
# c.TerminalIPythonApp.pylab_import_all = True
# The name of the IPython directory. This directory is used for logging
# configuration (through profiles), history storage, etc. The default is usually
# $HOME/.ipython. This options can also be specified through the environment
# variable IPYTHON_DIR.
# c.TerminalIPythonApp.ipython_dir = u'/home/ubuntu/.ipython'
# Whether to display a banner upon starting IPython.
# c.TerminalIPythonApp.display_banner = True
# Start IPython quickly by skipping the loading of config files.
# c.TerminalIPythonApp.quick = False
# A list of dotted module names of IPython extensions to load.
# c.TerminalIPythonApp.extensions = []
# Whether to install the default config files into the profile dir. If a new
# profile is being created, and IPython contains config files for that profile,
# then they will be staged into the new directory. Otherwise, default config
# files will be automatically generated.
# c.TerminalIPythonApp.copy_config_files = False
# dotted module name of an IPython extension to load.
# c.TerminalIPythonApp.extra_extension = ''
# List of files to run at IPython startup.
# c.TerminalIPythonApp.exec_files = []
# Whether to overwrite existing config files when copying
# c.TerminalIPythonApp.overwrite = False
# A file to be run
# c.TerminalIPythonApp.file_to_run = ''
#------------------------------------------------------------------------------
# TerminalInteractiveShell configuration
#------------------------------------------------------------------------------
# TerminalInteractiveShell will inherit config from: InteractiveShell
# auto editing of files with syntax errors.
# c.TerminalInteractiveShell.autoedit_syntax = False
# Use colors for displaying information about objects. Because this information
# is passed through a pager (like 'less'), and some pagers get confused with
# color codes, this capability can be turned off.
# c.TerminalInteractiveShell.color_info = True
#
# c.TerminalInteractiveShell.history_length = 10000
# Don't call post-execute functions that have failed in the past.
# c.TerminalInteractiveShell.disable_failing_post_execute = False
# Show rewritten input, e.g. for autocall.
# c.TerminalInteractiveShell.show_rewritten_input = True
# Set the color scheme (NoColor, Linux, or LightBG).
# c.TerminalInteractiveShell.colors = 'LightBG'
# Autoindent IPython code entered interactively.
# c.TerminalInteractiveShell.autoindent = True
#
# c.TerminalInteractiveShell.separate_in = '\n'
# Deprecated, use PromptManager.in2_template
# c.TerminalInteractiveShell.prompt_in2 = ' .\\D.: '
#
# c.TerminalInteractiveShell.separate_out = ''
# Deprecated, use PromptManager.in_template
# c.TerminalInteractiveShell.prompt_in1 = 'In [\\#]: '
# Enable deep (recursive) reloading by default. IPython can use the deep_reload
# module which reloads changes in modules recursively (it replaces the reload()
# function, so you don't need to change anything to use it). deep_reload()
# forces a full reload of modules whose code may have changed, which the default
# reload() function does not. When deep_reload is off, IPython will use the
# normal reload(), but deep_reload will still be available as dreload().
# c.TerminalInteractiveShell.deep_reload = False
# Make IPython automatically call any callable object even if you didn't type
# explicit parentheses. For example, 'str 43' becomes 'str(43)' automatically.
# The value can be '0' to disable the feature, '1' for 'smart' autocall, where
# it is not applied if there are no more arguments on the line, and '2' for
# 'full' autocall, where all callable objects are automatically called (even if
# no arguments are present).
# c.TerminalInteractiveShell.autocall = 0
# Number of lines of your screen, used to control printing of very long strings.
# Strings longer than this number of lines will be sent through a pager instead
# of directly printed. The default value for this is 0, which means IPython
# will auto-detect your screen size every time it needs to print certain
# potentially long strings (this doesn't change the behavior of the 'print'
# keyword, it's only triggered internally). If for some reason this isn't
# working well (it needs curses support), specify it yourself. Otherwise don't
# change the default.
# c.TerminalInteractiveShell.screen_length = 0
# Set the editor used by IPython (default to $EDITOR/vi/notepad).
# c.TerminalInteractiveShell.editor = 'vi'
# Deprecated, use PromptManager.justify
# c.TerminalInteractiveShell.prompts_pad_left = True
# The part of the banner to be printed before the profile
# c.TerminalInteractiveShell.banner1 = 'Python 2.7.1 (r271:86832, Jun 25 2011, 05:09:01) \nType "copyright", "credits" or "license" for more information.\n\nIPython 0.12 -- An enhanced Interactive Python.\n? -> Introduction and overview of IPython\'s features.\n%quickref -> Quick reference.\nhelp -> Python\'s own help system.\nobject? -> Details about \'object\', use \'object??\' for extra details.\n'
#
# c.TerminalInteractiveShell.readline_parse_and_bind = ['tab: complete', '"\\C-l": clear-screen', 'set show-all-if-ambiguous on', '"\\C-o": tab-insert', '"\\C-r": reverse-search-history', '"\\C-s": forward-search-history', '"\\C-p": history-search-backward', '"\\C-n": history-search-forward', '"\\e[A": history-search-backward', '"\\e[B": history-search-forward', '"\\C-k": kill-line', '"\\C-u": unix-line-discard']
# The part of the banner to be printed after the profile
# c.TerminalInteractiveShell.banner2 = ''
#
# c.TerminalInteractiveShell.separate_out2 = ''
#
# c.TerminalInteractiveShell.wildcards_case_sensitive = True
#
# c.TerminalInteractiveShell.debug = False
# Set to confirm when you try to exit IPython with an EOF (Control-D in Unix,
# Control-Z/Enter in Windows). By typing 'exit' or 'quit', you can force a
# direct exit without any confirmation.
# c.TerminalInteractiveShell.confirm_exit = True
#
# c.TerminalInteractiveShell.ipython_dir = ''
#
# c.TerminalInteractiveShell.readline_remove_delims = '-/~'
# Start logging to the default log file.
# c.TerminalInteractiveShell.logstart = False
# The name of the logfile to use.
# c.TerminalInteractiveShell.logfile = ''
# The shell program to be used for paging.
# c.TerminalInteractiveShell.pager = 'less'
# Enable magic commands to be called without the leading %.
# c.TerminalInteractiveShell.automagic = True
# Save multi-line entries as one entry in readline history
# c.TerminalInteractiveShell.multiline_history = True
#
# c.TerminalInteractiveShell.readline_use = True
# Start logging to the given file in append mode.
# c.TerminalInteractiveShell.logappend = ''
#
# c.TerminalInteractiveShell.xmode = 'Context'
#
# c.TerminalInteractiveShell.quiet = False
# Enable auto setting the terminal title.
# c.TerminalInteractiveShell.term_title = False
#
# c.TerminalInteractiveShell.object_info_string_level = 0
# Deprecated, use PromptManager.out_template
# c.TerminalInteractiveShell.prompt_out = 'Out[\\#]: '
# Set the size of the output cache. The default is 1000, you can change it
# permanently in your config file. Setting it to 0 completely disables the
# caching system, and the minimum value accepted is 20 (if you provide a value
# less than 20, it is reset to 0 and a warning is issued). This limit is
# defined because otherwise you'll spend more time re-flushing a too small cache
# than working
# c.TerminalInteractiveShell.cache_size = 1000
# Automatically call the pdb debugger after every exception.
# c.TerminalInteractiveShell.pdb = False
#------------------------------------------------------------------------------
# PromptManager configuration
#------------------------------------------------------------------------------
# This is the primary interface for producing IPython's prompts.
# Output prompt. '\#' will be transformed to the prompt number
# c.PromptManager.out_template = 'Out[\\#]: '
# Continuation prompt.
# c.PromptManager.in2_template = ' .\\D.: '
# If True (default), each prompt will be right-aligned with the preceding one.
# c.PromptManager.justify = True
# Input prompt. '\#' will be transformed to the prompt number
# c.PromptManager.in_template = 'In [\\#]: '
#
# c.PromptManager.color_scheme = 'Linux'
#------------------------------------------------------------------------------
# ProfileDir configuration
#------------------------------------------------------------------------------
# An object to manage the profile directory and its resources.
#
# The profile directory is used by all IPython applications, to manage
# configuration, logging and security.
#
# This object knows how to find, create and manage these directories. This
# should be used by any code that wants to handle profiles.
# Set the profile location directly. This overrides the logic used by the
# `profile` option.
# c.ProfileDir.location = u''
#------------------------------------------------------------------------------
# PlainTextFormatter configuration
#------------------------------------------------------------------------------
# The default pretty-printer.
#
# This uses :mod:`IPython.external.pretty` to compute the format data of the
# object. If the object cannot be pretty printed, :func:`repr` is used. See the
# documentation of :mod:`IPython.external.pretty` for details on how to write
# pretty printers. Here is a simple example::
#
# def dtype_pprinter(obj, p, cycle):
# if cycle:
# return p.text('dtype(...)')
# if hasattr(obj, 'fields'):
# if obj.fields is None:
# p.text(repr(obj))
# else:
# p.begin_group(7, 'dtype([')
# for i, field in enumerate(obj.descr):
# if i > 0:
# p.text(',')
# p.breakable()
# p.pretty(field)
# p.end_group(7, '])')
# PlainTextFormatter will inherit config from: BaseFormatter
#
# c.PlainTextFormatter.type_printers = {}
#
# c.PlainTextFormatter.newline = '\n'
#
# c.PlainTextFormatter.float_precision = ''
#
# c.PlainTextFormatter.verbose = False
#
# c.PlainTextFormatter.deferred_printers = {}
#
# c.PlainTextFormatter.pprint = True
#
# c.PlainTextFormatter.max_width = 79
#
# c.PlainTextFormatter.singleton_printers = {}
#------------------------------------------------------------------------------
# IPCompleter configuration
#------------------------------------------------------------------------------
# Extension of the completer class with IPython-specific features
# IPCompleter will inherit config from: Completer
# Instruct the completer to omit private method names
#
# Specifically, when completing on ``object.<tab>``.
#
# When 2 [default]: all names that start with '_' will be excluded.
#
# When 1: all 'magic' names (``__foo__``) will be excluded.
#
# When 0: nothing will be excluded.
# c.IPCompleter.omit__names = 2
# Whether to merge completion results into a single list
#
# If False, only the completion results from the first non-empty completer will
# be returned.
# c.IPCompleter.merge_completions = True
# Activate greedy completion
#
# This will enable completion on elements of lists, results of function calls,
# etc., but can be unsafe because the code is actually evaluated on TAB.
# c.IPCompleter.greedy = False
| mit |
IndraVikas/scikit-learn | examples/datasets/plot_random_multilabel_dataset.py | 93 | 3460 | """
==============================================
Plot randomly generated multilabel dataset
==============================================
This illustrates the `datasets.make_multilabel_classification` dataset
generator. Each sample consists of counts of two features (up to 50 in
total), which are differently distributed in each of two classes.
Points are labeled as follows, where Y means the class is present:
===== ===== ===== ======
1 2 3 Color
===== ===== ===== ======
Y N N Red
N Y N Blue
N N Y Yellow
Y Y N Purple
Y N Y Orange
Y Y N Green
Y Y Y Brown
===== ===== ===== ======
A star marks the expected sample for each class; its size reflects the
probability of selecting that class label.
The left and right examples highlight the ``n_labels`` parameter:
more of the samples in the right plot have 2 or 3 labels.
Note that this two-dimensional example is very degenerate:
generally the number of features would be much greater than the
"document length", while here we have much larger documents than vocabulary.
Similarly, with ``n_classes > n_features``, it is much less likely that a
feature distinguishes a particular class.
"""
from __future__ import print_function
import numpy as np
import matplotlib.pyplot as plt
from sklearn.datasets import make_multilabel_classification as make_ml_clf
print(__doc__)
COLORS = np.array(['!',
'#FF3333', # red
'#0198E1', # blue
'#BF5FFF', # purple
'#FCD116', # yellow
'#FF7216', # orange
'#4DBD33', # green
'#87421F' # brown
])
# Use same random seed for multiple calls to make_multilabel_classification to
# ensure same distributions
RANDOM_SEED = np.random.randint(2 ** 10)
def plot_2d(ax, n_labels=1, n_classes=3, length=50):
X, Y, p_c, p_w_c = make_ml_clf(n_samples=150, n_features=2,
n_classes=n_classes, n_labels=n_labels,
length=length, allow_unlabeled=False,
return_indicator=True,
return_distributions=True,
random_state=RANDOM_SEED)
ax.scatter(X[:, 0], X[:, 1], color=COLORS.take((Y * [1, 2, 4]
).sum(axis=1)),
marker='.')
ax.scatter(p_w_c[0] * length, p_w_c[1] * length,
marker='*', linewidth=.5, edgecolor='black',
s=20 + 1500 * p_c ** 2,
color=COLORS.take([1, 2, 4]))
ax.set_xlabel('Feature 0 count')
return p_c, p_w_c
_, (ax1, ax2) = plt.subplots(1, 2, sharex='row', sharey='row', figsize=(8, 4))
plt.subplots_adjust(bottom=.15)
p_c, p_w_c = plot_2d(ax1, n_labels=1)
ax1.set_title('n_labels=1, length=50')
ax1.set_ylabel('Feature 1 count')
plot_2d(ax2, n_labels=3)
ax2.set_title('n_labels=3, length=50')
ax2.set_xlim(left=0, auto=True)
ax2.set_ylim(bottom=0, auto=True)
plt.show()
print('The data was generated from (random_state=%d):' % RANDOM_SEED)
print('Class', 'P(C)', 'P(w0|C)', 'P(w1|C)', sep='\t')
for k, p, p_w in zip(['red', 'blue', 'yellow'], p_c, p_w_c.T):
print('%s\t%0.2f\t%0.2f\t%0.2f' % (k, p, p_w[0], p_w[1]))
| bsd-3-clause |
thomasahle/numberlink | gen/mitm.py | 1 | 6461 | import random
from collections import Counter, defaultdict
import itertools
# starter altid (0,0) -> (0,1)
# Sti har formen [2, l, r]*, da man kan forlænge med 2, gå til venstre eller gå til højre.
T, L, R = range(3)
class Path:
def __init__(self, steps):
self.steps = steps
def xys(self, dx=0, dy=1):
""" Yields all positions on path """
x, y = 0, 0
yield (x, y)
for step in self.steps:
x, y = x + dx, y + dy
yield (x, y)
if step == L:
dx, dy = -dy, dx
if step == R:
dx, dy = dy, -dx
elif step == T:
x, y = x + dx, y + dy
yield (x, y)
def test(self):
""" Tests path is non-overlapping. """
ps = list(self.xys())
return len(set(ps)) == len(ps)
def test_loop(self):
""" Tests path is non-overlapping, except for first and last. """
ps = list(self.xys())
seen = set(ps)
return len(ps) == len(seen) or len(ps) == len(seen) + 1 and ps[0] == ps[-1]
def winding(self):
return self.steps.count(R) - self.steps.count(L)
def __repr__(self):
""" Path to string """
return ''.join({T: '2', R: 'R', L: 'L'}[x] for x in self.steps)
def show(self):
import matplotlib.pyplot as plt
xs, ys = zip(*self.xys())
plt.plot(xs, ys)
plt.axis('scaled')
plt.show()
def unrotate(x, y, dx, dy):
""" Inverse rotate x, y by (dx,dy), where dx,dy=0,1 means 0 degrees.
Basically rotate(dx,dy, dx,dy) = (0, 1). """
while (dx, dy) != (0, 1):
x, y, dx, dy = -y, x, -dy, dx
return x, y
class Mitm:
def __init__(self, lr_price, t_price):
self.lr_price = lr_price
self.t_price = t_price
self.inv = defaultdict(list)
self.list = []
def prepare(self, budget):
dx0, dy0 = 0, 1
for path, (x, y, dx, dy) in self._good_paths(0, 0, dx0, dy0, budget):
self.list.append((path, x, y, dx, dy))
self.inv[x, y, dx, dy].append(path)
def rand_path(self, xn, yn, dxn, dyn):
""" Returns a path, starting at (0,0) with dx,dy = (0,1)
and ending at (xn,yn) with direction (dxn, dyn) """
while True:
path, x, y, dx, dy = random.choice(self.list)
path2s = self._lookup(dx, dy, xn - x, yn - y, dxn, dyn)
if path2s:
path2 = random.choice(path2s)
joined = Path(path + path2)
if joined.test():
return joined
def rand_path2(self, xn, yn, dxn, dyn):
""" Like rand_path, but uses a combination of a fresh random walk and
the lookup table. This allows for even longer paths. """
seen = set()
path = []
while True:
seen.clear()
del path[:]
x, y, dx, dy = 0, 0, 0, 1
seen.add((x, y))
for _ in range(2 * (abs(xn) + abs(yn))):
# We sample with weights proportional to what they are in _good_paths()
step, = random.choices(
[L, R, T], [1 / self.lr_price, 1 / self.lr_price, 2 / self.t_price])
path.append(step)
x, y = x + dx, y + dy
if (x, y) in seen:
break
seen.add((x, y))
if step == L:
dx, dy = -dy, dx
if step == R:
dx, dy = dy, -dx
elif step == T:
x, y = x + dx, y + dy
if (x, y) in seen:
break
seen.add((x, y))
if (x, y) == (xn, yn):
return Path(path)
ends = self._lookup(dx, dy, xn - x, yn - y, dxn, dyn)
if ends:
return Path(tuple(path) + random.choice(ends))
def rand_loop(self, clock=0):
""" Set clock = 1 for clockwise, -1 for anti clockwise. 0 for don't care. """
while True:
# The list only contains 0,1 starting directions
path, x, y, dx, dy = random.choice(self.list)
# Look for paths ending with the same direction
path2s = self._lookup(dx, dy, -x, -y, 0, 1)
if path2s:
path2 = random.choice(path2s)
joined = Path(path + path2)
# A clockwise path has 4 R's more than L's.
if clock and joined.winding() != clock * 4:
continue
if joined.test_loop():
return joined
def _good_paths(self, x, y, dx, dy, budget, seen=None):
if seen is None:
seen = set()
if budget >= 0:
yield (), (x, y, dx, dy)
if budget <= 0:
return
seen.add((x, y)) # Remember cleaning this up (A)
x1, y1 = x + dx, y + dy
if (x1, y1) not in seen:
for path, end in self._good_paths(
x1, y1, -dy, dx, budget - self.lr_price, seen):
yield (L,) + path, end
for path, end in self._good_paths(
x1, y1, dy, -dx, budget - self.lr_price, seen):
yield (R,) + path, end
seen.add((x1, y1)) # Remember cleaning this up (B)
x2, y2 = x1 + dx, y1 + dy
if (x2, y2) not in seen:
for path, end in self._good_paths(
x2, y2, dx, dy, budget - self.t_price, seen):
yield (T,) + path, end
seen.remove((x1, y1)) # Clean up (B)
seen.remove((x, y)) # Clean up (A)
def _lookup(self, dx, dy, xn, yn, dxn, dyn):
""" Return cached paths coming out of (0,0) with direction (dx,dy)
and ending up in (xn,yn) with direction (dxn,dyn). """
# Give me a path, pointing in direction (0,1) such that when I rotate
# it to (dx, dy) it ends at xn, yn in direction dxn, dyn.
xt, yt = unrotate(xn, yn, dx, dy)
dxt, dyt = unrotate(dxn, dyn, dx, dy)
return self.inv[xt, yt, dxt, dyt]
if __name__ == '__main__':
mitm = Mitm(1, 1)
mitm.prepare(10)
for i in range(1):
mitm.rand_loop().show()
for i in range(1, 10):
mitm.rand_path2(i, i, 0, 1).show()
for i in range(1, 10):
mitm.rand_path(i, i, 0, 1).show()
| agpl-3.0 |
MikaelFuresjo/ImundboQuant | src/PreProcess.py | 1 | 169439 | """
MIT License
Copyright (c) [2016] [Mikael Furesjö]
Software = Python Scripts in the [Imundbo Quant v1.6] series
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
IMUNDBO QUANT v1.8 (Preprocessing script)
"""
import numpy as np
import pandas as pd
import time
import datetime
import os
import csv
from datetime import datetime
import sys
from matplotlib.dates import date2num
from config.IQConfig import IQConfig
from gui.console import Console
c = Console(
""" ___
| _ \ _ __ ___ _ __ _ __ ___ ___ ___ ___ ___
| |_) | '__/ _ \ '_ \| '__/ _ \ / __/ _ \/ __/ __|
| __/| | | __/ |_) | | | (_) | (_| __/\__ \__ \
|_| |_| \___| .__/|_| \___/ \___\___||___/___/
|_|
Preprocess ticker data and extract features to use optimizing machine learning system
Ticker data should be inside "instrumentsFolder", one csv file per ticker.
Each file is expected to contain comma-separated values:
["Date", "Open", "High", "Low", "Close", "Volume", "OI"]
""")
# Settings are kept in config/config.json
#########################################
# Example config relevant PreProcess:
# {
# "root": "c:\\data",
# "preProcess": {
# "folder": "PreProcess",
# "instrumentsFolder": "PreProcess\\Instrument_FX30",
# "featuresFile": "MASSIVE_IQ19p_535ft.txt"
# },
# }
config = IQConfig()
preProcessPath = config.preProcess.getFolder()
instrumentsPath = config.preProcess.getInstrumentsFolder()
instruments = os.listdir(instrumentsPath)
numInstruments = len(instruments)
print ('Reading {0} files with ticker data from {1}...'.format(numInstruments, instrumentsPath))
_justOpen = 0.0
_justHigh = 0.0
_justLow = 0.0
_justClose = 0.0
instrument = 0.0
_DateStamp = 0.0
_Return01 = 0.0
Tgt_SCH05to08 = 0.0
Tgt_SCH05to13 = 0.0
Tgt_SCH05to21 = 0.0
Tgt_SCH05to34 = 0.0
Tgt_SCH08to13 = 0.0
Tgt_SCH08to21 = 0.0
Tgt_SCH08to34 = 0.0
Tgt_SCH13to21 = 0.0
Tgt_SCH13to34 = 0.0
Tgt_SCH21to34 = 0.0
_Diff_CtoH14 = 0.0
_Diff_CtoL20 = 0.0
_Perc21_H = 0.0
_Perc21_H80 = 0.0
_EvNo5000 = 0.0
_SMA89vs377 = 0.0
_SMA_L3 = 0.0
_Diff_CtoH5 = 0.0
_Low8_L = 0.0
_Perc8_M50 = 0.0
_diffStochSign34 = 0.0
_SMA8_C = 0.0
_DiffD100_H3 = 0.0
_SMA3vs8 = 0.0
Diff_RL8_RL21 = 0.0
_SMA13_C = 0.0
_Perc200_H = 0.0
_Perc21_L = 0.0
_Diff_CtoC8 = 0.0
_BBU200 = 0.0
_Perc8_H80 = 0.0
_EvNo1000 = 0.0
_DiffU8_C = 0.0
_Perc5_H = 0.0
_Perc89_H = 0.0
_EvNo300 = 0.0
_Diff_CtoH19 = 0.0
_Perc233_H80 = 0.0
_BBU377 = 0.0
_DiffD89_C = 0.0
_Diff_CtoH8 = 0.0
_DiffD34_C = 0.0
_Diff_CtoH17 = 0.0
_diffStochSign377 = 0.0
_Perc55_M50 = 0.0
Diff_RL5_RL21 = 0.0
_SMA34_C = 0.0
_diffStochSign100 = 0.0
_STD21sign = 0.0
_stoch377Level = 0.0
_diffStochSign21 = 0.0
_diffStochSign55 = 0.0
_Diff_CtoH21 = 0.0
_Perc3_M50 = 0.0
_Diff_CtoH6 = 0.0
_Diff_CtoL9 = 0.0
_BBU300 = 0.0
_STD377vsSign = 0.0
_diffStochSign8 = 0.0
_stoch300Level = 0.0
_Diff_CtoH2 = 0.0
_Perc5_M50 = 0.0
_Low89_L = 0.0
_SMA3vs34 = 0.0
numCompleted = 0
numFailed = 0
for index, instrument in enumerate(instruments):
instrumentPath = os.path.join(instrumentsPath, instrument)
instrumentName = os.path.splitext(instrument)[0]
#Date, Open, High, Low, Close, Volume, OI = np.loadtxt(instrumentPath, delimiter=',', unpack=True,converters={ 0: bytespdate2num('%Y-%m-%d')})
print("\nProcessing {0} ({1} / {2})...".format(instrumentName, index+1, numInstruments))
try:
data = pd.read_csv(instrumentPath, parse_dates=['Date'], header=None, names=["Date", "Open", "High", "Low", "Close", "Volume", "OI"])
Date, Open, High, Low, Close, Volume, OI = data.values.T
#Date = date2num(Date) #Todo: Remove this and keep native Datetime
except Exception as e:
numFailed+=1
print("Error processing {0}: {1}. Skipping".format(instrument, e))
continue
Zeros = [1]*len(Date) #making extra data array with "1" for future calculation of MA with linear regression
numDates = len(Date)
print("Iterating {0} rows, using range [400:-35]".format(numDates))
# skip the last 35 days for making space for P/L calculation for at most 34 days
# skip the last 400 days for making space to calculate indicatiors that need 377 past days
for x in range(400, numDates-35):
### START First part - calculate on how high the Risk/Reward Ratio is for future move in 1,2,3,5,8,13,21 days
try:
# Without splitting columns (... = data.values.T above) we could iterate actual rows in the pandas dataframe and use for example row["Date"] instead
dt = Date[x]
_DateStamp = dt.strftime('%Y-%m-%d')
_dateYear = float(dt.year) #Why float?
_dateMonthOfYear = float(dt.month) #Why float?
if _dateYear > 1966 and (_dateMonthOfYear >0 or _dateMonthOfYear >0 or _dateMonthOfYear >0 ):
try:
try:
_Diff_CpLf = (np.amin(Low[x:x+6])-Close[x])/Close[x]
_Diff_CpHf = (np.amax(High[x:x+6])-Close[x])/Close[x]
_CpHf_Less_CpLf = _Diff_CpHf - _Diff_CpLf
_ABSofDiff_CpLf = abs(_Diff_CpLf)
_ABSofDiff_CpHf = abs(_Diff_CpHf)
_KeyValueLong = _ABSofDiff_CpHf/_CpHf_Less_CpLf
_KeyValueShort = _ABSofDiff_CpLf/_CpHf_Less_CpLf
_MiniSharp_05 = np.round(_KeyValueLong - _KeyValueShort,6)
except Exception as e:
pass
try:
_Diff_CpLf = (np.amin(Low[x:x+7])-Close[x])/Close[x]
_Diff_CpHf = (np.amax(High[x:x+7])-Close[x])/Close[x]
_CpHf_Less_CpLf = _Diff_CpHf - _Diff_CpLf
_ABSofDiff_CpLf = abs(_Diff_CpLf)
_ABSofDiff_CpHf = abs(_Diff_CpHf)
_KeyValueLong = _ABSofDiff_CpHf/_CpHf_Less_CpLf
_KeyValueShort = _ABSofDiff_CpLf/_CpHf_Less_CpLf
_MiniSharp_06 = np.round(_KeyValueLong - _KeyValueShort,6)
except Exception as e:
pass
try:
_Diff_CpLf = (np.amin(Low[x:x+8])-Close[x])/Close[x]
_Diff_CpHf = (np.amax(High[x:x+8])-Close[x])/Close[x]
_CpHf_Less_CpLf = _Diff_CpHf - _Diff_CpLf
_ABSofDiff_CpLf = abs(_Diff_CpLf)
_ABSofDiff_CpHf = abs(_Diff_CpHf)
_KeyValueLong = _ABSofDiff_CpHf/_CpHf_Less_CpLf
_KeyValueShort = _ABSofDiff_CpLf/_CpHf_Less_CpLf
_MiniSharp_07 = np.round(_KeyValueLong - _KeyValueShort,6)
except Exception as e:
pass
try:
_Diff_CpLf = (np.amin(Low[x:x+9])-Close[x])/Close[x]
_Diff_CpHf = (np.amax(High[x:x+9])-Close[x])/Close[x]
_CpHf_Less_CpLf = _Diff_CpHf - _Diff_CpLf
_ABSofDiff_CpLf = abs(_Diff_CpLf)
_ABSofDiff_CpHf = abs(_Diff_CpHf)
_KeyValueLong = _ABSofDiff_CpHf/_CpHf_Less_CpLf
_KeyValueShort = _ABSofDiff_CpLf/_CpHf_Less_CpLf
_MiniSharp_08 = np.round(_KeyValueLong - _KeyValueShort,6)
except Exception as e:
pass
try:
_Diff_CpLf = (np.amin(Low[x:x+10])-Close[x])/Close[x]
_Diff_CpHf = (np.amax(High[x:x+10])-Close[x])/Close[x]
_CpHf_Less_CpLf = _Diff_CpHf - _Diff_CpLf
_ABSofDiff_CpLf = abs(_Diff_CpLf)
_ABSofDiff_CpHf = abs(_Diff_CpHf)
_KeyValueLong = _ABSofDiff_CpHf/_CpHf_Less_CpLf
_KeyValueShort = _ABSofDiff_CpLf/_CpHf_Less_CpLf
_MiniSharp_09 = np.round(_KeyValueLong - _KeyValueShort,6)
except Exception as e:
pass
try:
_Diff_CpLf = (np.amin(Low[x:x+11])-Close[x])/Close[x]
_Diff_CpHf = (np.amax(High[x:x+11])-Close[x])/Close[x]
_CpHf_Less_CpLf = _Diff_CpHf - _Diff_CpLf
_ABSofDiff_CpLf = abs(_Diff_CpLf)
_ABSofDiff_CpHf = abs(_Diff_CpHf)
_KeyValueLong = _ABSofDiff_CpHf/_CpHf_Less_CpLf
_KeyValueShort = _ABSofDiff_CpLf/_CpHf_Less_CpLf
_MiniSharp_10 = np.round(_KeyValueLong - _KeyValueShort,6)
except Exception as e:
pass
try:
_Diff_CpLf = (np.amin(Low[x:x+12])-Close[x])/Close[x]
_Diff_CpHf = (np.amax(High[x:x+12])-Close[x])/Close[x]
_CpHf_Less_CpLf = _Diff_CpHf - _Diff_CpLf
_ABSofDiff_CpLf = abs(_Diff_CpLf)
_ABSofDiff_CpHf = abs(_Diff_CpHf)
_KeyValueLong = _ABSofDiff_CpHf/_CpHf_Less_CpLf
_KeyValueShort = _ABSofDiff_CpLf/_CpHf_Less_CpLf
_MiniSharp_11 = np.round(_KeyValueLong - _KeyValueShort,6)
except Exception as e:
pass
try:
_Diff_CpLf = (np.amin(Low[x:x+13])-Close[x])/Close[x]
_Diff_CpHf = (np.amax(High[x:x+13])-Close[x])/Close[x]
_CpHf_Less_CpLf = _Diff_CpHf - _Diff_CpLf
_ABSofDiff_CpLf = abs(_Diff_CpLf)
_ABSofDiff_CpHf = abs(_Diff_CpHf)
_KeyValueLong = _ABSofDiff_CpHf/_CpHf_Less_CpLf
_KeyValueShort = _ABSofDiff_CpLf/_CpHf_Less_CpLf
_MiniSharp_12 = np.round(_KeyValueLong - _KeyValueShort,6)
except Exception as e:
pass
try:
_Diff_CpLf = (np.amin(Low[x:x+14])-Close[x])/Close[x]
_Diff_CpHf = (np.amax(High[x:x+14])-Close[x])/Close[x]
_CpHf_Less_CpLf = _Diff_CpHf - _Diff_CpLf
_ABSofDiff_CpLf = abs(_Diff_CpLf)
_ABSofDiff_CpHf = abs(_Diff_CpHf)
_KeyValueLong = _ABSofDiff_CpHf/_CpHf_Less_CpLf
_KeyValueShort = _ABSofDiff_CpLf/_CpHf_Less_CpLf
_MiniSharp_13 = np.round(_KeyValueLong - _KeyValueShort,6)
except Exception as e:
pass
try:
_Diff_CpLf = (np.amin(Low[x:x+15])-Close[x])/Close[x]
_Diff_CpHf = (np.amax(High[x:x+15])-Close[x])/Close[x]
_CpHf_Less_CpLf = _Diff_CpHf - _Diff_CpLf
_ABSofDiff_CpLf = abs(_Diff_CpLf)
_ABSofDiff_CpHf = abs(_Diff_CpHf)
_KeyValueLong = _ABSofDiff_CpHf/_CpHf_Less_CpLf
_KeyValueShort = _ABSofDiff_CpLf/_CpHf_Less_CpLf
_MiniSharp_14 = np.round(_KeyValueLong - _KeyValueShort,6)
except Exception as e:
pass
try:
_Diff_CpLf = (np.amin(Low[x:x+16])-Close[x])/Close[x]
_Diff_CpHf = (np.amax(High[x:x+16])-Close[x])/Close[x]
_CpHf_Less_CpLf = _Diff_CpHf - _Diff_CpLf
_ABSofDiff_CpLf = abs(_Diff_CpLf)
_ABSofDiff_CpHf = abs(_Diff_CpHf)
_KeyValueLong = _ABSofDiff_CpHf/_CpHf_Less_CpLf
_KeyValueShort = _ABSofDiff_CpLf/_CpHf_Less_CpLf
_MiniSharp_15 = np.round(_KeyValueLong - _KeyValueShort,6)
except Exception as e:
pass
try:
_Diff_CpLf = (np.amin(Low[x:x+17])-Close[x])/Close[x]
_Diff_CpHf = (np.amax(High[x:x+17])-Close[x])/Close[x]
_CpHf_Less_CpLf = _Diff_CpHf - _Diff_CpLf
_ABSofDiff_CpLf = abs(_Diff_CpLf)
_ABSofDiff_CpHf = abs(_Diff_CpHf)
_KeyValueLong = _ABSofDiff_CpHf/_CpHf_Less_CpLf
_KeyValueShort = _ABSofDiff_CpLf/_CpHf_Less_CpLf
_MiniSharp_16 = np.round(_KeyValueLong - _KeyValueShort,6)
except Exception as e:
pass
try:
_Diff_CpLf = (np.amin(Low[x:x+18])-Close[x])/Close[x]
_Diff_CpHf = (np.amax(High[x:x+18])-Close[x])/Close[x]
_CpHf_Less_CpLf = _Diff_CpHf - _Diff_CpLf
_ABSofDiff_CpLf = abs(_Diff_CpLf)
_ABSofDiff_CpHf = abs(_Diff_CpHf)
_KeyValueLong = _ABSofDiff_CpHf/_CpHf_Less_CpLf
_KeyValueShort = _ABSofDiff_CpLf/_CpHf_Less_CpLf
_MiniSharp_17 = np.round(_KeyValueLong - _KeyValueShort,6)
except Exception as e:
pass
try:
_Diff_CpLf = (np.amin(Low[x:x+19])-Close[x])/Close[x]
_Diff_CpHf = (np.amax(High[x:x+19])-Close[x])/Close[x]
_CpHf_Less_CpLf = _Diff_CpHf - _Diff_CpLf
_ABSofDiff_CpLf = abs(_Diff_CpLf)
_ABSofDiff_CpHf = abs(_Diff_CpHf)
_KeyValueLong = _ABSofDiff_CpHf/_CpHf_Less_CpLf
_KeyValueShort = _ABSofDiff_CpLf/_CpHf_Less_CpLf
_MiniSharp_18 = np.round(_KeyValueLong - _KeyValueShort,6)
except Exception as e:
pass
try:
_Diff_CpLf = (np.amin(Low[x:x+20])-Close[x])/Close[x]
_Diff_CpHf = (np.amax(High[x:x+20])-Close[x])/Close[x]
_CpHf_Less_CpLf = _Diff_CpHf - _Diff_CpLf
_ABSofDiff_CpLf = abs(_Diff_CpLf)
_ABSofDiff_CpHf = abs(_Diff_CpHf)
_KeyValueLong = _ABSofDiff_CpHf/_CpHf_Less_CpLf
_KeyValueShort = _ABSofDiff_CpLf/_CpHf_Less_CpLf
_MiniSharp_19 = np.round(_KeyValueLong - _KeyValueShort,6)
except Exception as e:
pass
try:
_Diff_CpLf = (np.amin(Low[x:x+21])-Close[x])/Close[x]
_Diff_CpHf = (np.amax(High[x:x+21])-Close[x])/Close[x]
_CpHf_Less_CpLf = _Diff_CpHf - _Diff_CpLf
_ABSofDiff_CpLf = abs(_Diff_CpLf)
_ABSofDiff_CpHf = abs(_Diff_CpHf)
_KeyValueLong = _ABSofDiff_CpHf/_CpHf_Less_CpLf
_KeyValueShort = _ABSofDiff_CpLf/_CpHf_Less_CpLf
_MiniSharp_20 = np.round(_KeyValueLong - _KeyValueShort,6)
except Exception as e:
pass
try:
_Diff_CpLf = (np.amin(Low[x:x+22])-Close[x])/Close[x]
_Diff_CpHf = (np.amax(High[x:x+22])-Close[x])/Close[x]
_CpHf_Less_CpLf = _Diff_CpHf - _Diff_CpLf
_ABSofDiff_CpLf = abs(_Diff_CpLf)
_ABSofDiff_CpHf = abs(_Diff_CpHf)
_KeyValueLong = _ABSofDiff_CpHf/_CpHf_Less_CpLf
_KeyValueShort = _ABSofDiff_CpLf/_CpHf_Less_CpLf
_MiniSharp_21 = np.round(_KeyValueLong - _KeyValueShort,6)
except Exception as e:
pass
try:
_Diff_CpLf = (np.amin(Low[x:x+23])-Close[x])/Close[x]
_Diff_CpHf = (np.amax(High[x:x+23])-Close[x])/Close[x]
_CpHf_Less_CpLf = _Diff_CpHf - _Diff_CpLf
_ABSofDiff_CpLf = abs(_Diff_CpLf)
_ABSofDiff_CpHf = abs(_Diff_CpHf)
_KeyValueLong = _ABSofDiff_CpHf/_CpHf_Less_CpLf
_KeyValueShort = _ABSofDiff_CpLf/_CpHf_Less_CpLf
_MiniSharp_22 = np.round(_KeyValueLong - _KeyValueShort,6)
except Exception as e:
pass
try:
_Diff_CpLf = (np.amin(Low[x:x+24])-Close[x])/Close[x]
_Diff_CpHf = (np.amax(High[x:x+24])-Close[x])/Close[x]
_CpHf_Less_CpLf = _Diff_CpHf - _Diff_CpLf
_ABSofDiff_CpLf = abs(_Diff_CpLf)
_ABSofDiff_CpHf = abs(_Diff_CpHf)
_KeyValueLong = _ABSofDiff_CpHf/_CpHf_Less_CpLf
_KeyValueShort = _ABSofDiff_CpLf/_CpHf_Less_CpLf
_MiniSharp_23 = np.round(_KeyValueLong - _KeyValueShort,6)
except Exception as e:
pass
try:
_Diff_CpLf = (np.amin(Low[x:x+25])-Close[x])/Close[x]
_Diff_CpHf = (np.amax(High[x:x+25])-Close[x])/Close[x]
_CpHf_Less_CpLf = _Diff_CpHf - _Diff_CpLf
_ABSofDiff_CpLf = abs(_Diff_CpLf)
_ABSofDiff_CpHf = abs(_Diff_CpHf)
_KeyValueLong = _ABSofDiff_CpHf/_CpHf_Less_CpLf
_KeyValueShort = _ABSofDiff_CpLf/_CpHf_Less_CpLf
_MiniSharp_24 = np.round(_KeyValueLong - _KeyValueShort,6)
except Exception as e:
pass
try:
_Diff_CpLf = (np.amin(Low[x:x+26])-Close[x])/Close[x]
_Diff_CpHf = (np.amax(High[x:x+26])-Close[x])/Close[x]
_CpHf_Less_CpLf = _Diff_CpHf - _Diff_CpLf
_ABSofDiff_CpLf = abs(_Diff_CpLf)
_ABSofDiff_CpHf = abs(_Diff_CpHf)
_KeyValueLong = _ABSofDiff_CpHf/_CpHf_Less_CpLf
_KeyValueShort = _ABSofDiff_CpLf/_CpHf_Less_CpLf
_MiniSharp_25 = np.round(_KeyValueLong - _KeyValueShort,6)
except Exception as e:
pass
try:
_Diff_CpLf = (np.amin(Low[x:x+27])-Close[x])/Close[x]
_Diff_CpHf = (np.amax(High[x:x+27])-Close[x])/Close[x]
_CpHf_Less_CpLf = _Diff_CpHf - _Diff_CpLf
_ABSofDiff_CpLf = abs(_Diff_CpLf)
_ABSofDiff_CpHf = abs(_Diff_CpHf)
_KeyValueLong = _ABSofDiff_CpHf/_CpHf_Less_CpLf
_KeyValueShort = _ABSofDiff_CpLf/_CpHf_Less_CpLf
_MiniSharp_26 = np.round(_KeyValueLong - _KeyValueShort,6)
except Exception as e:
pass
try:
_Diff_CpLf = (np.amin(Low[x:x+28])-Close[x])/Close[x]
_Diff_CpHf = (np.amax(High[x:x+28])-Close[x])/Close[x]
_CpHf_Less_CpLf = _Diff_CpHf - _Diff_CpLf
_ABSofDiff_CpLf = abs(_Diff_CpLf)
_ABSofDiff_CpHf = abs(_Diff_CpHf)
_KeyValueLong = _ABSofDiff_CpHf/_CpHf_Less_CpLf
_KeyValueShort = _ABSofDiff_CpLf/_CpHf_Less_CpLf
_MiniSharp_27 = np.round(_KeyValueLong - _KeyValueShort,6)
except Exception as e:
pass
try:
_Diff_CpLf = (np.amin(Low[x:x+29])-Close[x])/Close[x]
_Diff_CpHf = (np.amax(High[x:x+29])-Close[x])/Close[x]
_CpHf_Less_CpLf = _Diff_CpHf - _Diff_CpLf
_ABSofDiff_CpLf = abs(_Diff_CpLf)
_ABSofDiff_CpHf = abs(_Diff_CpHf)
_KeyValueLong = _ABSofDiff_CpHf/_CpHf_Less_CpLf
_KeyValueShort = _ABSofDiff_CpLf/_CpHf_Less_CpLf
_MiniSharp_28 = np.round(_KeyValueLong - _KeyValueShort,6)
except Exception as e:
pass
try:
_Diff_CpLf = (np.amin(Low[x:x+30])-Close[x])/Close[x]
_Diff_CpHf = (np.amax(High[x:x+30])-Close[x])/Close[x]
_CpHf_Less_CpLf = _Diff_CpHf - _Diff_CpLf
_ABSofDiff_CpLf = abs(_Diff_CpLf)
_ABSofDiff_CpHf = abs(_Diff_CpHf)
_KeyValueLong = _ABSofDiff_CpHf/_CpHf_Less_CpLf
_KeyValueShort = _ABSofDiff_CpLf/_CpHf_Less_CpLf
_MiniSharp_29 = np.round(_KeyValueLong - _KeyValueShort,6)
except Exception as e:
pass
try:
_Diff_CpLf = (np.amin(Low[x:x+31])-Close[x])/Close[x]
_Diff_CpHf = (np.amax(High[x:x+31])-Close[x])/Close[x]
_CpHf_Less_CpLf = _Diff_CpHf - _Diff_CpLf
_ABSofDiff_CpLf = abs(_Diff_CpLf)
_ABSofDiff_CpHf = abs(_Diff_CpHf)
_KeyValueLong = _ABSofDiff_CpHf/_CpHf_Less_CpLf
_KeyValueShort = _ABSofDiff_CpLf/_CpHf_Less_CpLf
_MiniSharp_30 = np.round(_KeyValueLong - _KeyValueShort,6)
except Exception as e:
pass
try:
_Diff_CpLf = (np.amin(Low[x:x+32])-Close[x])/Close[x]
_Diff_CpHf = (np.amax(High[x:x+32])-Close[x])/Close[x]
_CpHf_Less_CpLf = _Diff_CpHf - _Diff_CpLf
_ABSofDiff_CpLf = abs(_Diff_CpLf)
_ABSofDiff_CpHf = abs(_Diff_CpHf)
_KeyValueLong = _ABSofDiff_CpHf/_CpHf_Less_CpLf
_KeyValueShort = _ABSofDiff_CpLf/_CpHf_Less_CpLf
_MiniSharp_31 = np.round(_KeyValueLong - _KeyValueShort,6)
except Exception as e:
pass
try:
_Diff_CpLf = (np.amin(Low[x:x+33])-Close[x])/Close[x]
_Diff_CpHf = (np.amax(High[x:x+33])-Close[x])/Close[x]
_CpHf_Less_CpLf = _Diff_CpHf - _Diff_CpLf
_ABSofDiff_CpLf = abs(_Diff_CpLf)
_ABSofDiff_CpHf = abs(_Diff_CpHf)
_KeyValueLong = _ABSofDiff_CpHf/_CpHf_Less_CpLf
_KeyValueShort = _ABSofDiff_CpLf/_CpHf_Less_CpLf
_MiniSharp_32 = np.round(_KeyValueLong - _KeyValueShort,6)
except Exception as e:
pass
try:
_Diff_CpLf = (np.amin(Low[x:x+34])-Close[x])/Close[x]
_Diff_CpHf = (np.amax(High[x:x+34])-Close[x])/Close[x]
_CpHf_Less_CpLf = _Diff_CpHf - _Diff_CpLf
_ABSofDiff_CpLf = abs(_Diff_CpLf)
_ABSofDiff_CpHf = abs(_Diff_CpHf)
_KeyValueLong = _ABSofDiff_CpHf/_CpHf_Less_CpLf
_KeyValueShort = _ABSofDiff_CpLf/_CpHf_Less_CpLf
_MiniSharp_33 = np.round(_KeyValueLong - _KeyValueShort,6)
except Exception as e:
pass
try:
_Diff_CpLf = (np.amin(Low[x:x+35])-Close[x])/Close[x]
_Diff_CpHf = (np.amax(High[x:x+35])-Close[x])/Close[x]
_CpHf_Less_CpLf = _Diff_CpHf - _Diff_CpLf
_ABSofDiff_CpLf = abs(_Diff_CpLf)
_ABSofDiff_CpHf = abs(_Diff_CpHf)
_KeyValueLong = _ABSofDiff_CpHf/_CpHf_Less_CpLf
_KeyValueShort = _ABSofDiff_CpLf/_CpHf_Less_CpLf
_MiniSharp_34 = np.round(_KeyValueLong - _KeyValueShort,6)
except Exception as e:
pass
_MiniSCH05to08 = (
_MiniSharp_05 +
_MiniSharp_06 +
_MiniSharp_07 +
_MiniSharp_08
)/4
_MiniSCH05to13 = (
_MiniSharp_05 +
_MiniSharp_06 +
_MiniSharp_07 +
_MiniSharp_08 +
_MiniSharp_09 +
_MiniSharp_10 +
_MiniSharp_11 +
_MiniSharp_12 +
_MiniSharp_13
)/9
_MiniSCH05to21 = (
_MiniSharp_05 +
_MiniSharp_06 +
_MiniSharp_07 +
_MiniSharp_08 +
_MiniSharp_09 +
_MiniSharp_10 +
_MiniSharp_11 +
_MiniSharp_12 +
_MiniSharp_13 +
_MiniSharp_14 +
_MiniSharp_15 +
_MiniSharp_16 +
_MiniSharp_17 +
_MiniSharp_18 +
_MiniSharp_19 +
_MiniSharp_20 +
_MiniSharp_21
)/17
_MiniSCH05to34 = (
_MiniSharp_05 +
_MiniSharp_06 +
_MiniSharp_07 +
_MiniSharp_08 +
_MiniSharp_09 +
_MiniSharp_10 +
_MiniSharp_11 +
_MiniSharp_12 +
_MiniSharp_13 +
_MiniSharp_14 +
_MiniSharp_15 +
_MiniSharp_16 +
_MiniSharp_17 +
_MiniSharp_18 +
_MiniSharp_19 +
_MiniSharp_20 +
_MiniSharp_21 +
_MiniSharp_22 +
_MiniSharp_23 +
_MiniSharp_24 +
_MiniSharp_25 +
_MiniSharp_26 +
_MiniSharp_27 +
_MiniSharp_28 +
_MiniSharp_29 +
_MiniSharp_30 +
_MiniSharp_31 +
_MiniSharp_32 +
_MiniSharp_33 +
_MiniSharp_34
)/30
_MiniSCH08to13 = (
_MiniSharp_08 +
_MiniSharp_09 +
_MiniSharp_10 +
_MiniSharp_11 +
_MiniSharp_12 +
_MiniSharp_13
)/6
_MiniSCH08to21 = (
_MiniSharp_08 +
_MiniSharp_09 +
_MiniSharp_10 +
_MiniSharp_11 +
_MiniSharp_12 +
_MiniSharp_13 +
_MiniSharp_14 +
_MiniSharp_15 +
_MiniSharp_16 +
_MiniSharp_17 +
_MiniSharp_18 +
_MiniSharp_19 +
_MiniSharp_20 +
_MiniSharp_21
)/14
_MiniSCH08to34 = (
_MiniSharp_08 +
_MiniSharp_09 +
_MiniSharp_10 +
_MiniSharp_11 +
_MiniSharp_12 +
_MiniSharp_13 +
_MiniSharp_14 +
_MiniSharp_15 +
_MiniSharp_16 +
_MiniSharp_17 +
_MiniSharp_18 +
_MiniSharp_19 +
_MiniSharp_20 +
_MiniSharp_21 +
_MiniSharp_22 +
_MiniSharp_23 +
_MiniSharp_24 +
_MiniSharp_25 +
_MiniSharp_26 +
_MiniSharp_27 +
_MiniSharp_28 +
_MiniSharp_29 +
_MiniSharp_30 +
_MiniSharp_31 +
_MiniSharp_32 +
_MiniSharp_33 +
_MiniSharp_34
)/27
_MiniSCH13to21 = (
_MiniSharp_13 +
_MiniSharp_14 +
_MiniSharp_15 +
_MiniSharp_16 +
_MiniSharp_17 +
_MiniSharp_18 +
_MiniSharp_19 +
_MiniSharp_20 +
_MiniSharp_21
)/9
_MiniSCH13to34 = (
_MiniSharp_13 +
_MiniSharp_14 +
_MiniSharp_15 +
_MiniSharp_16 +
_MiniSharp_17 +
_MiniSharp_18 +
_MiniSharp_19 +
_MiniSharp_20 +
_MiniSharp_21 +
_MiniSharp_22 +
_MiniSharp_23 +
_MiniSharp_24 +
_MiniSharp_25 +
_MiniSharp_26 +
_MiniSharp_27 +
_MiniSharp_28 +
_MiniSharp_29 +
_MiniSharp_30 +
_MiniSharp_31 +
_MiniSharp_32 +
_MiniSharp_33 +
_MiniSharp_34
)/22
_MiniSCH21to34 = (
_MiniSharp_21 +
_MiniSharp_22 +
_MiniSharp_23 +
_MiniSharp_24 +
_MiniSharp_25 +
_MiniSharp_26 +
_MiniSharp_27 +
_MiniSharp_28 +
_MiniSharp_29 +
_MiniSharp_30 +
_MiniSharp_31 +
_MiniSharp_32 +
_MiniSharp_33 +
_MiniSharp_34
)/14
####################################################
SCH_Dxxp5 = 0.7851
SCH_Dxxn5 = -0.7787
SCH_Dxxp4 = 0.6403
SCH_Dxxn4 = -0.6315
SCH_Dxxp3 = 0.4821
SCH_Dxxn3 = -0.4707
SCH_Dxxp2 = 0.3042
SCH_Dxxn2 = -0.2913
SCH_Dxxp1 = 0.1081
SCH_Dxxn1 = -0.0950
if _MiniSCH05to08 > SCH_Dxxp5:###
Tgt_SCH05to08 = 5
elif _MiniSCH05to08 < SCH_Dxxn5:
Tgt_SCH05to08 = -5
elif _MiniSCH05to08 > SCH_Dxxp4:###
Tgt_SCH05to08 = 4
elif _MiniSCH05to08 < SCH_Dxxn4:
Tgt_SCH05to08 = -4
elif _MiniSCH05to08 > SCH_Dxxp3:###
Tgt_SCH05to08 = 3
elif _MiniSCH05to08 < SCH_Dxxn3:
Tgt_SCH05to08 = -3
elif _MiniSCH05to08 > SCH_Dxxp2:###
Tgt_SCH05to08 = 2
elif _MiniSCH05to08 < SCH_Dxxn2:
Tgt_SCH05to08 = -2
elif _MiniSCH05to08 > SCH_Dxxp1:###
Tgt_SCH05to08 = 1
elif _MiniSCH05to08 < SCH_Dxxn1:
Tgt_SCH05to08 = -1
else:
Tgt_SCH05to08 = 0
if _MiniSCH05to13 > SCH_Dxxp5:###
Tgt_SCH05to13 = 5
elif _MiniSCH05to13 < SCH_Dxxn5:
Tgt_SCH05to13 = -5
elif _MiniSCH05to13 > SCH_Dxxp4:###
Tgt_SCH05to13 = 4
elif _MiniSCH05to13 < SCH_Dxxn4:
Tgt_SCH05to13 = -4
elif _MiniSCH05to13 > SCH_Dxxp3:###
Tgt_SCH05to13 = 3
elif _MiniSCH05to13 < SCH_Dxxn3:
Tgt_SCH05to13 = -3
elif _MiniSCH05to13 > SCH_Dxxp2:###
Tgt_SCH05to13 = 2
elif _MiniSCH05to13 < SCH_Dxxn2:
Tgt_SCH05to13 = -2
elif _MiniSCH05to13 > SCH_Dxxp1:###
Tgt_SCH05to13 = 1
elif _MiniSCH05to13 < SCH_Dxxn1:
Tgt_SCH05to13 = -1
else:
Tgt_SCH05to13 = 0
if _MiniSCH05to21 > SCH_Dxxp5:###
Tgt_SCH05to21 = 5
elif _MiniSCH05to21 < SCH_Dxxn5:
Tgt_SCH05to21 = -5
elif _MiniSCH05to21 > SCH_Dxxp4:###
Tgt_SCH05to21 = 4
elif _MiniSCH05to21 < SCH_Dxxn4:
Tgt_SCH05to21 = -4
elif _MiniSCH05to21 > SCH_Dxxp3:###
Tgt_SCH05to21 = 3
elif _MiniSCH05to21 < SCH_Dxxn3:
Tgt_SCH05to21 = -3
elif _MiniSCH05to21 > SCH_Dxxp2:###
Tgt_SCH05to21 = 2
elif _MiniSCH05to21 < SCH_Dxxn2:
Tgt_SCH05to21 = -2
elif _MiniSCH05to21 > SCH_Dxxp1:###
Tgt_SCH05to21 = 1
elif _MiniSCH05to21 < SCH_Dxxn1:
Tgt_SCH05to21 = -1
else:
Tgt_SCH05to21 = 0
if _MiniSCH05to34 > SCH_Dxxp5:###
Tgt_SCH05to34 = 5
elif _MiniSCH05to34 < SCH_Dxxn5:
Tgt_SCH05to34 = -5
elif _MiniSCH05to34 > SCH_Dxxp4:###
Tgt_SCH05to34 = 4
elif _MiniSCH05to34 < SCH_Dxxn4:
Tgt_SCH05to34 = -4
elif _MiniSCH05to34 > SCH_Dxxp3:###
Tgt_SCH05to34 = 3
elif _MiniSCH05to34 < SCH_Dxxn3:
Tgt_SCH05to34 = -3
elif _MiniSCH05to34 > SCH_Dxxp2:###
Tgt_SCH05to34 = 2
elif _MiniSCH05to34 < SCH_Dxxn2:
Tgt_SCH05to34 = -2
elif _MiniSCH05to34 > SCH_Dxxp1:###
Tgt_SCH05to34 = 1
elif _MiniSCH05to34 < SCH_Dxxn1:
Tgt_SCH05to34 = -1
else:
Tgt_SCH05to34 = 0
if _MiniSCH08to13 > SCH_Dxxp5:###
Tgt_SCH08to13 = 5
elif _MiniSCH08to13 < SCH_Dxxn5:
Tgt_SCH08to13 = -5
elif _MiniSCH08to13 > SCH_Dxxp4:###
Tgt_SCH08to13 = 4
elif _MiniSCH08to13 < SCH_Dxxn4:
Tgt_SCH08to13 = -4
elif _MiniSCH08to13 > SCH_Dxxp3:###
Tgt_SCH08to13 = 3
elif _MiniSCH08to13 < SCH_Dxxn3:
Tgt_SCH08to13 = -3
elif _MiniSCH08to13 > SCH_Dxxp2:###
Tgt_SCH08to13 = 2
elif _MiniSCH08to13 < SCH_Dxxn2:
Tgt_SCH08to13 = -2
elif _MiniSCH08to13 > SCH_Dxxp1:###
Tgt_SCH08to13 = 1
elif _MiniSCH08to13 < SCH_Dxxn1:
Tgt_SCH08to13 = -1
else:
Tgt_SCH08to13 = 0
if _MiniSCH08to21 > SCH_Dxxp5:###
Tgt_SCH08to21 = 5
elif _MiniSCH08to21 < SCH_Dxxn5:
Tgt_SCH08to21 = -5
elif _MiniSCH08to21 > SCH_Dxxp4:###
Tgt_SCH08to21 = 4
elif _MiniSCH08to21 < SCH_Dxxn4:
Tgt_SCH08to21 = -4
elif _MiniSCH08to21 > SCH_Dxxp3:###
Tgt_SCH08to21 = 3
elif _MiniSCH08to21 < SCH_Dxxn3:
Tgt_SCH08to21 = -3
elif _MiniSCH08to21 > SCH_Dxxp2:###
Tgt_SCH08to21 = 2
elif _MiniSCH08to21 < SCH_Dxxn2:
Tgt_SCH08to21 = -2
elif _MiniSCH08to21 > SCH_Dxxp1:###
Tgt_SCH08to21 = 1
elif _MiniSCH08to21 < SCH_Dxxn1:
Tgt_SCH08to21 = -1
else:
Tgt_SCH08to21 = 0
if _MiniSCH08to34 > SCH_Dxxp5:###
Tgt_SCH08to34 = 5
elif _MiniSCH08to34 < SCH_Dxxn5:
Tgt_SCH08to34 = -5
elif _MiniSCH08to34 > SCH_Dxxp4:###
Tgt_SCH08to34 = 4
elif _MiniSCH08to34 < SCH_Dxxn4:
Tgt_SCH08to34 = -4
elif _MiniSCH08to34 > SCH_Dxxp3:###
Tgt_SCH08to34 = 3
elif _MiniSCH08to34 < SCH_Dxxn3:
Tgt_SCH08to34 = -3
elif _MiniSCH08to34 > SCH_Dxxp2:###
Tgt_SCH08to34 = 2
elif _MiniSCH08to34 < SCH_Dxxn2:
Tgt_SCH08to34 = -2
elif _MiniSCH08to34 > SCH_Dxxp1:###
Tgt_SCH08to34 = 1
elif _MiniSCH08to34 < SCH_Dxxn1:
Tgt_SCH08to34 = -1
else:
Tgt_SCH08to34 = 0
if _MiniSCH13to21 > SCH_Dxxp5:###
Tgt_SCH13to21 = 5
elif _MiniSCH13to21 < SCH_Dxxn5:
Tgt_SCH13to21 = -5
elif _MiniSCH13to21 > SCH_Dxxp4:###
Tgt_SCH13to21 = 4
elif _MiniSCH13to21 < SCH_Dxxn4:
Tgt_SCH13to21 = -4
elif _MiniSCH13to21 > SCH_Dxxp3:###
Tgt_SCH13to21 = 3
elif _MiniSCH13to21 < SCH_Dxxn3:
Tgt_SCH13to21 = -3
elif _MiniSCH13to21 > SCH_Dxxp2:###
Tgt_SCH13to21 = 2
elif _MiniSCH13to21 < SCH_Dxxn2:
Tgt_SCH13to21 = -2
elif _MiniSCH13to21 > SCH_Dxxp1:###
Tgt_SCH13to21 = 1
elif _MiniSCH13to21 < SCH_Dxxn1:
Tgt_SCH13to21 = -1
else:
Tgt_SCH13to21 = 0
if _MiniSCH13to34 > SCH_Dxxp5:###
Tgt_SCH13to34 = 5
elif _MiniSCH13to34 < SCH_Dxxn5:
Tgt_SCH13to34 = -5
elif _MiniSCH13to34 > SCH_Dxxp4:###
Tgt_SCH13to34 = 4
elif _MiniSCH13to34 < SCH_Dxxn4:
Tgt_SCH13to34 = -4
elif _MiniSCH13to34 > SCH_Dxxp3:###
Tgt_SCH13to34 = 3
elif _MiniSCH13to34 < SCH_Dxxn3:
Tgt_SCH13to34 = -3
elif _MiniSCH13to34 > SCH_Dxxp2:###
Tgt_SCH13to34 = 2
elif _MiniSCH13to34 < SCH_Dxxn2:
Tgt_SCH13to34 = -2
elif _MiniSCH13to34 > SCH_Dxxp1:###
Tgt_SCH13to34 = 1
elif _MiniSCH13to34 < SCH_Dxxn1:
Tgt_SCH13to34 = -1
else:
Tgt_SCH13to34 = 0
if _MiniSCH21to34 > SCH_Dxxp5:###
Tgt_SCH21to34 = 5
elif _MiniSCH21to34 < SCH_Dxxn5:
Tgt_SCH21to34 = -5
elif _MiniSCH21to34 > SCH_Dxxp4:###
Tgt_SCH21to34 = 4
elif _MiniSCH21to34 < SCH_Dxxn4:
Tgt_SCH21to34 = -4
elif _MiniSCH21to34 > SCH_Dxxp3:###
Tgt_SCH21to34 = 3
elif _MiniSCH21to34 < SCH_Dxxn3:
Tgt_SCH21to34 = -3
elif _MiniSCH21to34 > SCH_Dxxp2:###
Tgt_SCH21to34 = 2
elif _MiniSCH21to34 < SCH_Dxxn2:
Tgt_SCH21to34 = -2
elif _MiniSCH21to34 > SCH_Dxxp1:###
Tgt_SCH21to34 = 1
elif _MiniSCH21to34 < SCH_Dxxn1:
Tgt_SCH21to34 = -1
else:
Tgt_SCH21to34 = 0
########################################################
### END second part - compute how big the future move was, using heavy
### averaging to smooth out the result.
### START calculation of the chosen list of FEATURES for the machine-learning
### process.
# NOTE(review): the original comment here claimed these lines get date info
# from a .txt file; they actually just cast the current bar's OHLC values to
# plain floats -- the comment appears stale.
_justOpen = float(Open[x])
_justHigh = float(High[x])
_justLow = float(Low[x])
_justClose = float(Close[x])
def _past_sharp(lookback):
    """Sharpness of the trailing window of ``lookback + 1`` bars, or None.

    Computes the low-side and high-side excursions of the window extremes
    relative to the current close (each normalised by the close), divides the
    absolute value of each by the total high-to-low span, and returns the
    "long share minus short share" difference rounded to 6 decimals.

    Returns None when the computation fails (e.g. not enough history at the
    start of the series, or a zero span causing division by zero), so the
    caller can skip the assignment -- exactly mirroring the behaviour of the
    original 30 copy-pasted try/except-pass blocks.
    """
    try:
        lo_rel = (np.amin(Low[x - lookback:x + 1]) - Close[x]) / Close[x]
        hi_rel = (np.amax(High[x - lookback:x + 1]) - Close[x]) / Close[x]
        span = hi_rel - lo_rel
        # abs(hi)/span is the "long" share, abs(lo)/span the "short" share.
        return np.round(abs(hi_rel) / span - abs(lo_rel) / span, 6)
    except Exception:
        # Failures are swallowed like the original code did; the feature name
        # then keeps whatever value it already had (possibly unset -- the
        # later averaging section will raise NameError in that case, exactly
        # as before).
        return None

# Trailing windows of 6..35 bars map onto features _PastSharp_05.._PastSharp_34.
# Assign only on success to preserve the original skip-on-error semantics.
_v = _past_sharp(6)
if _v is not None: _PastSharp_05 = _v
_v = _past_sharp(7)
if _v is not None: _PastSharp_06 = _v
_v = _past_sharp(8)
if _v is not None: _PastSharp_07 = _v
_v = _past_sharp(9)
if _v is not None: _PastSharp_08 = _v
_v = _past_sharp(10)
if _v is not None: _PastSharp_09 = _v
_v = _past_sharp(11)
if _v is not None: _PastSharp_10 = _v
_v = _past_sharp(12)
if _v is not None: _PastSharp_11 = _v
_v = _past_sharp(13)
if _v is not None: _PastSharp_12 = _v
_v = _past_sharp(14)
if _v is not None: _PastSharp_13 = _v
_v = _past_sharp(15)
if _v is not None: _PastSharp_14 = _v
_v = _past_sharp(16)
if _v is not None: _PastSharp_15 = _v
_v = _past_sharp(17)
if _v is not None: _PastSharp_16 = _v
_v = _past_sharp(18)
if _v is not None: _PastSharp_17 = _v
_v = _past_sharp(19)
if _v is not None: _PastSharp_18 = _v
_v = _past_sharp(20)
if _v is not None: _PastSharp_19 = _v
_v = _past_sharp(21)
if _v is not None: _PastSharp_20 = _v
_v = _past_sharp(22)
if _v is not None: _PastSharp_21 = _v
_v = _past_sharp(23)
if _v is not None: _PastSharp_22 = _v
_v = _past_sharp(24)
if _v is not None: _PastSharp_23 = _v
_v = _past_sharp(25)
if _v is not None: _PastSharp_24 = _v
_v = _past_sharp(26)
if _v is not None: _PastSharp_25 = _v
_v = _past_sharp(27)
if _v is not None: _PastSharp_26 = _v
_v = _past_sharp(28)
if _v is not None: _PastSharp_27 = _v
_v = _past_sharp(29)
if _v is not None: _PastSharp_28 = _v
_v = _past_sharp(30)
if _v is not None: _PastSharp_29 = _v
_v = _past_sharp(31)
if _v is not None: _PastSharp_30 = _v
_v = _past_sharp(32)
if _v is not None: _PastSharp_31 = _v
_v = _past_sharp(33)
if _v is not None: _PastSharp_32 = _v
_v = _past_sharp(34)
if _v is not None: _PastSharp_33 = _v
_v = _past_sharp(35)
if _v is not None: _PastSharp_34 = _v
# Ordered list of the per-window sharpness features; index i holds
# _PastSharp_(i+5), i.e. index 0 <-> suffix "05", index 29 <-> suffix "34".
_sharp_seq = [
    _PastSharp_05, _PastSharp_06, _PastSharp_07, _PastSharp_08,
    _PastSharp_09, _PastSharp_10, _PastSharp_11, _PastSharp_12,
    _PastSharp_13, _PastSharp_14, _PastSharp_15, _PastSharp_16,
    _PastSharp_17, _PastSharp_18, _PastSharp_19, _PastSharp_20,
    _PastSharp_21, _PastSharp_22, _PastSharp_23, _PastSharp_24,
    _PastSharp_25, _PastSharp_26, _PastSharp_27, _PastSharp_28,
    _PastSharp_29, _PastSharp_30, _PastSharp_31, _PastSharp_32,
    _PastSharp_33, _PastSharp_34,
]

def _sharp_mean(first, last):
    """Arithmetic mean of _PastSharp_<first>.._PastSharp_<last> inclusive.

    Sums left-to-right over the same operands and divides by the same count
    as the original hand-written chains, so the result is unchanged.
    """
    vals = _sharp_seq[first - 5:last - 4]
    return sum(vals) / len(vals)

# Smoothed "past sharpness" averaged over Fibonacci-style horizon bands.
_PastSCH05to08 = _sharp_mean(5, 8)
_PastSCH05to13 = _sharp_mean(5, 13)
_PastSCH05to21 = _sharp_mean(5, 21)
_PastSCH05to34 = _sharp_mean(5, 34)
_PastSCH08to13 = _sharp_mean(8, 13)
_PastSCH08to21 = _sharp_mean(8, 21)
_PastSCH08to34 = _sharp_mean(8, 34)
_PastSCH13to21 = _sharp_mean(13, 21)
_PastSCH13to34 = _sharp_mean(13, 34)
_PastSCH21to34 = _sharp_mean(21, 34)
# NOTE(review): the original header said "date related Features", but the
# block below builds close-vs-past-price ratio features; the actual date
# features appear further down.
# Relative gap between the current close and the High of each of the last
# 0..25 bars.
_Diff_CtoH = np.round((Close[x]-High[x])/High[x],3)
_Diff_CtoH1 = np.round((Close[x]-High[x-1])/High[x-1],3)
_Diff_CtoH2 = np.round((Close[x]-High[x-2])/High[x-2],3)
_Diff_CtoH3 = np.round((Close[x]-High[x-3])/High[x-3],3)
_Diff_CtoH4 = np.round((Close[x]-High[x-4])/High[x-4],3)
_Diff_CtoH5 = np.round((Close[x]-High[x-5])/High[x-5],3)
_Diff_CtoH6 = np.round((Close[x]-High[x-6])/High[x-6],3)
_Diff_CtoH7 = np.round((Close[x]-High[x-7])/High[x-7],3)
_Diff_CtoH8 = np.round((Close[x]-High[x-8])/High[x-8],3)
_Diff_CtoH9 = np.round((Close[x]-High[x-9])/High[x-9],3)
_Diff_CtoH10 = np.round((Close[x]-High[x-10])/High[x-10],3)
_Diff_CtoH11 = np.round((Close[x]-High[x-11])/High[x-11],3)
_Diff_CtoH12 = np.round((Close[x]-High[x-12])/High[x-12],3)
_Diff_CtoH13 = np.round((Close[x]-High[x-13])/High[x-13],3)
_Diff_CtoH14 = np.round((Close[x]-High[x-14])/High[x-14],3)
_Diff_CtoH15 = np.round((Close[x]-High[x-15])/High[x-15],3)
_Diff_CtoH16 = np.round((Close[x]-High[x-16])/High[x-16],3)
_Diff_CtoH17 = np.round((Close[x]-High[x-17])/High[x-17],3)
_Diff_CtoH18 = np.round((Close[x]-High[x-18])/High[x-18],3)
_Diff_CtoH19 = np.round((Close[x]-High[x-19])/High[x-19],3)
_Diff_CtoH20 = np.round((Close[x]-High[x-20])/High[x-20],3)
_Diff_CtoH21 = np.round((Close[x]-High[x-21])/High[x-21],3)
_Diff_CtoH22 = np.round((Close[x]-High[x-22])/High[x-22],3)
_Diff_CtoH23 = np.round((Close[x]-High[x-23])/High[x-23],3)
_Diff_CtoH24 = np.round((Close[x]-High[x-24])/High[x-24],3)
_Diff_CtoH25 = np.round((Close[x]-High[x-25])/High[x-25],3)
# Relative gap between the current close and the Low of each of the last
# 0..25 bars.
_Diff_CtoL = np.round((Close[x]-Low[x])/Low[x],3)
_Diff_CtoL1 = np.round((Close[x]-Low[x-1])/Low[x-1],3)
_Diff_CtoL2 = np.round((Close[x]-Low[x-2])/Low[x-2],3)
_Diff_CtoL3 = np.round((Close[x]-Low[x-3])/Low[x-3],3)
_Diff_CtoL4 = np.round((Close[x]-Low[x-4])/Low[x-4],3)
_Diff_CtoL5 = np.round((Close[x]-Low[x-5])/Low[x-5],3)
_Diff_CtoL6 = np.round((Close[x]-Low[x-6])/Low[x-6],3)
_Diff_CtoL7 = np.round((Close[x]-Low[x-7])/Low[x-7],3)
_Diff_CtoL8 = np.round((Close[x]-Low[x-8])/Low[x-8],3)
_Diff_CtoL9 = np.round((Close[x]-Low[x-9])/Low[x-9],3)
_Diff_CtoL10 = np.round((Close[x]-Low[x-10])/Low[x-10],3)
_Diff_CtoL11 = np.round((Close[x]-Low[x-11])/Low[x-11],3)
_Diff_CtoL12 = np.round((Close[x]-Low[x-12])/Low[x-12],3)
_Diff_CtoL13 = np.round((Close[x]-Low[x-13])/Low[x-13],3)
_Diff_CtoL14 = np.round((Close[x]-Low[x-14])/Low[x-14],3)
_Diff_CtoL15 = np.round((Close[x]-Low[x-15])/Low[x-15],3)
_Diff_CtoL16 = np.round((Close[x]-Low[x-16])/Low[x-16],3)
_Diff_CtoL17 = np.round((Close[x]-Low[x-17])/Low[x-17],3)
_Diff_CtoL18 = np.round((Close[x]-Low[x-18])/Low[x-18],3)
_Diff_CtoL19 = np.round((Close[x]-Low[x-19])/Low[x-19],3)
_Diff_CtoL20 = np.round((Close[x]-Low[x-20])/Low[x-20],3)
_Diff_CtoL21 = np.round((Close[x]-Low[x-21])/Low[x-21],3)
_Diff_CtoL22 = np.round((Close[x]-Low[x-22])/Low[x-22],3)
_Diff_CtoL23 = np.round((Close[x]-Low[x-23])/Low[x-23],3)
_Diff_CtoL24 = np.round((Close[x]-Low[x-24])/Low[x-24],3)
_Diff_CtoL25 = np.round((Close[x]-Low[x-25])/Low[x-25],3)
# Relative gap between the current close and the Open of each of the last
# 0..9 bars.
_Diff_CtoO = np.round((Close[x]-Open[x])/Open[x],3)
_Diff_CtoO1 = np.round((Close[x]-Open[x-1])/Open[x-1],3)
_Diff_CtoO2 = np.round((Close[x]-Open[x-2])/Open[x-2],3)
_Diff_CtoO3 = np.round((Close[x]-Open[x-3])/Open[x-3],3)
_Diff_CtoO4 = np.round((Close[x]-Open[x-4])/Open[x-4],3)
_Diff_CtoO5 = np.round((Close[x]-Open[x-5])/Open[x-5],3)
_Diff_CtoO6 = np.round((Close[x]-Open[x-6])/Open[x-6],3)
_Diff_CtoO7 = np.round((Close[x]-Open[x-7])/Open[x-7],3)
_Diff_CtoO8 = np.round((Close[x]-Open[x-8])/Open[x-8],3)
_Diff_CtoO9 = np.round((Close[x]-Open[x-9])/Open[x-9],3)
# Relative gap between the current close and the Close of each of the last
# 1..9 bars (i.e. simple lagged returns).
_Diff_CtoC1 = np.round((Close[x]-Close[x-1])/Close[x-1],3)
_Diff_CtoC2 = np.round((Close[x]-Close[x-2])/Close[x-2],3)
_Diff_CtoC3 = np.round((Close[x]-Close[x-3])/Close[x-3],3)
_Diff_CtoC4 = np.round((Close[x]-Close[x-4])/Close[x-4],3)
_Diff_CtoC5 = np.round((Close[x]-Close[x-5])/Close[x-5],3)
_Diff_CtoC6 = np.round((Close[x]-Close[x-6])/Close[x-6],3)
_Diff_CtoC7 = np.round((Close[x]-Close[x-7])/Close[x-7],3)
_Diff_CtoC8 = np.round((Close[x]-Close[x-8])/Close[x-8],3)
_Diff_CtoC9 = np.round((Close[x]-Close[x-9])/Close[x-9],3)
# Simple moving averages of High and Low over the last 5 bars.
# NOTE(review): the "3" suffix is misleading -- the window is 5 bars
# (x-4..x, divided by 5); confirm whether the name or the window is intended.
_SMA_H3 = np.round(np.sum(High[x-4:x+1])/5,4)
_SMA_L3 = np.round(np.sum(Low[x-4:x+1])/5,4)

def _bollinger_feats(win):
    """Bollinger-band features for a ``win``-bar window of closes.

    Returns a 6-tuple:
      (upper band, lower band,
       close-vs-upper gap, SMA_L3-vs-upper gap,
       close-vs-lower gap, SMA_H3-vs-lower gap)
    using exactly the same rounding as the original inline code
    (np.round(mid, 3) +/- round(2*std, 3), gaps rounded to 3 dp).
    """
    seg = Close[x + 1 - win:x + 1]
    mid = np.round(np.sum(seg) / win, 3)
    half_width = round(np.std(seg) * 2, 3)
    upper = mid + half_width
    lower = mid - half_width
    return (upper, lower,
            np.round((Close[x] - upper) / upper, 3),
            np.round((_SMA_L3 - upper) / upper, 3),
            np.round((Close[x] - lower) / lower, 3),
            np.round((_SMA_H3 - lower) / lower, 3))

_BBU3, _BBD3, _DiffU3_C, _DiffU3_L3, _DiffD3_C, _DiffD3_H3 = _bollinger_feats(3)
_BBU5, _BBD5, _DiffU5_C, _DiffU5_L3, _DiffD5_C, _DiffD5_H3 = _bollinger_feats(5)
_BBU8, _BBD8, _DiffU8_C, _DiffU8_L3, _DiffD8_C, _DiffD8_H3 = _bollinger_feats(8)
_BBU13, _BBD13, _DiffU13_C, _DiffU13_L3, _DiffD13_C, _DiffD13_H3 = _bollinger_feats(13)
_BBU21, _BBD21, _DiffU21_C, _DiffU21_L3, _DiffD21_C, _DiffD21_H3 = _bollinger_feats(21)
_BBU34, _BBD34, _DiffU34_C, _DiffU34_L3, _DiffD34_C, _DiffD34_H3 = _bollinger_feats(34)
_BBU55, _BBD55, _DiffU55_C, _DiffU55_L3, _DiffD55_C, _DiffD55_H3 = _bollinger_feats(55)
_BBU89, _BBD89, _DiffU89_C, _DiffU89_L3, _DiffD89_C, _DiffD89_H3 = _bollinger_feats(89)
_BBU100, _BBD100, _DiffU100_C, _DiffU100_L3, _DiffD100_C, _DiffD100_H3 = _bollinger_feats(100)
_BBU144, _BBD144, _DiffU144_C, _DiffU144_L3, _DiffD144_C, _DiffD144_H3 = _bollinger_feats(144)
_BBU200, _BBD200, _DiffU200_C, _DiffU200_L3, _DiffD200_C, _DiffD200_H3 = _bollinger_feats(200)
_BBU233, _BBD233, _DiffU233_C, _DiffU233_L3, _DiffD233_C, _DiffD233_H3 = _bollinger_feats(233)
_BBU300, _BBD300, _DiffU300_C, _DiffU300_L3, _DiffD300_C, _DiffD300_H3 = _bollinger_feats(300)
_BBU377, _BBD377, _DiffU377_C, _DiffU377_L3, _DiffD377_C, _DiffD377_H3 = _bollinger_feats(377)
# Repeated from start of loop
# Calendar features derived from `dt` (presumably the current bar's
# datetime, defined earlier in the loop -- TODO confirm).
_dateDayOfYear = float(dt.strftime('%j'))    # day of year, 001-366
_dateWeekOfYear = float(dt.strftime('%W'))   # week of year, Monday-based
_dateMonthOfYear = float(dt.strftime('%m'))  # month, 01-12
_dateDayOfMonth = float(dt.strftime('%d'))   # day of month, 01-31
_dateDayOfWeek = float(dt.strftime('%w'))    # weekday, 0=Sunday .. 6=Saturday
# Relative distance of the current close from fixed round-number price
# levels.  Round ("even") numbers often act as support/resistance, so the
# sign and size of the gap is used as a feature.
_EvNo5 = np.round((Close[x]-5)/5,3)
_EvNo10 = np.round((Close[x]-10)/10,3)
_EvNo20 = np.round((Close[x]-20)/20,3)
_EvNo30 = np.round((Close[x]-30)/30,3)
_EvNo40 = np.round((Close[x]-40)/40,3)
_EvNo50 = np.round((Close[x]-50)/50,3)
_EvNo60 = np.round((Close[x]-60)/60,3)
_EvNo70 = np.round((Close[x]-70)/70,3)
_EvNo80 = np.round((Close[x]-80)/80,3)
_EvNo90 = np.round((Close[x]-90)/90,3)
_EvNo100 = np.round((Close[x]-100)/100,3)
_EvNo200 = np.round((Close[x]-200)/200,3)
_EvNo300 = np.round((Close[x]-300)/300,3)
_EvNo400 = np.round((Close[x]-400)/400,3)
_EvNo500 = np.round((Close[x]-500)/500,3)
_EvNo600 = np.round((Close[x]-600)/600,3)
_EvNo700 = np.round((Close[x]-700)/700,3)
_EvNo800 = np.round((Close[x]-800)/800,3)
_EvNo900 = np.round((Close[x]-900)/900,3)
_EvNo1000 = np.round((Close[x]-1000)/1000,3)
_EvNo2000 = np.round((Close[x]-2000)/2000,3)
_EvNo3000 = np.round((Close[x]-3000)/3000,3)
_EvNo4000 = np.round((Close[x]-4000)/4000,3)
_EvNo5000 = np.round((Close[x]-5000)/5000,3)
# BUGFIX: this line previously used 1000, duplicating _EvNo1000; it now
# measures the gap to the 10000 level as the name implies.
_EvNo10000 = np.round((Close[x]-10000)/10000,3)
def _pct_gap(series, win, q):
    """Relative gap between the current close and the ``q``-th percentile of
    ``series`` over the trailing ``win + 1`` bars, rounded to 3 decimals."""
    ref = np.percentile(series[x - win:x + 1], q)
    return np.round((Close[x] - ref) / ref, 3)

# Trailing window lengths used for every percentile feature family
# (Fibonacci numbers plus the round 100/200 levels).
_pct_wins = (3, 5, 8, 13, 21, 34, 55, 89, 100, 144, 200, 233, 377)

# Close vs the 95th percentile of recent Highs.
(_Perc3_H, _Perc5_H, _Perc8_H, _Perc13_H, _Perc21_H, _Perc34_H, _Perc55_H,
 _Perc89_H, _Perc100_H, _Perc144_H, _Perc200_H, _Perc233_H,
 _Perc377_H) = [_pct_gap(High, w, 95) for w in _pct_wins]

# Close vs the 5th percentile of recent Lows.
(_Perc3_L, _Perc5_L, _Perc8_L, _Perc13_L, _Perc21_L, _Perc34_L, _Perc55_L,
 _Perc89_L, _Perc100_L, _Perc144_L, _Perc200_L, _Perc233_L,
 _Perc377_L) = [_pct_gap(Low, w, 5) for w in _pct_wins]

# Close vs the 80th percentile of recent Highs.
(_Perc3_H80, _Perc5_H80, _Perc8_H80, _Perc13_H80, _Perc21_H80, _Perc34_H80,
 _Perc55_H80, _Perc89_H80, _Perc100_H80, _Perc144_H80, _Perc200_H80,
 _Perc233_H80, _Perc377_H80) = [_pct_gap(High, w, 80) for w in _pct_wins]

# Close vs the 20th percentile of recent Lows.
(_Perc3_L20, _Perc5_L20, _Perc8_L20, _Perc13_L20, _Perc21_L20, _Perc34_L20,
 _Perc55_L20, _Perc89_L20, _Perc100_L20, _Perc144_L20, _Perc200_L20,
 _Perc233_L20, _Perc377_L20) = [_pct_gap(Low, w, 50 - 30) for w in _pct_wins]

# Close vs the 50th percentile (median) of recent Highs.
# NOTE(review): named "M50" (midpoint?) but computed from High only, not a
# High/Low midpoint -- confirm intent.
(_Perc3_M50, _Perc5_M50, _Perc8_M50, _Perc13_M50, _Perc21_M50, _Perc34_M50,
 _Perc55_M50, _Perc89_M50, _Perc100_M50, _Perc144_M50, _Perc200_M50,
 _Perc233_M50, _Perc377_M50) = [_pct_gap(High, w, 50) for w in _pct_wins]
# --- Rolling "regression level" features --------------------------------------
# np.polyfit(..., 0) fits a degree-0 polynomial, so RLn is simply the (rounded)
# arithmetic mean of Close over the trailing window; `Zeros` is only a dummy
# abscissa (presumably an array of zeros defined earlier — TODO confirm).
# The Diff_* features below are relative spreads between those means.
# NOTE(review): np.mean(Close[x-n:x+1]) would compute the same value far more
# cheaply than polyfit — flagged, not changed, to keep rounding bit-identical.
RL3 = np.round(np.polyfit(Zeros[x-3:x+1], Close[x-3:x+1], 0),3)[0]
RL5 = np.round(np.polyfit(Zeros[x-5:x+1], Close[x-5:x+1], 0),3)[0]
RL8 = np.round(np.polyfit(Zeros[x-8:x+1], Close[x-8:x+1], 0),3)[0]
RL13 = np.round(np.polyfit(Zeros[x-13:x+1], Close[x-13:x+1], 0),3)[0]
RL21 = np.round(np.polyfit(Zeros[x-21:x+1], Close[x-21:x+1], 0),3)[0]
RL34 = np.round(np.polyfit(Zeros[x-34:x+1], Close[x-34:x+1], 0),3)[0]
RL55 = np.round(np.polyfit(Zeros[x-55:x+1], Close[x-55:x+1], 0),3)[0]
RL89 = np.round(np.polyfit(Zeros[x-89:x+1], Close[x-89:x+1], 0),3)[0]
RL100 = np.round(np.polyfit(Zeros[x-100:x+1], Close[x-100:x+1], 0),3)[0]
RL144 = np.round(np.polyfit(Zeros[x-144:x+1], Close[x-144:x+1], 0),3)[0]
RL200 = np.round(np.polyfit(Zeros[x-200:x+1], Close[x-200:x+1], 0),3)[0]
RL233 = np.round(np.polyfit(Zeros[x-233:x+1], Close[x-233:x+1], 0),3)[0]
RL377 = np.round(np.polyfit(Zeros[x-377:x+1], Close[x-377:x+1], 0),3)[0]
# Relative distance of Close to each window mean.
Diff_C_RL3 = np.round((Close[x]-RL3)/RL3,3)
Diff_C_RL5 = np.round((Close[x]-RL5)/RL5,3)
Diff_C_RL8 = np.round((Close[x]-RL8)/RL8,3)
Diff_C_RL13 = np.round((Close[x]-RL13)/RL13,3)
Diff_C_RL21 = np.round((Close[x]-RL21)/RL21,3)
Diff_C_RL34 = np.round((Close[x]-RL34)/RL34,3)
Diff_C_RL55 = np.round((Close[x]-RL55)/RL55,3)
Diff_C_RL89 = np.round((Close[x]-RL89)/RL89,3)
Diff_C_RL100 = np.round((Close[x]-RL100)/RL100,3)
Diff_C_RL144 = np.round((Close[x]-RL144)/RL144,3)
Diff_C_RL200 = np.round((Close[x]-RL200)/RL200,3)
Diff_C_RL233 = np.round((Close[x]-RL233)/RL233,3)
Diff_C_RL377 = np.round((Close[x]-RL377)/RL377,3)
# Relative spreads between the short and long window means (fast vs slow).
Diff_RL3_RL5 = np.round((RL3-RL5)/RL5,3)
Diff_RL3_RL8 = np.round((RL3-RL8)/RL8,3)
Diff_RL3_RL13 = np.round((RL3-RL13)/RL13,3)
Diff_RL3_RL21 = np.round((RL3-RL21)/RL21,3)
Diff_RL3_RL34 = np.round((RL3-RL34)/RL34,3)
Diff_RL5_RL8 = np.round((RL5-RL8)/RL8,3)
Diff_RL5_RL13 = np.round((RL5-RL13)/RL13,3)
Diff_RL5_RL21 = np.round((RL5-RL21)/RL21,3)
Diff_RL5_RL34 = np.round((RL5-RL34)/RL34,3)
Diff_RL5_RL55 = np.round((RL5-RL55)/RL55,3)
Diff_RL8_RL13 = np.round((RL8-RL13)/RL13,3)
Diff_RL8_RL21 = np.round((RL8-RL21)/RL21,3)
Diff_RL8_RL34 = np.round((RL8-RL34)/RL34,3)
Diff_RL8_RL55 = np.round((RL8-RL55)/RL55,3)
Diff_RL8_RL89 = np.round((RL8-RL89)/RL89,3)
Diff_RL13_RL21 = np.round((RL13-RL21)/RL21,3)
Diff_RL13_RL34 = np.round((RL13-RL34)/RL34,3)
Diff_RL13_RL55 = np.round((RL13-RL55)/RL55,3)
# NOTE(review): variable name says "RL139" but the value computed is the
# RL13-vs-RL89 spread — almost certainly a typo for Diff_RL13_RL89. Renaming
# would have to be mirrored wherever this feature is written/consumed (the
# CSV writer section), so it is only flagged here.
Diff_RL13_RL139 = np.round((RL13-RL89)/RL89,3)
Diff_RL13_RL100 = np.round((RL13-RL100)/RL100,3)
Diff_RL21_RL34 = np.round((RL21-RL34)/RL34,3)
Diff_RL21_RL55 = np.round((RL21-RL55)/RL55,3)
Diff_RL21_RL89 = np.round((RL21-RL89)/RL89,3)
Diff_RL21_RL100 = np.round((RL21-RL100)/RL100,3)
Diff_RL21_RL144 = np.round((RL21-RL144)/RL144,3)
Diff_RL34_RL55 = np.round((RL34-RL55)/RL55,3)
Diff_RL34_RL89 = np.round((RL34-RL89)/RL89,3)
Diff_RL34_RL100 = np.round((RL34-RL100)/RL100,3)
Diff_RL34_RL144 = np.round((RL34-RL144)/RL144,3)
Diff_RL34_RL200 = np.round((RL34-RL200)/RL200,3)
Diff_RL55_RL89 = np.round((RL55-RL89)/RL89,3)
Diff_RL55_RL100 = np.round((RL55-RL100)/RL100,3)
Diff_RL55_RL144 = np.round((RL55-RL144)/RL144,3)
Diff_RL55_RL200 = np.round((RL55-RL200)/RL200,3)
Diff_RL55_RL233 = np.round((RL55-RL233)/RL233,3)
Diff_RL89_RL100 = np.round((RL89-RL100)/RL100,3)
Diff_RL89_RL144 = np.round((RL89-RL144)/RL144,3)
Diff_RL89_RL200 = np.round((RL89-RL200)/RL200,3)
Diff_RL89_RL233 = np.round((RL89-RL233)/RL233,3)
Diff_RL89_RL377 = np.round((RL89-RL377)/RL377,3)
Diff_RL100_RL144 = np.round((RL100-RL144)/RL144,3)
Diff_RL100_RL200 = np.round((RL100-RL200)/RL200,3)
Diff_RL100_RL233 = np.round((RL100-RL233)/RL233,3)
Diff_RL100_RL377 = np.round((RL100-RL377)/RL377,3)
Diff_RL144_RL200 = np.round((RL144-RL200)/RL200,3)
Diff_RL144_RL233 = np.round((RL144-RL233)/RL233,3)
Diff_RL144_RL377 = np.round((RL144-RL377)/RL377,3)
Diff_RL200_RL233 = np.round((RL200-RL233)/RL233,3)
Diff_RL200_RL377 = np.round((RL200-RL377)/RL377,3)
Diff_RL233_RL377 = np.round((RL233-RL377)/RL377,3)
# --- SMA-distance features ----------------------------------------------------
# Relative distance of Close to a simple moving average, then SMA-vs-SMA
# relative spreads (fast vs slow crossover style features).
# NOTE(review): each slice Close[x-N:x+1] holds N+1 values but the sum is
# divided by N, so these are not true N-bar means (systematic off-by-one).
# The same convention is applied to every window, so the features are at least
# internally consistent — confirm whether this is intentional before "fixing",
# since changing it would shift every downstream model input.
_SMA3_C = np.round((Close[x]-(np.sum(Close[x-3:x+1])/3))/(np.sum(Close[x-3:x+1])/3),3)
_SMA5_C = np.round((Close[x]-(np.sum(Close[x-5:x+1])/5))/(np.sum(Close[x-5:x+1])/5),3)
_SMA8_C = np.round((Close[x]-(np.sum(Close[x-8:x+1])/8))/(np.sum(Close[x-8:x+1])/8),3)
_SMA13_C = np.round((Close[x]-(np.sum(Close[x-13:x+1])/13))/(np.sum(Close[x-13:x+1])/13),3)
_SMA21_C = np.round((Close[x]-(np.sum(Close[x-21:x+1])/21))/(np.sum(Close[x-21:x+1])/21),3)
_SMA34_C = np.round((Close[x]-(np.sum(Close[x-34:x+1])/34))/(np.sum(Close[x-34:x+1])/34),3)
_SMA55_C = np.round((Close[x]-(np.sum(Close[x-55:x+1])/55))/(np.sum(Close[x-55:x+1])/55),3)
_SMA89_C = np.round((Close[x]-(np.sum(Close[x-89:x+1])/89))/(np.sum(Close[x-89:x+1])/89),3)
_SMA144_C = np.round((Close[x]-(np.sum(Close[x-144:x+1])/144))/(np.sum(Close[x-144:x+1])/144),3)
_SMA233_C = np.round((Close[x]-(np.sum(Close[x-233:x+1])/233))/(np.sum(Close[x-233:x+1])/233),3)
_SMA377_C = np.round((Close[x]-(np.sum(Close[x-377:x+1])/377))/(np.sum(Close[x-377:x+1])/377),3)
# Round-number windows (100/200/300) appended after the Fibonacci set.
_SMA100_C = np.round((Close[x]-(np.sum(Close[x-100:x+1])/100))/(np.sum(Close[x-100:x+1])/100),3)
_SMA200_C = np.round((Close[x]-(np.sum(Close[x-200:x+1])/200))/(np.sum(Close[x-200:x+1])/200),3)
_SMA300_C = np.round((Close[x]-(np.sum(Close[x-300:x+1])/300))/(np.sum(Close[x-300:x+1])/300),3)
# Relative spreads between the SMA-distance features themselves.
_SMA3vs5 = np.round((_SMA3_C-_SMA5_C)/_SMA5_C,3)
_SMA3vs8 = np.round((_SMA3_C-_SMA8_C)/_SMA8_C,3)
_SMA3vs13 = np.round((_SMA3_C-_SMA13_C)/_SMA13_C,3)
_SMA3vs21 = np.round((_SMA3_C-_SMA21_C)/_SMA21_C,3)
_SMA3vs34 = np.round((_SMA3_C-_SMA34_C)/_SMA34_C,3)
_SMA5vs8 = np.round((_SMA5_C-_SMA8_C)/_SMA8_C,3)
_SMA5vs13 = np.round((_SMA5_C-_SMA13_C)/_SMA13_C,3)
_SMA5vs21 = np.round((_SMA5_C-_SMA21_C)/_SMA21_C,3)
_SMA5vs34 = np.round((_SMA5_C-_SMA34_C)/_SMA34_C,3)
_SMA5vs55 = np.round((_SMA5_C-_SMA55_C)/_SMA55_C,3)
_SMA8vs13 = np.round((_SMA8_C-_SMA13_C)/_SMA13_C,3)
_SMA8vs21 = np.round((_SMA8_C-_SMA21_C)/_SMA21_C,3)
_SMA8vs34 = np.round((_SMA8_C-_SMA34_C)/_SMA34_C,3)
_SMA8vs55 = np.round((_SMA8_C-_SMA55_C)/_SMA55_C,3)
_SMA8vs89 = np.round((_SMA8_C-_SMA89_C)/_SMA89_C,3)
_SMA13vs21 = np.round((_SMA13_C-_SMA21_C)/_SMA21_C,3)
_SMA13vs34 = np.round((_SMA13_C-_SMA34_C)/_SMA34_C,3)
_SMA13vs55 = np.round((_SMA13_C-_SMA55_C)/_SMA55_C,3)
_SMA13vs89 = np.round((_SMA13_C-_SMA89_C)/_SMA89_C,3)
_SMA13vs144 = np.round((_SMA13_C-_SMA144_C)/_SMA144_C,3)
_SMA21vs34 = np.round((_SMA21_C-_SMA34_C)/_SMA34_C,3)
_SMA21vs55 = np.round((_SMA21_C-_SMA55_C)/_SMA55_C,3)
_SMA21vs89 = np.round((_SMA21_C-_SMA89_C)/_SMA89_C,3)
_SMA21vs144 = np.round((_SMA21_C-_SMA144_C)/_SMA144_C,3)
_SMA21vs233 = np.round((_SMA21_C-_SMA233_C)/_SMA233_C,3)
_SMA34vs55 = np.round((_SMA34_C-_SMA55_C)/_SMA55_C,3)
_SMA34vs89 = np.round((_SMA34_C-_SMA89_C)/_SMA89_C,3)
_SMA34vs144 = np.round((_SMA34_C-_SMA144_C)/_SMA144_C,3)
_SMA34vs233 = np.round((_SMA34_C-_SMA233_C)/_SMA233_C,3)
_SMA34vs377 = np.round((_SMA34_C-_SMA377_C)/_SMA377_C,3)
_SMA55vs89 = np.round((_SMA55_C-_SMA89_C)/_SMA89_C,3)
_SMA55vs144 = np.round((_SMA55_C-_SMA144_C)/_SMA144_C,3)
_SMA55vs233 = np.round((_SMA55_C-_SMA233_C)/_SMA233_C,3)
_SMA55vs377 = np.round((_SMA55_C-_SMA377_C)/_SMA377_C,3)
_SMA89vs144 = np.round((_SMA89_C-_SMA144_C)/_SMA144_C,3)
_SMA89vs233 = np.round((_SMA89_C-_SMA233_C)/_SMA233_C,3)
_SMA89vs377 = np.round((_SMA89_C-_SMA377_C)/_SMA377_C,3)
_SMA144vs233 = np.round((_SMA144_C-_SMA233_C)/_SMA233_C,3)
_SMA144vs377 = np.round((_SMA144_C-_SMA377_C)/_SMA377_C,3)
_SMA233vs377 = np.round((_SMA233_C-_SMA377_C)/_SMA377_C,3)
# --- Rolling volatility features ----------------------------------------------
# Std-dev of Close over a trailing window, normalised by that window's last
# Close. For each period N: the current bar (_STDN_C), the same window lagged
# 1..4 bars (_STDN_C1m.._C4m), their 5-value average ("signal", _STDNsign), and
# the relative gap between the current value and the signal (_STDNvsSign).
# NOTE(review): as elsewhere in this file, the slice Close[x-N:x+1] has N+1
# samples, and np.std is the population std (ddof=0) — confirm both against
# the feature spec.
_STD3_C = np.round(np.std(Close[x-3:x+1])/Close[x],3)
_STD3_C1m = np.round(np.std(Close[x-4:x])/Close[x-1],3)
_STD3_C2m = np.round(np.std(Close[x-5:x-1])/Close[x-2],3)
_STD3_C3m = np.round(np.std(Close[x-6:x-2])/Close[x-3],3)
_STD3_C4m = np.round(np.std(Close[x-7:x-3])/Close[x-4],3)
_STD3sign = np.round((_STD3_C + _STD3_C1m + _STD3_C2m + _STD3_C3m + _STD3_C4m)/5,3)
_STD3vsSign = np.round((_STD3_C-_STD3sign)/_STD3sign,3)
_STD5_C = np.round(np.std(Close[x-5:x+1])/Close[x],3)
_STD5_C1m = np.round(np.std(Close[x-6:x])/Close[x-1],3)
_STD5_C2m = np.round(np.std(Close[x-7:x-1])/Close[x-2],3)
_STD5_C3m = np.round(np.std(Close[x-8:x-2])/Close[x-3],3)
_STD5_C4m = np.round(np.std(Close[x-9:x-3])/Close[x-4],3)
_STD5sign = np.round((_STD5_C + _STD5_C1m + _STD5_C2m + _STD5_C3m + _STD5_C4m)/5,3)
_STD5vsSign = np.round((_STD5_C-_STD5sign)/_STD5sign,3)
_STD8_C = np.round(np.std(Close[x-8:x+1])/Close[x],3)
_STD8_C1m = np.round(np.std(Close[x-9:x])/Close[x-1],3)
_STD8_C2m = np.round(np.std(Close[x-10:x-1])/Close[x-2],3)
_STD8_C3m = np.round(np.std(Close[x-11:x-2])/Close[x-3],3)
_STD8_C4m = np.round(np.std(Close[x-12:x-3])/Close[x-4],3)
_STD8sign = np.round((_STD8_C + _STD8_C1m + _STD8_C2m + _STD8_C3m + _STD8_C4m)/5,3)
_STD8vsSign = np.round((_STD8_C-_STD8sign)/_STD8sign,3)
_STD13_C = np.round(np.std(Close[x-13:x+1])/Close[x],3)
_STD13_C1m = np.round(np.std(Close[x-14:x])/Close[x-1],3)
_STD13_C2m = np.round(np.std(Close[x-15:x-1])/Close[x-2],3)
_STD13_C3m = np.round(np.std(Close[x-16:x-2])/Close[x-3],3)
_STD13_C4m = np.round(np.std(Close[x-17:x-3])/Close[x-4],3)
_STD13sign = np.round((_STD13_C + _STD13_C1m + _STD13_C2m + _STD13_C3m + _STD13_C4m)/5,3)
_STD13vsSign = np.round((_STD13_C-_STD13sign)/_STD13sign,3)
_STD21_C = np.round(np.std(Close[x-21:x+1])/Close[x],3)
_STD21_C1m = np.round(np.std(Close[x-22:x])/Close[x-1],3)
_STD21_C2m = np.round(np.std(Close[x-23:x-1])/Close[x-2],3)
_STD21_C3m = np.round(np.std(Close[x-24:x-2])/Close[x-3],3)
_STD21_C4m = np.round(np.std(Close[x-25:x-3])/Close[x-4],3)
_STD21sign = np.round((_STD21_C + _STD21_C1m + _STD21_C2m + _STD21_C3m + _STD21_C4m)/5,3)
_STD21vsSign = np.round((_STD21_C-_STD21sign)/_STD21sign,3)
_STD34_C = np.round(np.std(Close[x-34:x+1])/Close[x],3)
_STD34_C1m = np.round(np.std(Close[x-35:x])/Close[x-1],3)
_STD34_C2m = np.round(np.std(Close[x-36:x-1])/Close[x-2],3)
_STD34_C3m = np.round(np.std(Close[x-37:x-2])/Close[x-3],3)
_STD34_C4m = np.round(np.std(Close[x-38:x-3])/Close[x-4],3)
_STD34sign = np.round((_STD34_C + _STD34_C1m + _STD34_C2m + _STD34_C3m + _STD34_C4m)/5,3)
_STD34vsSign = np.round((_STD34_C-_STD34sign)/_STD34sign,3)
_STD55_C = np.round(np.std(Close[x-55:x+1])/Close[x],3)
_STD55_C1m = np.round(np.std(Close[x-56:x])/Close[x-1],3)
_STD55_C2m = np.round(np.std(Close[x-57:x-1])/Close[x-2],3)
_STD55_C3m = np.round(np.std(Close[x-58:x-2])/Close[x-3],3)
_STD55_C4m = np.round(np.std(Close[x-59:x-3])/Close[x-4],3)
_STD55sign = np.round((_STD55_C + _STD55_C1m + _STD55_C2m + _STD55_C3m + _STD55_C4m)/5,3)
_STD55vsSign = np.round((_STD55_C-_STD55sign)/_STD55sign,3)
_STD89_C = np.round(np.std(Close[x-89:x+1])/Close[x],3)
_STD89_C1m = np.round(np.std(Close[x-90:x])/Close[x-1],3)
_STD89_C2m = np.round(np.std(Close[x-91:x-1])/Close[x-2],3)
_STD89_C3m = np.round(np.std(Close[x-92:x-2])/Close[x-3],3)
_STD89_C4m = np.round(np.std(Close[x-93:x-3])/Close[x-4],3)
_STD89sign = np.round((_STD89_C + _STD89_C1m + _STD89_C2m + _STD89_C3m + _STD89_C4m)/5,3)
_STD89vsSign = np.round((_STD89_C-_STD89sign)/_STD89sign,3)
_STD144_C = np.round(np.std(Close[x-144:x+1])/Close[x],3)
_STD144_C1m = np.round(np.std(Close[x-145:x])/Close[x-1],3)
_STD144_C2m = np.round(np.std(Close[x-146:x-1])/Close[x-2],3)
_STD144_C3m = np.round(np.std(Close[x-147:x-2])/Close[x-3],3)
_STD144_C4m = np.round(np.std(Close[x-148:x-3])/Close[x-4],3)
_STD144sign = np.round((_STD144_C + _STD144_C1m + _STD144_C2m + _STD144_C3m + _STD144_C4m)/5,3)
_STD144vsSign = np.round((_STD144_C-_STD144sign)/_STD144sign,3)
_STD233_C = np.round(np.std(Close[x-233:x+1])/Close[x],3)
_STD233_C1m = np.round(np.std(Close[x-234:x])/Close[x-1],3)
_STD233_C2m = np.round(np.std(Close[x-235:x-1])/Close[x-2],3)
_STD233_C3m = np.round(np.std(Close[x-236:x-2])/Close[x-3],3)
_STD233_C4m = np.round(np.std(Close[x-237:x-3])/Close[x-4],3)
_STD233sign = np.round((_STD233_C + _STD233_C1m + _STD233_C2m + _STD233_C3m + _STD233_C4m)/5,3)
_STD233vsSign = np.round((_STD233_C-_STD233sign)/_STD233sign,3)
_STD377_C = np.round(np.std(Close[x-377:x+1])/Close[x],3)
_STD377_C1m = np.round(np.std(Close[x-378:x])/Close[x-1],3)
_STD377_C2m = np.round(np.std(Close[x-379:x-1])/Close[x-2],3)
_STD377_C3m = np.round(np.std(Close[x-380:x-2])/Close[x-3],3)
_STD377_C4m = np.round(np.std(Close[x-381:x-3])/Close[x-4],3)
_STD377sign = np.round((_STD377_C + _STD377_C1m + _STD377_C2m + _STD377_C3m + _STD377_C4m)/5,3)
_STD377vsSign = np.round((_STD377_C-_STD377sign)/_STD377sign,3)
# Round-number windows (100/200/300) appended after the Fibonacci set.
_STD100_C = np.round(np.std(Close[x-100:x+1])/Close[x],3)
_STD100_C1m = np.round(np.std(Close[x-101:x])/Close[x-1],3)
_STD100_C2m = np.round(np.std(Close[x-102:x-1])/Close[x-2],3)
_STD100_C3m = np.round(np.std(Close[x-103:x-2])/Close[x-3],3)
_STD100_C4m = np.round(np.std(Close[x-104:x-3])/Close[x-4],3)
_STD100sign = np.round((_STD100_C + _STD100_C1m + _STD100_C2m + _STD100_C3m + _STD100_C4m)/5,3)
_STD100vsSign = np.round((_STD100_C-_STD100sign)/_STD100sign,3)
_STD200_C = np.round(np.std(Close[x-200:x+1])/Close[x],3)
_STD200_C1m = np.round(np.std(Close[x-201:x])/Close[x-1],3)
_STD200_C2m = np.round(np.std(Close[x-202:x-1])/Close[x-2],3)
_STD200_C3m = np.round(np.std(Close[x-203:x-2])/Close[x-3],3)
_STD200_C4m = np.round(np.std(Close[x-204:x-3])/Close[x-4],3)
_STD200sign = np.round((_STD200_C + _STD200_C1m + _STD200_C2m + _STD200_C3m + _STD200_C4m)/5,3)
_STD200vsSign = np.round((_STD200_C-_STD200sign)/_STD200sign,3)
_STD300_C = np.round(np.std(Close[x-300:x+1])/Close[x],3)
_STD300_C1m = np.round(np.std(Close[x-301:x])/Close[x-1],3)
_STD300_C2m = np.round(np.std(Close[x-302:x-1])/Close[x-2],3)
_STD300_C3m = np.round(np.std(Close[x-303:x-2])/Close[x-3],3)
_STD300_C4m = np.round(np.std(Close[x-304:x-3])/Close[x-4],3)
_STD300sign = np.round((_STD300_C + _STD300_C1m + _STD300_C2m + _STD300_C3m + _STD300_C4m)/5,3)
_STD300vsSign = np.round((_STD300_C-_STD300sign)/_STD300sign,3)
# --- Stochastic-oscillator (%K) features --------------------------------------
# %K = (Close - min Low) / (max High - min Low) * 100 over the trailing window,
# for the current bar and 1..4-bar lags, a 5-value "signal" average
# (_sign5StochN), the relative gap to that signal (_diffStochSignN), and a
# -1/0/+1 overbought(>80)/oversold(<20) level flag (_stochNLevel).
_stoch5 = np.round((Close[x]-np.amin(Low[x-5:x+1]))/(np.amax(High[x-5:x+1])-np.amin(Low[x-5:x+1]))*100,3)
_stoch5m1 = np.round((Close[x-1]-np.amin(Low[x-6:x]))/(np.amax(High[x-6:x])-np.amin(Low[x-6:x]))*100,3)
_stoch5m2 = np.round((Close[x-2]-np.amin(Low[x-7:x-1]))/(np.amax(High[x-7:x-1])-np.amin(Low[x-7:x-1]))*100,3)
_stoch5m3 = np.round((Close[x-3]-np.amin(Low[x-8:x-2]))/(np.amax(High[x-8:x-2])-np.amin(Low[x-8:x-2]))*100,3)
_stoch5m4 = np.round((Close[x-4]-np.amin(Low[x-9:x-3]))/(np.amax(High[x-9:x-3])-np.amin(Low[x-9:x-3]))*100,3)
_sign5Stoch5 = np.round((_stoch5+_stoch5m1+_stoch5m2+_stoch5m3+_stoch5m4)/5,3)
_diffStochSign5 = np.round((_stoch5-_sign5Stoch5)/_sign5Stoch5,2)
if _stoch5 > 80:
    _stoch5Level = 1.0
elif _stoch5 < 20:
    _stoch5Level = -1.0
else:
    _stoch5Level = 0.0
# NOTE(review): the 8/14-period section below deviates from the pattern used
# by every other period and looks like a chain of copy/paste bugs:
#   1. _diffStochSign8 mixes _stoch14 with the 8-period signal (expected
#      _stoch8).
#   2. The lagged _stoch14m1.._m4 use shifted Low windows in the numerator but
#      the *unshifted* x-14:x+1 window for both the denominator max and min.
#   3. The 14-period signal is stored into _sign5Stoch8, clobbering the
#      8-period signal computed just above — expected _sign5Stoch14.
#   4. Rounding precision (1 dp signal / 3 dp diff) is swapped relative to
#      the other periods (3 dp signal / 2 dp diff).
# Fixing any of these changes the emitted feature values, so they are flagged
# rather than changed — confirm against the downstream model/training data.
_stoch14 = np.round((Close[x]-np.amin(Low[x-14:x+1]))/(np.amax(High[x-14:x+1])-np.amin(Low[x-14:x+1]))*100,3)
_stoch8 = np.round((Close[x]-np.amin(Low[x-8:x+1]))/(np.amax(High[x-8:x+1])-np.amin(Low[x-8:x+1]))*100,3)
_stoch8m1 = np.round((Close[x-1]-np.amin(Low[x-9:x]))/(np.amax(High[x-9:x])-np.amin(Low[x-9:x]))*100,3)
_stoch8m2 = np.round((Close[x-2]-np.amin(Low[x-10:x-1]))/(np.amax(High[x-10:x-1])-np.amin(Low[x-10:x-1]))*100,3)
_stoch8m3 = np.round((Close[x-3]-np.amin(Low[x-11:x-2]))/(np.amax(High[x-11:x-2])-np.amin(Low[x-11:x-2]))*100,3)
_stoch8m4 = np.round((Close[x-4]-np.amin(Low[x-12:x-3]))/(np.amax(High[x-12:x-3])-np.amin(Low[x-12:x-3]))*100,3)
_sign5Stoch8 = np.round((_stoch8+_stoch8m1+_stoch8m2+_stoch8m3+_stoch8m4)/5,3)
_diffStochSign8 = np.round((_stoch14-_sign5Stoch8)/_sign5Stoch8,2)  # see NOTE(review) item 1 above
if _stoch8 > 80:
    _stoch8Level = 1.0
elif _stoch8 < 20:
    _stoch8Level = -1.0
else:
    _stoch8Level = 0.0
_stoch14m1 = np.round((Close[x-1]-np.amin(Low[x-15:x]))/(np.amax(High[x-14:x+1])-np.amin(Low[x-14:x+1]))*100,3)  # see NOTE(review) item 2
_stoch14m2 = np.round((Close[x-2]-np.amin(Low[x-16:x-1]))/(np.amax(High[x-14:x+1])-np.amin(Low[x-14:x+1]))*100,3)
_stoch14m3 = np.round((Close[x-3]-np.amin(Low[x-17:x-2]))/(np.amax(High[x-14:x+1])-np.amin(Low[x-14:x+1]))*100,3)
_stoch14m4 = np.round((Close[x-4]-np.amin(Low[x-18:x-3]))/(np.amax(High[x-14:x+1])-np.amin(Low[x-14:x+1]))*100,3)
_sign5Stoch8 = np.round((_stoch14+_stoch14m1+_stoch14m2+_stoch14m3+_stoch14m4)/5,1)  # see NOTE(review) items 3 and 4
_diffStochSign14 = np.round((_stoch14-_sign5Stoch8)/_sign5Stoch8,3)
if _stoch14 > 80:
    _stoch14Level = 1.0
elif _stoch14 < 20:
    _stoch14Level = -1.0
else:
    _stoch14Level = 0.0
_stoch21 = np.round((Close[x]-np.amin(Low[x-21:x+1]))/(np.amax(High[x-21:x+1])-np.amin(Low[x-21:x+1]))*100,3)
_stoch21m1 = np.round((Close[x-1]-np.amin(Low[x-22:x]))/(np.amax(High[x-22:x])-np.amin(Low[x-22:x]))*100,3)
_stoch21m2 = np.round((Close[x-2]-np.amin(Low[x-23:x-1]))/(np.amax(High[x-23:x-1])-np.amin(Low[x-23:x-1]))*100,3)
_stoch21m3 = np.round((Close[x-3]-np.amin(Low[x-24:x-2]))/(np.amax(High[x-24:x-2])-np.amin(Low[x-24:x-2]))*100,3)
_stoch21m4 = np.round((Close[x-4]-np.amin(Low[x-25:x-3]))/(np.amax(High[x-25:x-3])-np.amin(Low[x-25:x-3]))*100,3)
_sign5Stoch21 = np.round((_stoch21+_stoch21m1+_stoch21m2+_stoch21m3+_stoch21m4)/5,3)
_diffStochSign21 = np.round((_stoch21-_sign5Stoch21)/_sign5Stoch21,2)
if _stoch21 > 80:
    _stoch21Level = 1.0
elif _stoch21 < 20:
    _stoch21Level = -1.0
else:
    _stoch21Level = 0.0
_stoch34 = np.round((Close[x]-np.amin(Low[x-34:x+1]))/(np.amax(High[x-34:x+1])-np.amin(Low[x-34:x+1]))*100,3)
_stoch34m1 = np.round((Close[x-1]-np.amin(Low[x-35:x]))/(np.amax(High[x-35:x])-np.amin(Low[x-35:x]))*100,3)
_stoch34m2 = np.round((Close[x-2]-np.amin(Low[x-36:x-1]))/(np.amax(High[x-36:x-1])-np.amin(Low[x-36:x-1]))*100,3)
_stoch34m3 = np.round((Close[x-3]-np.amin(Low[x-37:x-2]))/(np.amax(High[x-37:x-2])-np.amin(Low[x-37:x-2]))*100,3)
_stoch34m4 = np.round((Close[x-4]-np.amin(Low[x-38:x-3]))/(np.amax(High[x-38:x-3])-np.amin(Low[x-38:x-3]))*100,3)
_sign5Stoch34 = np.round((_stoch34+_stoch34m1+_stoch34m2+_stoch34m3+_stoch34m4)/5,3)
_diffStochSign34 = np.round((_stoch34-_sign5Stoch34)/_sign5Stoch34,2)
if _stoch34 > 80:
    _stoch34Level = 1.0
elif _stoch34 < 20:
    _stoch34Level = -1.0
else:
    _stoch34Level = 0.0
_stoch55 = np.round((Close[x]-np.amin(Low[x-55:x+1]))/(np.amax(High[x-55:x+1])-np.amin(Low[x-55:x+1]))*100,3)
_stoch55m1 = np.round((Close[x-1]-np.amin(Low[x-56:x]))/(np.amax(High[x-56:x])-np.amin(Low[x-56:x]))*100,3)
_stoch55m2 = np.round((Close[x-2]-np.amin(Low[x-57:x-1]))/(np.amax(High[x-57:x-1])-np.amin(Low[x-57:x-1]))*100,3)
_stoch55m3 = np.round((Close[x-3]-np.amin(Low[x-58:x-2]))/(np.amax(High[x-58:x-2])-np.amin(Low[x-58:x-2]))*100,3)
_stoch55m4 = np.round((Close[x-4]-np.amin(Low[x-59:x-3]))/(np.amax(High[x-59:x-3])-np.amin(Low[x-59:x-3]))*100,3)
_sign5Stoch55 = np.round((_stoch55+_stoch55m1+_stoch55m2+_stoch55m3+_stoch55m4)/5,3)
_diffStochSign55 = np.round((_stoch55-_sign5Stoch55)/_sign5Stoch55,2)
if _stoch55 > 80:
    _stoch55Level = 1.0
elif _stoch55 < 20:
    _stoch55Level = -1.0
else:
    _stoch55Level = 0.0
_stoch89 = np.round((Close[x]-np.amin(Low[x-89:x+1]))/(np.amax(High[x-89:x+1])-np.amin(Low[x-89:x+1]))*100,3)
_stoch89m1 = np.round((Close[x-1]-np.amin(Low[x-90:x]))/(np.amax(High[x-90:x])-np.amin(Low[x-90:x]))*100,3)
_stoch89m2 = np.round((Close[x-2]-np.amin(Low[x-91:x-1]))/(np.amax(High[x-91:x-1])-np.amin(Low[x-91:x-1]))*100,3)
_stoch89m3 = np.round((Close[x-3]-np.amin(Low[x-92:x-2]))/(np.amax(High[x-92:x-2])-np.amin(Low[x-92:x-2]))*100,3)
_stoch89m4 = np.round((Close[x-4]-np.amin(Low[x-93:x-3]))/(np.amax(High[x-93:x-3])-np.amin(Low[x-93:x-3]))*100,3)
_sign5Stoch89 = np.round((_stoch89+_stoch89m1+_stoch89m2+_stoch89m3+_stoch89m4)/5,3)
_diffStochSign89 = np.round((_stoch89-_sign5Stoch89)/_sign5Stoch89,2)
if _stoch89 > 80:
    _stoch89Level = 1.0
elif _stoch89 < 20:
    _stoch89Level = -1.0
else:
    _stoch89Level = 0.0
_stoch144 = np.round((Close[x]-np.amin(Low[x-144:x+1]))/(np.amax(High[x-144:x+1])-np.amin(Low[x-144:x+1]))*100,3)
_stoch144m1 = np.round((Close[x-1]-np.amin(Low[x-145:x]))/(np.amax(High[x-145:x])-np.amin(Low[x-145:x]))*100,3)
_stoch144m2 = np.round((Close[x-2]-np.amin(Low[x-146:x-1]))/(np.amax(High[x-146:x-1])-np.amin(Low[x-146:x-1]))*100,3)
_stoch144m3 = np.round((Close[x-3]-np.amin(Low[x-147:x-2]))/(np.amax(High[x-147:x-2])-np.amin(Low[x-147:x-2]))*100,3)
_stoch144m4 = np.round((Close[x-4]-np.amin(Low[x-148:x-3]))/(np.amax(High[x-148:x-3])-np.amin(Low[x-148:x-3]))*100,3)
_sign5Stoch144 = np.round((_stoch144+_stoch144m1+_stoch144m2+_stoch144m3+_stoch144m4)/5,3)
_diffStochSign144 = np.round((_stoch144-_sign5Stoch144)/_sign5Stoch144,2)
if _stoch144 > 80:
    _stoch144Level = 1.0
elif _stoch144 < 20:
    _stoch144Level = -1.0
else:
    _stoch144Level = 0.0
_stoch233 = np.round((Close[x]-np.amin(Low[x-233:x+1]))/(np.amax(High[x-233:x+1])-np.amin(Low[x-233:x+1]))*100,3)
_stoch233m1 = np.round((Close[x-1]-np.amin(Low[x-234:x]))/(np.amax(High[x-234:x])-np.amin(Low[x-234:x]))*100,3)
_stoch233m2 = np.round((Close[x-2]-np.amin(Low[x-235:x-1]))/(np.amax(High[x-235:x-1])-np.amin(Low[x-235:x-1]))*100,3)
_stoch233m3 = np.round((Close[x-3]-np.amin(Low[x-236:x-2]))/(np.amax(High[x-236:x-2])-np.amin(Low[x-236:x-2]))*100,3)
_stoch233m4 = np.round((Close[x-4]-np.amin(Low[x-237:x-3]))/(np.amax(High[x-237:x-3])-np.amin(Low[x-237:x-3]))*100,3)
_sign5Stoch233 = np.round((_stoch233+_stoch233m1+_stoch233m2+_stoch233m3+_stoch233m4)/5,3)
_diffStochSign233 = np.round((_stoch233-_sign5Stoch233)/_sign5Stoch233,2)
if _stoch233 > 80:
    _stoch233Level = 1.0
elif _stoch233 < 20:
    _stoch233Level = -1.0
else:
    _stoch233Level = 0.0
_stoch377 = np.round((Close[x]-np.amin(Low[x-377:x+1]))/(np.amax(High[x-377:x+1])-np.amin(Low[x-377:x+1]))*100,3)
_stoch377m1 = np.round((Close[x-1]-np.amin(Low[x-378:x]))/(np.amax(High[x-378:x])-np.amin(Low[x-378:x]))*100,3)
_stoch377m2 = np.round((Close[x-2]-np.amin(Low[x-379:x-1]))/(np.amax(High[x-379:x-1])-np.amin(Low[x-379:x-1]))*100,3)
_stoch377m3 = np.round((Close[x-3]-np.amin(Low[x-380:x-2]))/(np.amax(High[x-380:x-2])-np.amin(Low[x-380:x-2]))*100,3)
_stoch377m4 = np.round((Close[x-4]-np.amin(Low[x-381:x-3]))/(np.amax(High[x-381:x-3])-np.amin(Low[x-381:x-3]))*100,3)
_sign5Stoch377 = np.round((_stoch377+_stoch377m1+_stoch377m2+_stoch377m3+_stoch377m4)/5,3)
_diffStochSign377 = np.round((_stoch377-_sign5Stoch377)/_sign5Stoch377,2)
if _stoch377 > 80:
    _stoch377Level = 1.0
elif _stoch377 < 20:
    _stoch377Level = -1.0
else:
    _stoch377Level = 0.0
# Round-number windows (100/200/300) appended after the Fibonacci set.
_stoch100 = np.round((Close[x]-np.amin(Low[x-100:x+1]))/(np.amax(High[x-100:x+1])-np.amin(Low[x-100:x+1]))*100,3)
_stoch100m1 = np.round((Close[x-1]-np.amin(Low[x-101:x]))/(np.amax(High[x-101:x])-np.amin(Low[x-101:x]))*100,3)
_stoch100m2 = np.round((Close[x-2]-np.amin(Low[x-102:x-1]))/(np.amax(High[x-102:x-1])-np.amin(Low[x-102:x-1]))*100,3)
_stoch100m3 = np.round((Close[x-3]-np.amin(Low[x-103:x-2]))/(np.amax(High[x-103:x-2])-np.amin(Low[x-103:x-2]))*100,3)
_stoch100m4 = np.round((Close[x-4]-np.amin(Low[x-104:x-3]))/(np.amax(High[x-104:x-3])-np.amin(Low[x-104:x-3]))*100,3)
_sign5Stoch100 = np.round((_stoch100+_stoch100m1+_stoch100m2+_stoch100m3+_stoch100m4)/5,3)
_diffStochSign100 = np.round((_stoch100-_sign5Stoch100)/_sign5Stoch100,2)
if _stoch100 > 80:
    _stoch100Level = 1.0
elif _stoch100 < 20:
    _stoch100Level = -1.0
else:
    _stoch100Level = 0.0
_stoch200 = np.round((Close[x]-np.amin(Low[x-200:x+1]))/(np.amax(High[x-200:x+1])-np.amin(Low[x-200:x+1]))*100,3)
_stoch200m1 = np.round((Close[x-1]-np.amin(Low[x-201:x]))/(np.amax(High[x-201:x])-np.amin(Low[x-201:x]))*100,3)
_stoch200m2 = np.round((Close[x-2]-np.amin(Low[x-202:x-1]))/(np.amax(High[x-202:x-1])-np.amin(Low[x-202:x-1]))*100,3)
_stoch200m3 = np.round((Close[x-3]-np.amin(Low[x-203:x-2]))/(np.amax(High[x-203:x-2])-np.amin(Low[x-203:x-2]))*100,3)
_stoch200m4 = np.round((Close[x-4]-np.amin(Low[x-204:x-3]))/(np.amax(High[x-204:x-3])-np.amin(Low[x-204:x-3]))*100,3)
_sign5Stoch200 = np.round((_stoch200+_stoch200m1+_stoch200m2+_stoch200m3+_stoch200m4)/5,3)
_diffStochSign200 = np.round((_stoch200-_sign5Stoch200)/_sign5Stoch200,2)
if _stoch200 > 80:
    _stoch200Level = 1.0
elif _stoch200 < 20:
    _stoch200Level = -1.0
else:
    _stoch200Level = 0.0
_stoch300 = np.round((Close[x]-np.amin(Low[x-300:x+1]))/(np.amax(High[x-300:x+1])-np.amin(Low[x-300:x+1]))*100,3)
_stoch300m1 = np.round((Close[x-1]-np.amin(Low[x-301:x]))/(np.amax(High[x-301:x])-np.amin(Low[x-301:x]))*100,3)
_stoch300m2 = np.round((Close[x-2]-np.amin(Low[x-302:x-1]))/(np.amax(High[x-302:x-1])-np.amin(Low[x-302:x-1]))*100,3)
_stoch300m3 = np.round((Close[x-3]-np.amin(Low[x-303:x-2]))/(np.amax(High[x-303:x-2])-np.amin(Low[x-303:x-2]))*100,3)
_stoch300m4 = np.round((Close[x-4]-np.amin(Low[x-304:x-3]))/(np.amax(High[x-304:x-3])-np.amin(Low[x-304:x-3]))*100,3)
_sign5Stoch300 = np.round((_stoch300+_stoch300m1+_stoch300m2+_stoch300m3+_stoch300m4)/5,3)
_diffStochSign300 = np.round((_stoch300-_sign5Stoch300)/_sign5Stoch300,2)
if _stoch300 > 80:
    _stoch300Level = 1.0
elif _stoch300 < 20:
    _stoch300Level = -1.0
else:
    _stoch300Level = 0.0
# --- Distance to rolling extremes ---------------------------------------------
# Relative distance of Close to the rolling minimum Low (_LowN_L, >= 0 when
# price is above the window low) and rolling maximum High (_HighN_H, <= 0 when
# price is below the window high). Unlike the features above, the slice
# x+1-N:x+1 covers exactly N bars.
_Low3_L = np.round((Close[x]-np.amin(Low[x+1-3:x+1]))/np.amin(Low[x+1-3:x+1]),3)
_Low4_L = np.round((Close[x]-np.amin(Low[x+1-4:x+1]))/np.amin(Low[x+1-4:x+1]),3)
_Low5_L = np.round((Close[x]-np.amin(Low[x+1-5:x+1]))/np.amin(Low[x+1-5:x+1]),3)
_Low6_L = np.round((Close[x]-np.amin(Low[x+1-6:x+1]))/np.amin(Low[x+1-6:x+1]),3)
_Low7_L = np.round((Close[x]-np.amin(Low[x+1-7:x+1]))/np.amin(Low[x+1-7:x+1]),3)
_Low8_L = np.round((Close[x]-np.amin(Low[x+1-8:x+1]))/np.amin(Low[x+1-8:x+1]),3)
_Low9_L = np.round((Close[x]-np.amin(Low[x+1-9:x+1]))/np.amin(Low[x+1-9:x+1]),3)
_Low10_L = np.round((Close[x]-np.amin(Low[x+1-10:x+1]))/np.amin(Low[x+1-10:x+1]),3)
_Low11_L = np.round((Close[x]-np.amin(Low[x+1-11:x+1]))/np.amin(Low[x+1-11:x+1]),3)
_Low12_L = np.round((Close[x]-np.amin(Low[x+1-12:x+1]))/np.amin(Low[x+1-12:x+1]),3)
_Low13_L = np.round((Close[x]-np.amin(Low[x+1-13:x+1]))/np.amin(Low[x+1-13:x+1]),3)
_Low14_L = np.round((Close[x]-np.amin(Low[x+1-14:x+1]))/np.amin(Low[x+1-14:x+1]),3)
_Low15_L = np.round((Close[x]-np.amin(Low[x+1-15:x+1]))/np.amin(Low[x+1-15:x+1]),3)
_Low17_L = np.round((Close[x]-np.amin(Low[x+1-17:x+1]))/np.amin(Low[x+1-17:x+1]),3)
_Low19_L = np.round((Close[x]-np.amin(Low[x+1-19:x+1]))/np.amin(Low[x+1-19:x+1]),3)
_Low21_L = np.round((Close[x]-np.amin(Low[x+1-21:x+1]))/np.amin(Low[x+1-21:x+1]),3)
_Low23_L = np.round((Close[x]-np.amin(Low[x+1-23:x+1]))/np.amin(Low[x+1-23:x+1]),3)
_Low25_L = np.round((Close[x]-np.amin(Low[x+1-25:x+1]))/np.amin(Low[x+1-25:x+1]),3)
_Low34_L = np.round((Close[x]-np.amin(Low[x+1-34:x+1]))/np.amin(Low[x+1-34:x+1]),3)
_Low55_L = np.round((Close[x]-np.amin(Low[x+1-55:x+1]))/np.amin(Low[x+1-55:x+1]),3)
_Low89_L = np.round((Close[x]-np.amin(Low[x+1-89:x+1]))/np.amin(Low[x+1-89:x+1]),3)
_Low144_L = np.round((Close[x]-np.amin(Low[x+1-144:x+1]))/np.amin(Low[x+1-144:x+1]),3)
_Low233_L = np.round((Close[x]-np.amin(Low[x+1-233:x+1]))/np.amin(Low[x+1-233:x+1]),3)
_Low377_L = np.round((Close[x]-np.amin(Low[x+1-377:x+1]))/np.amin(Low[x+1-377:x+1]),3)
# Same construction against the rolling maximum High.
_High3_H = np.round((Close[x]-np.amax(High[x+1-3:x+1]))/np.amax(High[x+1-3:x+1]),3)
_High4_H = np.round((Close[x]-np.amax(High[x+1-4:x+1]))/np.amax(High[x+1-4:x+1]),3)
_High5_H = np.round((Close[x]-np.amax(High[x+1-5:x+1]))/np.amax(High[x+1-5:x+1]),3)
_High6_H = np.round((Close[x]-np.amax(High[x+1-6:x+1]))/np.amax(High[x+1-6:x+1]),3)
_High7_H = np.round((Close[x]-np.amax(High[x+1-7:x+1]))/np.amax(High[x+1-7:x+1]),3)
_High8_H = np.round((Close[x]-np.amax(High[x+1-8:x+1]))/np.amax(High[x+1-8:x+1]),3)
_High9_H = np.round((Close[x]-np.amax(High[x+1-9:x+1]))/np.amax(High[x+1-9:x+1]),3)
_High10_H = np.round((Close[x]-np.amax(High[x+1-10:x+1]))/np.amax(High[x+1-10:x+1]),3)
_High11_H = np.round((Close[x]-np.amax(High[x+1-11:x+1]))/np.amax(High[x+1-11:x+1]),3)
_High12_H = np.round((Close[x]-np.amax(High[x+1-12:x+1]))/np.amax(High[x+1-12:x+1]),3)
_High13_H = np.round((Close[x]-np.amax(High[x+1-13:x+1]))/np.amax(High[x+1-13:x+1]),3)
_High14_H = np.round((Close[x]-np.amax(High[x+1-14:x+1]))/np.amax(High[x+1-14:x+1]),3)
_High15_H = np.round((Close[x]-np.amax(High[x+1-15:x+1]))/np.amax(High[x+1-15:x+1]),3)
_High17_H = np.round((Close[x]-np.amax(High[x+1-17:x+1]))/np.amax(High[x+1-17:x+1]),3)
_High19_H = np.round((Close[x]-np.amax(High[x+1-19:x+1]))/np.amax(High[x+1-19:x+1]),3)
_High21_H = np.round((Close[x]-np.amax(High[x+1-21:x+1]))/np.amax(High[x+1-21:x+1]),3)
_High23_H = np.round((Close[x]-np.amax(High[x+1-23:x+1]))/np.amax(High[x+1-23:x+1]),3)
_High25_H = np.round((Close[x]-np.amax(High[x+1-25:x+1]))/np.amax(High[x+1-25:x+1]),3)
_High34_H = np.round((Close[x]-np.amax(High[x+1-34:x+1]))/np.amax(High[x+1-34:x+1]),3)
_High55_H = np.round((Close[x]-np.amax(High[x+1-55:x+1]))/np.amax(High[x+1-55:x+1]),3)
_High89_H = np.round((Close[x]-np.amax(High[x+1-89:x+1]))/np.amax(High[x+1-89:x+1]),3)
_High144_H = np.round((Close[x]-np.amax(High[x+1-144:x+1]))/np.amax(High[x+1-144:x+1]),3)
_High233_H = np.round((Close[x]-np.amax(High[x+1-233:x+1]))/np.amax(High[x+1-233:x+1]),3)
_High377_H = np.round((Close[x]-np.amax(High[x+1-377:x+1]))/np.amax(High[x+1-377:x+1]),3)
# --- Forward returns (look-ahead values) --------------------------------------
# Return k bars into the *future* (Close[x+k]); these are targets/labels, not
# features, and implicitly require x+34 < len(Close) — an IndexError here is
# presumably absorbed by the surrounding except handler. TODO confirm none of
# these leak into the model's input feature set.
_Return01 = np.round((Close[x+1]-Close[x])/Close[x],4)
_Return02 = np.round((Close[x+2]-Close[x])/Close[x],4)
_Return03 = np.round((Close[x+3]-Close[x])/Close[x],4)
_Return04 = np.round((Close[x+4]-Close[x])/Close[x],4)
_Return05 = np.round((Close[x+5]-Close[x])/Close[x],4)
_Return08 = np.round((Close[x+8]-Close[x])/Close[x],4)
_Return13 = np.round((Close[x+13]-Close[x])/Close[x],4)
_Return21 = np.round((Close[x+21]-Close[x])/Close[x],4)
_Return34 = np.round((Close[x+34]-Close[x])/Close[x],4)
# NOTE(review): broad catch-all — any failure in the feature calculations above
# (e.g. an out-of-range slice near the series edges) is only printed, and
# execution falls through to the write below with whatever variables were set
# on a previous iteration. Consider narrowing the exception and skipping the
# write on failure.
except Exception as e:
    print("ERROR: " + str(e))
### END calculation of choosen list of FEATURES for the MACHINE LEARNING process ###
### START part where to write every Future value and Feature, day by day and intrument by instrument to .txt file to read csv style.
LocationToSave = os.path.join(preProcessPath, config.preProcess.featuresFileName)
# NOTE(review): append-mode handle opened without a `with` block; the matching
# close() is not visible in this section — confirm it exists, or the handle
# leaks once per row written.
saveFile = open(LocationToSave,'a')
lineToWrite = (
str(_justOpen) + ',' +
str(_justHigh) + ',' +
str(_justLow) + ',' +
str(_justClose) + ',' +
str(instrument) + ',' +
str(_DateStamp) + ',' +
str(_Return01) + ',' +
str(Tgt_SCH05to08) + ',' +
str(Tgt_SCH05to13) + ',' +
str(Tgt_SCH05to21) + ',' +
str(Tgt_SCH05to34) + ',' +
str(Tgt_SCH08to13) + ',' +
str(Tgt_SCH08to21) + ',' +
str(Tgt_SCH08to34) + ',' +
str(Tgt_SCH13to21) + ',' +
str(Tgt_SCH13to34) + ',' +
str(Tgt_SCH21to34) + ',' +
str(_PastSCH05to08) + ',' +
str(_PastSCH05to13) + ',' +
str(_PastSCH05to21) + ',' +
str(_PastSCH05to34) + ',' +
str(_PastSCH08to13) + ',' +
str(_PastSCH08to21) + ',' +
str(_PastSCH08to34) + ',' +
str(_PastSCH13to21) + ',' +
str(_PastSCH13to34) + ',' +
str(_PastSCH21to34) + ',' +
str(_Diff_CtoH) + ',' +
str(_Diff_CtoH1) + ',' +
str(_Diff_CtoH2) + ',' +
str(_Diff_CtoH3) + ',' +
str(_Diff_CtoH4) + ',' +
str(_Diff_CtoH5) + ',' +
str(_Diff_CtoH6) + ',' +
str(_Diff_CtoH7) + ',' +
str(_Diff_CtoH8) + ',' +
str(_Diff_CtoH9) + ',' +
str(_Diff_CtoH10) + ',' +
str(_Diff_CtoH11) + ',' +
str(_Diff_CtoH12) + ',' +
str(_Diff_CtoH13) + ',' +
str(_Diff_CtoH14) + ',' +
str(_Diff_CtoH15) + ',' +
str(_Diff_CtoH16) + ',' +
str(_Diff_CtoH17) + ',' +
str(_Diff_CtoH18) + ',' +
str(_Diff_CtoH19) + ',' +
str(_Diff_CtoH20) + ',' +
str(_Diff_CtoH21) + ',' +
str(_Diff_CtoH22) + ',' +
str(_Diff_CtoH23) + ',' +
str(_Diff_CtoH24) + ',' +
str(_Diff_CtoH25) + ',' +
str(_Diff_CtoL) + ',' +
str(_Diff_CtoL1) + ',' +
str(_Diff_CtoL2) + ',' +
str(_Diff_CtoL3) + ',' +
str(_Diff_CtoL4) + ',' +
str(_Diff_CtoL5) + ',' +
str(_Diff_CtoL6) + ',' +
str(_Diff_CtoL7) + ',' +
str(_Diff_CtoL8) + ',' +
str(_Diff_CtoL9) + ',' +
str(_Diff_CtoL10) + ',' +
str(_Diff_CtoL11) + ',' +
str(_Diff_CtoL12) + ',' +
str(_Diff_CtoL13) + ',' +
str(_Diff_CtoL14) + ',' +
str(_Diff_CtoL15) + ',' +
str(_Diff_CtoL16) + ',' +
str(_Diff_CtoL17) + ',' +
str(_Diff_CtoL18) + ',' +
str(_Diff_CtoL19) + ',' +
str(_Diff_CtoL20) + ',' +
str(_Diff_CtoL21) + ',' +
str(_Diff_CtoL22) + ',' +
str(_Diff_CtoL23) + ',' +
str(_Diff_CtoL24) + ',' +
str(_Diff_CtoL25) + ',' +
str(_Diff_CtoO) + ',' +
str(_Diff_CtoO1) + ',' +
str(_Diff_CtoO2) + ',' +
str(_Diff_CtoO3) + ',' +
str(_Diff_CtoO4) + ',' +
str(_Diff_CtoO5) + ',' +
str(_Diff_CtoO6) + ',' +
str(_Diff_CtoO7) + ',' +
str(_Diff_CtoO8) + ',' +
str(_Diff_CtoO9) + ',' +
str(_Diff_CtoC1) + ',' +
str(_Diff_CtoC2) + ',' +
str(_Diff_CtoC3) + ',' +
str(_Diff_CtoC4) + ',' +
str(_Diff_CtoC5) + ',' +
str(_Diff_CtoC6) + ',' +
str(_Diff_CtoC7) + ',' +
str(_Diff_CtoC8) + ',' +
str(_Diff_CtoC9) + ',' +
str(_SMA_H3) + ',' +
str(_SMA_L3) + ',' +
str(_BBU3) + ',' +
str(_BBD3) + ',' +
str(_DiffU3_C) + ',' +
str(_DiffU3_L3) + ',' +
str(_DiffD3_C) + ',' +
str(_DiffD3_H3) + ',' +
str(_BBU5) + ',' +
str(_BBD5) + ',' +
str(_DiffU5_C) + ',' +
str(_DiffU5_L3) + ',' +
str(_DiffD5_C) + ',' +
str(_DiffD5_H3) + ',' +
str(_BBU8) + ',' +
str(_BBD8) + ',' +
str(_DiffU8_C) + ',' +
str(_DiffU8_L3) + ',' +
str(_DiffD8_C) + ',' +
str(_DiffD8_H3) + ',' +
str(_BBU13) + ',' +
str(_BBD13) + ',' +
str(_DiffU13_C) + ',' +
str(_DiffU13_L3) + ',' +
str(_DiffD13_C) + ',' +
str(_DiffD13_H3) + ',' +
str(_BBU21) + ',' +
str(_BBD21) + ',' +
str(_DiffU21_C) + ',' +
str(_DiffU21_L3) + ',' +
str(_DiffD21_C) + ',' +
str(_DiffD21_H3) + ',' +
str(_BBU34) + ',' +
str(_BBD34) + ',' +
str(_DiffU34_C) + ',' +
str(_DiffU34_L3) + ',' +
str(_DiffD34_C) + ',' +
str(_DiffD34_H3) + ',' +
str(_BBU55) + ',' +
str(_BBD55) + ',' +
str(_DiffU55_C) + ',' +
str(_DiffU55_L3) + ',' +
str(_DiffD55_C) + ',' +
str(_DiffD55_H3) + ',' +
str(_BBU89) + ',' +
str(_BBD89) + ',' +
str(_DiffU89_C) + ',' +
str(_DiffU89_L3) + ',' +
str(_DiffD89_C) + ',' +
str(_DiffD89_H3) + ',' +
str(_BBU100) + ',' +
str(_BBD100) + ',' +
str(_DiffU100_C) + ',' +
str(_DiffU100_L3) + ',' +
str(_DiffD100_C) + ',' +
str(_DiffD100_H3) + ',' +
str(_BBU144) + ',' +
str(_BBD144) + ',' +
str(_DiffU144_C) + ',' +
str(_DiffU144_L3) + ',' +
str(_DiffD144_C) + ',' +
str(_DiffD144_H3) + ',' +
str(_BBU200) + ',' +
str(_BBD200) + ',' +
str(_DiffU200_C) + ',' +
str(_DiffU200_L3) + ',' +
str(_DiffD200_C) + ',' +
str(_DiffD200_H3) + ',' +
str(_BBU233) + ',' +
str(_BBD233) + ',' +
str(_DiffU233_C) + ',' +
str(_DiffU233_L3) + ',' +
str(_DiffD233_C) + ',' +
str(_DiffD233_H3) + ',' +
str(_BBU300) + ',' +
str(_BBD300) + ',' +
str(_DiffU300_C) + ',' +
str(_DiffU300_L3) + ',' +
str(_DiffD300_C) + ',' +
str(_DiffD300_H3) + ',' +
str(_BBU377) + ',' +
str(_BBD377) + ',' +
str(_DiffU377_C) + ',' +
str(_DiffU377_L3) + ',' +
str(_DiffD377_C) + ',' +
str(_DiffD377_H3) + ',' +
str(_dateDayOfYear) + ',' +
str(_dateWeekOfYear) + ',' +
str(_dateMonthOfYear) + ',' +
str(_dateDayOfMonth) + ',' +
str(_dateDayOfWeek) + ',' +
str(_EvNo5) + ',' +
str(_EvNo10) + ',' +
str(_EvNo20) + ',' +
str(_EvNo30) + ',' +
str(_EvNo40) + ',' +
str(_EvNo50) + ',' +
str(_EvNo60) + ',' +
str(_EvNo70) + ',' +
str(_EvNo80) + ',' +
str(_EvNo90) + ',' +
str(_EvNo100) + ',' +
str(_EvNo200) + ',' +
str(_EvNo300) + ',' +
str(_EvNo400) + ',' +
str(_EvNo500) + ',' +
str(_EvNo600) + ',' +
str(_EvNo700) + ',' +
str(_EvNo800) + ',' +
str(_EvNo900) + ',' +
str(_EvNo1000) + ',' +
str(_EvNo2000) + ',' +
str(_EvNo3000) + ',' +
str(_EvNo4000) + ',' +
str(_EvNo5000) + ',' +
str(_EvNo10000) + ',' +
str(_Perc3_H) + ',' +
str(_Perc5_H) + ',' +
str(_Perc8_H) + ',' +
str(_Perc13_H) + ',' +
str(_Perc21_H) + ',' +
str(_Perc34_H) + ',' +
str(_Perc55_H) + ',' +
str(_Perc89_H) + ',' +
str(_Perc100_H) + ',' +
str(_Perc144_H) + ',' +
str(_Perc200_H) + ',' +
str(_Perc233_H) + ',' +
str(_Perc377_H) + ',' +
str(_Perc3_L) + ',' +
str(_Perc5_L) + ',' +
str(_Perc8_L) + ',' +
str(_Perc13_L) + ',' +
str(_Perc21_L) + ',' +
str(_Perc34_L) + ',' +
str(_Perc55_L) + ',' +
str(_Perc89_L) + ',' +
str(_Perc100_L) + ',' +
str(_Perc144_L) + ',' +
str(_Perc200_L) + ',' +
str(_Perc233_L) + ',' +
str(_Perc377_L) + ',' +
str(_Perc3_H80) + ',' +
str(_Perc5_H80) + ',' +
str(_Perc8_H80) + ',' +
str(_Perc13_H80) + ',' +
str(_Perc21_H80) + ',' +
str(_Perc34_H80) + ',' +
str(_Perc55_H80) + ',' +
str(_Perc89_H80) + ',' +
str(_Perc100_H80) + ',' +
str(_Perc144_H80) + ',' +
str(_Perc200_H80) + ',' +
str(_Perc233_H80) + ',' +
str(_Perc377_H80) + ',' +
str(_Perc3_L20) + ',' +
str(_Perc5_L20) + ',' +
str(_Perc8_L20) + ',' +
str(_Perc13_L20) + ',' +
str(_Perc21_L20) + ',' +
str(_Perc34_L20) + ',' +
str(_Perc55_L20) + ',' +
str(_Perc89_L20) + ',' +
str(_Perc100_L20) + ',' +
str(_Perc144_L20) + ',' +
str(_Perc200_L20) + ',' +
str(_Perc233_L20) + ',' +
str(_Perc377_L20) + ',' +
str(_Perc3_M50) + ',' +
str(_Perc5_M50) + ',' +
str(_Perc8_M50) + ',' +
str(_Perc13_M50) + ',' +
str(_Perc21_M50) + ',' +
str(_Perc34_M50) + ',' +
str(_Perc55_M50) + ',' +
str(_Perc89_M50) + ',' +
str(_Perc100_M50) + ',' +
str(_Perc144_M50) + ',' +
str(_Perc200_M50) + ',' +
str(_Perc233_M50) + ',' +
str(_Perc377_M50) + ',' +
str(RL3) + ',' +
str(RL5) + ',' +
str(RL8) + ',' +
str(RL13) + ',' +
str(RL21) + ',' +
str(RL34) + ',' +
str(RL55) + ',' +
str(RL89) + ',' +
str(RL100) + ',' +
str(RL144) + ',' +
str(RL200) + ',' +
str(RL233) + ',' +
str(RL377) + ',' +
str(Diff_C_RL3) + ',' +
str(Diff_C_RL5) + ',' +
str(Diff_C_RL8) + ',' +
str(Diff_C_RL13) + ',' +
str(Diff_C_RL21) + ',' +
str(Diff_C_RL34) + ',' +
str(Diff_C_RL55) + ',' +
str(Diff_C_RL89) + ',' +
str(Diff_C_RL100) + ',' +
str(Diff_C_RL144) + ',' +
str(Diff_C_RL200) + ',' +
str(Diff_C_RL233) + ',' +
str(Diff_C_RL377) + ',' +
str(Diff_RL3_RL5) + ',' +
str(Diff_RL3_RL8) + ',' +
str(Diff_RL3_RL13) + ',' +
str(Diff_RL3_RL21) + ',' +
str(Diff_RL3_RL34) + ',' +
str(Diff_RL5_RL8) + ',' +
str(Diff_RL5_RL13) + ',' +
str(Diff_RL5_RL21) + ',' +
str(Diff_RL5_RL34) + ',' +
str(Diff_RL5_RL55) + ',' +
str(Diff_RL8_RL13) + ',' +
str(Diff_RL8_RL21) + ',' +
str(Diff_RL8_RL34) + ',' +
str(Diff_RL8_RL55) + ',' +
str(Diff_RL8_RL89) + ',' +
str(Diff_RL13_RL21) + ',' +
str(Diff_RL13_RL34) + ',' +
str(Diff_RL13_RL55) + ',' +
str(Diff_RL13_RL139) + ',' +
str(Diff_RL13_RL100) + ',' +
str(Diff_RL21_RL34) + ',' +
str(Diff_RL21_RL55) + ',' +
str(Diff_RL21_RL89) + ',' +
str(Diff_RL21_RL100) + ',' +
str(Diff_RL21_RL144) + ',' +
str(Diff_RL34_RL55) + ',' +
str(Diff_RL34_RL89) + ',' +
str(Diff_RL34_RL100) + ',' +
str(Diff_RL34_RL144) + ',' +
str(Diff_RL34_RL200) + ',' +
str(Diff_RL55_RL89) + ',' +
str(Diff_RL55_RL100) + ',' +
str(Diff_RL55_RL144) + ',' +
str(Diff_RL55_RL200) + ',' +
str(Diff_RL55_RL233) + ',' +
str(Diff_RL89_RL100) + ',' +
str(Diff_RL89_RL144) + ',' +
str(Diff_RL89_RL200) + ',' +
str(Diff_RL89_RL233) + ',' +
str(Diff_RL89_RL377) + ',' +
str(Diff_RL100_RL144) + ',' +
str(Diff_RL100_RL200) + ',' +
str(Diff_RL100_RL233) + ',' +
str(Diff_RL100_RL377) + ',' +
str(Diff_RL144_RL200) + ',' +
str(Diff_RL144_RL233) + ',' +
str(Diff_RL144_RL377) + ',' +
str(Diff_RL200_RL233) + ',' +
str(Diff_RL200_RL377) + ',' +
str(Diff_RL233_RL377) + ',' +
str(_SMA3_C) + ',' +
str(_SMA5_C) + ',' +
str(_SMA8_C) + ',' +
str(_SMA13_C) + ',' +
str(_SMA21_C) + ',' +
str(_SMA34_C) + ',' +
str(_SMA55_C) + ',' +
str(_SMA89_C) + ',' +
str(_SMA144_C) + ',' +
str(_SMA233_C) + ',' +
str(_SMA377_C) + ',' +
str(_SMA100_C) + ',' +
str(_SMA200_C) + ',' +
str(_SMA300_C) + ',' +
str(_SMA3vs5) + ',' +
str(_SMA3vs8) + ',' +
str(_SMA3vs13) + ',' +
str(_SMA3vs21) + ',' +
str(_SMA3vs34) + ',' +
str(_SMA5vs8) + ',' +
str(_SMA5vs13) + ',' +
str(_SMA5vs21) + ',' +
str(_SMA5vs34) + ',' +
str(_SMA5vs55) + ',' +
str(_SMA8vs13) + ',' +
str(_SMA8vs21) + ',' +
str(_SMA8vs34) + ',' +
str(_SMA8vs55) + ',' +
str(_SMA8vs89) + ',' +
str(_SMA13vs21) + ',' +
str(_SMA13vs34) + ',' +
str(_SMA13vs55) + ',' +
str(_SMA13vs89) + ',' +
str(_SMA13vs144) + ',' +
str(_SMA21vs34) + ',' +
str(_SMA21vs55) + ',' +
str(_SMA21vs89) + ',' +
str(_SMA21vs144) + ',' +
str(_SMA21vs233) + ',' +
str(_SMA34vs55) + ',' +
str(_SMA34vs89) + ',' +
str(_SMA34vs144) + ',' +
str(_SMA34vs233) + ',' +
str(_SMA34vs377) + ',' +
str(_SMA55vs89) + ',' +
str(_SMA55vs144) + ',' +
str(_SMA55vs233) + ',' +
str(_SMA55vs377) + ',' +
str(_SMA89vs144) + ',' +
str(_SMA89vs233) + ',' +
str(_SMA89vs377) + ',' +
str(_SMA144vs233) + ',' +
str(_SMA144vs377) + ',' +
str(_SMA233vs377) + ',' +
str(_STD3_C) + ',' +
str(_STD3sign) + ',' +
str(_STD3vsSign) + ',' +
str(_STD5_C) + ',' +
str(_STD5sign) + ',' +
str(_STD5vsSign) + ',' +
str(_STD8_C) + ',' +
str(_STD8sign) + ',' +
str(_STD8vsSign) + ',' +
str(_STD13_C) + ',' +
str(_STD13sign) + ',' +
str(_STD13vsSign) + ',' +
str(_STD21_C) + ',' +
str(_STD21sign) + ',' +
str(_STD21vsSign) + ',' +
str(_STD34_C) + ',' +
str(_STD34sign) + ',' +
str(_STD34vsSign) + ',' +
str(_STD55_C) + ',' +
str(_STD55sign) + ',' +
str(_STD55vsSign) + ',' +
str(_STD89_C) + ',' +
str(_STD89sign) + ',' +
str(_STD89vsSign) + ',' +
str(_STD144_C) + ',' +
str(_STD144sign) + ',' +
str(_STD144vsSign) + ',' +
str(_STD233_C) + ',' +
str(_STD233sign) + ',' +
str(_STD233vsSign) + ',' +
str(_STD377_C) + ',' +
str(_STD377sign) + ',' +
str(_STD377vsSign) + ',' +
str(_STD100_C) + ',' +
str(_STD100sign) + ',' +
str(_STD100vsSign) + ',' +
str(_STD200_C) + ',' +
str(_STD200sign) + ',' +
str(_STD200vsSign) + ',' +
str(_STD300_C) + ',' +
str(_STD300sign) + ',' +
str(_STD300vsSign) + ',' +
str(_stoch5) + ',' +
str(_sign5Stoch5) + ',' +
str(_diffStochSign5) + ',' +
str(_stoch5Level) + ',' +
str(_stoch14) + ',' +
str(_stoch8) + ',' +
str(_sign5Stoch8) + ',' +
str(_diffStochSign8) + ',' +
str(_stoch8Level) + ',' +
str(_stoch14) + ',' +
str(_sign5Stoch8) + ',' +
str(_diffStochSign14) + ',' +
str(_stoch14Level) + ',' +
str(_stoch21) + ',' +
str(_sign5Stoch21) + ',' +
str(_diffStochSign21) + ',' +
str(_stoch21Level) + ',' +
str(_stoch34) + ',' +
str(_sign5Stoch34) + ',' +
str(_diffStochSign34) + ',' +
str(_stoch34Level) + ',' +
str(_stoch55) + ',' +
str(_sign5Stoch55) + ',' +
str(_diffStochSign55) + ',' +
str(_stoch55Level) + ',' +
str(_stoch89) + ',' +
str(_sign5Stoch89) + ',' +
str(_diffStochSign89) + ',' +
str(_stoch89Level) + ',' +
str(_stoch144) + ',' +
str(_sign5Stoch144) + ',' +
str(_diffStochSign144) + ',' +
str(_stoch144Level) + ',' +
str(_stoch233) + ',' +
str(_sign5Stoch233) + ',' +
str(_diffStochSign233) + ',' +
str(_stoch233Level) + ',' +
str(_stoch377) + ',' +
str(_sign5Stoch377) + ',' +
str(_diffStochSign377) + ',' +
str(_stoch377Level) + ',' +
str(_stoch100) + ',' +
str(_sign5Stoch100) + ',' +
str(_diffStochSign100) + ',' +
str(_stoch100Level) + ',' +
str(_stoch200) + ',' +
str(_sign5Stoch200) + ',' +
str(_diffStochSign200) + ',' +
str(_stoch200Level) + ',' +
str(_stoch300) + ',' +
str(_sign5Stoch300) + ',' +
str(_diffStochSign300) + ',' +
str(_stoch300Level) + ',' +
str(_Low3_L) + ',' +
str(_Low4_L) + ',' +
str(_Low5_L) + ',' +
str(_Low6_L) + ',' +
str(_Low7_L) + ',' +
str(_Low8_L) + ',' +
str(_Low9_L) + ',' +
str(_Low10_L) + ',' +
str(_Low11_L) + ',' +
str(_Low12_L) + ',' +
str(_Low13_L) + ',' +
str(_Low14_L) + ',' +
str(_Low15_L) + ',' +
str(_Low17_L) + ',' +
str(_Low19_L) + ',' +
str(_Low21_L) + ',' +
str(_Low23_L) + ',' +
str(_Low25_L) + ',' +
str(_Low34_L) + ',' +
str(_Low55_L) + ',' +
str(_Low89_L) + ',' +
str(_Low144_L) + ',' +
str(_Low233_L) + ',' +
str(_Low377_L) + ',' +
str(_High3_H) + ',' +
str(_High4_H) + ',' +
str(_High5_H) + ',' +
str(_High6_H) + ',' +
str(_High7_H) + ',' +
str(_High8_H) + ',' +
str(_High9_H) + ',' +
str(_High10_H) + ',' +
str(_High11_H) + ',' +
str(_High12_H) + ',' +
str(_High13_H) + ',' +
str(_High14_H) + ',' +
str(_High15_H) + ',' +
str(_High17_H) + ',' +
str(_High19_H) + ',' +
str(_High21_H) + ',' +
str(_High23_H) + ',' +
str(_High25_H) + ',' +
str(_High34_H) + ',' +
str(_High55_H) + ',' +
str(_High89_H) + ',' +
str(_High144_H) + ',' +
str(_High233_H) + ',' +
str(_High377_H) + '\n'
)
saveFile.write(lineToWrite)
saveFile.close()
else:
print("Skipped {0}".format(x))
except Exception as e:
print("ERROR: " + str(e))
print("Appended lines of features to {}".format(LocationToSave))
c.timer.print_elapsed("Completed processing of {0}".format(instrumentName))
numCompleted+=1
### END part where to write every Future value and Feature, day by day and intrument by instrument to .txt file to read csv style.
c.timer.print_elapsed('Completed preprocessing {0} files with ticker data ({1} failed) from {2}'.format(numCompleted, numFailed, instrumentPath))
| mit |
flameOnYou/mt4plus | main.py | 1 | 6204 | #coding:utf-8
'''
读取hst文件
'''
import struct
import time
import datetime
import hstutils
import json
import threading
import logging
import initEnvironment
import tushare as ts
import pandas as pd
import traceback
import requests as rq
# Log everything (DEBUG and up) to a file, overwriting it on each run.
logging.basicConfig(level=logging.DEBUG,
                    format='%(asctime)s %(filename)s[line:%(lineno)d] %(levelname)s %(message)s',
                    datefmt='%a, %d %b %Y %H:%M:%S',
                    filename='log/iceblaze.log',
                    filemode='w')
# Mirror log records to the console in addition to the log file.
# Console handler uses the same record format as the file handler.
formatter = logging.Formatter('%(asctime)s %(filename)s[line:%(lineno)d] %(levelname)s %(message)s')
console = logging.StreamHandler()
console.setFormatter(formatter)
console.setLevel(logging.DEBUG)
logging.getLogger('').addHandler(console)
# Output directory for the generated MT4 .hst chart files.
hst_file_path ="../history/default/"
filepath = ""
# Earliest date (yyyymmdd) requested from the daily-quote service.
start_date = "20000101"
"""
获取历史数据
"""
def getHistory(symbol,period,point):
    """Fetch OHLCV bar history for *symbol* at *period* (minutes).

    Daily bars (period == 1440) are downloaded as a CSV dump from the
    163.com quote service and cached under ephemeral.data/; intraday
    bars come from the tushare ``get_k_data`` API.  Prices are scaled
    via ``hstutils.floatHandle(value, point)``.

    Returns a list of ``hstutils.PriceStruct`` records in chronological
    order, or an empty list on invalid input or any error.
    """
    plist = []
    if symbol == "" or symbol is None:
        logging.info( "wrongful symbol")
        return plist
    try:
        # Daily data is fetched from the 163.com CSV service.
        if period == 1440:
            coreId = str(symbol)
            start = start_date
            end_date = datetime.datetime.now().strftime('%Y%m%d')
            file_path = "ephemeral.data/"+coreId+".csv"
            market = "0"
            # Market prefix for the 163.com URL: codes starting with "6"
            # are Shanghai (market 0); everything else is market 1.
            if coreId[0] != "6":
                market = "1"
            url = "http://quotes.money.163.com/service/chddata.html?code="+market+coreId+"&start="+start+"&end="+end_date
            r = rq.get(url)
            with open(file_path , 'wb') as f:
                f.write(r.content)
                f.close()
            df = pd.read_csv(file_path,encoding="gbk",skiprows =1,names=["datetime","coreId","name","close","high","low","opens","before_close","Fluctuation","Chg","Turnover_rate","volume","amount","TotleMarket","CirculationMarket","volnum"])
            # The CSV is newest-first; reverse so the bars are chronological.
            df = df.iloc[::-1]
            for unit in df.iterrows():
                dataformate = "%Y-%m-%d"
                dates = unit[1]['datetime']
                # Convert the date string to a POSIX timestamp (seconds).
                d=datetime.datetime.strptime(dates,dataformate)
                times=int(time.mktime(d.timetuple()))
                opens = unit[1]['opens']
                close = unit[1]['close']
                high = unit[1]['high']
                low = unit[1]['low']
                volume = unit[1]['volume']
                times = long(times)
                opens = hstutils.floatHandle(opens,point)
                high = hstutils.floatHandle(high,point)
                low = hstutils.floatHandle(low,point)
                close = hstutils.floatHandle(close,point)
                volume = long(volume)
                priceStruc = hstutils.PriceStruct(times, opens, high, low, close,volume)
                plist.append(priceStruc)
            return plist
        else:
            # Intraday data is fetched from tushare; ktype expects the
            # period as a string ("5", "15", ...).
            period = str(period)
            data = ts.get_k_data(symbol,ktype=period)
            if data is None:
                print "tushare is no data symbol %s period %s",symbol,period
                return plist
            # NOTE(review): resultlist and lens are assigned but never used.
            resultlist = []
            lens = len(data)
            for unit in data.iterrows():
                dates = unit[1]['date']
                # A 10-char date is "%Y-%m-%d"; a 16-char one is
                # "%Y-%m-%d %H:%M".
                dataformate = "%Y-%m-%d %H:%M"
                date_len = len(dates)
                if date_len == 10 :
                    dataformate = "%Y-%m-%d"
                d=datetime.datetime.strptime(dates,dataformate)
                times=int(time.mktime(d.timetuple()))
                opens=unit[1]['open']
                close=unit[1]['close']
                high=unit[1]['high']
                low=unit[1]['low']
                volume=unit[1]['volume']
                times = long(times)
                opens = hstutils.floatHandle(opens,point)
                high = hstutils.floatHandle(high,point)
                low = hstutils.floatHandle(low,point)
                close = hstutils.floatHandle(close,point)
                volume = long(volume)
                priceStruc = hstutils.PriceStruct(times, opens, high, low, close,volume)
                plist.append(priceStruc)
            return plist
    except Exception,e:
        # Dump full diagnostics to stdout and record the failing call in
        # the log, then degrade to an empty result.
        print 'str(Exception):\t', str(Exception)
        print 'str(e):\t\t', str(e)
        print 'repr(e):\t', repr(e)
        print 'e.message:\t', e.message
        print 'traceback.print_exc():'
        traceback.print_exc()
        print 'traceback.format_exc():\n%s' % traceback.format_exc()
        errormsg = "method get_k_data [ symbol %s period %s ]",symbol,period
        logging.info(errormsg)
        return []
"""
开始运行某个周期的图表
"""
def startChartRun(symbol,period):
    """Build the MT4 .hst chart file for one symbol/period pair.

    Writes the fixed .hst header first, then appends every historical
    bar returned by getHistory().
    """
    point = 2;
    target = hst_file_path + symbol + str(period) + ".hst"
    # Header must be in place before any bar records are appended.
    hstutils.writeHstHead(target, symbol, period, point)
    bars = getHistory(symbol, period, point)
    if bars is not None:
        hstutils.writeStructList(target, bars);
"""
启动一个图表数据的实时线程
"""
def startThread():
    """Worker loop: drain the shared symbolList, spawning one chart
    builder thread per (symbol, period) combination."""
    chart_periods = (5, 15, 30, 60, 1440)
    while symbolList:
        code = symbolList.pop()
        for chart_period in chart_periods:
            worker = threading.Thread(target=startChartRun,
                                      args=(code, chart_period))
            worker.start()
if __name__ == '__main__':
    print "environment init start ...................................."
    # Initialise the raw data files and return the list of all stock codes.
    symbolList = initEnvironment.initFunc()
    threads = []
    # Spawn 100 worker threads; each drains the shared symbolList.
    for i in range(100):
        t = threading.Thread(target=startThread)
        t.setName(i)
        threads.append(t)
    for t in threads:
        t.start()
    # Wait for all workers before announcing completion.
    for t in threads:
        t.join()
    print "over .........................close window and open MT4"
print "over .........................close window and open MT4" | gpl-2.0 |
kubeflow/pipelines | components/dataset_manipulation/split_data_into_folds/in_CSV/component.py | 1 | 2735 | from kfp.components import InputPath, OutputPath, create_component_from_func
def split_table_into_folds(
    table_path: InputPath('CSV'),
    train_1_path: OutputPath('CSV'),
    train_2_path: OutputPath('CSV'),
    train_3_path: OutputPath('CSV'),
    train_4_path: OutputPath('CSV'),
    train_5_path: OutputPath('CSV'),
    test_1_path: OutputPath('CSV'),
    test_2_path: OutputPath('CSV'),
    test_3_path: OutputPath('CSV'),
    test_4_path: OutputPath('CSV'),
    test_5_path: OutputPath('CSV'),
    number_of_folds: int = 5,
    random_seed: int = 0,
):
    """Splits the data table into the specified number of folds.

    The data is split into the specified number of folds k (default: 5).
    Each testing subsample has 1/k fraction of samples. The testing subsamples do not overlap.
    Each training subsample has (k-1)/k fraction of samples.
    The train_i subsample is produced by excluding test_i subsample form all samples.
    Folds beyond number_of_folds are written as empty tables so that all
    declared outputs always exist.

    Inputs:
        table: The data to split by rows
        number_of_folds: Number of folds to split data into (between 2 and 5)
        random_seed: Random seed for reproducible splitting

    Outputs:
        train_i: The i-th training subsample
        test_i: The i-th testing subsample

    Annotations:
        author: Alexey Volkov <alexey.volkov@ark-kun.com>
    """
    import pandas
    from sklearn import model_selection

    max_number_of_folds = 5
    # sklearn's KFold requires n_splits >= 2, so reject 1 up front with a
    # clear message instead of failing later inside KFold.
    if number_of_folds < 2 or number_of_folds > max_number_of_folds:
        raise ValueError('Number of folds must be between 2 and {}.'.format(max_number_of_folds))

    df = pandas.read_csv(
        table_path,
    )
    splitter = model_selection.KFold(
        n_splits=number_of_folds,
        shuffle=True,
        random_state=random_seed,
    )
    folds = list(splitter.split(df))
    fold_paths = [
        (train_1_path, test_1_path),
        (train_2_path, test_2_path),
        (train_3_path, test_3_path),
        (train_4_path, test_4_path),
        (train_5_path, test_5_path),
    ]
    for i in range(max_number_of_folds):
        (train_path, test_path) = fold_paths[i]
        if i < len(folds):
            (train_indices, test_indices) = folds[i]
            train_fold = df.iloc[train_indices]
            test_fold = df.iloc[test_indices]
        else:
            # Unused fold slots still get (empty) output files.
            train_fold = df.iloc[0:0]
            test_fold = df.iloc[0:0]
        train_fold.to_csv(train_path, index=False)
        test_fold.to_csv(test_path, index=False)
if __name__ == '__main__':
    # Compile the Python function above into a reusable KFP component
    # specification (component.yaml).
    required_packages = ['scikit-learn==0.23.1', 'pandas==1.0.5']
    split_table_into_folds_op = create_component_from_func(
        split_table_into_folds,
        base_image='python:3.7',
        packages_to_install=required_packages,
        output_component_file='component.yaml',
    )
| apache-2.0 |
kuke/HAMDLE | PyCUDA/plot_first_five_Legendre.py | 1 | 1152 | #! /bin/python
import sys
import matplotlib.pyplot as plt
import numpy as np
# Sample points on [-1, 1] in steps of 0.05 (upper bound 1.05 keeps 1.0 in).
X = np.arange(-1, 1.05, 0.05)
# Highest polynomial degree to evaluate and plot.
order = 5
def generate_Legendre_matrix(order, X):
    """Evaluate the Legendre polynomials P_0..P_order at the points X.

    Uses Bonnet's recurrence:
        n * P_n(x) = (2n - 1) * x * P_{n-1}(x) - (n - 1) * P_{n-2}(x)

    Parameters
    ----------
    order : int
        Highest polynomial degree to generate (>= 0).
    X : numpy.ndarray
        1-D array of evaluation points.

    Returns
    -------
    list
        ``order + 1`` rows; row ``n`` is a numpy array holding P_n(X).

    Fixes over the original: ``order == 0`` no longer crashes (the old
    code unconditionally assigned row 1), and row 0 is a numpy array so
    the ``(i-1) * row`` term in the recurrence is numeric multiplication
    rather than relying on the factor happening to be 1 for a list.
    """
    m = X.size
    Legendre_mat = [np.zeros(m) for row in range(order + 1)]
    Legendre_mat[0] = np.ones(m)       # P_0(x) = 1
    if order == 0:
        return Legendre_mat
    Legendre_mat[1] = X                # P_1(x) = x
    for i in range(2, order + 1):
        Legendre_mat[i] = (2 * i - 1) * np.multiply(X, Legendre_mat[i - 1]) \
            - (i - 1) * Legendre_mat[i - 2]
        Legendre_mat[i] = Legendre_mat[i] / i
    return Legendre_mat
# Evaluate P_0..P_order on the sample grid.
Legendre_mat = generate_Legendre_matrix(order, X)

plt.figure("Legendre polynomials")
if 5==order:
    # Hand-picked line styles/markers keep the six curves distinguishable.
    plt.plot(X, Legendre_mat[0], '-', label="n=0",)
    plt.plot(X, Legendre_mat[1], '--', label="n=1")
    plt.plot(X, Legendre_mat[2], '-.', label="n=2")
    plt.plot(X, Legendre_mat[3], ':', label="n=3")
    plt.plot(X, Legendre_mat[4], '-+', label="n=4")
    plt.plot(X, Legendre_mat[5], '-^', label="n=5")
else:
    # Bug fix: use order+1 so P_order itself is plotted (the original
    # range(0, order) dropped the last polynomial, unlike the branch above
    # which plots all of n=0..5 inclusive).
    for i in range(0, order + 1):
        plt.plot(X, Legendre_mat[i], label="n="+str(i))
plt.legend(loc='best', prop={'size':16})
plt.axis([-1, 1, -1, 2])
plt.xlabel('$x$', fontsize=18)
plt.ylabel('$P_n(x)$', fontsize=18)
plt.xticks(fontsize=16)
plt.yticks(fontsize=16)
plt.show()
| gpl-3.0 |
arahuja/scikit-learn | sklearn/decomposition/tests/test_truncated_svd.py | 240 | 6055 | """Test truncated SVD transformer."""
import numpy as np
import scipy.sparse as sp
from sklearn.decomposition import TruncatedSVD
from sklearn.utils import check_random_state
from sklearn.utils.testing import (assert_array_almost_equal, assert_equal,
assert_raises, assert_greater,
assert_array_less)
# Make an X that looks somewhat like a small tf-idf matrix.
# XXX newer versions of SciPy have scipy.sparse.rand for this.
shape = 60, 55
n_samples, n_features = shape
rng = check_random_state(42)
# np.prod replaces the deprecated np.product alias (removed in NumPy 2.0).
X = rng.randint(-100, 20, np.prod(shape)).reshape(shape)
# Keep only non-negative counts, store sparsely, and apply a log transform
# to the stored (non-zero) entries, tf-idf style.
X = sp.csr_matrix(np.maximum(X, 0), dtype=np.float64)
X.data[:] = 1 + np.log(X.data)
Xdense = X.A
def test_algorithms():
    """ARPACK and randomized solvers must agree (up to sign) on X."""
    arpack_svd = TruncatedSVD(30, algorithm="arpack")
    random_svd = TruncatedSVD(30, algorithm="randomized", random_state=42)

    reduced_arpack = arpack_svd.fit_transform(X)[:, :6]
    reduced_random = random_svd.fit_transform(X)[:, :6]
    assert_array_almost_equal(reduced_arpack, reduced_random)

    # Compare magnitudes only: singular vectors are sign-ambiguous.
    mag_arpack = np.abs(arpack_svd.components_)
    mag_random = np.abs(random_svd.components_)
    # All elements are equal, but some elements are more equal than others:
    # trailing components are noisier, so compare them more loosely.
    assert_array_almost_equal(mag_arpack[:9], mag_random[:9])
    assert_array_almost_equal(mag_arpack[9:], mag_random[9:], decimal=3)
def test_attributes():
    """Fitted estimator exposes n_components and a matching components_ shape."""
    for k in (10, 25, 41):
        model = TruncatedSVD(k).fit(X)
        assert_equal(model.n_components, k)
        assert_equal(model.components_.shape, (k, n_features))
def test_too_many_components():
    """Requesting n_components >= n_features must be rejected at fit time."""
    for algorithm in ("arpack", "randomized"):
        for k in (n_features, n_features + 1):
            estimator = TruncatedSVD(n_components=k, algorithm=algorithm)
            assert_raises(ValueError, estimator.fit, X)
def test_sparse_formats():
    """TruncatedSVD accepts dense arrays and all common sparse formats.

    Bug fix: the original loop tuple never contained "dense", so the
    `Xfmt = Xdense` branch was unreachable and dense input was untested.
    """
    for fmt in ("dense", "array", "csr", "csc", "coo", "lil"):
        # "dense" uses the plain ndarray; the rest convert X via to<fmt>().
        Xfmt = Xdense if fmt == "dense" else getattr(X, "to" + fmt)()
        tsvd = TruncatedSVD(n_components=11)
        Xtrans = tsvd.fit_transform(Xfmt)
        assert_equal(Xtrans.shape, (n_samples, 11))
        Xtrans = tsvd.transform(Xfmt)
        assert_equal(Xtrans.shape, (n_samples, 11))
def test_inverse_transform():
    """inverse_transform(fit_transform(X)) approximately reconstructs X.

    Bug fix: the loop variable `algo` was never passed to TruncatedSVD, so
    both iterations exercised the same default solver. It is now forwarded
    via the `algorithm` parameter.
    """
    for algo in ("arpack", "randomized"):
        # We need a lot of components for the reconstruction to be "almost
        # equal" in all positions. XXX Test means or sums instead?
        tsvd = TruncatedSVD(n_components=52, random_state=42, algorithm=algo)
        Xt = tsvd.fit_transform(X)
        Xinv = tsvd.inverse_transform(Xt)
        assert_array_almost_equal(Xinv, Xdense, decimal=1)
def test_integers():
    """Integer input is accepted and transformed without error."""
    int_matrix = X.astype(np.int64)
    model = TruncatedSVD(n_components=6)
    transformed = model.fit_transform(int_matrix)
    assert_equal(transformed.shape, (n_samples, model.n_components))
def test_explained_variance():
    """Check explained_variance_ratio_ across solvers, sizes and input types.

    Fits every combination of {arpack, randomized} x {10, 20 components}
    x {sparse, dense} input, then verifies consistency and correctness of
    the reported explained-variance ratios.
    """
    # Test sparse data
    svd_a_10_sp = TruncatedSVD(10, algorithm="arpack")
    svd_r_10_sp = TruncatedSVD(10, algorithm="randomized", random_state=42)
    svd_a_20_sp = TruncatedSVD(20, algorithm="arpack")
    svd_r_20_sp = TruncatedSVD(20, algorithm="randomized", random_state=42)
    X_trans_a_10_sp = svd_a_10_sp.fit_transform(X)
    X_trans_r_10_sp = svd_r_10_sp.fit_transform(X)
    X_trans_a_20_sp = svd_a_20_sp.fit_transform(X)
    X_trans_r_20_sp = svd_r_20_sp.fit_transform(X)

    # Test dense data
    svd_a_10_de = TruncatedSVD(10, algorithm="arpack")
    svd_r_10_de = TruncatedSVD(10, algorithm="randomized", random_state=42)
    svd_a_20_de = TruncatedSVD(20, algorithm="arpack")
    svd_r_20_de = TruncatedSVD(20, algorithm="randomized", random_state=42)
    X_trans_a_10_de = svd_a_10_de.fit_transform(X.toarray())
    X_trans_r_10_de = svd_r_10_de.fit_transform(X.toarray())
    X_trans_a_20_de = svd_a_20_de.fit_transform(X.toarray())
    X_trans_r_20_de = svd_r_20_de.fit_transform(X.toarray())

    # helper arrays for tests below
    # All eight fitted estimators.
    svds = (svd_a_10_sp, svd_r_10_sp, svd_a_20_sp, svd_r_20_sp, svd_a_10_de,
            svd_r_10_de, svd_a_20_de, svd_r_20_de)
    # Each estimator paired with its transformed output.
    svds_trans = (
        (svd_a_10_sp, X_trans_a_10_sp),
        (svd_r_10_sp, X_trans_r_10_sp),
        (svd_a_20_sp, X_trans_a_20_sp),
        (svd_r_20_sp, X_trans_r_20_sp),
        (svd_a_10_de, X_trans_a_10_de),
        (svd_r_10_de, X_trans_r_10_de),
        (svd_a_20_de, X_trans_a_20_de),
        (svd_r_20_de, X_trans_r_20_de),
    )
    # Same solver/input, 10 vs. 20 components.
    svds_10_v_20 = (
        (svd_a_10_sp, svd_a_20_sp),
        (svd_r_10_sp, svd_r_20_sp),
        (svd_a_10_de, svd_a_20_de),
        (svd_r_10_de, svd_r_20_de),
    )
    # Same solver/size, sparse vs. dense input.
    svds_sparse_v_dense = (
        (svd_a_10_sp, svd_a_10_de),
        (svd_a_20_sp, svd_a_20_de),
        (svd_r_10_sp, svd_r_10_de),
        (svd_r_20_sp, svd_r_20_de),
    )

    # Assert the 1st component is equal
    for svd_10, svd_20 in svds_10_v_20:
        assert_array_almost_equal(
            svd_10.explained_variance_ratio_,
            svd_20.explained_variance_ratio_[:10],
            decimal=5,
        )

    # Assert that 20 components has higher explained variance than 10
    for svd_10, svd_20 in svds_10_v_20:
        assert_greater(
            svd_20.explained_variance_ratio_.sum(),
            svd_10.explained_variance_ratio_.sum(),
        )

    # Assert that all the values are greater than 0
    for svd in svds:
        assert_array_less(0.0, svd.explained_variance_ratio_)

    # Assert that total explained variance is less than 1
    for svd in svds:
        assert_array_less(svd.explained_variance_ratio_.sum(), 1.0)

    # Compare sparse vs. dense
    for svd_sparse, svd_dense in svds_sparse_v_dense:
        assert_array_almost_equal(svd_sparse.explained_variance_ratio_,
                                  svd_dense.explained_variance_ratio_)

    # Test that explained_variance is correct
    for svd, transformed in svds_trans:
        total_variance = np.var(X.toarray(), axis=0).sum()
        variances = np.var(transformed, axis=0)
        true_explained_variance_ratio = variances / total_variance

        assert_array_almost_equal(
            svd.explained_variance_ratio_,
            true_explained_variance_ratio,
        )
| bsd-3-clause |
a-doumoulakis/tensorflow | tensorflow/examples/learn/text_classification_cnn.py | 29 | 5677 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Example of Estimator for CNN-based text classification with DBpedia data."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import sys
import numpy as np
import pandas
from sklearn import metrics
import tensorflow as tf
# Parsed command-line flags; populated in the __main__ block.
FLAGS = None

# Documents are padded/truncated to this many word ids.
MAX_DOCUMENT_LENGTH = 100
# Dimensionality of the learned word embeddings.
EMBEDDING_SIZE = 20
# Number of convolution filters per layer.
N_FILTERS = 10
# Convolution window length (in words for layer 1).
WINDOW_SIZE = 20
# Layer 1 convolves over (words x embedding dims); layer 2 over
# (words x layer-1 filters).
FILTER_SHAPE1 = [WINDOW_SIZE, EMBEDDING_SIZE]
FILTER_SHAPE2 = [WINDOW_SIZE, N_FILTERS]
# Max-pooling window and stride after layer 1.
POOLING_WINDOW = 4
POOLING_STRIDE = 2
# Vocabulary size; set in main() after fitting the vocabulary processor.
n_words = 0
# Number of target classes in the DBpedia dataset.
MAX_LABEL = 15
WORDS_FEATURE = 'words'  # Name of the input words feature.
def cnn_model(features, labels, mode):
  """2 layer ConvNet to predict from sequence of words to a class.

  Estimator model_fn. Args: `features` maps WORDS_FEATURE to a
  [batch, MAX_DOCUMENT_LENGTH] int tensor of word ids; `labels` is a
  [batch] int tensor of class ids (None for prediction); `mode` is a
  tf.estimator.ModeKeys value. Returns a tf.estimator.EstimatorSpec.
  """
  # Convert indexes of words into embeddings.
  # This creates embeddings matrix of [n_words, EMBEDDING_SIZE] and then
  # maps word indexes of the sequence into [batch_size, sequence_length,
  # EMBEDDING_SIZE].
  word_vectors = tf.contrib.layers.embed_sequence(
      features[WORDS_FEATURE], vocab_size=n_words, embed_dim=EMBEDDING_SIZE)
  # Add a channels dimension so conv2d sees [batch, words, embed, 1].
  word_vectors = tf.expand_dims(word_vectors, 3)
  with tf.variable_scope('CNN_Layer1'):
    # Apply Convolution filtering on input sequence.
    conv1 = tf.layers.conv2d(
        word_vectors,
        filters=N_FILTERS,
        kernel_size=FILTER_SHAPE1,
        padding='VALID',
        # Add a ReLU for non linearity.
        activation=tf.nn.relu)
    # Max pooling across output of Convolution+Relu.
    pool1 = tf.layers.max_pooling2d(
        conv1,
        pool_size=POOLING_WINDOW,
        strides=POOLING_STRIDE,
        padding='SAME')
    # Transpose matrix so that n_filters from convolution becomes width.
    pool1 = tf.transpose(pool1, [0, 1, 3, 2])
  with tf.variable_scope('CNN_Layer2'):
    # Second level of convolution filtering.
    conv2 = tf.layers.conv2d(
        pool1,
        filters=N_FILTERS,
        kernel_size=FILTER_SHAPE2,
        padding='VALID')
    # Max across each filter to get useful features for classification.
    # NOTE(review): squeeze_dims is the older name of the `axis` argument;
    # deprecated in newer TF releases — confirm against the pinned version.
    pool2 = tf.squeeze(tf.reduce_max(conv2, 1), squeeze_dims=[1])

  # Apply regular WX + B and classification.
  logits = tf.layers.dense(pool2, MAX_LABEL, activation=None)

  predicted_classes = tf.argmax(logits, 1)
  if mode == tf.estimator.ModeKeys.PREDICT:
    # Prediction mode: no labels available, return classes + probabilities.
    return tf.estimator.EstimatorSpec(
        mode=mode,
        predictions={
            'class': predicted_classes,
            'prob': tf.nn.softmax(logits)
        })

  onehot_labels = tf.one_hot(labels, MAX_LABEL, 1, 0)
  loss = tf.losses.softmax_cross_entropy(
      onehot_labels=onehot_labels, logits=logits)
  if mode == tf.estimator.ModeKeys.TRAIN:
    optimizer = tf.train.AdamOptimizer(learning_rate=0.01)
    train_op = optimizer.minimize(loss, global_step=tf.train.get_global_step())
    return tf.estimator.EstimatorSpec(mode, loss=loss, train_op=train_op)

  # EVAL mode: report accuracy alongside the loss.
  eval_metric_ops = {
      'accuracy': tf.metrics.accuracy(
          labels=labels, predictions=predicted_classes)
  }
  return tf.estimator.EstimatorSpec(
      mode=mode, loss=loss, eval_metric_ops=eval_metric_ops)
def main(unused_argv):
  """Load DBpedia, train the CNN classifier, and report test accuracy."""
  global n_words
  # Prepare training and testing data
  dbpedia = tf.contrib.learn.datasets.load_dataset(
      'dbpedia', test_with_fake_data=FLAGS.test_with_fake_data)
  # Column 1 of the dataset holds the document text.
  x_train = pandas.DataFrame(dbpedia.train.data)[1]
  y_train = pandas.Series(dbpedia.train.target)
  x_test = pandas.DataFrame(dbpedia.test.data)[1]
  y_test = pandas.Series(dbpedia.test.target)

  # Process vocabulary: map words to integer ids, padding/truncating each
  # document to MAX_DOCUMENT_LENGTH ids.
  vocab_processor = tf.contrib.learn.preprocessing.VocabularyProcessor(
      MAX_DOCUMENT_LENGTH)
  x_train = np.array(list(vocab_processor.fit_transform(x_train)))
  x_test = np.array(list(vocab_processor.transform(x_test)))
  # The module-level vocabulary size is read by cnn_model.
  n_words = len(vocab_processor.vocabulary_)
  print('Total words: %d' % n_words)

  # Build model
  classifier = tf.estimator.Estimator(model_fn=cnn_model)

  # Train.
  train_input_fn = tf.estimator.inputs.numpy_input_fn(
      x={WORDS_FEATURE: x_train},
      y=y_train,
      batch_size=len(x_train),
      num_epochs=None,
      shuffle=True)
  classifier.train(input_fn=train_input_fn, steps=100)

  # Predict.
  test_input_fn = tf.estimator.inputs.numpy_input_fn(
      x={WORDS_FEATURE: x_test},
      y=y_test,
      num_epochs=1,
      shuffle=False)
  predictions = classifier.predict(input_fn=test_input_fn)
  y_predicted = np.array(list(p['class'] for p in predictions))
  y_predicted = y_predicted.reshape(np.array(y_test).shape)

  # Score with sklearn.
  score = metrics.accuracy_score(y_test, y_predicted)
  print('Accuracy (sklearn): {0:f}'.format(score))

  # Score with tensorflow.
  scores = classifier.evaluate(input_fn=test_input_fn)
  print('Accuracy (tensorflow): {0:f}'.format(scores['accuracy']))
if __name__ == '__main__':
  parser = argparse.ArgumentParser()
  parser.add_argument(
      '--test_with_fake_data',
      default=False,
      help='Test the example code with fake data.',
      action='store_true')
  FLAGS, unparsed = parser.parse_known_args()
  # Forward unparsed args to TF's app runner, which calls main().
  tf.app.run(main=main, argv=[sys.argv[0]] + unparsed)
| apache-2.0 |
dennissergeev/sattools | sattools/utils.py | 2 | 6718 | # -*- coding: utf-8 -*-
"""
Auxiliary functions for sattools module
"""
from __future__ import division, print_function
import datetime
import h5py
import matplotlib as mpl
import matplotlib.pyplot as plt
import numpy as np
import os
import pytz
default_cmap_dir = os.path.join(os.path.dirname(os.path.realpath(__file__)),'../ccsat_cmap/')
def cc_interp2d(data, X, Z, x1, x2, nx, z1, z2, nz, use_numba=True):
    """Regrid 2D data onto a regular nx-by-nz grid via :func:`_interp2d`.

    When *use_numba* is True and numba is importable, the pure-Python
    kernel is JIT-compiled first; otherwise it runs uncompiled.
    """
    if use_numba:
        try:
            import numba as nb
        except ImportError:
            print('Unsuccessful numba import, using pure Python')
            use_numba = False
    kernel = _interp2d
    if use_numba:
        kernel = nb.jit()(_interp2d)
    return kernel(data, X, Z, x1, x2, nx, z1, z2, nz)
def _interp2d(data, X, Z, x1, x2, nx, z1, z2, nz):
    """Interpolate 2D data with coordinates given by 1D and 2D arrays.
    data is a two-dimensional array of data to be interpolated.
    X and Z are one- and two-dimensional arrays, giving coordinates
    of data points along the first and second axis, resp.
    data, X and Z are expected to be C-contiguous float32 numpy arrays
    with no mask and no transformation (such as transposition) applied.

    Returns an (nx, nz) float32 array covering [x1, x2) x [z1, z2);
    each output cell is the mean of all source samples mapped into it,
    and cells that receive no sample are set to NaN.
    """
    # Output grid cell sizes along each axis.
    xs = (x2 - x1)/nx
    zs = (z2 - z1)/nz
    w = data.shape[0]
    h = data.shape[1]
    out = np.zeros((nx, nz), dtype=np.float32)
    # q counts how many source samples landed in each output cell.
    q = np.zeros((nx, nz), dtype=np.int32)
    for i in range(w):
        # Fractional output-column span covered by source row i: from the
        # midpoint with the previous X to the midpoint with the next X
        # (open-ended at the array edges).
        n1 = ((X[i-1] + X[i])/2 - x1)/xs if i-1 >= 0 else -1
        n2 = ((X[i+1] + X[i])/2 - x1)/xs if i+1 < w else nx
        # Degenerate span (finer than one output cell): collapse onto the
        # single cell containing X[i].
        if n2 - n1 < 1: n1 = n2 = (X[i] - x1)/xs
        for j in range(h):
            # Same midpoint logic along the second axis, where Z varies per
            # (i, j) sample.
            m1 = ((Z[i,j-1] + Z[i,j])/2 - z1)/zs if j-1 >= 0 else -1
            m2 = ((Z[i,j+1] + Z[i,j])/2 - z1)/zs if j+1 < h else nz
            if m2 - m1 < 1: m1 = m2 = (Z[i,j] - z1)/zs
            # Accumulate this sample into every output cell it covers;
            # the +0.5 rounds the fractional bounds to nearest cell index.
            for n in range(int(n1+0.5), int(n2+0.5+1)):
                for m in range(int(m1+0.5), int(m2+0.5+1)):
                    if n < 0 or n >= nx: continue
                    if m < 0 or m >= nz: continue
                    if np.isnan(data[i,j]): continue
                    out[n,m] += data[i,j]
                    q[n,m] += 1
    # Convert accumulated sums to means; untouched cells become NaN.
    for n in range(nx):
        for m in range(nz):
            if q[n,m] == 0:
                out[n,m] = np.nan
            else:
                out[n,m] /= q[n,m]
    return out
def calipso_time2dt(time, tzinfo=pytz.utc):
    """Convert a CALIPSO timestamp to a timezone-aware datetime.

    ``time`` is a float of the form yymmdd.ffffffff: the integer part
    encodes the date (years relative to 2000), the fractional part is the
    elapsed fraction of that day.
    """
    stamp = int(time)
    day = stamp % 100
    month = (stamp // 100) % 100
    year = 2000 + stamp // 10000
    midnight = datetime.datetime(year, month, day, tzinfo=tzinfo)
    return midnight + datetime.timedelta(time % 1)
def get_cc_cmap(satname='cloudsat', cmap_dir=default_cmap_dir):
    """Return a dict with 'cmap' and 'norm' for CloudSat/CALIPSO plotting.

    Picks the colormap file matching ``satname`` (substring match,
    case-insensitive), loads its 0-255 RGBA entries and normalizes them
    to matplotlib's 0-1 range.

    Raises ValueError for any other satellite name.
    """
    name = satname.lower()
    if 'cloudsat' in name:
        cmap_file = os.path.join(cmap_dir, 'cloudsat-reflectivity.cmap')
    elif 'calipso' in name:
        cmap_file = os.path.join(cmap_dir, 'calipso-backscatter.cmap')
    else:
        raise ValueError('Unrecognized satellite name')
    spec = _cmap(cmap_file)
    listed = mpl.colors.ListedColormap(spec['colors']/255.0)
    listed.set_under(spec['under']/255.0)
    listed.set_over(spec['over']/255.0)
    listed.set_bad(spec['bad']/255.0)
    norm = mpl.colors.BoundaryNorm(spec['bounds'], listed.N)
    return dict(cmap=listed, norm=norm)
def _cmap(filename):
"""Load colormap from file. The expected format of the file is:
BOUNDS
from1 to1 step1
from2 to2 step2
[...]
TICKS
from1 to1 step1
from2 to2 step2
[...]
COLORS
r1 g1 b1
r2 g2 b2
[...]
UNDER_OVER_BAD_COLORS
ro go bo
ru gu bu
rb gb bb
where fromn, ton, stepn are floating point numbers as would be supplied
to numpy.arange, and rn, gn, bn are the color components the n-th color
stripe. Components are expected to be in base 10 format (0-255).
UNDER_OVER_BAD_COLORS section specifies colors to be used for
over, under and bad (masked) values, in that order.
"""
bounds = []
ticks = []
colors = []
special = []
mode = "COLORS"
white = [1, 1, 1, 1]
try:
with open(filename) as f:
for n, s in enumerate(f.readlines()):
s = s.strip()
# Skip blank lines.
if len(s) == 0:
continue
if s in ("BOUNDS", "TICKS", "COLORS", "UNDER_OVER_BAD_COLORS"):
mode = s
continue
a = s.split()
if len(a) not in (3, 4):
raise ValueError("Invalid number of fields")
if mode == "BOUNDS":
bounds += list(np.arange(float(a[0]), float(a[1]), float(a[2])))
elif mode == "TICKS":
ticks += list(np.arange(float(a[0]), float(a[1]), float(a[2])))
elif mode == "COLORS":
rgba = [int(c) for c in a]
if len(rgba) == 3:
rgba.append(255)
colors.append(rgba)
elif mode == "UNDER_OVER_BAD_COLORS":
rgba = [int(c) for c in a]
if len(rgba) == 3:
rgba.append(255)
special.append(rgba)
except IOError as e:
raise e
except ValueError as e:
raise ValueError("Error reading `%s' on line %d: %s" %
(filename, n+1, e))
return {
'colors': np.array(colors),
'bounds': np.array(bounds),
'ticks': np.array(ticks),
'under': np.array(special[0] if len(special) >= 1 else white),
'over': np.array(special[1] if len(special) >= 2 else white),
'bad': np.array(special[2] if len(special) >= 3 else white),
}
def figview(print2file=False, outdir=os.curdir,
            imgname='test_image', imgform='png', imgres=500,
            maxfig=False, tight=False):
    """Display the current matplotlib figure, or save it to disk.

    Parameters
    ----------
    print2file : bool
        If True, save the figure to ``outdir/imgname.imgform`` and close
        it instead of showing it interactively.
    outdir : str
        Output directory; created if it does not exist.
    imgname : str
        Base file name (without extension) for the saved image.
    imgform : str
        Image format / file extension (e.g. 'png').
    imgres : int
        Resolution in dpi passed to ``plt.savefig``.
    maxfig : bool
        If True (interactive mode only), try to maximize the figure
        window; the mechanism depends on the matplotlib backend.
    tight : bool
        If True, apply ``plt.tight_layout`` first.
    """
    if tight:
        plt.tight_layout(pad=2)
    if print2file:
        imgname = os.path.join(outdir, imgname + '.' + imgform)
        # Bug fix: the original called checkdir(outdir), a helper that is
        # neither defined nor imported in this module and so raised
        # NameError; create the output directory explicitly instead.
        if not os.path.isdir(outdir):
            os.makedirs(outdir)
        print('Saved as ' + imgname)
        plt.savefig(imgname, dpi=imgres)
        plt.close()
    else:
        if maxfig:
            mpl_backend = plt.get_backend()
            figManager = plt.get_current_fig_manager()
            if mpl_backend == 'Qt4Agg':
                figManager.window.showMaximized()
            elif mpl_backend == 'TkAgg':
                figManager.window.state('zoomed')
            elif mpl_backend == 'wxAgg':
                figManager.frame.Maximize(True)
            else:
                print("Cannot maximize for: " + mpl_backend)
        plt.show()
| mit |
uglyboxer/linear_neuron | net-p3/lib/python3.5/site-packages/sklearn/feature_extraction/hashing.py | 24 | 5668 | # Author: Lars Buitinck <L.J.Buitinck@uva.nl>
# License: BSD 3 clause
import numbers
import numpy as np
import scipy.sparse as sp
from . import _hashing
from ..base import BaseEstimator, TransformerMixin
def _iteritems(d):
"""Like d.iteritems, but accepts any collections.Mapping."""
return d.iteritems() if hasattr(d, "iteritems") else d.items()
class FeatureHasher(BaseEstimator, TransformerMixin):
    """Implements feature hashing, aka the hashing trick.
    This class turns sequences of symbolic feature names (strings) into
    scipy.sparse matrices, using a hash function to compute the matrix column
    corresponding to a name. The hash function employed is the signed 32-bit
    version of Murmurhash3.
    Feature names of type byte string are used as-is. Unicode strings are
    converted to UTF-8 first, but no Unicode normalization is done.
    This class is a low-memory alternative to DictVectorizer and
    CountVectorizer, intended for large-scale (online) learning and situations
    where memory is tight, e.g. when running prediction code on embedded
    devices.
    Parameters
    ----------
    n_features : integer, optional
        The number of features (columns) in the output matrices. Small numbers
        of features are likely to cause hash collisions, but large numbers
        will cause larger coefficient dimensions in linear learners.
    dtype : numpy type, optional
        The type of feature values. Passed to scipy.sparse matrix constructors
        as the dtype argument. Do not set this to bool, np.boolean or any
        unsigned integer type.
    input_type : string, optional
        Either "dict" (the default) to accept dictionaries over
        (feature_name, value); "pair" to accept pairs of (feature_name, value);
        or "string" to accept single strings.
        feature_name should be a string, while value should be a number.
        In the case of "string", a value of 1 is implied.
        The feature_name is hashed to find the appropriate column for the
        feature. The value's sign might be flipped in the output (but see
        non_negative, below).
    non_negative : boolean, optional, default False
        Whether output matrices should contain non-negative values only;
        effectively calls abs on the matrix prior to returning it.
        When True, output values can be interpreted as frequencies.
        When False, output values will have expected value zero.
    See also
    --------
    DictVectorizer : vectorizes string-valued features using a hash table.
    sklearn.preprocessing.OneHotEncoder : handles nominal/categorical features
        encoded as columns of integers.
    """
    def __init__(self, n_features=(2 ** 20), input_type="dict",
                 dtype=np.float64, non_negative=False):
        # Validate eagerly so bad arguments fail at construction time, not
        # at transform time.
        self._validate_params(n_features, input_type)
        self.dtype = dtype
        self.input_type = input_type
        self.n_features = n_features
        self.non_negative = non_negative
    @staticmethod
    def _validate_params(n_features, input_type):
        # strangely, np.int16 instances are not instances of Integral,
        # while np.int64 instances are...
        if not isinstance(n_features, (numbers.Integral, np.integer)):
            raise TypeError("n_features must be integral, got %r (%s)."
                            % (n_features, type(n_features)))
        elif n_features < 1 or n_features >= 2 ** 31:
            raise ValueError("Invalid number of features (%d)." % n_features)
        if input_type not in ("dict", "pair", "string"):
            raise ValueError("input_type must be 'dict', 'pair' or 'string',"
                             " got %r." % input_type)
    def fit(self, X=None, y=None):
        """No-op.
        This method doesn't do anything. It exists purely for compatibility
        with the scikit-learn transformer API.
        Returns
        -------
        self : FeatureHasher
        """
        # repeat input validation for grid search (which calls set_params)
        self._validate_params(self.n_features, self.input_type)
        return self
    def transform(self, raw_X, y=None):
        """Transform a sequence of instances to a scipy.sparse matrix.
        Parameters
        ----------
        raw_X : iterable over iterable over raw features, length = n_samples
            Samples. Each sample must be an iterable (e.g., a list or tuple)
            containing/generating feature names (and optionally values, see
            the input_type constructor argument) which will be hashed.
            raw_X need not support the len function, so it can be the result
            of a generator; n_samples is determined on the fly.
        y : (ignored)
        Returns
        -------
        X : scipy.sparse matrix, shape = (n_samples, self.n_features)
            Feature matrix, for use with estimators or further transformers.
        Raises
        ------
        ValueError
            If raw_X yields no samples at all.
        """
        raw_X = iter(raw_X)
        # Normalize every input_type to an iterable of (name, value) pairs
        # before handing off to the Cython hashing routine.
        if self.input_type == "dict":
            raw_X = (_iteritems(d) for d in raw_X)
        elif self.input_type == "string":
            raw_X = (((f, 1) for f in x) for x in raw_X)
        indices, indptr, values = \
            _hashing.transform(raw_X, self.n_features, self.dtype)
        n_samples = indptr.shape[0] - 1
        if n_samples == 0:
            raise ValueError("Cannot vectorize empty sequence.")
        X = sp.csr_matrix((values, indices, indptr), dtype=self.dtype,
                          shape=(n_samples, self.n_features))
        X.sum_duplicates()  # also sorts the indices
        if self.non_negative:
            # In-place absolute value on the CSR data buffer.
            np.abs(X.data, X.data)
        return X
| mit |
markinho-web/markinho-web.github.io | MEFaplicado-html/estado_plano/codigos/Derivando-FuncoesFormaEstadoPlano3nos.py | 2 | 22792 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Fri May 10 14:46:37 2019
Funções de forma ok!
@author: markinho
"""
import sympy as sp
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import cm
from mpl_toolkits.mplot3d import axes3d, Axes3D
from mpl_toolkits.mplot3d.art3d import Poly3DCollection
from matplotlib import rcParams
rcParams['mathtext.fontset'] = 'stix'
rcParams['font.family'] = 'STIXGeneral'
import meshio
# Element kept in the x-y plane.
l = sp.Symbol('l')
x_1, y_1, x_2, y_2, x_3, y_3 = sp.symbols('x_1, y_1, x_2, y_2, x_3, y_3')
u_1 = sp.Symbol('u_1')
u_2 = sp.Symbol('u_2')
u_3 = sp.Symbol('u_3')
# Complete second-degree polynomial:
# c0 + c1 x1 + c2 x_2 + c3 x1**2 + c4 x1 x_2 + c5 x_2**2
# (only the linear terms 1, x, y are actually used for the 3-node
# triangle below -- one coefficient per node)
Mat_Coef = sp.Matrix([[1, x_1, y_1], #node 1
                      [1, x_2, y_2], #node 2
                      [1, x_3, y_3]]) #node 3
U = sp.Matrix([u_1, u_2, u_3])
# Solve for the polynomial coefficients in terms of the nodal values.
Coefs = Mat_Coef.inv() * U
# Split each coefficient into numerator/denominator (the common
# denominator is presumably 2*A_t -- see the commented-out
# normalization below; TODO confirm).
An, Ad = sp.fraction(sp.simplify(Coefs[0]))
Bn, Bd = sp.fraction(sp.simplify(Coefs[1]))
Cn, Cd = sp.fraction(sp.simplify(Coefs[2]))
x, y = sp.symbols('x, y')
A_t = sp.Symbol('A_t') # triangle area
#Ns = 1/(2*A_t) * sp.expand(An + Bn*x + Cn*y)
Ns = sp.expand(An + Bn*x + Cn*y)
# Extract each node's shape function: keep the terms multiplying u_i
# and substitute u_i -> 1.
N1 = sp.simplify( sp.Add(*[argi for argi in Ns.args if argi.has(u_1)]).subs(u_1, 1) )
N2 = sp.simplify( sp.Add(*[argi for argi in Ns.args if argi.has(u_2)]).subs(u_2, 1) )
N3 = sp.simplify( sp.Add(*[argi for argi in Ns.args if argi.has(u_3)]).subs(u_3, 1) )
# Shape-function matrix for the 2-dof-per-node (u, v) displacement field.
N = sp.Matrix([[N1, 0, N2, 0, N3, 0], [0, N1, 0, N2, 0, N3]])
# Shape-function plots -------------------------------------------------------
nN1 = sp.utilities.lambdify([x, y], N1, "numpy")
nN2 = sp.utilities.lambdify([x, y], N2, "numpy")
nN3 = sp.utilities.lambdify([x, y], N3, "numpy")
xl = np.linspace(-1., 1., 30)
yl = np.linspace(-1., 1., 30)
xm, ym = np.meshgrid(xl, yl)
# Plotting with matplotlib ----------------------------------------------------
# For illustration only!!!
fig = plt.figure()
ax = Axes3D(fig)
# Vertices (x, y, N) of each planar shape-function surface over the
# unit triangle; vertsK has N=1 at node K and N=0 at the other nodes.
verts1 = [[0, 0, 1],[1, 0, 0],[0, 1, 0]]
verts2 = [[0, 0, 0],[1, 0, 0],[0, 1, 1]]
verts3 = [[0, 0, 0],[1, 0, 1],[0, 1, 0]]
#ax.add_collection3d(Poly3DCollection([verts1]))
#ax.add_collection3d(Poly3DCollection([verts2]))
ax.add_collection3d(Poly3DCollection([verts3]))
plt.show()
#-------------------------------------------------------------------------------------------------------------
##primeira derivada em x
#dN1x = sp.diff(N1, x)#.subs({r: r1, s: s1})
#dN2x = sp.diff(N2, x)#.subs({r: r2, s: s2})
#dN3x = sp.diff(N3, x)#.subs({r: r3, s: s3})
#dN4r = sp.diff(N4, r)#.subs({r: r4, s: s4})
##convertendo para função lambda nuympy
#ndN1r = sp.utilities.lambdify([r, s], dN1r, "numpy")
#ndN2r = sp.utilities.lambdify([r, s], dN2r, "numpy")
#ndN3r = sp.utilities.lambdify([r, s], dN3r, "numpy")
#ndN4r = sp.utilities.lambdify([r, s], dN4r, "numpy")
#
##primeira derivada em s
#dN1y = sp.diff(N1, y)#.subs({r: r1, s: s1})
#dN2y = sp.diff(N2, y)#.subs({r: r2, s: s2})
#dN3y = sp.diff(N3, y)#.subs({r: r3, s: s3})
#dN4s = sp.diff(N4, s)#.subs({r: r4, s: s4})
##convertendo para função lambda nuympy
#ndN1s = sp.utilities.lambdify([r, s], dN1s, "numpy")
#ndN2s = sp.utilities.lambdify([r, s], dN2s, "numpy")
#ndN3s = sp.utilities.lambdify([r, s], dN3s, "numpy")
#ndN4s = sp.utilities.lambdify([r, s], dN4s, "numpy")
#
##gerando a matriz dNdx analítica
#x1 = sp.Symbol('x1')
#y1 = sp.Symbol('y1')
#x2 = sp.Symbol('x2')
#y2 = sp.Symbol('y2')
#x3 = sp.Symbol('x3')
#y3 = sp.Symbol('y3')
#x4 = sp.Symbol('x4')
#y4 = sp.Symbol('y4')
##Matriz dos nós de um elemento
#Xe = sp.Matrix([[x1, y1],[x2, y2], [x3, y3], [x4, y4]])
##Matriz das derivadas das funções de interpolação do elemento padrão no sistema r s
#dNds = sp.Matrix([[dN1r, dN1s], [dN2r, dN2s], [dN3r, dN3s], [dN4r, dN4s]])
#
##Jacobiano analítico
#J = Xe.T * dNds
#JI = J.inv()
#
##derivadas das funções de interpolação do elemento no sistema local x y
#dNdx = dNds * JI
#
#B = sp.Matrix([[dN1x, 0, dN2x, 0, dN3x, 0],
# [0, dN1y, 0, dN2y, 0, dN3y],
# [dN1y, dN1x, dN2y, dN2x, dN3y, dN3x]])
#
##tensores constitutivos
#E, nu = sp.symbols('E, nu')
#
##tensor constitutivo para o estado plano de tensões
##D_t = E/(1 - nu**2) * sp.Matrix([[1, nu, 0], [nu, 1, 0], [0, 0, (1 - nu**2)/( 2*(1 + nu) )]])
#D_t = sp.Matrix([[1, nu, 0], [nu, 1, 0], [0, 0, (1 - nu**2)/( 2*(1 + nu) )]])
#
##tensor constitutivo para o estado plano de deformação
#D_d = E/( (1 + nu)*(1 - 2*nu) )*sp.Matrix([[1 - nu, nu, 0], [nu, 1 - nu, 0], [0, 0, (1 - 2*nu)/2]])
#
##integração da matriz de rigidez com espessura constante no elemento
#t = sp.Symbol('t')
#
#BtDB_t = B.T * D_t * B
#BtDB_d = B.T * D_d * B
#
#ke_t = t * A_t * BtDB_t
#ke_d = t * A_t * BtDB_d
#### iniciando do código numérico ---------------------------------------------------------------------------------------------------------------------
#def dNdx(Xe, pg):
# '''
# Função para a determinação da matriz das derivadas das funções de interpolação já no sistema x y e do jacobiano
#
# Parâmetros
# ----------
#
# Xe: array numpy com as coordenadas de cada nó dos elementos dispostas no sentido horário, com o primeiro nó o correspondente ao segundo quadrante
#
# >>>
# 2 ----- 1
# | |
# | |
# 3-------4
#
# >>> Xe = np.array([ [x1, y1], [x2, y2], [x3, y3], [x4, y4] ])
#
# pg: coordenadas do ponto de gauss utilizado
#
# >>> pg = np.array([ [xpg, ypg] ])
#
# retorna a matriz B para cada ponto de gauss
# '''
# r = pg[0]
# s = pg[1]
# x1 = Xe[0,0]
# y1 = Xe[0,1]
# x2 = Xe[1,0]
# y2 = Xe[1,1]
# x3 = Xe[2,0]
# y3 = Xe[2,1]
# x4 = Xe[3,0]
# y4 = Xe[3,1]
#
# J = np.array([ [x1*(s/4 + 1/4) + x2*(-s/4 - 1/4) + x3*(s/4 - 1/4) + x4*(-s/4 + 1/4), x1*(r/4 + 1/4) + x2*(-r/4 + 1/4) + x3*(r/4 - 1/4) + x4*(-r/4 - 1/4)],
# [y1*(s/4 + 1/4) + y2*(-s/4 - 1/4) + y3*(s/4 - 1/4) + y4*(-s/4 + 1/4), y1*(r/4 + 1/4) + y2*(-r/4 + 1/4) + y3*(r/4 - 1/4) + y4*(-r/4 - 1/4)]])
#
# dNdx = np.array([ [ (r/4 + 1/4)*(-y1*(s/4 + 1/4) - y2*(-s/4 - 1/4) - y3*(s/4 - 1/4) - y4*(-s/4 + 1/4))/(-(x1*(r/4 + 1/4) + x2*(-r/4 + 1/4) + x3*(r/4 - 1/4) + x4*(-r/4 - 1/4))*(y1*(s/4 + 1/4) + y2*(-s/4 - 1/4) + y3*(s/4 - 1/4) + y4*(-s/4 + 1/4)) + (x1*(s/4 + 1/4) + x2*(-s/4 - 1/4) + x3*(s/4 - 1/4) + x4*(-s/4 + 1/4))*(y1*(r/4 + 1/4) + y2*(-r/4 + 1/4) + y3*(r/4 - 1/4) + y4*(-r/4 - 1/4))) + (s/4 + 1/4)*(-(x1*(r/4 + 1/4) + x2*(-r/4 + 1/4) + x3*(r/4 - 1/4) + x4*(-r/4 - 1/4))*(-y1*(s/4 + 1/4) - y2*(-s/4 - 1/4) - y3*(s/4 - 1/4) - y4*(-s/4 + 1/4)) - (x1*(r/4 + 1/4) + x2*(-r/4 + 1/4) + x3*(r/4 - 1/4) + x4*(-r/4 - 1/4))*(y1*(s/4 + 1/4) + y2*(-s/4 - 1/4) + y3*(s/4 - 1/4) + y4*(-s/4 + 1/4)) + (x1*(s/4 + 1/4) + x2*(-s/4 - 1/4) + x3*(s/4 - 1/4) + x4*(-s/4 + 1/4))*(y1*(r/4 + 1/4) + y2*(-r/4 + 1/4) + y3*(r/4 - 1/4) + y4*(-r/4 - 1/4)))/((-(x1*(r/4 + 1/4) + x2*(-r/4 + 1/4) + x3*(r/4 - 1/4) + x4*(-r/4 - 1/4))*(y1*(s/4 + 1/4) + y2*(-s/4 - 1/4) + y3*(s/4 - 1/4) + y4*(-s/4 + 1/4)) + (x1*(s/4 + 1/4) + x2*(-s/4 - 1/4) + x3*(s/4 - 1/4) + x4*(-s/4 + 1/4))*(y1*(r/4 + 1/4) + y2*(-r/4 + 1/4) + y3*(r/4 - 1/4) + y4*(-r/4 - 1/4)))*(x1*(s/4 + 1/4) + x2*(-s/4 - 1/4) + x3*(s/4 - 1/4) + x4*(-s/4 + 1/4))), (r/4 + 1/4)*(x1*(s/4 + 1/4) + x2*(-s/4 - 1/4) + x3*(s/4 - 1/4) + x4*(-s/4 + 1/4))/(-(x1*(r/4 + 1/4) + x2*(-r/4 + 1/4) + x3*(r/4 - 1/4) + x4*(-r/4 - 1/4))*(y1*(s/4 + 1/4) + y2*(-s/4 - 1/4) + y3*(s/4 - 1/4) + y4*(-s/4 + 1/4)) + (x1*(s/4 + 1/4) + x2*(-s/4 - 1/4) + x3*(s/4 - 1/4) + x4*(-s/4 + 1/4))*(y1*(r/4 + 1/4) + y2*(-r/4 + 1/4) + y3*(r/4 - 1/4) + y4*(-r/4 - 1/4))) - (s/4 + 1/4)*(x1*(r/4 + 1/4) + x2*(-r/4 + 1/4) + x3*(r/4 - 1/4) + x4*(-r/4 - 1/4))/(-(x1*(r/4 + 1/4) + x2*(-r/4 + 1/4) + x3*(r/4 - 1/4) + x4*(-r/4 - 1/4))*(y1*(s/4 + 1/4) + y2*(-s/4 - 1/4) + y3*(s/4 - 1/4) + y4*(-s/4 + 1/4)) + (x1*(s/4 + 1/4) + x2*(-s/4 - 1/4) + x3*(s/4 - 1/4) + x4*(-s/4 + 1/4))*(y1*(r/4 + 1/4) + y2*(-r/4 + 1/4) + y3*(r/4 - 1/4) + y4*(-r/4 - 1/4)))],
# [(-r/4 + 1/4)*(-y1*(s/4 + 1/4) - y2*(-s/4 - 1/4) - y3*(s/4 - 1/4) - y4*(-s/4 + 1/4))/(-(x1*(r/4 + 1/4) + x2*(-r/4 + 1/4) + x3*(r/4 - 1/4) + x4*(-r/4 - 1/4))*(y1*(s/4 + 1/4) + y2*(-s/4 - 1/4) + y3*(s/4 - 1/4) + y4*(-s/4 + 1/4)) + (x1*(s/4 + 1/4) + x2*(-s/4 - 1/4) + x3*(s/4 - 1/4) + x4*(-s/4 + 1/4))*(y1*(r/4 + 1/4) + y2*(-r/4 + 1/4) + y3*(r/4 - 1/4) + y4*(-r/4 - 1/4))) + (-s/4 - 1/4)*(-(x1*(r/4 + 1/4) + x2*(-r/4 + 1/4) + x3*(r/4 - 1/4) + x4*(-r/4 - 1/4))*(-y1*(s/4 + 1/4) - y2*(-s/4 - 1/4) - y3*(s/4 - 1/4) - y4*(-s/4 + 1/4)) - (x1*(r/4 + 1/4) + x2*(-r/4 + 1/4) + x3*(r/4 - 1/4) + x4*(-r/4 - 1/4))*(y1*(s/4 + 1/4) + y2*(-s/4 - 1/4) + y3*(s/4 - 1/4) + y4*(-s/4 + 1/4)) + (x1*(s/4 + 1/4) + x2*(-s/4 - 1/4) + x3*(s/4 - 1/4) + x4*(-s/4 + 1/4))*(y1*(r/4 + 1/4) + y2*(-r/4 + 1/4) + y3*(r/4 - 1/4) + y4*(-r/4 - 1/4)))/((-(x1*(r/4 + 1/4) + x2*(-r/4 + 1/4) + x3*(r/4 - 1/4) + x4*(-r/4 - 1/4))*(y1*(s/4 + 1/4) + y2*(-s/4 - 1/4) + y3*(s/4 - 1/4) + y4*(-s/4 + 1/4)) + (x1*(s/4 + 1/4) + x2*(-s/4 - 1/4) + x3*(s/4 - 1/4) + x4*(-s/4 + 1/4))*(y1*(r/4 + 1/4) + y2*(-r/4 + 1/4) + y3*(r/4 - 1/4) + y4*(-r/4 - 1/4)))*(x1*(s/4 + 1/4) + x2*(-s/4 - 1/4) + x3*(s/4 - 1/4) + x4*(-s/4 + 1/4))), (-r/4 + 1/4)*(x1*(s/4 + 1/4) + x2*(-s/4 - 1/4) + x3*(s/4 - 1/4) + x4*(-s/4 + 1/4))/(-(x1*(r/4 + 1/4) + x2*(-r/4 + 1/4) + x3*(r/4 - 1/4) + x4*(-r/4 - 1/4))*(y1*(s/4 + 1/4) + y2*(-s/4 - 1/4) + y3*(s/4 - 1/4) + y4*(-s/4 + 1/4)) + (x1*(s/4 + 1/4) + x2*(-s/4 - 1/4) + x3*(s/4 - 1/4) + x4*(-s/4 + 1/4))*(y1*(r/4 + 1/4) + y2*(-r/4 + 1/4) + y3*(r/4 - 1/4) + y4*(-r/4 - 1/4))) - (-s/4 - 1/4)*(x1*(r/4 + 1/4) + x2*(-r/4 + 1/4) + x3*(r/4 - 1/4) + x4*(-r/4 - 1/4))/(-(x1*(r/4 + 1/4) + x2*(-r/4 + 1/4) + x3*(r/4 - 1/4) + x4*(-r/4 - 1/4))*(y1*(s/4 + 1/4) + y2*(-s/4 - 1/4) + y3*(s/4 - 1/4) + y4*(-s/4 + 1/4)) + (x1*(s/4 + 1/4) + x2*(-s/4 - 1/4) + x3*(s/4 - 1/4) + x4*(-s/4 + 1/4))*(y1*(r/4 + 1/4) + y2*(-r/4 + 1/4) + y3*(r/4 - 1/4) + y4*(-r/4 - 1/4)))],
# [ (r/4 - 1/4)*(-y1*(s/4 + 1/4) - y2*(-s/4 - 1/4) - y3*(s/4 - 1/4) - y4*(-s/4 + 1/4))/(-(x1*(r/4 + 1/4) + x2*(-r/4 + 1/4) + x3*(r/4 - 1/4) + x4*(-r/4 - 1/4))*(y1*(s/4 + 1/4) + y2*(-s/4 - 1/4) + y3*(s/4 - 1/4) + y4*(-s/4 + 1/4)) + (x1*(s/4 + 1/4) + x2*(-s/4 - 1/4) + x3*(s/4 - 1/4) + x4*(-s/4 + 1/4))*(y1*(r/4 + 1/4) + y2*(-r/4 + 1/4) + y3*(r/4 - 1/4) + y4*(-r/4 - 1/4))) + (s/4 - 1/4)*(-(x1*(r/4 + 1/4) + x2*(-r/4 + 1/4) + x3*(r/4 - 1/4) + x4*(-r/4 - 1/4))*(-y1*(s/4 + 1/4) - y2*(-s/4 - 1/4) - y3*(s/4 - 1/4) - y4*(-s/4 + 1/4)) - (x1*(r/4 + 1/4) + x2*(-r/4 + 1/4) + x3*(r/4 - 1/4) + x4*(-r/4 - 1/4))*(y1*(s/4 + 1/4) + y2*(-s/4 - 1/4) + y3*(s/4 - 1/4) + y4*(-s/4 + 1/4)) + (x1*(s/4 + 1/4) + x2*(-s/4 - 1/4) + x3*(s/4 - 1/4) + x4*(-s/4 + 1/4))*(y1*(r/4 + 1/4) + y2*(-r/4 + 1/4) + y3*(r/4 - 1/4) + y4*(-r/4 - 1/4)))/((-(x1*(r/4 + 1/4) + x2*(-r/4 + 1/4) + x3*(r/4 - 1/4) + x4*(-r/4 - 1/4))*(y1*(s/4 + 1/4) + y2*(-s/4 - 1/4) + y3*(s/4 - 1/4) + y4*(-s/4 + 1/4)) + (x1*(s/4 + 1/4) + x2*(-s/4 - 1/4) + x3*(s/4 - 1/4) + x4*(-s/4 + 1/4))*(y1*(r/4 + 1/4) + y2*(-r/4 + 1/4) + y3*(r/4 - 1/4) + y4*(-r/4 - 1/4)))*(x1*(s/4 + 1/4) + x2*(-s/4 - 1/4) + x3*(s/4 - 1/4) + x4*(-s/4 + 1/4))), (r/4 - 1/4)*(x1*(s/4 + 1/4) + x2*(-s/4 - 1/4) + x3*(s/4 - 1/4) + x4*(-s/4 + 1/4))/(-(x1*(r/4 + 1/4) + x2*(-r/4 + 1/4) + x3*(r/4 - 1/4) + x4*(-r/4 - 1/4))*(y1*(s/4 + 1/4) + y2*(-s/4 - 1/4) + y3*(s/4 - 1/4) + y4*(-s/4 + 1/4)) + (x1*(s/4 + 1/4) + x2*(-s/4 - 1/4) + x3*(s/4 - 1/4) + x4*(-s/4 + 1/4))*(y1*(r/4 + 1/4) + y2*(-r/4 + 1/4) + y3*(r/4 - 1/4) + y4*(-r/4 - 1/4))) - (s/4 - 1/4)*(x1*(r/4 + 1/4) + x2*(-r/4 + 1/4) + x3*(r/4 - 1/4) + x4*(-r/4 - 1/4))/(-(x1*(r/4 + 1/4) + x2*(-r/4 + 1/4) + x3*(r/4 - 1/4) + x4*(-r/4 - 1/4))*(y1*(s/4 + 1/4) + y2*(-s/4 - 1/4) + y3*(s/4 - 1/4) + y4*(-s/4 + 1/4)) + (x1*(s/4 + 1/4) + x2*(-s/4 - 1/4) + x3*(s/4 - 1/4) + x4*(-s/4 + 1/4))*(y1*(r/4 + 1/4) + y2*(-r/4 + 1/4) + y3*(r/4 - 1/4) + y4*(-r/4 - 1/4)))],
# [(-r/4 - 1/4)*(-y1*(s/4 + 1/4) - y2*(-s/4 - 1/4) - y3*(s/4 - 1/4) - y4*(-s/4 + 1/4))/(-(x1*(r/4 + 1/4) + x2*(-r/4 + 1/4) + x3*(r/4 - 1/4) + x4*(-r/4 - 1/4))*(y1*(s/4 + 1/4) + y2*(-s/4 - 1/4) + y3*(s/4 - 1/4) + y4*(-s/4 + 1/4)) + (x1*(s/4 + 1/4) + x2*(-s/4 - 1/4) + x3*(s/4 - 1/4) + x4*(-s/4 + 1/4))*(y1*(r/4 + 1/4) + y2*(-r/4 + 1/4) + y3*(r/4 - 1/4) + y4*(-r/4 - 1/4))) + (-s/4 + 1/4)*(-(x1*(r/4 + 1/4) + x2*(-r/4 + 1/4) + x3*(r/4 - 1/4) + x4*(-r/4 - 1/4))*(-y1*(s/4 + 1/4) - y2*(-s/4 - 1/4) - y3*(s/4 - 1/4) - y4*(-s/4 + 1/4)) - (x1*(r/4 + 1/4) + x2*(-r/4 + 1/4) + x3*(r/4 - 1/4) + x4*(-r/4 - 1/4))*(y1*(s/4 + 1/4) + y2*(-s/4 - 1/4) + y3*(s/4 - 1/4) + y4*(-s/4 + 1/4)) + (x1*(s/4 + 1/4) + x2*(-s/4 - 1/4) + x3*(s/4 - 1/4) + x4*(-s/4 + 1/4))*(y1*(r/4 + 1/4) + y2*(-r/4 + 1/4) + y3*(r/4 - 1/4) + y4*(-r/4 - 1/4)))/((-(x1*(r/4 + 1/4) + x2*(-r/4 + 1/4) + x3*(r/4 - 1/4) + x4*(-r/4 - 1/4))*(y1*(s/4 + 1/4) + y2*(-s/4 - 1/4) + y3*(s/4 - 1/4) + y4*(-s/4 + 1/4)) + (x1*(s/4 + 1/4) + x2*(-s/4 - 1/4) + x3*(s/4 - 1/4) + x4*(-s/4 + 1/4))*(y1*(r/4 + 1/4) + y2*(-r/4 + 1/4) + y3*(r/4 - 1/4) + y4*(-r/4 - 1/4)))*(x1*(s/4 + 1/4) + x2*(-s/4 - 1/4) + x3*(s/4 - 1/4) + x4*(-s/4 + 1/4))), (-r/4 - 1/4)*(x1*(s/4 + 1/4) + x2*(-s/4 - 1/4) + x3*(s/4 - 1/4) + x4*(-s/4 + 1/4))/(-(x1*(r/4 + 1/4) + x2*(-r/4 + 1/4) + x3*(r/4 - 1/4) + x4*(-r/4 - 1/4))*(y1*(s/4 + 1/4) + y2*(-s/4 - 1/4) + y3*(s/4 - 1/4) + y4*(-s/4 + 1/4)) + (x1*(s/4 + 1/4) + x2*(-s/4 - 1/4) + x3*(s/4 - 1/4) + x4*(-s/4 + 1/4))*(y1*(r/4 + 1/4) + y2*(-r/4 + 1/4) + y3*(r/4 - 1/4) + y4*(-r/4 - 1/4))) - (-s/4 + 1/4)*(x1*(r/4 + 1/4) + x2*(-r/4 + 1/4) + x3*(r/4 - 1/4) + x4*(-r/4 - 1/4))/(-(x1*(r/4 + 1/4) + x2*(-r/4 + 1/4) + x3*(r/4 - 1/4) + x4*(-r/4 - 1/4))*(y1*(s/4 + 1/4) + y2*(-s/4 - 1/4) + y3*(s/4 - 1/4) + y4*(-s/4 + 1/4)) + (x1*(s/4 + 1/4) + x2*(-s/4 - 1/4) + x3*(s/4 - 1/4) + x4*(-s/4 + 1/4))*(y1*(r/4 + 1/4) + y2*(-r/4 + 1/4) + y3*(r/4 - 1/4) + y4*(-r/4 - 1/4)))]])
# B1x = dNdx[0,0]
# B1y = dNdx[0,1]
# B2x = dNdx[1,0]
# B2y = dNdx[1,1]
# B3x = dNdx[2,0]
# B3y = dNdx[2,1]
# B4x = dNdx[3,0]
# B4y = dNdx[3,1]
# B = np.array([[B1x, 0, B2x, 0, B3x, 0, B4x, 0],
# [ 0, B1y, 0, B2y, 0, B3y, 0, B4y],
# [B1y, B1x, B2y, B2x, B3y, B3x, B4y, B4x]])
# return B, J
#
#def ke(Xe, E, nu, t):
# '''
# Função para a geração das matrizes de rigidez dos elementos função das coordenadas dos elementos no sistema global, o módulo de elasticidade
# do material (E), o corficiente de poisson do material (nu) e da espessura (t), considerando 4 pontos de gauss para a integração
#
# Parâmetros
# ----------
#
# Xe: array numpy com as coordenadas de cada nó dos elementos dispostas no sentido antihorário, com o primeiro nó o correspondente ao primeiro quadrante.
#
# >>> Xe = np.array([ [x1, y1], [x2, y2], [x3, y3], [x4, y4] ])
# '''
# #matriz constitutiva do material
# D = E/(1 - nu**2) * np.array([[1, nu, 0],
# [nu, 1, 0],
# [0, 0, (1 - nu**2)/(2 + 2*nu)]])
# #número de graus de liberdade por elemento
# GLe = 8
# #coordenadas e pesos dos pontos de Gauss
# PG = np.array([[0.5773502691896258, 0.5773502691896258],
# [-0.5773502691896258, 0.5773502691896258],
# [-0.5773502691896258, -0.5773502691896258],
# [0.5773502691896258, -0.5773502691896258]])
# wPG = np.array([[1., 1.],
# [1., 1.],
# [1., 1.],
# [1., 1.]])
# Be = []
# Ke = np.zeros((GLe, GLe))
# for p in range(PG.shape[0]):
# B, J = dNdx(Xe, PG[p])
# Be.append(B)
# Ke += np.matmul( np.matmul(np.transpose(B), D), B) * wPG[p, 0] * wPG[p, 1] * np.linalg.det(J) * t
# return Ke, Be
#
##coordenadas dos nós da estrutura
#NOS = np.array([ [30., 0.],
# [60., 0.],
# [90., 0.],
# [120., 0.],
# [30., 20.],
# [60., 20.],
# [90., 20.],
# [120., 20.],
# [0., 0.],
# [0., 20.],])
#
##incidência dos elementos !!! DEVE SEGUIR A ORDEM DAS FUNÇÕES DE INTERPOLAÇÃO DEFINIDA NA FUNÇÃO dNdx !!!
#IE = np.array([ [9, 4, 0, 8],
# [4, 5, 1, 0],
# [5, 6, 2, 1],
# [6, 7, 3, 2],])
#
##malha de elementos
#Xe = []
#for e in IE:
# Xe.append( np.array([ NOS[e[0]], NOS[e[1]], NOS[e[2]], NOS[e[3]] ]) )
#
##propriedades mecânicas do material da estrutura e espessura
#E = 20000. #kN/cm2
#nu = 0.3
#t = 10. #cm
##resistência a compressão e a tração para aplicação do critério de Christensen, aço A-36
#Sc = 25. #kN/cm2
#St = 5. #kN/cm2
##coesão e o ângulo de atrito para os critérios de Mohr-Coulomb e Drucker-Prager (http://www.pcc.usp.br/files/text/publications/BT_00231.pdf)
#phi = 51. * np.pi/180.
#coesao = 0.00073 #kN/cm2
#
##determinação da matriz de rigidez dos elementos
#Ke1, Be1 = ke(Xe[0], E, nu, t)
#Ke2, Be2 = ke(Xe[1], E, nu, t)
#Ke3, Be3 = ke(Xe[2], E, nu, t)
#Ke4, Be4 = ke(Xe[3], E, nu, t)
#
##indexação dos graus de liberdade
#ID1 = np.repeat(IE[0]*2, 2) + np.tile(np.array([0, 1]), 4)
#ID2 = np.repeat(IE[1]*2, 2) + np.tile(np.array([0, 1]), 4)
#ID3 = np.repeat(IE[2]*2, 2) + np.tile(np.array([0, 1]), 4)
#ID4 = np.repeat(IE[3]*2, 2) + np.tile(np.array([0, 1]), 4)
#
##graus de liberdade da estrutura
#GL = NOS.shape[0]*2
#DOF = GL - 4
#
##montagem da matriz de rigidez da estrutura
#K = np.zeros((GL, GL))
#for i in range(8):
# for j in range(8):
# K[ ID1[i], ID1[j] ] += Ke1[i, j]
# K[ ID2[i], ID2[j] ] += Ke2[i, j]
# K[ ID3[i], ID3[j] ] += Ke3[i, j]
# K[ ID4[i], ID4[j] ] += Ke4[i, j]
#
##separação das matrizes de rigidez
#Ku = K[:DOF, :DOF]
#Kr = K[GL-DOF:, :DOF]
#
##vetor de forças nodais
#F = np.zeros(GL)
#F[7] = -150. #kN
#F[15] = -150. #kN
#
#Fu = F[:DOF]
#Fr = F[GL-DOF:]
#
#Uu = np.linalg.solve(Ku, Fu)
#Rr = np.matmul(Kr, Uu) - Fr
#
#U = np.zeros(GL)
#U[:DOF] = Uu
#
#Uxy = U.reshape(NOS.shape)
#
###visualização dos deslocamentos ---------------------------------------------------------------------------------------------------------------------
##fig = go.Figure(data = go.Contour(z=Uxy[:,0], x=NOS[:,0], y=NOS[:,1], colorscale='Jet', contours=dict(
## showlabels = True, # show labels on contours
## labelfont = dict(size = 12, color = 'white') ) ) )
##fig.update_layout(title="Deslocamentos em X", autosize=True, width=1200, height=400)
##fig.write_html('deslocamentos.html')
#
###visualização dos vetores de deslocamentos dos nós
##fig = plty.figure_factory.create_quiver(NOS[:,0], NOS[:,1], Uxy[:,0], Uxy[:,1])
##fig.write_html('deslocamentosVetor.html')
#
###geração do arquivo vtu
##pontos = NOS
##celulas = {'quad': IE}
##meshio.write_points_cells(
## "teste.vtu",
## pontos,
## celulas,
## # Optionally provide extra data on points, cells, etc.
## point_data = {"U": Uxy},
## # cell_data=cell_data,
## # field_data=field_data
## )
#
##-----------------------------------------------------------------------------------------------------------------------------------------------------
#
##determinação dos deslocamentos por elemento
#Ue = []
#Ue.append( U[ID1] )
#Ue.append( U[ID2] )
#Ue.append( U[ID3] )
#Ue.append( U[ID4] )
#
#
##determinação das deformações por ponto de Gauss -----------------------------------------------------------------------------------------------------
#epsilon1 = []
#epsilon2 = []
#epsilon3 = []
#epsilon4 = []
#for b in range(4): #range na quantidade de pontos de Gauss
# epsilon1.append( np.matmul(Be1[b], Ue[0]) )
# epsilon2.append( np.matmul(Be2[b], Ue[1]) )
# epsilon3.append( np.matmul(Be3[b], Ue[2]) )
# epsilon4.append( np.matmul(Be4[b], Ue[3]) )
#
##determinação das tensões por ponto de Gauss ---------------------------------------------------------------------------------------------------------
##matriz constitutiva do material
#D = E/(1 - nu**2) * np.array([[1, nu, 0],
# [nu, 1, 0],
# [0, 0, (1 - nu**2)/(2 + 2*nu)]])
#
#sigma1 = []
#sigma2 = []
#sigma3 = []
#sigma4 = []
#for b in range(4): #range na quantidade de pontos de Gauss
# sigma1.append( np.matmul(D, epsilon1[b]) )
# sigma2.append( np.matmul(D, epsilon2[b]) )
# sigma3.append( np.matmul(D, epsilon3[b]) )
# sigma4.append( np.matmul(D, epsilon4[b]) )
#
##cálculo das tensões principais nos pontos de Gauss---------------------------------------------------------------------------------------------------
##tensão principal máxima, tensão principal mínima, ângulo das tensões principais, tensão máxima de cisalhamento, tensão equivalente de von Mises
#sigmaPP1 = []
#sigmaPP2 = []
#sigmaPP3 = []
#sigmaPP4 = []
#
#def principaisPG(sigmas):
# '''
# Função para a determinação da tensão principal 1 (sigmaMAX), tensão principal 2 (sigmaMIN),
# ângulo das tensões principais, tensão máxima de cisalhamento, tensão equivalente de von Mises, de Christensen para materiais frágeis com
# sigmaC <= 0.5 sigmaT (que deve ser menor que 1), de Morh-Coulomb de Drucker-Prager para a tensão fora do plano igual a zero
#
# sigmas é um array de uma dimensão contendo sigma_x, sigma_y e tau_xy
#
# retorna um array de uma dimensão com as quantidades acima
# '''
# sigma_x = sigmas[0]
# sigma_y = sigmas[1]
# tay_xy = sigmas[2]
#
# sigmaMAX = (sigma_x + sigma_y)/2 + np.sqrt( ((sigma_x - sigma_y)/2)**2 + tay_xy**2 )
# sigmaMIN = (sigma_x + sigma_y)/2 - np.sqrt( ((sigma_x - sigma_y)/2)**2 + tay_xy**2 )
# theta = 1./2. * np.arctan( 2*tay_xy/(sigma_x - sigma_y) )
# tauMAX = (sigmaMAX - sigmaMIN)/2
# sigmaEQvM = np.sqrt( sigmaMAX**2 - sigmaMAX*sigmaMIN + sigmaMIN**2 )
# sigmaEQc = (1/St - 1/Sc)*(sigmaMAX + sigmaMIN) + 1/(St*Sc)*(sigmaMAX**2 - sigmaMAX*sigmaMIN + sigmaMIN**2)
# sigmaEQmc = 2*( (sigmaMAX + sigmaMIN)/2.*np.sin(phi) + coesao*np.cos(phi) )/(sigmaMAX - sigmaMIN)
# A = 2*1.4142135623730951*np.sin(phi)/(3 - np.sin(phi))
# B = 3.*coesao*np.cos(phi)/np.sin(phi)
# sigmaEQdp = ( (sigmaMAX - sigmaMIN)**2 + sigmaMAX**2 + sigmaMIN**2 )/( A**2*(sigmaMAX + sigmaMIN + B)**2 )
#
# return np.array([ sigmaMAX, sigmaMIN, theta, tauMAX, sigmaEQvM, sigmaEQc, sigmaEQmc, sigmaEQdp ])
#
#for p in range(4):
# sigmaPP1.append( principaisPG(sigma1[p]) )
# sigmaPP2.append( principaisPG(sigma2[p]) )
# sigmaPP3.append( principaisPG(sigma3[p]) )
# sigmaPP4.append( principaisPG(sigma4[p]) )
#
##cálculo das tensões nos nós, interpolando com as funções de interpolação dos elementos
| mit |
mattjj/pyhawkes | experiments/discrete_continuous_comparison.py | 2 | 5308 | import time
import numpy as np
np.random.seed(1111)
np.seterr(over="raise")
import cPickle, os
from hips.plotting.layout import create_figure
import matplotlib.pyplot as plt
import brewer2mpl
colors = brewer2mpl.get_map("Set1", "Qualitative", 9).mpl_colors
# goodcolors = np.array([0,1,2,4,6,7,8])
# colors = np.array(colors)[goodcolors]
from pybasicbayes.util.general import ibincount
from pybasicbayes.util.text import progprint_xrange
import pyhawkes.models
reload(pyhawkes.models)
# Set globals
K = 10
B = 3
dt = 1
dt_max = 10.
T = 100.
network_hypers = {'C': 1, 'kappa': 1., 'c': np.zeros(K, dtype=np.int), 'p': 1*np.ones((1,1)), 'v': 10.}
def generate_dataset(bias=1.):
# Create the model with these parameters
network_hypers = {'C': 1, 'kappa': 1., 'c': np.zeros(K, dtype=np.int), 'p': 1*np.ones((1,1)), 'v': 100.}
bkgd_hypers = {"alpha": 3., "beta": 3./bias}
dt_model = pyhawkes.models.\
DiscreteTimeNetworkHawkesModelSpikeAndSlab(K=K, dt=dt, dt_max=dt_max, B=B,
bkgd_hypers=bkgd_hypers,
network_hypers=network_hypers)
# dt_model.bias_model.lambda0 = bias * np.ones(K)
assert dt_model.check_stability()
S_dt,_ = dt_model.generate(T=int(np.ceil(T/dt)), keep=False)
print "sampled dataset with ", S_dt.sum(), "events"
# Convert S_dt to continuous time
S_ct = dt * np.concatenate([ibincount(S) for S in S_dt.T]).astype(float)
S_ct += dt * np.random.rand(*S_ct.shape)
assert np.all(S_ct < T)
C_ct = np.concatenate([k*np.ones(S.sum()) for k,S in enumerate(S_dt.T)]).astype(int)
# Sort the data
perm = np.argsort(S_ct)
S_ct = S_ct[perm]
C_ct = C_ct[perm]
return S_dt, S_ct, C_ct
def fit_discrete_time_model_gibbs(S_dt, N_samples=100):
    """Run Gibbs sampling for the discrete-time model on ``S_dt``.

    Returns the average wall-clock seconds per resampling iteration.
    """
    model = pyhawkes.models.\
        DiscreteTimeNetworkHawkesModelSpikeAndSlab(K=K, dt=dt, dt_max=dt_max, B=B,
                                                   network_hypers=network_hypers)
    model.add_data(S_dt)
    start = time.time()
    for _ in progprint_xrange(N_samples, perline=25):
        model.resample_model()
    elapsed = time.time() - start
    return elapsed / N_samples
def fit_continuous_time_model_gibbs(S_ct, C_ct, N_samples=100):
    """Run Gibbs sampling for the continuous-time model; return sec/iteration."""
    # Fit a CT model (previous comment said "DT" -- copy/paste slip)
    ct_model = pyhawkes.models.\
        ContinuousTimeNetworkHawkesModel(K, dt_max=dt_max,
                                         network_hypers=network_hypers)
    ct_model.add_data(S_ct, C_ct, T)
    tic = time.time()
    for iter in progprint_xrange(N_samples, perline=25):
        ct_model.resample_model()
    toc = time.time()
    # Average wall-clock seconds per Gibbs sweep
    return (toc-tic) / N_samples
# def run_time_vs_bias():
if __name__ == "__main__":
    # Compare per-iteration Gibbs sampling cost of the discrete- and
    # continuous-time models as the event rate (background bias) grows.
    # Results are cached on disk so the expensive sweep runs only once.
    # run_time_vs_bias()
    # biases = np.logspace(-1,1, num=10)
    res_file = os.path.join("results", "run_time_vs_rate_2.pkl")
    if os.path.exists(res_file):
        print "Loading results from ", res_file
        with open(res_file, "r") as f:
            events_per_bin, dt_times, ct_times = cPickle.load(f)
    else:
        biases = np.linspace(10**-1,3**1, num=5)
        N_runs_per_bias = 5
        N_samples = 100
        events_per_bin = []
        dt_times = []
        ct_times = []
        for bias in biases:
            for iter in xrange(N_runs_per_bias):
                print "Bias ", bias, " Run (%d/%d)" % (iter, N_runs_per_bias)
                S_dt, S_ct, C_ct = generate_dataset(bias)
                events_per_bin.append(S_dt.sum() / float(S_dt.size))
                dt_times.append(fit_discrete_time_model_gibbs(S_dt, N_samples))
                ct_times.append(fit_continuous_time_model_gibbs(S_ct, C_ct, N_samples))
        # Cache the sweep results for subsequent plotting-only runs
        with open(res_file, "w") as f:
            cPickle.dump((events_per_bin, dt_times, ct_times), f, protocol=-1)
    # Sort all three series by event rate for plotting and curve fitting
    events_per_bin = np.array(events_per_bin)
    dt_times = np.array(dt_times)
    ct_times = np.array(ct_times)
    perm = np.argsort(events_per_bin)
    events_per_bin = events_per_bin[perm]
    dt_times = dt_times[perm]
    ct_times = ct_times[perm]
    # Plot the results
    fig = create_figure(figsize=(2.5,2.5))
    fig.set_tight_layout(True)
    ax = fig.add_subplot(111)
    # Plot DT data
    ax.plot(events_per_bin, dt_times, 'o', linestyle="none",
            markerfacecolor=colors[2], markeredgecolor=colors[2], markersize=4,
            label="Discrete")
    # Plot linear fit (DT cost expected to grow ~linearly with rate)
    p_dt = np.poly1d(np.polyfit(events_per_bin, dt_times, deg=1))
    dt_pred = p_dt(events_per_bin)
    ax.plot(events_per_bin, dt_pred, ':', lw=2, color=colors[2])
    # Plot CT data
    ax.plot(events_per_bin, ct_times, 's', linestyle="none",
            markerfacecolor=colors[7], markeredgecolor=colors[7], markersize=4,
            label="Continuous")
    # Plot quadratic fit (CT cost grows superlinearly with event count)
    # NOTE(review): sorted() below is redundant -- events_per_bin was already
    # sorted via argsort above.
    p_ct = np.poly1d(np.polyfit(events_per_bin, ct_times, deg=2))
    ct_pred = p_ct(sorted(events_per_bin))
    ax.plot(events_per_bin, ct_pred, ':', lw=2, color=colors[7])
    plt.xlabel("Events per bin")
    # plt.xlim(0, events_per_bin.max())
    plt.xlim(0, 6)
    plt.ylabel("time per iter [sec]")
    plt.ylim(0, 0.15)
    plt.legend(loc="upper left", prop={"size": 8})
    fig.savefig(os.path.join("results", "discrete_cont_comparison.pdf"))
    plt.show()
| mit |
r-mart/scikit-learn | sklearn/metrics/tests/test_classification.py | 83 | 49782 | from __future__ import division, print_function
import numpy as np
from scipy import linalg
from functools import partial
from itertools import product
import warnings
from sklearn import datasets
from sklearn import svm
from sklearn.datasets import make_multilabel_classification
from sklearn.preprocessing import label_binarize
from sklearn.utils.fixes import np_version
from sklearn.utils.validation import check_random_state
from sklearn.utils.testing import assert_raises, clean_warning_registry
from sklearn.utils.testing import assert_raise_message
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_warns
from sklearn.utils.testing import assert_no_warnings
from sklearn.utils.testing import assert_warns_message
from sklearn.utils.testing import assert_not_equal
from sklearn.utils.testing import ignore_warnings
from sklearn.metrics import accuracy_score
from sklearn.metrics import average_precision_score
from sklearn.metrics import classification_report
from sklearn.metrics import cohen_kappa_score
from sklearn.metrics import confusion_matrix
from sklearn.metrics import f1_score
from sklearn.metrics import fbeta_score
from sklearn.metrics import hamming_loss
from sklearn.metrics import hinge_loss
from sklearn.metrics import jaccard_similarity_score
from sklearn.metrics import log_loss
from sklearn.metrics import matthews_corrcoef
from sklearn.metrics import precision_recall_fscore_support
from sklearn.metrics import precision_score
from sklearn.metrics import recall_score
from sklearn.metrics import zero_one_loss
from sklearn.metrics import brier_score_loss
from sklearn.metrics.classification import _check_targets
from sklearn.metrics.base import UndefinedMetricWarning
###############################################################################
# Utilities for testing
def make_prediction(dataset=None, binary=False):
    """Make some classification predictions on a toy dataset using a SVC

    If binary is True restrict to a binary classification problem instead of a
    multiclass classification problem

    Returns (y_true, y_pred, probas_pred) for the held-out second half of the
    shuffled data; probas_pred is the positive-class column when binary.
    """
    if dataset is None:
        # import some data to play with
        dataset = datasets.load_iris()
    X = dataset.data
    y = dataset.target
    if binary:
        # restrict to a binary classification task
        X, y = X[y < 2], y[y < 2]
    n_samples, n_features = X.shape
    p = np.arange(n_samples)
    # fixed seed so every test sees the same shuffle and predictions
    rng = check_random_state(37)
    rng.shuffle(p)
    X, y = X[p], y[p]
    half = int(n_samples / 2)
    # add noisy features to make the problem harder and avoid perfect results
    rng = np.random.RandomState(0)
    X = np.c_[X, rng.randn(n_samples, 200 * n_features)]
    # run classifier, get class probabilities and label predictions
    clf = svm.SVC(kernel='linear', probability=True, random_state=0)
    probas_pred = clf.fit(X[:half], y[:half]).predict_proba(X[half:])
    if binary:
        # only interested in probabilities of the positive case
        # XXX: do we really want a special API for the binary case?
        probas_pred = probas_pred[:, 1]
    y_pred = clf.predict(X[half:])
    y_true = y[half:]
    return y_true, y_pred, probas_pred
###############################################################################
# Tests
def test_multilabel_accuracy_score_subset_accuracy():
    """Subset (exact-match) accuracy on dense multilabel indicator matrices."""
    y1 = np.array([[0, 1, 1], [1, 0, 1]])
    y2 = np.array([[0, 0, 1], [1, 0, 1]])
    # One of the two rows differs between y1 and y2
    assert_equal(accuracy_score(y1, y2), 0.5)
    # Perfect agreement scores 1
    assert_equal(accuracy_score(y1, y1), 1)
    assert_equal(accuracy_score(y2, y2), 1)
    # Complements and all-zero predictions never match any row exactly
    for truth in (y2, y1):
        assert_equal(accuracy_score(truth, np.logical_not(truth)), 0)
    for truth in (y1, y2):
        assert_equal(accuracy_score(truth, np.zeros(y1.shape)), 0)
def test_precision_recall_f1_score_binary():
    """P/R/F1 on a binary task: per-class values and averaged scorers."""
    # Test Precision Recall and F1 Score for binary classification task
    y_true, y_pred, _ = make_prediction(binary=True)
    # detailed measures for each class
    p, r, f, s = precision_recall_fscore_support(y_true, y_pred, average=None)
    assert_array_almost_equal(p, [0.73, 0.85], 2)
    assert_array_almost_equal(r, [0.88, 0.68], 2)
    assert_array_almost_equal(f, [0.80, 0.76], 2)
    assert_array_equal(s, [25, 25])
    # individual scoring function that can be used for grid search: in the
    # binary class case the score is the value of the measure for the positive
    # class (e.g. label == 1). This is deprecated for average != 'binary'.
    assert_dep_warning = partial(assert_warns, DeprecationWarning)
    for kwargs, my_assert in [({}, assert_no_warnings),
                              ({'average': 'binary'}, assert_no_warnings),
                              ({'average': 'micro'}, assert_dep_warning)]:
        ps = my_assert(precision_score, y_true, y_pred, **kwargs)
        assert_array_almost_equal(ps, 0.85, 2)
        rs = my_assert(recall_score, y_true, y_pred, **kwargs)
        assert_array_almost_equal(rs, 0.68, 2)
        fs = my_assert(f1_score, y_true, y_pred, **kwargs)
        assert_array_almost_equal(fs, 0.76, 2)
        # F-beta must match its closed form from precision and recall
        assert_almost_equal(my_assert(fbeta_score, y_true, y_pred, beta=2,
                                      **kwargs),
                            (1 + 2 ** 2) * ps * rs / (2 ** 2 * ps + rs), 2)
def test_precision_recall_f_binary_single_class():
    """P/R/F1 behave sensibly when only one class is present.

    Such a case may occur with non-stratified cross-validation.
    """
    # All-positive labels: every metric is perfect
    for metric in (precision_score, recall_score, f1_score):
        assert_equal(1., metric([1, 1], [1, 1]))
    # All-negative labels: the positive class never occurs, so scores are 0
    for metric in (precision_score, recall_score, f1_score):
        assert_equal(0., metric([-1, -1], [-1, -1]))
@ignore_warnings
def test_precision_recall_f_extra_labels():
    """Test handling of explicit additional (not in input) labels to PRF
    """
    y_true = [1, 3, 3, 2]
    y_pred = [1, 1, 3, 2]
    y_true_bin = label_binarize(y_true, classes=np.arange(5))
    y_pred_bin = label_binarize(y_pred, classes=np.arange(5))
    # exercise both the multiclass and the binarized multilabel paths
    data = [(y_true, y_pred),
            (y_true_bin, y_pred_bin)]
    for i, (y_true, y_pred) in enumerate(data):
        # No average: zeros in array
        actual = recall_score(y_true, y_pred, labels=[0, 1, 2, 3, 4],
                              average=None)
        assert_array_almost_equal([0., 1., 1., .5, 0.], actual)
        # Macro average is changed
        actual = recall_score(y_true, y_pred, labels=[0, 1, 2, 3, 4],
                              average='macro')
        assert_array_almost_equal(np.mean([0., 1., 1., .5, 0.]), actual)
        # No effect otherwise
        for average in ['micro', 'weighted', 'samples']:
            if average == 'samples' and i == 0:
                continue
            assert_almost_equal(recall_score(y_true, y_pred,
                                             labels=[0, 1, 2, 3, 4],
                                             average=average),
                                recall_score(y_true, y_pred, labels=None,
                                             average=average))
    # Error when introducing invalid label in multilabel case
    # (although it would only affect performance if average='macro'/None)
    for average in [None, 'macro', 'micro', 'samples']:
        assert_raises(ValueError, recall_score, y_true_bin, y_pred_bin,
                      labels=np.arange(6), average=average)
        assert_raises(ValueError, recall_score, y_true_bin, y_pred_bin,
                      labels=np.arange(-1, 4), average=average)
@ignore_warnings
def test_precision_recall_f_ignored_labels():
    """Test a subset of labels may be requested for PRF"""
    y_true = [1, 1, 2, 3]
    y_pred = [1, 3, 3, 3]
    y_true_bin = label_binarize(y_true, classes=np.arange(5))
    y_pred_bin = label_binarize(y_pred, classes=np.arange(5))
    # exercise both the multiclass and the binarized multilabel paths
    data = [(y_true, y_pred),
            (y_true_bin, y_pred_bin)]
    for i, (y_true, y_pred) in enumerate(data):
        recall_13 = partial(recall_score, y_true, y_pred, labels=[1, 3])
        recall_all = partial(recall_score, y_true, y_pred, labels=None)
        assert_array_almost_equal([.5, 1.], recall_13(average=None))
        assert_almost_equal((.5 + 1.) / 2, recall_13(average='macro'))
        assert_almost_equal((.5 * 2 + 1. * 1) / 3,
                            recall_13(average='weighted'))
        assert_almost_equal(2. / 3, recall_13(average='micro'))
        # ensure the above were meaningful tests: restricting labels
        # must actually change the averaged result
        for average in ['macro', 'weighted', 'micro']:
            assert_not_equal(recall_13(average=average),
                             recall_all(average=average))
def test_average_precision_score_score_non_binary_class():
    """average_precision_score must reject multiclass targets."""
    rng = check_random_state(404)
    scores = rng.rand(10)
    # targets contain three distinct class values -> multiclass
    labels = rng.randint(0, 3, size=10)
    assert_raise_message(ValueError, "multiclass format is not supported",
                         average_precision_score, labels, scores)
def test_average_precision_score_duplicate_values():
    """Tied scores that preserve the class ordering still give AP == 1.

    Duplicate values with precision-recall require different processing than
    when computing the AUC of a ROC, because the precision-recall curve is a
    decreasing curve; this ranking is a perfect test statistic.
    """
    truth = [0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1]
    scores = [0, .1, .1, .4, .5, .6, .6, .9, .9, 1, 1]
    assert_equal(average_precision_score(truth, scores), 1)
def test_average_precision_score_tied_values():
    """Ties straddling the class boundary must pull AP below 1.

    Scanning y_true left to right, the 0s precede the 1s, which looks like a
    perfect sort -- but the first two samples share the score .5 with
    different labels, so the implied ordering is ambiguous and the score must
    reflect that imperfection.
    """
    truth = [0, 1, 1]
    scores = [.5, .5, .6]
    assert_not_equal(average_precision_score(truth, scores), 1.)
@ignore_warnings
def test_precision_recall_fscore_support_errors():
    """Invalid beta, pos_label, and average options raise ValueError."""
    y_true, y_pred, _ = make_prediction(binary=True)
    # Bad beta: must be strictly positive
    assert_raises(ValueError, precision_recall_fscore_support,
                  y_true, y_pred, beta=0.0)
    # Bad pos_label: must appear in the target values
    assert_raises(ValueError, precision_recall_fscore_support,
                  y_true, y_pred, pos_label=2, average='macro')
    # Bad average option: unknown averaging mode
    assert_raises(ValueError, precision_recall_fscore_support,
                  [0, 1, 2], [1, 2, 0], average='mega')
def test_confusion_matrix_binary():
    """Binary confusion matrix, and MCC computed from its entries."""
    # Test confusion matrix - binary classification case
    y_true, y_pred, _ = make_prediction(binary=True)
    def test(y_true, y_pred):
        cm = confusion_matrix(y_true, y_pred)
        assert_array_equal(cm, [[22, 3], [8, 17]])
        # Recompute Matthews corrcoef by hand from the matrix entries
        tp, fp, fn, tn = cm.flatten()
        num = (tp * tn - fp * fn)
        den = np.sqrt((tp + fp) * (tp + fn) * (tn + fp) * (tn + fn))
        true_mcc = 0 if den == 0 else num / den
        mcc = matthews_corrcoef(y_true, y_pred)
        assert_array_almost_equal(mcc, true_mcc, decimal=2)
        assert_array_almost_equal(mcc, 0.57, decimal=2)
    # exercise both integer and string label inputs
    test(y_true, y_pred)
    test([str(y) for y in y_true],
         [str(y) for y in y_pred])
def test_cohen_kappa():
    """Cohen's kappa against published values, symmetry, and label subsets."""
    # These label vectors reproduce the contingency matrix from Artstein and
    # Poesio (2008), Table 1: np.array([[20, 20], [10, 50]]).
    y1 = np.array([0] * 40 + [1] * 60)
    y2 = np.array([0] * 20 + [1] * 20 + [0] * 10 + [1] * 50)
    kappa = cohen_kappa_score(y1, y2)
    assert_almost_equal(kappa, .348, decimal=3)
    # kappa is symmetric in its two raters
    assert_equal(kappa, cohen_kappa_score(y2, y1))
    # Add spurious labels and ignore them.
    y1 = np.append(y1, [2] * 4)
    y2 = np.append(y2, [2] * 4)
    assert_equal(cohen_kappa_score(y1, y2, labels=[0, 1]), kappa)
    # perfect agreement
    assert_almost_equal(cohen_kappa_score(y1, y1), 1.)
    # Multiclass example: Artstein and Poesio, Table 4.
    y1 = np.array([0] * 46 + [1] * 44 + [2] * 10)
    y2 = np.array([0] * 52 + [1] * 32 + [2] * 16)
    assert_almost_equal(cohen_kappa_score(y1, y2), .8013, decimal=4)
def test_matthews_corrcoef_nan():
    """Degenerate inputs must yield 0.0 rather than NaN."""
    assert_equal(0.0, matthews_corrcoef([0], [1]))
    assert_equal(0.0, matthews_corrcoef([0, 0], [0, 1]))
def test_precision_recall_f1_score_multiclass():
    """P/R/F1 on a multiclass task: per-class, averaged, and reordered labels."""
    # Test Precision Recall and F1 Score for multiclass classification task
    y_true, y_pred, _ = make_prediction(binary=False)
    # compute scores with default labels introspection
    p, r, f, s = precision_recall_fscore_support(y_true, y_pred, average=None)
    assert_array_almost_equal(p, [0.83, 0.33, 0.42], 2)
    assert_array_almost_equal(r, [0.79, 0.09, 0.90], 2)
    assert_array_almost_equal(f, [0.81, 0.15, 0.57], 2)
    assert_array_equal(s, [24, 31, 20])
    # averaging tests
    ps = precision_score(y_true, y_pred, pos_label=1, average='micro')
    assert_array_almost_equal(ps, 0.53, 2)
    rs = recall_score(y_true, y_pred, average='micro')
    assert_array_almost_equal(rs, 0.53, 2)
    fs = f1_score(y_true, y_pred, average='micro')
    assert_array_almost_equal(fs, 0.53, 2)
    ps = precision_score(y_true, y_pred, average='macro')
    assert_array_almost_equal(ps, 0.53, 2)
    rs = recall_score(y_true, y_pred, average='macro')
    assert_array_almost_equal(rs, 0.60, 2)
    fs = f1_score(y_true, y_pred, average='macro')
    assert_array_almost_equal(fs, 0.51, 2)
    ps = precision_score(y_true, y_pred, average='weighted')
    assert_array_almost_equal(ps, 0.51, 2)
    rs = recall_score(y_true, y_pred, average='weighted')
    assert_array_almost_equal(rs, 0.53, 2)
    fs = f1_score(y_true, y_pred, average='weighted')
    assert_array_almost_equal(fs, 0.47, 2)
    # 'samples' averaging is only defined for multilabel input
    assert_raises(ValueError, precision_score, y_true, y_pred,
                  average="samples")
    assert_raises(ValueError, recall_score, y_true, y_pred, average="samples")
    assert_raises(ValueError, f1_score, y_true, y_pred, average="samples")
    assert_raises(ValueError, fbeta_score, y_true, y_pred, average="samples",
                  beta=0.5)
    # same prediction but with and explicit label ordering
    p, r, f, s = precision_recall_fscore_support(
        y_true, y_pred, labels=[0, 2, 1], average=None)
    assert_array_almost_equal(p, [0.83, 0.41, 0.33], 2)
    assert_array_almost_equal(r, [0.79, 0.90, 0.10], 2)
    assert_array_almost_equal(f, [0.81, 0.57, 0.15], 2)
    assert_array_equal(s, [24, 20, 31])
def test_precision_refcall_f1_score_multilabel_unordered_labels():
    """Unsorted `labels` are accepted in the multilabel case.

    NOTE(review): "refcall" in the name is a typo for "recall"; kept as-is so
    the test's public identifier is unchanged.
    """
    # test that labels need not be sorted in the multilabel case
    y_true = np.array([[1, 1, 0, 0]])
    y_pred = np.array([[0, 0, 1, 1]])
    for average in ['samples', 'micro', 'macro', 'weighted', None]:
        p, r, f, s = precision_recall_fscore_support(
            y_true, y_pred, labels=[3, 0, 1, 2], warn_for=[], average=average)
        assert_array_equal(p, 0)
        assert_array_equal(r, 0)
        assert_array_equal(f, 0)
        if average is None:
            # support follows the requested label order [3, 0, 1, 2]
            assert_array_equal(s, [0, 1, 1, 0])
def test_precision_recall_f1_score_multiclass_pos_label_none():
    """pos_label=None with weighted averaging must not raise (GH #1296).

    This is a smoke test: there is deliberately no assertion -- simply
    completing the call is the regression check.
    """
    # Test Precision Recall and F1 Score for multiclass classification task
    # GH Issue #1296
    # initialize data
    y_true = np.array([0, 1, 0, 0, 1, 1, 0, 1, 0, 0, 1, 0, 1, 0, 1])
    y_pred = np.array([1, 1, 0, 1, 0, 1, 1, 1, 1, 0, 1, 0, 1, 0, 1])
    # compute scores with default labels introspection
    p, r, f, s = precision_recall_fscore_support(y_true, y_pred,
                                                 pos_label=None,
                                                 average='weighted')
def test_zero_precision_recall():
    """Pathological all-wrong predictions yield 0.0, never NaN."""
    # Check that pathological cases do not bring NaNs
    # np.seterr(all='raise') turns any divide-by-zero into an exception,
    # so a NaN-producing implementation would fail loudly here.
    old_error_settings = np.seterr(all='raise')
    try:
        y_true = np.array([0, 1, 2, 0, 1, 2])
        y_pred = np.array([2, 0, 1, 1, 2, 0])
        assert_almost_equal(precision_score(y_true, y_pred,
                                            average='weighted'), 0.0, 2)
        assert_almost_equal(recall_score(y_true, y_pred, average='weighted'),
                            0.0, 2)
        assert_almost_equal(f1_score(y_true, y_pred, average='weighted'),
                            0.0, 2)
    finally:
        # always restore the global numpy error state
        np.seterr(**old_error_settings)
def test_confusion_matrix_multiclass():
    """Multiclass confusion matrix with default and explicit label ordering."""
    # Test confusion matrix - multi-class case
    y_true, y_pred, _ = make_prediction(binary=False)
    def test(y_true, y_pred, string_type=False):
        # compute confusion matrix with default labels introspection
        cm = confusion_matrix(y_true, y_pred)
        assert_array_equal(cm, [[19, 4, 1],
                                [4, 3, 24],
                                [0, 2, 18]])
        # compute confusion matrix with explicit label ordering
        labels = ['0', '2', '1'] if string_type else [0, 2, 1]
        cm = confusion_matrix(y_true,
                              y_pred,
                              labels=labels)
        assert_array_equal(cm, [[19, 1, 4],
                                [0, 18, 2],
                                [4, 24, 3]])
    # exercise both integer and string label inputs
    test(y_true, y_pred)
    test(list(str(y) for y in y_true),
         list(str(y) for y in y_pred),
         string_type=True)
def test_confusion_matrix_multiclass_subset_labels():
    """Confusion matrix restricted to a subset (and reordering) of labels."""
    y_true, y_pred, _ = make_prediction(binary=False)
    # Keeping only labels 0 and 1 drops the row/column for label 2
    assert_array_equal(confusion_matrix(y_true, y_pred, labels=[0, 1]),
                       [[19, 4],
                        [4, 3]])
    # An explicit ordering [2, 1] both selects and permutes
    assert_array_equal(confusion_matrix(y_true, y_pred, labels=[2, 1]),
                       [[18, 2],
                        [24, 3]])
def test_classification_report_multiclass():
    """Formatted classification report with and without explicit class names."""
    # Test performance report
    iris = datasets.load_iris()
    y_true, y_pred, _ = make_prediction(dataset=iris, binary=False)
    # print classification report with class names
    expected_report = """\
        precision recall f1-score support
    setosa 0.83 0.79 0.81 24
    versicolor 0.33 0.10 0.15 31
    virginica 0.42 0.90 0.57 20
    avg / total 0.51 0.53 0.47 75
    """
    report = classification_report(
        y_true, y_pred, labels=np.arange(len(iris.target_names)),
        target_names=iris.target_names)
    assert_equal(report, expected_report)
    # print classification report with label detection
    expected_report = """\
        precision recall f1-score support
    0 0.83 0.79 0.81 24
    1 0.33 0.10 0.15 31
    2 0.42 0.90 0.57 20
    avg / total 0.51 0.53 0.47 75
    """
    report = classification_report(y_true, y_pred)
    assert_equal(report, expected_report)
def test_classification_report_multiclass_with_digits():
    """Report honours the `digits` parameter for floating point precision."""
    # Test performance report with added digits in floating point values
    iris = datasets.load_iris()
    y_true, y_pred, _ = make_prediction(dataset=iris, binary=False)
    # print classification report with class names
    expected_report = """\
        precision recall f1-score support
    setosa 0.82609 0.79167 0.80851 24
    versicolor 0.33333 0.09677 0.15000 31
    virginica 0.41860 0.90000 0.57143 20
    avg / total 0.51375 0.53333 0.47310 75
    """
    report = classification_report(
        y_true, y_pred, labels=np.arange(len(iris.target_names)),
        target_names=iris.target_names, digits=5)
    assert_equal(report, expected_report)
    # print classification report with label detection (default digits)
    expected_report = """\
        precision recall f1-score support
    0 0.83 0.79 0.81 24
    1 0.33 0.10 0.15 31
    2 0.42 0.90 0.57 20
    avg / total 0.51 0.53 0.47 75
    """
    report = classification_report(y_true, y_pred)
    assert_equal(report, expected_report)
def test_classification_report_multiclass_with_string_label():
    """Report works with string class labels and with target_names overrides."""
    y_true, y_pred, _ = make_prediction(binary=False)
    # map integer targets to string labels
    y_true = np.array(["blue", "green", "red"])[y_true]
    y_pred = np.array(["blue", "green", "red"])[y_pred]
    expected_report = """\
        precision recall f1-score support
    blue 0.83 0.79 0.81 24
    green 0.33 0.10 0.15 31
    red 0.42 0.90 0.57 20
    avg / total 0.51 0.53 0.47 75
    """
    report = classification_report(y_true, y_pred)
    assert_equal(report, expected_report)
    # target_names replaces the detected labels in the printout
    expected_report = """\
        precision recall f1-score support
    a 0.83 0.79 0.81 24
    b 0.33 0.10 0.15 31
    c 0.42 0.90 0.57 20
    avg / total 0.51 0.53 0.47 75
    """
    report = classification_report(y_true, y_pred,
                                   target_names=["a", "b", "c"])
    assert_equal(report, expected_report)
def test_classification_report_multiclass_with_unicode_label():
    """Report works with unicode labels (or raises clearly on old NumPy)."""
    y_true, y_pred, _ = make_prediction(binary=False)
    labels = np.array([u"blue\xa2", u"green\xa2", u"red\xa2"])
    y_true = labels[y_true]
    y_pred = labels[y_pred]
    expected_report = u"""\
        precision recall f1-score support
    blue\xa2 0.83 0.79 0.81 24
    green\xa2 0.33 0.10 0.15 31
    red\xa2 0.42 0.90 0.57 20
    avg / total 0.51 0.53 0.47 75
    """
    # NumPy < 1.7.0 cannot searchsorted unicode arrays; a RuntimeError
    # with an explanatory message is the documented behaviour there.
    if np_version[:3] < (1, 7, 0):
        expected_message = ("NumPy < 1.7.0 does not implement"
                            " searchsorted on unicode data correctly.")
        assert_raise_message(RuntimeError, expected_message,
                             classification_report, y_true, y_pred)
    else:
        report = classification_report(y_true, y_pred)
        assert_equal(report, expected_report)
def test_multilabel_classification_report():
    """Classification report on multilabel indicator targets."""
    n_classes = 4
    n_samples = 50
    # independent random label matrices -> imperfect agreement by design
    _, y_true = make_multilabel_classification(n_features=1,
                                               n_samples=n_samples,
                                               n_classes=n_classes,
                                               random_state=0)
    _, y_pred = make_multilabel_classification(n_features=1,
                                               n_samples=n_samples,
                                               n_classes=n_classes,
                                               random_state=1)
    expected_report = """\
        precision recall f1-score support
    0 0.50 0.67 0.57 24
    1 0.51 0.74 0.61 27
    2 0.29 0.08 0.12 26
    3 0.52 0.56 0.54 27
    avg / total 0.45 0.51 0.46 104
    """
    report = classification_report(y_true, y_pred)
    assert_equal(report, expected_report)
def test_multilabel_zero_one_loss_subset():
    """Subset zero-one loss on dense multilabel indicators (1 - accuracy)."""
    y1 = np.array([[0, 1, 1], [1, 0, 1]])
    y2 = np.array([[0, 0, 1], [1, 0, 1]])
    # One of the two rows differs between y1 and y2
    assert_equal(zero_one_loss(y1, y2), 0.5)
    # Perfect agreement incurs no loss
    assert_equal(zero_one_loss(y1, y1), 0)
    assert_equal(zero_one_loss(y2, y2), 0)
    # Complements and all-zero predictions miss every row
    for truth in (y2, y1):
        assert_equal(zero_one_loss(truth, np.logical_not(truth)), 1)
    for truth in (y1, y2):
        assert_equal(zero_one_loss(truth, np.zeros(y1.shape)), 1)
def test_multilabel_hamming_loss():
    """Hamming loss counts the fraction of individually mislabeled entries."""
    # Dense label indicator matrix format
    y1 = np.array([[0, 1, 1], [1, 0, 1]])
    y2 = np.array([[0, 0, 1], [1, 0, 1]])
    # exactly one of the six entries differs
    assert_equal(hamming_loss(y1, y2), 1 / 6)
    assert_equal(hamming_loss(y1, y1), 0)
    assert_equal(hamming_loss(y2, y2), 0)
    # full complement flips every entry
    assert_equal(hamming_loss(y2, np.logical_not(y2)), 1)
    assert_equal(hamming_loss(y1, np.logical_not(y1)), 1)
    # against all-zeros the loss equals the density of true labels
    assert_equal(hamming_loss(y1, np.zeros(y1.shape)), 4 / 6)
    assert_equal(hamming_loss(y2, np.zeros(y1.shape)), 0.5)
def test_multilabel_jaccard_similarity_score():
    """Jaccard similarity averaged over samples on multilabel indicators."""
    # Dense label indicator matrix format
    y1 = np.array([[0, 1, 1], [1, 0, 1]])
    y2 = np.array([[0, 0, 1], [1, 0, 1]])
    # size(y1 \inter y2) = [1, 2]
    # size(y1 \union y2) = [2, 2]
    # mean of [1/2, 2/2] is 0.75
    assert_equal(jaccard_similarity_score(y1, y2), 0.75)
    assert_equal(jaccard_similarity_score(y1, y1), 1)
    assert_equal(jaccard_similarity_score(y2, y2), 1)
    # disjoint label sets score zero
    assert_equal(jaccard_similarity_score(y2, np.logical_not(y2)), 0)
    assert_equal(jaccard_similarity_score(y1, np.logical_not(y1)), 0)
    assert_equal(jaccard_similarity_score(y1, np.zeros(y1.shape)), 0)
    assert_equal(jaccard_similarity_score(y2, np.zeros(y1.shape)), 0)
@ignore_warnings
def test_precision_recall_f1_score_multilabel_1():
    """Hand-computed P/R/F values for a crafted multilabel example (#1)."""
    # Test precision_recall_f1_score on a crafted multilabel example
    # First crafted example
    y_true = np.array([[1, 0, 0, 0], [0, 1, 0, 0], [0, 0, 1, 1]])
    y_pred = np.array([[0, 1, 0, 0], [0, 1, 0, 0], [1, 0, 1, 0]])
    p, r, f, s = precision_recall_fscore_support(y_true, y_pred, average=None)
    # tp = [0, 1, 1, 0]
    # fn = [1, 0, 0, 1]
    # fp = [1, 1, 0, 0]
    # Check per class
    assert_array_almost_equal(p, [0.0, 0.5, 1.0, 0.0], 2)
    assert_array_almost_equal(r, [0.0, 1.0, 1.0, 0.0], 2)
    assert_array_almost_equal(f, [0.0, 1 / 1.5, 1, 0.0], 2)
    assert_array_almost_equal(s, [1, 1, 1, 1], 2)
    f2 = fbeta_score(y_true, y_pred, beta=2, average=None)
    support = s
    assert_array_almost_equal(f2, [0, 0.83, 1, 0], 2)
    # Check macro
    p, r, f, s = precision_recall_fscore_support(y_true, y_pred,
                                                 average="macro")
    assert_almost_equal(p, 1.5 / 4)
    assert_almost_equal(r, 0.5)
    assert_almost_equal(f, 2.5 / 1.5 * 0.25)
    assert_equal(s, None)
    assert_almost_equal(fbeta_score(y_true, y_pred, beta=2, average="macro"),
                        np.mean(f2))
    # Check micro
    p, r, f, s = precision_recall_fscore_support(y_true, y_pred,
                                                 average="micro")
    assert_almost_equal(p, 0.5)
    assert_almost_equal(r, 0.5)
    assert_almost_equal(f, 0.5)
    assert_equal(s, None)
    assert_almost_equal(fbeta_score(y_true, y_pred, beta=2,
                                    average="micro"),
                        (1 + 4) * p * r / (4 * p + r))
    # Check weighted
    p, r, f, s = precision_recall_fscore_support(y_true, y_pred,
                                                 average="weighted")
    assert_almost_equal(p, 1.5 / 4)
    assert_almost_equal(r, 0.5)
    assert_almost_equal(f, 2.5 / 1.5 * 0.25)
    assert_equal(s, None)
    assert_almost_equal(fbeta_score(y_true, y_pred, beta=2,
                                    average="weighted"),
                        np.average(f2, weights=support))
    # Check samples
    # |h(x_i) inter y_i | = [0, 1, 1]
    # |y_i| = [1, 1, 2]
    # |h(x_i)| = [1, 1, 2]
    p, r, f, s = precision_recall_fscore_support(y_true, y_pred,
                                                 average="samples")
    assert_almost_equal(p, 0.5)
    assert_almost_equal(r, 0.5)
    assert_almost_equal(f, 0.5)
    assert_equal(s, None)
    assert_almost_equal(fbeta_score(y_true, y_pred, beta=2, average="samples"),
                        0.5)
@ignore_warnings
def test_precision_recall_f1_score_multilabel_2():
    """Hand-computed P/R/F values for a crafted multilabel example (#2)."""
    # Test precision_recall_f1_score on a crafted multilabel example 2
    # Second crafted example
    y_true = np.array([[1, 0, 0, 0], [0, 1, 0, 0], [0, 1, 1, 0]])
    y_pred = np.array([[0, 0, 0, 1], [0, 0, 0, 1], [1, 1, 0, 0]])
    # tp = [ 0.  1.  0.  0.]
    # fp = [ 1.  0.  0.  2.]
    # fn = [ 1.  1.  1.  0.]
    p, r, f, s = precision_recall_fscore_support(y_true, y_pred,
                                                 average=None)
    assert_array_almost_equal(p, [0.0, 1.0, 0.0, 0.0], 2)
    assert_array_almost_equal(r, [0.0, 0.5, 0.0, 0.0], 2)
    assert_array_almost_equal(f, [0.0, 0.66, 0.0, 0.0], 2)
    assert_array_almost_equal(s, [1, 2, 1, 0], 2)
    f2 = fbeta_score(y_true, y_pred, beta=2, average=None)
    support = s
    assert_array_almost_equal(f2, [0, 0.55, 0, 0], 2)
    # micro average
    p, r, f, s = precision_recall_fscore_support(y_true, y_pred,
                                                 average="micro")
    assert_almost_equal(p, 0.25)
    assert_almost_equal(r, 0.25)
    assert_almost_equal(f, 2 * 0.25 * 0.25 / 0.5)
    assert_equal(s, None)
    assert_almost_equal(fbeta_score(y_true, y_pred, beta=2,
                                    average="micro"),
                        (1 + 4) * p * r / (4 * p + r))
    # macro average
    p, r, f, s = precision_recall_fscore_support(y_true, y_pred,
                                                 average="macro")
    assert_almost_equal(p, 0.25)
    assert_almost_equal(r, 0.125)
    assert_almost_equal(f, 2 / 12)
    assert_equal(s, None)
    assert_almost_equal(fbeta_score(y_true, y_pred, beta=2,
                                    average="macro"),
                        np.mean(f2))
    # weighted average
    p, r, f, s = precision_recall_fscore_support(y_true, y_pred,
                                                 average="weighted")
    assert_almost_equal(p, 2 / 4)
    assert_almost_equal(r, 1 / 4)
    assert_almost_equal(f, 2 / 3 * 2 / 4)
    assert_equal(s, None)
    assert_almost_equal(fbeta_score(y_true, y_pred, beta=2,
                                    average="weighted"),
                        np.average(f2, weights=support))
    p, r, f, s = precision_recall_fscore_support(y_true, y_pred,
                                                 average="samples")
    # Check samples
    # |h(x_i) inter y_i | = [0, 0, 1]
    # |y_i| = [1, 1, 2]
    # |h(x_i)| = [1, 1, 2]
    assert_almost_equal(p, 1 / 6)
    assert_almost_equal(r, 1 / 6)
    assert_almost_equal(f, 2 / 4 * 1 / 3)
    assert_equal(s, None)
    assert_almost_equal(fbeta_score(y_true, y_pred, beta=2,
                                    average="samples"),
                        0.1666, 2)
def test_precision_recall_f1_score_with_an_empty_prediction():
    """P/R/F behave correctly when one sample receives no predicted labels."""
    y_true = np.array([[0, 1, 0, 0], [1, 0, 0, 0], [0, 1, 1, 0]])
    y_pred = np.array([[0, 0, 0, 0], [0, 0, 0, 1], [0, 1, 1, 0]])
    # true_pos = [ 0.  1.  1.  0.]
    # false_pos = [ 0.  0.  0.  1.]
    # false_neg = [ 1.  1.  0.  0.]
    p, r, f, s = precision_recall_fscore_support(y_true, y_pred,
                                                 average=None)
    assert_array_almost_equal(p, [0.0, 1.0, 1.0, 0.0], 2)
    assert_array_almost_equal(r, [0.0, 0.5, 1.0, 0.0], 2)
    assert_array_almost_equal(f, [0.0, 1 / 1.5, 1, 0.0], 2)
    assert_array_almost_equal(s, [1, 2, 1, 0], 2)
    f2 = fbeta_score(y_true, y_pred, beta=2, average=None)
    support = s
    assert_array_almost_equal(f2, [0, 0.55, 1, 0], 2)
    # macro average
    p, r, f, s = precision_recall_fscore_support(y_true, y_pred,
                                                 average="macro")
    assert_almost_equal(p, 0.5)
    assert_almost_equal(r, 1.5 / 4)
    assert_almost_equal(f, 2.5 / (4 * 1.5))
    assert_equal(s, None)
    assert_almost_equal(fbeta_score(y_true, y_pred, beta=2,
                                    average="macro"),
                        np.mean(f2))
    # micro average
    p, r, f, s = precision_recall_fscore_support(y_true, y_pred,
                                                 average="micro")
    assert_almost_equal(p, 2 / 3)
    assert_almost_equal(r, 0.5)
    assert_almost_equal(f, 2 / 3 / (2 / 3 + 0.5))
    assert_equal(s, None)
    assert_almost_equal(fbeta_score(y_true, y_pred, beta=2,
                                    average="micro"),
                        (1 + 4) * p * r / (4 * p + r))
    # weighted average
    p, r, f, s = precision_recall_fscore_support(y_true, y_pred,
                                                 average="weighted")
    assert_almost_equal(p, 3 / 4)
    assert_almost_equal(r, 0.5)
    assert_almost_equal(f, (2 / 1.5 + 1) / 4)
    assert_equal(s, None)
    assert_almost_equal(fbeta_score(y_true, y_pred, beta=2,
                                    average="weighted"),
                        np.average(f2, weights=support))
    p, r, f, s = precision_recall_fscore_support(y_true, y_pred,
                                                 average="samples")
    # |h(x_i) inter y_i | = [0, 0, 2]
    # |y_i| = [1, 1, 2]
    # |h(x_i)| = [0, 1, 2]
    assert_almost_equal(p, 1 / 3)
    assert_almost_equal(r, 1 / 3)
    assert_almost_equal(f, 1 / 3)
    assert_equal(s, None)
    assert_almost_equal(fbeta_score(y_true, y_pred, beta=2,
                                    average="samples"),
                        0.333, 2)
def test_precision_recall_f1_no_labels():
    """All-zero targets and predictions return 0 and warn, never NaN."""
    y_true = np.zeros((20, 3))
    y_pred = np.zeros_like(y_true)
    # tp = [0, 0, 0]
    # fn = [0, 0, 0]
    # fp = [0, 0, 0]
    # support = [0, 0, 0]
    # |y_hat_i inter y_i | = [0, 0, 0]
    # |y_i| = [0, 0, 0]
    # |y_hat_i| = [0, 0, 0]
    for beta in [1]:
        # per-class: everything 0, with an UndefinedMetricWarning
        p, r, f, s = assert_warns(UndefinedMetricWarning,
                                  precision_recall_fscore_support,
                                  y_true, y_pred, average=None, beta=beta)
        assert_array_almost_equal(p, [0, 0, 0], 2)
        assert_array_almost_equal(r, [0, 0, 0], 2)
        assert_array_almost_equal(f, [0, 0, 0], 2)
        assert_array_almost_equal(s, [0, 0, 0], 2)
        fbeta = assert_warns(UndefinedMetricWarning, fbeta_score,
                             y_true, y_pred, beta=beta, average=None)
        assert_array_almost_equal(fbeta, [0, 0, 0], 2)
        # every averaging mode likewise yields 0 and warns
        for average in ["macro", "micro", "weighted", "samples"]:
            p, r, f, s = assert_warns(UndefinedMetricWarning,
                                      precision_recall_fscore_support,
                                      y_true, y_pred, average=average,
                                      beta=beta)
            assert_almost_equal(p, 0)
            assert_almost_equal(r, 0)
            assert_almost_equal(f, 0)
            assert_equal(s, None)
            fbeta = assert_warns(UndefinedMetricWarning, fbeta_score,
                                 y_true, y_pred,
                                 beta=beta, average=average)
            assert_almost_equal(fbeta, 0)
def test_prf_warnings():
    # Check the exact UndefinedMetricWarning message emitted by
    # precision_recall_fscore_support for each degenerate input shape.
    # average of per-label scores
    f, w = precision_recall_fscore_support, UndefinedMetricWarning
    my_assert = assert_warns_message
    for average in [None, 'weighted', 'macro']:
        msg = ('Precision and F-score are ill-defined and '
               'being set to 0.0 in labels with no predicted samples.')
        my_assert(w, msg, f, [0, 1, 2], [1, 1, 2], average=average)
        msg = ('Recall and F-score are ill-defined and '
               'being set to 0.0 in labels with no true samples.')
        my_assert(w, msg, f, [1, 1, 2], [0, 1, 2], average=average)
    # average of per-sample scores
    msg = ('Precision and F-score are ill-defined and '
           'being set to 0.0 in samples with no predicted labels.')
    my_assert(w, msg, f, np.array([[1, 0], [1, 0]]),
              np.array([[1, 0], [0, 0]]), average='samples')
    msg = ('Recall and F-score are ill-defined and '
           'being set to 0.0 in samples with no true labels.')
    my_assert(w, msg, f, np.array([[1, 0], [0, 0]]),
              np.array([[1, 0], [1, 0]]),
              average='samples')
    # single score: micro-average
    msg = ('Precision and F-score are ill-defined and '
           'being set to 0.0 due to no predicted samples.')
    my_assert(w, msg, f, np.array([[1, 1], [1, 1]]),
              np.array([[0, 0], [0, 0]]), average='micro')
    msg = ('Recall and F-score are ill-defined and '
           'being set to 0.0 due to no true samples.')
    my_assert(w, msg, f, np.array([[0, 0], [0, 0]]),
              np.array([[1, 1], [1, 1]]), average='micro')
    # single positive label
    msg = ('Precision and F-score are ill-defined and '
           'being set to 0.0 due to no predicted samples.')
    my_assert(w, msg, f, [1, 1], [-1, -1], average='macro')
    msg = ('Recall and F-score are ill-defined and '
           'being set to 0.0 due to no true samples.')
    my_assert(w, msg, f, [-1, -1], [1, 1], average='macro')
def test_recall_warnings():
    # All-zero predictions do not make recall undefined, so no warning here.
    assert_no_warnings(recall_score,
                       np.array([[1, 1], [1, 1]]),
                       np.array([[0, 0], [0, 0]]),
                       average='micro')
    clean_warning_registry()
    with warnings.catch_warnings(record=True) as record:
        warnings.simplefilter('always')
        # All-zero y_true means there are no true samples: recall is
        # undefined and must emit exactly this warning.
        recall_score(np.array([[0, 0], [0, 0]]),
                     np.array([[1, 1], [1, 1]]),
                     average='micro')
        assert_equal(str(record.pop().message),
                     'Recall is ill-defined and '
                     'being set to 0.0 due to no true samples.')
def test_precision_warnings():
    clean_warning_registry()
    with warnings.catch_warnings(record=True) as record:
        warnings.simplefilter('always')
        # All-zero predictions mean no predicted samples: precision is
        # undefined and must emit exactly this warning.
        precision_score(np.array([[1, 1], [1, 1]]),
                        np.array([[0, 0], [0, 0]]),
                        average='micro')
        assert_equal(str(record.pop().message),
                     'Precision is ill-defined and '
                     'being set to 0.0 due to no predicted samples.')
    # The symmetric case (all-zero y_true) leaves precision well-defined.
    assert_no_warnings(precision_score,
                       np.array([[0, 0], [0, 0]]),
                       np.array([[1, 1], [1, 1]]),
                       average='micro')
def test_fscore_warnings():
    # F-score is undefined whenever either precision or recall is; check
    # both directions for f1_score and a beta=2 fbeta_score.
    clean_warning_registry()
    with warnings.catch_warnings(record=True) as record:
        warnings.simplefilter('always')
        for score in [f1_score, partial(fbeta_score, beta=2)]:
            # No predicted samples.
            score(np.array([[1, 1], [1, 1]]),
                  np.array([[0, 0], [0, 0]]),
                  average='micro')
            assert_equal(str(record.pop().message),
                         'F-score is ill-defined and '
                         'being set to 0.0 due to no predicted samples.')
            # No true samples.
            score(np.array([[0, 0], [0, 0]]),
                  np.array([[1, 1], [1, 1]]),
                  average='micro')
            assert_equal(str(record.pop().message),
                         'F-score is ill-defined and '
                         'being set to 0.0 due to no true samples.')
def test_prf_average_compat():
    # Ensure warning if f1_score et al.'s average is implicit for multiclass
    y_true = [1, 2, 3, 3]
    y_pred = [1, 2, 3, 1]
    y_true_bin = [0, 1, 1]
    y_pred_bin = [0, 1, 0]
    for metric in [precision_score, recall_score, f1_score,
                   partial(fbeta_score, beta=2)]:
        # Multiclass input without an explicit ``average`` must warn and
        # fall back to the 'weighted' behaviour.
        score = assert_warns(DeprecationWarning, metric, y_true, y_pred)
        score_weighted = assert_no_warnings(metric, y_true, y_pred,
                                            average='weighted')
        assert_equal(score, score_weighted,
                     'average does not act like "weighted" by default')
        # check binary passes without warning
        assert_no_warnings(metric, y_true_bin, y_pred_bin)
        # but binary with pos_label=None should behave like multiclass
        score = assert_warns(DeprecationWarning, metric,
                             y_true_bin, y_pred_bin, pos_label=None)
        score_weighted = assert_no_warnings(metric, y_true_bin, y_pred_bin,
                                            pos_label=None, average='weighted')
        assert_equal(score, score_weighted,
                     'average does not act like "weighted" by default with '
                     'binary data and pos_label=None')
def test__check_targets():
    # Check that _check_targets correctly merges target types, squeezes
    # output and fails if input lengths differ.
    IND = 'multilabel-indicator'
    MC = 'multiclass'
    BIN = 'binary'
    CNT = 'continuous'
    MMC = 'multiclass-multioutput'
    MCN = 'continuous-multioutput'
    # all of length 3
    EXAMPLES = [
        (IND, np.array([[0, 1, 1], [1, 0, 0], [0, 0, 1]])),
        # must not be considered binary
        (IND, np.array([[0, 1], [1, 0], [1, 1]])),
        (MC, [2, 3, 1]),
        (BIN, [0, 1, 1]),
        (CNT, [0., 1.5, 1.]),
        (MC, np.array([[2], [3], [1]])),
        (BIN, np.array([[0], [1], [1]])),
        (CNT, np.array([[0.], [1.5], [1.]])),
        (MMC, np.array([[0, 2], [1, 3], [2, 3]])),
        (MCN, np.array([[0.5, 2.], [1.1, 3.], [2., 3.]])),
    ]
    # expected type given input types, or None for error
    # (types will be tried in either order)
    EXPECTED = {
        (IND, IND): IND,
        (MC, MC): MC,
        (BIN, BIN): BIN,
        (MC, IND): None,
        (BIN, IND): None,
        (BIN, MC): MC,
        # Disallowed types
        (CNT, CNT): None,
        (MMC, MMC): None,
        (MCN, MCN): None,
        (IND, CNT): None,
        (MC, CNT): None,
        (BIN, CNT): None,
        (MMC, CNT): None,
        (MCN, CNT): None,
        (IND, MMC): None,
        (MC, MMC): None,
        (BIN, MMC): None,
        (MCN, MMC): None,
        (IND, MCN): None,
        (MC, MCN): None,
        (BIN, MCN): None,
    }
    # Exercise every ordered pair of example targets; EXPECTED only lists
    # each unordered pair once, hence the KeyError fallback below.
    for (type1, y1), (type2, y2) in product(EXAMPLES, repeat=2):
        try:
            expected = EXPECTED[type1, type2]
        except KeyError:
            expected = EXPECTED[type2, type1]
        if expected is None:
            assert_raises(ValueError, _check_targets, y1, y2)
            if type1 != type2:
                # Mixing two distinct disallowed types has a dedicated
                # error message.
                assert_raise_message(
                    ValueError,
                    "Can't handle mix of {0} and {1}".format(type1, type2),
                    _check_targets, y1, y2)
            else:
                if type1 not in (BIN, MC, IND):
                    assert_raise_message(ValueError,
                                         "{0} is not supported".format(type1),
                                         _check_targets, y1, y2)
        else:
            merged_type, y1out, y2out = _check_targets(y1, y2)
            assert_equal(merged_type, expected)
            if merged_type.startswith('multilabel'):
                # Multilabel targets are converted to CSR sparse matrices.
                assert_equal(y1out.format, 'csr')
                assert_equal(y2out.format, 'csr')
            else:
                assert_array_equal(y1out, np.squeeze(y1))
                assert_array_equal(y2out, np.squeeze(y2))
            # Length mismatch must always be rejected.
            assert_raises(ValueError, _check_targets, y1[:-1], y2)
    # Make sure seq of seq is not supported
    y1 = [(1, 2,), (0, 2, 3)]
    y2 = [(2,), (0, 2,)]
    msg = ('You appear to be using a legacy multi-label data representation. '
           'Sequence of sequences are no longer supported; use a binary array'
           ' or sparse matrix instead.')
    assert_raise_message(ValueError, msg, _check_targets, y1, y2)
def test_hinge_loss_binary():
    # Hand-computed binary hinge loss: only the second and fourth samples
    # violate the margin (0.5 + 0.7 = 1.2 total over 4 samples).
    decisions = np.array([-8.5, 0.5, 1.5, -0.3])
    expected = 1.2 / 4
    assert_equal(hinge_loss(np.array([-1, 1, 1, -1]), decisions), expected)
    # Re-labelling the classes as {0, 2} must not change the loss.
    assert_equal(hinge_loss(np.array([0, 2, 2, 0]), decisions), expected)
def test_hinge_loss_multiclass():
    # Multiclass hinge loss: per sample, max(0, 1 - true score + rival score).
    pred_decision = np.array([
        [0.36, -0.17, -0.58, -0.99],
        [-0.54, -0.37, -0.48, -0.58],
        [-1.45, -0.58, -0.38, -0.17],
        [-0.54, -0.38, -0.48, -0.58],
        [-2.36, -0.79, -0.27, 0.24],
        [-1.45, -0.58, -0.38, -0.17]
    ])
    y_true = np.array([0, 1, 2, 1, 3, 2])
    # (true-class column, strongest rival column) per row, chosen by hand.
    col_pairs = [(0, 1), (1, 2), (2, 3), (1, 2), (3, 2), (2, 3)]
    margins = [1 - row[t] + row[r]
               for row, (t, r) in zip(pred_decision, col_pairs)]
    expected = np.mean(np.clip(margins, 0, None))
    assert_equal(hinge_loss(y_true, pred_decision), expected)
def test_hinge_loss_multiclass_missing_labels_with_labels_none():
    # y_true never contains class 3, yet pred_decision has four columns;
    # without an explicit ``labels`` argument this must raise.
    y_true = np.array([0, 1, 2, 2])
    pred_decision = np.array([
        [1.27, 0.034, -0.68, -1.40],
        [-1.45, -0.58, -0.38, -0.17],
        [-2.36, -0.79, -0.27, 0.24],
        [-2.36, -0.79, -0.27, 0.24]
    ])
    expected_message = ("Please include all labels in y_true "
                        "or pass labels as third argument")
    assert_raise_message(ValueError, expected_message,
                         hinge_loss, y_true, pred_decision)
def test_hinge_loss_multiclass_with_missing_labels():
    # Class 3 never appears in y_true, so it is supplied via ``labels``.
    pred_decision = np.array([
        [0.36, -0.17, -0.58, -0.99],
        [-0.55, -0.38, -0.48, -0.58],
        [-1.45, -0.58, -0.38, -0.17],
        [-0.55, -0.38, -0.48, -0.58],
        [-1.45, -0.58, -0.38, -0.17]
    ])
    y_true = np.array([0, 1, 2, 1, 2])
    labels = np.array([0, 1, 2, 3])
    # Strongest wrong-class column for each row, picked by hand.
    rivals = [1, 2, 3, 2, 3]
    margins = [1 - pred_decision[i][t] + pred_decision[i][r]
               for i, (t, r) in enumerate(zip(y_true, rivals))]
    expected = np.mean(np.clip(margins, 0, None))
    assert_equal(hinge_loss(y_true, pred_decision, labels=labels), expected)
def test_hinge_loss_multiclass_invariance_lists():
    # Currently, invariance of string and integer labels cannot be tested
    # in common invariance tests because invariance tests for multiclass
    # decision functions is not implemented yet.
    y_true = ['blue', 'green', 'red',
              'green', 'white', 'red']
    pred_decision = [
        [0.36, -0.17, -0.58, -0.99],
        [-0.55, -0.38, -0.48, -0.58],
        [-1.45, -0.58, -0.38, -0.17],
        [-0.55, -0.38, -0.48, -0.58],
        [-2.36, -0.79, -0.27, 0.24],
        [-1.45, -0.58, -0.38, -0.17]]
    # (true-class column, strongest rival column) per row, chosen by hand.
    col_pairs = [(0, 1), (1, 2), (2, 3), (1, 2), (3, 2), (2, 3)]
    margins = [1 - row[t] + row[r]
               for row, (t, r) in zip(pred_decision, col_pairs)]
    expected = np.mean(np.clip(margins, 0, None))
    assert_equal(hinge_loss(y_true, pred_decision), expected)
def test_log_loss():
    # binary case with symbolic labels ("no" < "yes")
    y_true = ["no", "no", "no", "yes", "yes", "yes"]
    y_pred = np.array([[0.5, 0.5], [0.1, 0.9], [0.01, 0.99],
                       [0.9, 0.1], [0.75, 0.25], [0.001, 0.999]])
    loss = log_loss(y_true, y_pred)
    assert_almost_equal(loss, 1.8817971)
    # multiclass case; adapted from http://bit.ly/RJJHWA
    y_true = [1, 0, 2]
    y_pred = [[0.2, 0.7, 0.1], [0.6, 0.2, 0.2], [0.6, 0.1, 0.3]]
    loss = log_loss(y_true, y_pred, normalize=True)
    assert_almost_equal(loss, 0.6904911)
    # check that we got all the shapes and axes right
    # by doubling the length of y_true and y_pred
    # (in-place list repetition: now 6 samples with the same per-sample loss)
    y_true *= 2
    y_pred *= 2
    loss = log_loss(y_true, y_pred, normalize=False)
    assert_almost_equal(loss, 0.6904911 * 6, decimal=6)
    # check eps and handling of absolute zero and one probabilities
    y_pred = np.asarray(y_pred) > .5
    loss = log_loss(y_true, y_pred, normalize=True, eps=.1)
    assert_almost_equal(loss, log_loss(y_true, np.clip(y_pred, .1, .9)))
    # raise error if number of classes are not equal.
    y_true = [1, 0, 2]
    y_pred = [[0.2, 0.7], [0.6, 0.5], [0.4, 0.1]]
    assert_raises(ValueError, log_loss, y_true, y_pred)
    # case when y_true is a string array object
    y_true = ["ham", "spam", "spam", "ham"]
    y_pred = [[0.2, 0.7], [0.6, 0.5], [0.4, 0.1], [0.7, 0.2]]
    loss = log_loss(y_true, y_pred)
    assert_almost_equal(loss, 1.0383217, decimal=6)
def test_brier_score_loss():
    # Brier score is the mean squared distance between outcomes and
    # predicted probabilities.
    y_true = np.array([0, 1, 1, 0, 1, 1])
    y_prob = np.array([0.1, 0.8, 0.9, 0.3, 1., 0.95])
    true_score = linalg.norm(y_true - y_prob) ** 2 / len(y_true)
    # Perfect predictions score zero.
    assert_almost_equal(brier_score_loss(y_true, y_true), 0.0)
    assert_almost_equal(brier_score_loss(y_true, y_prob), true_score)
    # Label encoding must not matter: shifted and {-1, 1} targets give the
    # same score as {0, 1}.
    assert_almost_equal(brier_score_loss(1. + y_true, y_prob), true_score)
    assert_almost_equal(brier_score_loss(2 * y_true - 1, y_prob), true_score)
    # Mismatched lengths and out-of-range probabilities are rejected.
    assert_raises(ValueError, brier_score_loss, y_true, y_prob[1:])
    assert_raises(ValueError, brier_score_loss, y_true, y_prob + 1.)
    assert_raises(ValueError, brier_score_loss, y_true, y_prob - 1.)
| bsd-3-clause |
costypetrisor/scikit-learn | examples/text/hashing_vs_dict_vectorizer.py | 284 | 3265 | """
===========================================
FeatureHasher and DictVectorizer Comparison
===========================================
Compares FeatureHasher and DictVectorizer by using both to vectorize
text documents.
The example demonstrates syntax and speed only; it doesn't actually do
anything useful with the extracted vectors. See the example scripts
{document_classification_20newsgroups,clustering}.py for actual learning
on text documents.
A discrepancy between the number of terms reported for DictVectorizer and
for FeatureHasher is to be expected due to hash collisions.
"""
# Author: Lars Buitinck <L.J.Buitinck@uva.nl>
# License: BSD 3 clause
from __future__ import print_function
from collections import defaultdict
import re
import sys
from time import time
import numpy as np
from sklearn.datasets import fetch_20newsgroups
from sklearn.feature_extraction import DictVectorizer, FeatureHasher
def n_nonzero_columns(X):
    """Count the columns of CSR matrix X holding at least one non-zero."""
    _, col_indices = X.nonzero()
    return np.unique(col_indices).size
def tokens(doc):
    """Lazily yield lower-cased tokens from doc.

    This uses a simple regex to break strings into tokens. For a more
    principled approach, see CountVectorizer or TfidfVectorizer.
    """
    for word in re.findall(r"\w+", doc):
        yield word.lower()
def token_freqs(doc):
    """Extract a dict mapping tokens from doc to their frequencies."""
    counts = defaultdict(int)
    for token in tokens(doc):
        counts[token] = counts[token] + 1
    return counts
# Newsgroup categories used to keep the benchmark corpus small.
categories = [
    'alt.atheism',
    'comp.graphics',
    'comp.sys.ibm.pc.hardware',
    'misc.forsale',
    'rec.autos',
    'sci.space',
    'talk.religion.misc',
]
# Uncomment the following line to use a larger set (11k+ documents)
#categories = None

print(__doc__)
print("Usage: %s [n_features_for_hashing]" % sys.argv[0])
print("    The default number of features is 2**18.")
print()

# Optional CLI argument: the hash-space size for FeatureHasher.
try:
    n_features = int(sys.argv[1])
except IndexError:
    n_features = 2 ** 18
except ValueError:
    print("not a valid number of features: %r" % sys.argv[1])
    sys.exit(1)

print("Loading 20 newsgroups training data")
raw_data = fetch_20newsgroups(subset='train', categories=categories).data
data_size_mb = sum(len(s.encode('utf-8')) for s in raw_data) / 1e6
print("%d documents - %0.3fMB" % (len(raw_data), data_size_mb))
print()

# Benchmark 1: DictVectorizer on precomputed token-frequency dicts.
print("DictVectorizer")
t0 = time()
vectorizer = DictVectorizer()
vectorizer.fit_transform(token_freqs(d) for d in raw_data)
duration = time() - t0
print("done in %fs at %0.3fMB/s" % (duration, data_size_mb / duration))
print("Found %d unique terms" % len(vectorizer.get_feature_names()))
print()

# Benchmark 2: FeatureHasher on the same frequency dicts.
print("FeatureHasher on frequency dicts")
t0 = time()
hasher = FeatureHasher(n_features=n_features)
X = hasher.transform(token_freqs(d) for d in raw_data)
duration = time() - t0
print("done in %fs at %0.3fMB/s" % (duration, data_size_mb / duration))
print("Found %d unique terms" % n_nonzero_columns(X))
print()

# Benchmark 3: FeatureHasher hashing raw token streams directly.
print("FeatureHasher on raw tokens")
t0 = time()
hasher = FeatureHasher(n_features=n_features, input_type="string")
X = hasher.transform(tokens(d) for d in raw_data)
duration = time() - t0
print("done in %fs at %0.3fMB/s" % (duration, data_size_mb / duration))
print("Found %d unique terms" % n_nonzero_columns(X))
| bsd-3-clause |
jreback/pandas | pandas/core/indexes/timedeltas.py | 1 | 9039 | """ implement the TimedeltaIndex """
from pandas._libs import index as libindex, lib
from pandas._libs.tslibs import Timedelta, to_offset
from pandas._typing import DtypeObj
from pandas.errors import InvalidIndexError
from pandas.core.dtypes.common import TD64NS_DTYPE, is_scalar, is_timedelta64_dtype
from pandas.core.arrays import datetimelike as dtl
from pandas.core.arrays.timedeltas import TimedeltaArray
import pandas.core.common as com
from pandas.core.indexes.base import Index, maybe_extract_name
from pandas.core.indexes.datetimelike import DatetimeTimedeltaMixin
from pandas.core.indexes.extension import inherit_names
# Delegate arithmetic/rounding dunders and timedelta field accessors to the
# backing TimedeltaArray.
@inherit_names(
    ["__neg__", "__pos__", "__abs__", "total_seconds", "round", "floor", "ceil"]
    + TimedeltaArray._field_ops,
    TimedeltaArray,
    wrap=True,
)
@inherit_names(
    [
        "_bool_ops",
        "_object_ops",
        "_field_ops",
        "_datetimelike_ops",
        "_datetimelike_methods",
        "_other_ops",
        "components",
        "to_pytimedelta",
        "sum",
        "std",
        "median",
        "_format_native_types",
    ],
    TimedeltaArray,
)
class TimedeltaIndex(DatetimeTimedeltaMixin):
    """
    Immutable ndarray of timedelta64 data, represented internally as int64, and
    which can be boxed to timedelta objects.

    Parameters
    ----------
    data : array-like (1-dimensional), optional
        Optional timedelta-like data to construct index with.
    unit : unit of the arg (D,h,m,s,ms,us,ns) denote the unit, optional
        Which is an integer/float number.
    freq : str or pandas offset object, optional
        One of pandas date offset strings or corresponding objects. The string
        'infer' can be passed in order to set the frequency of the index as the
        inferred frequency upon creation.
    copy : bool
        Make a copy of input ndarray.
    name : object
        Name to be stored in the index.

    Attributes
    ----------
    days
    seconds
    microseconds
    nanoseconds
    components
    inferred_freq

    Methods
    -------
    to_pytimedelta
    to_series
    round
    floor
    ceil
    to_frame
    mean

    See Also
    --------
    Index : The base pandas Index type.
    Timedelta : Represents a duration between two dates or times.
    DatetimeIndex : Index of datetime64 data.
    PeriodIndex : Index of Period data.
    timedelta_range : Create a fixed-frequency TimedeltaIndex.

    Notes
    -----
    To learn more about the frequency strings, please see `this link
    <https://pandas.pydata.org/pandas-docs/stable/user_guide/timeseries.html#offset-aliases>`__.
    """

    _typ = "timedeltaindex"
    # Class used to store the underlying data.
    _data_cls = TimedeltaArray
    # Hash-table engine used for indexing lookups.
    _engine_type = libindex.TimedeltaEngine
    _comparables = ["name", "freq"]
    _attributes = ["name", "freq"]
    _is_numeric_dtype = False
    _data: TimedeltaArray

    # -------------------------------------------------------------------
    # Constructors

    def __new__(
        cls,
        data=None,
        unit=None,
        freq=lib.no_default,
        closed=None,
        dtype=TD64NS_DTYPE,
        copy=False,
        name=None,
    ):
        name = maybe_extract_name(name, data, cls)

        if is_scalar(data):
            raise TypeError(
                f"{cls.__name__}() must be called with a "
                f"collection of some kind, {repr(data)} was passed"
            )

        # Month/year units are ambiguous as durations and were deprecated.
        if unit in {"Y", "y", "M"}:
            raise ValueError(
                "Units 'M', 'Y', and 'y' are no longer supported, as they do not "
                "represent unambiguous timedelta values durations."
            )

        # Fastpath: already-constructed TimedeltaArray with no freq override.
        if isinstance(data, TimedeltaArray) and freq is lib.no_default:
            if copy:
                data = data.copy()
            return cls._simple_new(data, name=name)

        # Fastpath: another TimedeltaIndex with nothing to override.
        if isinstance(data, TimedeltaIndex) and freq is lib.no_default and name is None:
            if copy:
                return data.copy()
            else:
                return data._shallow_copy()

        # - Cases checked above all return/raise before reaching here - #

        tdarr = TimedeltaArray._from_sequence_not_strict(
            data, freq=freq, unit=unit, dtype=dtype, copy=copy
        )
        return cls._simple_new(tdarr, name=name)

    # -------------------------------------------------------------------

    def _is_comparable_dtype(self, dtype: DtypeObj) -> bool:
        """
        Can we compare values of the given dtype to our own?
        """
        return is_timedelta64_dtype(dtype)

    # -------------------------------------------------------------------
    # Indexing Methods

    def get_loc(self, key, method=None, tolerance=None):
        """
        Get integer location for requested label

        Returns
        -------
        loc : int, slice, or ndarray[int]
        """
        if not is_scalar(key):
            raise InvalidIndexError(key)

        # Coerce the key to a Timedelta-compatible scalar; anything that
        # cannot be coerced is simply not present in the index.
        try:
            key = self._data._validate_scalar(key, unbox=False)
        except TypeError as err:
            raise KeyError(key) from err

        return Index.get_loc(self, key, method, tolerance)

    def _maybe_cast_slice_bound(self, label, side: str, kind):
        """
        If label is a string, cast it to timedelta according to resolution.

        Parameters
        ----------
        label : object
        side : {'left', 'right'}
        kind : {'loc', 'getitem'} or None

        Returns
        -------
        label : object
        """
        assert kind in ["loc", "getitem", None]

        if isinstance(label, str):
            parsed = Timedelta(label)
            # Round down to the parsed string's resolution; the right bound
            # is then the last nanosecond within that resolution bucket.
            lbound = parsed.round(parsed.resolution_string)
            if side == "left":
                return lbound
            else:
                return lbound + to_offset(parsed.resolution_string) - Timedelta(1, "ns")
        elif not isinstance(label, self._data._recognized_scalars):
            raise self._invalid_indexer("slice", label)

        return label

    # -------------------------------------------------------------------

    @property
    def inferred_type(self) -> str:
        return "timedelta64"
def timedelta_range(
    start=None, end=None, periods=None, freq=None, name=None, closed=None
) -> TimedeltaIndex:
    """
    Return a fixed frequency TimedeltaIndex, with day as the default
    frequency.

    Parameters
    ----------
    start : str or timedelta-like, default None
        Left bound for generating timedeltas.
    end : str or timedelta-like, default None
        Right bound for generating timedeltas.
    periods : int, default None
        Number of periods to generate.
    freq : str or DateOffset, default 'D'
        Frequency strings can have multiples, e.g. '5H'.
    name : str, default None
        Name of the resulting TimedeltaIndex.
    closed : str, default None
        Make the interval closed with respect to the given frequency to
        the 'left', 'right', or both sides (None).

    Returns
    -------
    rng : TimedeltaIndex

    Notes
    -----
    Of the four parameters ``start``, ``end``, ``periods``, and ``freq``,
    exactly three must be specified. If ``freq`` is omitted, the resulting
    ``TimedeltaIndex`` will have ``periods`` linearly spaced elements between
    ``start`` and ``end`` (closed on both sides).

    To learn more about the frequency strings, please see `this link
    <https://pandas.pydata.org/pandas-docs/stable/user_guide/timeseries.html#offset-aliases>`__.

    Examples
    --------
    >>> pd.timedelta_range(start='1 day', periods=4)
    TimedeltaIndex(['1 days', '2 days', '3 days', '4 days'],
                   dtype='timedelta64[ns]', freq='D')

    The ``closed`` parameter specifies which endpoint is included. The default
    behavior is to include both endpoints.

    >>> pd.timedelta_range(start='1 day', periods=4, closed='right')
    TimedeltaIndex(['2 days', '3 days', '4 days'],
                   dtype='timedelta64[ns]', freq='D')

    The ``freq`` parameter specifies the frequency of the TimedeltaIndex.
    Only fixed frequencies can be passed, non-fixed frequencies such as
    'M' (month end) will raise.

    >>> pd.timedelta_range(start='1 day', end='2 days', freq='6H')
    TimedeltaIndex(['1 days 00:00:00', '1 days 06:00:00', '1 days 12:00:00',
                    '1 days 18:00:00', '2 days 00:00:00'],
                   dtype='timedelta64[ns]', freq='6H')

    Specify ``start``, ``end``, and ``periods``; the frequency is generated
    automatically (linearly spaced).

    >>> pd.timedelta_range(start='1 day', end='5 days', periods=4)
    TimedeltaIndex(['1 days 00:00:00', '2 days 08:00:00', '3 days 16:00:00',
                    '5 days 00:00:00'],
                   dtype='timedelta64[ns]', freq=None)
    """
    # Default to daily frequency only when a third parameter is needed to
    # complete the start/end/periods specification.
    if freq is None and com.any_none(periods, start, end):
        freq = "D"

    freq, _ = dtl.maybe_infer_freq(freq)
    tdarr = TimedeltaArray._generate_range(start, end, periods, freq, closed=closed)
    return TimedeltaIndex._simple_new(tdarr, name=name)
| bsd-3-clause |
roytu/impede | impede-app/server/py/filter_library.py | 2 | 4231 |
""" Module that contains some example filters """
import numpy as np
import matplotlib.pyplot as plt
from graph import Node, Edge, Graph
from resistor import Resistor
from capacitor import Capacitor
from diode import Diode
from opamp import Opamp
from wire import Wire
from units import Units
from filter import Filter
def make_mxr_distortion_filter():
    """ Return the MXR filter from:
        http://www.premierguitar.com/articles/mxr-distortion-plus-mods-1

        Returns:
            Filter object
    """
    probes = []
    graph = Graph()

    # Knobs
    # gain_param scales the op amp feedback divider; mix_param splits the
    # output potentiometer. Both are fractions in [0, 1].
    gain_param = 0.5
    mix_param = 0.1

    # Input / output
    node_in = Node(graph, fixed=True, source=True, label="Vin")
    node_out = Node(graph, output=True, label="Vout")

    # Supply
    node_4_5 = Node(graph, value=4.5, fixed=True, source=True, label="4.5V")
    node_gnd = Node(graph, value=0, fixed=True, source=True, label="GND")

    # Probe Vin
    probes.append(node_in)

    # Op amp plus section
    edge = Edge(graph, node_in, node_gnd, label="I1")
    capacitor = Capacitor(graph, .001 * Units.u, node_in, node_gnd, edge)
    graph.add_component(capacitor)
    node = Node(graph, label="V1")
    edge = Edge(graph, node_in, node, label="I2")
    # NOTE(review): the input coupling capacitor is commented out and
    # replaced with a plain wire below -- confirm this is intentional.
    #capacitor = Capacitor(graph, .01 * Units.u, node_in, node, edge)
    #graph.add_component(capacitor)
    wire = Wire(graph, node_in, node, edge)
    graph.add_component(wire)
    node_plus = Node(graph, label="V+")
    edge = Edge(graph, node, node_plus, label="I3")
    resistor = Resistor(graph, 10 * Units.K, node, node_plus, edge)
    graph.add_component(resistor)
    edge = Edge(graph, node_plus, node_4_5, label="I4")
    resistor = Resistor(graph, 1 * Units.M, node_plus, node_4_5, edge)
    graph.add_component(resistor)

    # Op amp minus section
    node = Node(graph, label="V2")
    edge = Edge(graph, node, node_gnd, label="I5")
    resistor = Resistor(graph, gain_param * (1 * Units.M), node, node_gnd, edge)
    graph.add_component(resistor)
    node_1 = Node(graph, label="V3")
    edge = Edge(graph, node, node_1, label="I6")
    resistor = Resistor(graph, 4.7 * Units.K, node, node_1, edge)
    graph.add_component(resistor)
    node_minus = Node(graph, label="V-")
    edge = Edge(graph, node_1, node_minus, label="I7")
    # NOTE(review): feedback capacitor also bypassed with a wire -- confirm.
    #capacitor = Capacitor(graph, 0.047 * Units.u, node_1, node_minus, edge)
    #graph.add_component(capacitor)
    wire = Wire(graph, node_1, node_minus, edge)
    graph.add_component(wire)

    # Op amp
    node_output = Node(graph, source=True, label="Vo")
    op_amp = Opamp(graph, node_a=node_minus, node_b=node_plus, node_out=node_output)
    graph.add_component(op_amp)
    edge = Edge(graph, node_minus, node_output, label="I8")
    resistor = Resistor(graph, 1 * Units.M, node_minus, node_output, edge)
    graph.add_component(resistor)

    # Op amp output
    node = Node(graph, label="V4")
    edge = Edge(graph, node_output, node, label="I9")
    capacitor = Capacitor(graph, 1 * Units.u, node_output, node, edge)
    graph.add_component(capacitor)
    node_1 = Node(graph, label="V5")
    edge = Edge(graph, node, node_1, label="I10")
    resistor = Resistor(graph, 10 * Units.K, node, node_1, edge)
    graph.add_component(resistor)
    # Anti-parallel clipping diodes to ground produce the distortion.
    edge = Edge(graph, node_1, node_gnd, label="I11")
    diode1 = Diode(graph, node_a=node_1, node_b=node_gnd, edge_i=edge)
    graph.add_component(diode1)
    edge = Edge(graph, node_gnd, node_1, label="I12")
    diode2 = Diode(graph, node_a=node_gnd, node_b=node_1, edge_i=edge)
    graph.add_component(diode2)
    edge = Edge(graph, node_1, node_gnd, label="I13")
    capacitor = Capacitor(graph, .001 * Units.u, node_1, node_gnd, edge)
    graph.add_component(capacitor)

    # Output potentiometer
    # Modelled as two resistors whose values sum to 10K, split by mix_param.
    edge = Edge(graph, node_1, node_out, label="I14")
    resistor = Resistor(graph, mix_param * (10 * Units.K), node_1, node_out, edge)
    graph.add_component(resistor)
    edge = Edge(graph, node_out, node_gnd, label="I15")
    resistor = Resistor(graph, (1 - mix_param) * (10 * Units.K), node_out, node_gnd, edge)
    graph.add_component(resistor)

    # Probe Vout
    probes.append(node_out)

    mxr_filter = Filter(graph, node_in, node_out, probes=probes)
    return mxr_filter
| mit |
dungvtdev/upsbayescpm | bayespy/inference/vmp/nodes/GaussianProcesses.py | 5 | 25953 | ################################################################################
# Copyright (C) 2011-2012 Jaakko Luttinen
#
# This file is licensed under the MIT License.
################################################################################
import itertools
import numpy as np
#import scipy as sp
#import scipy.linalg.decomp_cholesky as decomp
import scipy.linalg as linalg
#import scipy.special as special
#import matplotlib.pyplot as plt
#import time
#import profile
#import scipy.spatial.distance as distance
import scipy.sparse as sp
from bayespy.utils import misc as utils
from . import node as EF
from . import CovarianceFunctions as CF
class CovarianceMatrix:
    # NOTE(review): appears to be an unfinished abstraction -- cholesky has
    # no implementation and nothing visible in this module instantiates it.
    def cholesky(self):
        """Placeholder for a Cholesky factorization; not implemented."""
        pass
def multiply(A, B):
    """Return the element-wise product of A and B."""
    product = np.multiply(A, B)
    return product
# m     prior mean function
# k     prior covariance function
# x     data inputs
# z     processed data outputs (z = inv(Cov) * (y-m(x)))
# U     data covariance Cholesky factor
def gp_posterior_moment_function(m, k, x, y, k_sparse=None, pseudoinputs=None, noise=None):
    """Build a function that evaluates GP posterior moments at new inputs.

    Parameters
    ----------
    m : callable
        Prior mean function; ``m(x)[0]`` is the mean vector at ``x``.
    k : callable
        Prior covariance function; ``k(x1, x2)[0]`` is a covariance matrix
        and ``k(h)[0]`` a variance vector.
    x, y : array-like
        Observed inputs and outputs.
    k_sparse : callable, optional
        Additional (sparse) covariance function added to the prior.
    pseudoinputs : array-like, optional
        Pseudo-input locations for a sparse approximation of the posterior.
    noise : array-like, optional
        Observation-noise covariance added to the data covariance.

    Returns
    -------
    get_moments : callable
        ``get_moments(h, covariance=1, mean=True)`` returning a
        ``(mean, variance-or-covariance)`` tuple at inputs ``h``.
    """
    # Prior
    # FIXME: We are ignoring the covariance of mu now..
    mu = m(x)[0]

    # Accumulate the explicit noise/sparse parts of the data covariance.
    # Use "is not None" instead of "!= None": identity tests are the PEP 8
    # idiom and avoid elementwise comparison surprises with NumPy arrays.
    K_noise = None
    if noise is not None:
        K_noise = noise
    if k_sparse is not None:
        if K_noise is None:
            K_noise = k_sparse(x, x)[0]
        else:
            # Out-of-place addition so the caller's ``noise`` array is
            # never mutated.
            K_noise = K_noise + k_sparse(x, x)[0]

    if pseudoinputs is not None:
        p = pseudoinputs
        K_pp = k(p, p)[0]
        K_xp = k(x, p)[0]
        U = utils.chol(K_noise)

        # Compute Lambda
        Lambda = K_pp + np.dot(K_xp.T, utils.chol_solve(U, K_xp))
        U_lambda = utils.chol(Lambda)

        # Compute statistics for posterior predictions
        z = utils.chol_solve(U_lambda,
                             np.dot(K_xp.T,
                                    utils.chol_solve(U, y - mu)))
        U = utils.chol(K_pp)

        # Now we can forget the location of the observations and
        # consider only the pseudoinputs when predicting.
        x = p
    else:
        K = K_noise
        if K is None:
            K = k(x, x)[0]
        else:
            # Evaluate the kernel once (the old try/except re-evaluated it
            # on failure) and add out-of-place, which both supports mixed
            # sparse/dense operands and avoids mutating K_noise/noise.
            K = K + k(x, x)[0]

        # Compute posterior GP
        N = len(y)
        U = None
        z = None
        if N > 0:
            U = utils.chol(K)
            z = utils.chol_solve(U, y - mu)

    def get_moments(h, covariance=1, mean=True):
        """Posterior mean/variance (covariance=1) or full covariance
        (covariance=2) of the GP at inputs h."""
        K_xh = k(x, h)[0]
        if k_sparse is not None:
            # Out-of-place addition handles sparse/dense operand mixes.
            K_xh = K_xh + k_sparse(x, h)[0]

        # NumPy has problems when mixing matrices and arrays.
        # Matrices may appear, for instance, when you sum an array and
        # a sparse matrix.  Make sure the result is either an array or
        # a sparse matrix (not dense matrix!), because matrix objects
        # cause lots of problems:
        #
        # array.dot(array) = array
        # matrix.dot(array) = matrix
        # sparse.dot(array) = array
        if not sp.issparse(K_xh):
            K_xh = np.asarray(K_xh)

        # Function for computing posterior moments
        if mean:
            # Mean vector
            # FIXME: Ignoring the covariance of prior mu
            m_h = m(h)[0]
            if z is not None:
                m_h = m_h + K_xh.T.dot(z)
        else:
            m_h = None

        # Compute (co)variance matrix/vector
        if covariance:
            if covariance == 1:
                ## Compute variance vector
                k_h = k(h)[0]
                if k_sparse is not None:
                    k_h = k_h + k_sparse(h)[0]
                if U is not None:
                    if isinstance(K_xh, np.ndarray):
                        k_h -= np.einsum('i...,i...',
                                         K_xh,
                                         utils.chol_solve(U, K_xh))
                    else:
                        # TODO: einsum doesn't work for sparse matrices, so
                        # use a dense fallback.  This may consume A LOT of
                        # memory for sparse matrices.
                        k_h -= np.asarray(K_xh.multiply(utils.chol_solve(U, K_xh))).sum(axis=0)
                if pseudoinputs is not None:
                    if isinstance(K_xh, np.ndarray):
                        k_h += np.einsum('i...,i...',
                                         K_xh,
                                         utils.chol_solve(U_lambda, K_xh))
                    else:
                        # TODO: same dense fallback as above.
                        k_h += np.asarray(K_xh.multiply(utils.chol_solve(U_lambda, K_xh))).sum(axis=0)
                # Ensure non-negative variances
                k_h[k_h < 0] = 0
                return (m_h, k_h)
            elif covariance == 2:
                ## Compute full covariance matrix
                K_hh = k(h, h)[0]
                if k_sparse is not None:
                    K_hh = K_hh + k_sparse(h)[0]
                if U is not None:
                    K_hh -= K_xh.T.dot(utils.chol_solve(U, K_xh))
                if pseudoinputs is not None:
                    K_hh += K_xh.T.dot(utils.chol_solve(U_lambda, K_xh))
                return (m_h, K_hh)
        else:
            return (m_h, None)

    return get_moments
# Constant function using GP mean protocol
class Constant(EF.Node):
    """Wrap a plain function so it can serve as a GP mean function."""

    def __init__(self, f, **kwargs):
        self.f = f
        EF.Node.__init__(self, dims=[(np.inf,)], **kwargs)

    def message_to_child(self, gradient=False):
        # Adapt self.f to the moment-function protocol: the second moment
        # slot is always None for a deterministic constant.
        def func(x, gradient=False):
            moments = [self.f(x), None]
            if gradient:
                return (moments, [])
            return moments
        return func
#class MultiDimensional(EF.NodeVariable):
# """ A multi-dimensional Gaussian process f(x). """
## class ToGaussian(EF.NodeVariable):
## """ Deterministic node which transform a Gaussian process into
## finite-dimensional Gaussian variable. """
## def __init__(self, f, x, **kwargs):
## EF.NodeVariable.__init__(self,
## f,
## x,
## plates=
## dims=
# Deterministic node for creating a set of GPs which can be used as a
# mean function to a general GP node.
class Multiple(EF.Node):
    """Combine several GP nodes into a single multi-output node.

    The child message concatenates the mean vectors of all parent GPs;
    covariances are not supported yet.
    """

    def __init__(self, GPs, **kwargs):
        # Ignore plates
        EF.NodeVariable.__init__(self,
                                 *GPs,
                                 plates=(),
                                 dims=[(np.inf,), (np.inf, np.inf)],
                                 **kwargs)

    def message_to_parent(self, index):
        # NotImplementedError is the idiomatic exception for unfinished
        # functionality (and a subclass of Exception, so existing callers
        # that caught the old generic Exception still work).
        raise NotImplementedError("not implemented yet")

    def message_to_child(self, gradient=False):
        u = [parent.message_to_child() for parent in self.parents]

        def get_moments(xh, **kwargs):
            """Concatenate the parents' mean vectors at inputs xh[i]."""
            mh_all = []
            khh_all = []
            for i in range(len(self.parents)):
                xi = np.array(xh[i])
                # FIXME: We are ignoring the covariance of mu now..
                if gradient:
                    ((mh, khh), dm) = u[i](xi, **kwargs)
                else:
                    (mh, khh) = u[i](xi, **kwargs)
                mh_all = np.concatenate([mh_all, mh])
                # "is not None" avoids elementwise comparison warnings when
                # khh is a NumPy array.
                if khh is not None:
                    raise NotImplementedError(
                        'Not implemented yet for covariances')
            # FIXME: Compute gradients!
            if gradient:
                return ([mh_all, khh_all], [])
            else:
                return [mh_all, khh_all]

        return get_moments
# Gaussian process distribution
class GaussianProcess(EF.Node):
def __init__(self, m, k, k_sparse=None, pseudoinputs=None, **kwargs):
self.x = np.array([])
self.f = np.array([])
## self.x_obs = np.zeros((0,1))
## self.f_obs = np.zeros((0,))
if pseudoinputs != None:
pseudoinputs = EF.NodeConstant([pseudoinputs],
dims=[np.shape(pseudoinputs)])
# By default, posterior == prior
self.m = None #m
self.k = None #k
if isinstance(k, list) and isinstance(m, list):
if len(k) != len(m):
raise Exception('The number of mean and covariance functions must be equal.')
k = CF.Multiple(k)
m = Multiple(m)
elif isinstance(k, list):
D = len(k)
k = CF.Multiple(k)
m = Multiple(D*[m])
elif isinstance(m, list):
D = len(m)
k = CF.Multiple(D*[k])
m = Multiple(m)
# Ignore plates
EF.NodeVariable.__init__(self,
m,
k,
k_sparse,
pseudoinputs,
plates=(),
dims=[(np.inf,), (np.inf,np.inf)],
**kwargs)
def __call__(self, x, covariance=None):
if not covariance:
return self.u(x, covariance=False)[0]
elif covariance.lower() == 'vector':
return self.u(x, covariance=1)
elif covariance.lower() == 'matrix':
return self.u(x, covariance=2)
else:
raise Exception("Unknown covariance type requested")
def message_to_parent(self, index):
if index == 0:
k = self.parents[1].message_to_child()[0]
K = k(self.x, self.x)
return [self.x,
self.mu,
K]
if index == 1:
raise Exception("not implemented yet")
    def message_to_child(self):
        """Return the posterior moment function self.u for child nodes."""
        # An observed GP is fixed data; it must not act as a parent.
        if self.observed:
            raise Exception("Observable GP should not have children.")
        return self.u
    def get_parameters(self):
        # The posterior moment function doubles as the parameter
        # representation of this node.
        return self.u
    def observe(self, x, f):
        """Fix the GP to observed function values f at inputs x."""
        self.observed = True
        self.x = x
        self.f = f
        ## if np.ndim(f) == 1:
        ##     self.f = np.asmatrix(f).T
        ## else:
        ##     self.f = np.asmatrix(f)
    # You might want:
    # - mean for x
    # - covariance (and mean) for x
    # - variance (and mean) for x
    # - i.e., mean and/or (co)variance for x
    # - covariance for x1 and x2
def lower_bound_contribution(self, gradient=False):
# Get moment functions from parents
m = self.parents[0].message_to_child(gradient=gradient)
k = self.parents[1].message_to_child(gradient=gradient)
if self.parents[2]:
k_sparse = self.parents[2].message_to_child(gradient=gradient)
else:
k_sparse = None
if self.parents[3]:
pseudoinputs = self.parents[3].message_to_child(gradient=gradient)
#pseudoinputs = self.parents[3].message_to_child(gradient=gradient)[0]
else:
pseudoinputs = None
## m = self.parents[0].message_to_child(gradient=gradient)[0]
## k = self.parents[1].message_to_child(gradient=gradient)[0]
# Compute the parameters (covariance matrices etc) using
# parents' moment functions
DKs_xx = []
DKd_xx = []
DKd_xp = []
DKd_pp = []
Dxp = []
Dmu = []
if gradient:
# FIXME: We are ignoring the covariance of mu now..
((mu, _), Dmu) = m(self.x, gradient=True)
## if k_sparse:
## ((Ks_xx,), DKs_xx) = k_sparse(self.x, self.x, gradient=True)
if pseudoinputs:
((Ks_xx,), DKs_xx) = k_sparse(self.x, self.x, gradient=True)
((xp,), Dxp) = pseudoinputs
((Kd_pp,), DKd_pp) = k(xp,xp, gradient=True)
((Kd_xp,), DKd_xp) = k(self.x, xp, gradient=True)
else:
((K_xx,), DKd_xx) = k(self.x, self.x, gradient=True)
if k_sparse:
((Ks_xx,), DKs_xx) = k_sparse(self.x, self.x, gradient=True)
try:
K_xx += Ks_xx
except:
K_xx = K_xx + Ks_xx
else:
# FIXME: We are ignoring the covariance of mu now..
(mu, _) = m(self.x)
## if k_sparse:
## (Ks_xx,) = k_sparse(self.x, self.x)
if pseudoinputs:
(Ks_xx,) = k_sparse(self.x, self.x)
(xp,) = pseudoinputs
(Kd_pp,) = k(xp, xp)
(Kd_xp,) = k(self.x, xp)
else:
(K_xx,) = k(self.x, self.x)
if k_sparse:
(Ks_xx,) = k_sparse(self.x, self.x)
try:
K_xx += Ks_xx
except:
K_xx = K_xx + Ks_xx
mu = mu[0]
#K = K[0]
# Log pdf
if self.observed:
## Log pdf for directly observed GP
f0 = self.f - mu
#print('hereiam')
#print(K)
if pseudoinputs:
## Pseudo-input approximation
# Decompose the full-rank sparse/noise covariance matrix
try:
Us_xx = utils.cholesky(Ks_xx)
except linalg.LinAlgError:
print('Noise/sparse covariance not positive definite')
return -np.inf
# Use Woodbury-Sherman-Morrison formula with the
# following notation:
#
# y2 = f0' * inv(Kd_xp*inv(Kd_pp)*Kd_xp' + Ks_xx) * f0
#
# z = Ks_xx \ f0
# Lambda = Kd_pp + Kd_xp'*inv(Ks_xx)*Kd_xp
# nu = inv(Lambda) * (Kd_xp' * (Ks_xx \ f0))
# rho = Kd_xp * inv(Lambda) * (Kd_xp' * (Ks_xx \ f0))
#
# y2 = f0' * z - z' * rho
z = Us_xx.solve(f0)
Lambda = Kd_pp + np.dot(Kd_xp.T,
Us_xx.solve(Kd_xp))
## z = utils.chol_solve(Us_xx, f0)
## Lambda = Kd_pp + np.dot(Kd_xp.T,
## utils.chol_solve(Us_xx, Kd_xp))
try:
U_Lambda = utils.cholesky(Lambda)
#U_Lambda = utils.chol(Lambda)
except linalg.LinAlgError:
print('Lambda not positive definite')
return -np.inf
nu = U_Lambda.solve(np.dot(Kd_xp.T, z))
#nu = utils.chol_solve(U_Lambda, np.dot(Kd_xp.T, z))
rho = np.dot(Kd_xp, nu)
y2 = np.dot(f0, z) - np.dot(z, rho)
# Use matrix determinant lemma
#
# det(Kd_xp*inv(Kd_pp)*Kd_xp' + Ks_xx)
# = det(Kd_pp + Kd_xp'*inv(Ks_xx)*Kd_xp)
# * det(inv(Kd_pp)) * det(Ks_xx)
# = det(Lambda) * det(Ks_xx) / det(Kd_pp)
try:
Ud_pp = utils.cholesky(Kd_pp)
#Ud_pp = utils.chol(Kd_pp)
except linalg.LinAlgError:
print('Covariance of pseudo inputs not positive definite')
return -np.inf
logdet = (U_Lambda.logdet()
+ Us_xx.logdet()
- Ud_pp.logdet())
## logdet = (utils.logdet_chol(U_Lambda)
## + utils.logdet_chol(Us_xx)
## - utils.logdet_chol(Ud_pp))
# Compute the log pdf
L = gaussian_logpdf(y2,
0,
0,
logdet,
np.size(self.f))
# Add the variational cost of the pseudo-input
# approximation
# Compute gradients
for (dmu, func) in Dmu:
# Derivative w.r.t. mean vector
d = np.nan
# Send the derivative message
func(d)
for (dKs_xx, func) in DKs_xx:
# Compute derivative w.r.t. covariance matrix
d = np.nan
# Send the derivative message
func(d)
for (dKd_xp, func) in DKd_xp:
# Compute derivative w.r.t. covariance matrix
d = np.nan
# Send the derivative message
func(d)
V = Ud_pp.solve(Kd_xp.T)
Z = Us_xx.solve(V.T)
## V = utils.chol_solve(Ud_pp, Kd_xp.T)
## Z = utils.chol_solve(Us_xx, V.T)
for (dKd_pp, func) in DKd_pp:
# Compute derivative w.r.t. covariance matrix
d = (0.5 * np.trace(Ud_pp.solve(dKd_pp))
- 0.5 * np.trace(U_Lambda.solve(dKd_pp))
+ np.dot(nu, np.dot(dKd_pp, nu))
+ np.trace(np.dot(dKd_pp,
np.dot(V,Z))))
## d = (0.5 * np.trace(utils.chol_solve(Ud_pp, dKd_pp))
## - 0.5 * np.trace(utils.chol_solve(U_Lambda, dKd_pp))
## + np.dot(nu, np.dot(dKd_pp, nu))
## + np.trace(np.dot(dKd_pp,
## np.dot(V,Z))))
# Send the derivative message
func(d)
for (dxp, func) in Dxp:
# Compute derivative w.r.t. covariance matrix
d = np.nan
# Send the derivative message
func(d)
else:
## Full exact (no pseudo approximations)
try:
U = utils.cholesky(K_xx)
#U = utils.chol(K_xx)
except linalg.LinAlgError:
print('non positive definite, return -inf')
return -np.inf
z = U.solve(f0)
#z = utils.chol_solve(U, f0)
#print(K)
L = utils.gaussian_logpdf(np.dot(f0, z),
0,
0,
U.logdet(),
## utils.logdet_chol(U),
np.size(self.f))
for (dmu, func) in Dmu:
# Derivative w.r.t. mean vector
d = -np.sum(z)
# Send the derivative message
func(d)
for (dK, func) in DKd_xx:
# Compute derivative w.r.t. covariance matrix
#
# TODO: trace+chol_solve should be handled better
# for sparse matrices. Use sparse-inverse!
d = 0.5 * (dK.dot(z).dot(z)
- U.trace_solve_gradient(dK))
## - np.trace(U.solve(dK)))
## d = 0.5 * (dK.dot(z).dot(z)
## - np.trace(utils.chol_solve(U, dK)))
#print('derivate', d, dK)
## d = 0.5 * (np.dot(z, np.dot(dK, z))
## - np.trace(utils.chol_solve(U, dK)))
#
# Send the derivative message
func(d)
for (dK, func) in DKs_xx:
# Compute derivative w.r.t. covariance matrix
d = 0.5 * (dK.dot(z).dot(z)
- U.trace_solve_gradient(dK))
## - np.trace(U.solve(dK)))
## d = 0.5 * (dK.dot(z).dot(z)
## - np.trace(utils.chol_solve(U, dK)))
## d = 0.5 * (np.dot(z, np.dot(dK, z))
## - np.trace(utils.chol_solve(U, dK)))
# Send the derivative message
func(d)
else:
## Log pdf for latent GP
raise Exception('Not implemented yet')
return L
## Let f1 be observed and f2 latent function values.
# Compute <log p(f1,f2|m,k)>
#L = gaussian_logpdf(sum_product(np.outer(self.f,self.f) + self.Cov,
# Compute <log q(f2)>
def update(self):
# Messages from parents
m = self.parents[0].message_to_child()
k = self.parents[1].message_to_child()
if self.parents[2]:
k_sparse = self.parents[2].message_to_child()
else:
k_sparse = None
if self.parents[3]:
pseudoinputs = self.parents[3].message_to_child()[0]
else:
pseudoinputs = None
## m = self.parents[0].message_to_child()[0]
## k = self.parents[1].message_to_child()[0]
if self.observed:
# Observations of this node
self.u = gp_posterior_moment_function(m,
k,
self.x,
self.f,
k_sparse=k_sparse,
pseudoinputs=pseudoinputs)
else:
x = np.array([])
y = np.array([])
# Messages from children
for (child,index) in self.children:
(msg, mask) = child.message_to_parent(index)
# Ignoring masks and plates..
# m[0] is the inputs
x = np.concatenate((x, msg[0]), axis=-2)
# m[1] is the observations
y = np.concatenate((y, msg[1]))
# m[2] is the covariance matrix
V = linalg.block_diag(V, msg[2])
self.u = gp_posterior_moment_function(m, k, x, y, covariance=V)
self.x = x
self.f = y
# At least for now, simplify this GP node such that a GP is either
# observed or latent. If it is observed, it doesn't take messages from
# children, actually, it should not even have children!
## # Pseudo for GPFA:
## k1 = gp_cov_se(magnitude=theta1, lengthscale=theta2)
## k2 = gp_cov_periodic(magnitude=.., lengthscale=.., period=..)
## k3 = gp_cov_rq(magnitude=.., lengthscale=.., alpha=..)
## f = NodeGPSet(0, [k1,k2,k3]) # assumes block diagonality
## # f = NodeGPSet(0, [[k11,k12,k13],[k21,k22,k23],[k31,k32,k33]])
## X = GaussianFromGP(f, [ [[t0,0],[t0,1],[t0,2]], [t1,0],[t1,1],[t1,2], ..])
## ...
## # Construct a sum of GPs if interested only in the sum term
## k1 = gp_cov_se(magnitude=theta1, lengthscale=theta2)
## k2 = gp_cov_periodic(magnitude=.., lengthscale=.., period=..)
## k = gp_cov_sum(k1, k2)
## f = NodeGP(0, k)
## f.observe(x, y)
## f.update()
## (mp, kp) = f.get_parameters()
## # Construct a sum of GPs when interested also in the individual
## # GPs:
## k1 = gp_cov_se(magnitude=theta1, lengthscale=theta2)
## k2 = gp_cov_periodic(magnitude=.., lengthscale=.., period=..)
## k3 = gp_cov_delta(magnitude=theta3)
## f = NodeGPSum(0, [k1,k2,k3])
## x = np.array([1,2,3,4,5,6,7,8,9,10])
## y = np.sin(x[0]) + np.random.normal(0, 0.1, (10,))
## # Observe the sum (index 0)
## f.observe((0,x), y)
## # Inference
## f.update()
## (mp, kp) = f.get_parameters()
## # Mean of the sum
## mp[0](...)
## # Mean of the individual terms
## mp[1](...)
## mp[2](...)
## mp[3](...)
## # Covariance of the sum
## kp[0][0](..., ...)
## # Other covariances
## kp[1][1](..., ...)
## kp[2][2](..., ...)
## kp[3][3](..., ...)
## kp[1][2](..., ...)
## kp[1][3](..., ...)
## kp[2][3](..., ...)
| mit |
spektom/incubator-airflow | airflow/providers/apache/hive/hooks/hive.py | 4 | 39159 | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import contextlib
import os
import re
import socket
import subprocess
import time
from collections import OrderedDict
from tempfile import NamedTemporaryFile, TemporaryDirectory
import unicodecsv as csv
from airflow.configuration import conf
from airflow.exceptions import AirflowException
from airflow.hooks.base_hook import BaseHook
from airflow.security import utils
from airflow.utils.helpers import as_flattened_list
from airflow.utils.operator_helpers import AIRFLOW_VAR_NAME_FORMAT_MAPPING
HIVE_QUEUE_PRIORITIES = ['VERY_HIGH', 'HIGH', 'NORMAL', 'LOW', 'VERY_LOW']
def get_context_from_env_var():
    """
    Extract context from env variable, e.g. dag_id, task_id and execution_date,
    so that they can be used inside BashOperator and PythonOperator.

    :return: The context of interest.
    """
    context = {}
    # Each mapping entry knows both the canonical name ('default') and the
    # environment-variable spelling; missing variables default to ''.
    for format_map in AIRFLOW_VAR_NAME_FORMAT_MAPPING.values():
        context[format_map['default']] = os.environ.get(
            format_map['env_var_format'], '')
    return context
class HiveCliHook(BaseHook):
"""Simple wrapper around the hive CLI.
It also supports the ``beeline``
a lighter CLI that runs JDBC and is replacing the heavier
traditional CLI. To enable ``beeline``, set the use_beeline param in the
extra field of your connection as in ``{ "use_beeline": true }``
Note that you can also set default hive CLI parameters using the
``hive_cli_params`` to be used in your connection as in
``{"hive_cli_params": "-hiveconf mapred.job.tracker=some.jobtracker:444"}``
Parameters passed here can be overridden by run_cli's hive_conf param
The extra connection parameter ``auth`` gets passed as in the ``jdbc``
connection string as is.
:param mapred_queue: queue used by the Hadoop Scheduler (Capacity or Fair)
:type mapred_queue: str
:param mapred_queue_priority: priority within the job queue.
Possible settings include: VERY_HIGH, HIGH, NORMAL, LOW, VERY_LOW
:type mapred_queue_priority: str
:param mapred_job_name: This name will appear in the jobtracker.
This can make monitoring easier.
:type mapred_job_name: str
"""
    def __init__(
            self,
            hive_cli_conn_id="hive_cli_default",
            run_as=None,
            mapred_queue=None,
            mapred_queue_priority=None,
            mapred_job_name=None):
        """Resolve the connection and cache CLI/beeline settings from its extras."""
        conn = self.get_connection(hive_cli_conn_id)
        # Extra connection options; 'noSasl' is the historical default auth.
        self.hive_cli_params = conn.extra_dejson.get('hive_cli_params', '')
        self.use_beeline = conn.extra_dejson.get('use_beeline', False)
        self.auth = conn.extra_dejson.get('auth', 'noSasl')
        self.conn = conn
        self.run_as = run_as
        if mapred_queue_priority:
            # Normalize and validate against the fixed Hadoop priority set.
            mapred_queue_priority = mapred_queue_priority.upper()
            if mapred_queue_priority not in HIVE_QUEUE_PRIORITIES:
                raise AirflowException(
                    "Invalid Mapred Queue Priority. Valid values are: "
                    "{}".format(', '.join(HIVE_QUEUE_PRIORITIES)))
        # Fall back to the configured default queue when none is given.
        self.mapred_queue = mapred_queue or conf.get('hive',
                                                     'default_hive_mapred_queue')
        self.mapred_queue_priority = mapred_queue_priority
        self.mapred_job_name = mapred_job_name
def _get_proxy_user(self):
"""
This function set the proper proxy_user value in case the user overwtire the default.
"""
conn = self.conn
proxy_user_value = conn.extra_dejson.get('proxy_user', "")
if proxy_user_value == "login" and conn.login:
return "hive.server2.proxy.user={0}".format(conn.login)
if proxy_user_value == "owner" and self.run_as:
return "hive.server2.proxy.user={0}".format(self.run_as)
if proxy_user_value != "": # There is a custom proxy user
return "hive.server2.proxy.user={0}".format(proxy_user_value)
return proxy_user_value # The default proxy user (undefined)
    def _prepare_cli_cmd(self):
        """
        This function creates the command list from available information
        """
        conn = self.conn
        hive_bin = 'hive'
        cmd_extra = []
        if self.use_beeline:
            hive_bin = 'beeline'
            jdbc_url = "jdbc:hive2://{host}:{port}/{schema}".format(
                host=conn.host, port=conn.port, schema=conn.schema)
            if conf.get('core', 'security') == 'kerberos':
                template = conn.extra_dejson.get(
                    'principal', "hive/_HOST@EXAMPLE.COM")
                if "_HOST" in template:
                    # Replace the _HOST placeholder in the kerberos
                    # principal with the actual hostname.
                    template = utils.replace_hostname_pattern(
                        utils.get_components(template))
                proxy_user = self._get_proxy_user()
                jdbc_url += ";principal={template};{proxy_user}".format(
                    template=template, proxy_user=proxy_user)
            elif self.auth:
                jdbc_url += ";auth=" + self.auth
            # beeline expects the JDBC URL quoted as a single argument.
            jdbc_url = '"{}"'.format(jdbc_url)
            cmd_extra += ['-u', jdbc_url]
            if conn.login:
                cmd_extra += ['-n', conn.login]
            if conn.password:
                cmd_extra += ['-p', conn.password]
        hive_params_list = self.hive_cli_params.split()
        return [hive_bin] + cmd_extra + hive_params_list
@staticmethod
def _prepare_hiveconf(d):
"""
This function prepares a list of hiveconf params
from a dictionary of key value pairs.
:param d:
:type d: dict
>>> hh = HiveCliHook()
>>> hive_conf = {"hive.exec.dynamic.partition": "true",
... "hive.exec.dynamic.partition.mode": "nonstrict"}
>>> hh._prepare_hiveconf(hive_conf)
["-hiveconf", "hive.exec.dynamic.partition=true",\
"-hiveconf", "hive.exec.dynamic.partition.mode=nonstrict"]
"""
if not d:
return []
return as_flattened_list(
zip(["-hiveconf"] * len(d),
["{}={}".format(k, v) for k, v in d.items()])
)
    def run_cli(self, hql, schema=None, verbose=True, hive_conf=None):
        """
        Run an hql statement using the hive cli. If hive_conf is specified
        it should be a dict and the entries will be set as key/value pairs
        in HiveConf

        :param hql: the hql statement(s) to run
        :type hql: str
        :param schema: database to switch to first; defaults to the
            connection's schema
        :type schema: str
        :param verbose: whether to log the command and its output
        :type verbose: bool
        :param hive_conf: if specified these key value pairs will be passed
            to hive as ``-hiveconf "key"="value"``. Note that they will be
            passed after the ``hive_cli_params`` and thus will override
            whatever values are specified in the database.
        :type hive_conf: dict

        >>> hh = HiveCliHook()
        >>> result = hh.run_cli("USE airflow;")
        >>> ("OK" in result)
        True
        """
        conn = self.conn
        schema = schema or conn.schema
        if schema:
            hql = "USE {schema};\n{hql}".format(schema=schema, hql=hql)
        with TemporaryDirectory(prefix='airflow_hiveop_') as tmp_dir:
            with NamedTemporaryFile(dir=tmp_dir) as f:
                # Write the statements to a temp file and run with -f,
                # avoiding any shell quoting of the HQL itself.
                hql = hql + '\n'
                f.write(hql.encode('UTF-8'))
                f.flush()
                hive_cmd = self._prepare_cli_cmd()
                # Expose the Airflow context (dag_id, task_id, ...) to the
                # hive script as hiveconf variables.
                env_context = get_context_from_env_var()
                # Only extend the hive_conf if it is defined.
                if hive_conf:
                    env_context.update(hive_conf)
                hive_conf_params = self._prepare_hiveconf(env_context)
                # Queue settings are passed for MR1, MR2 and Tez engines.
                if self.mapred_queue:
                    hive_conf_params.extend(
                        ['-hiveconf',
                         'mapreduce.job.queuename={}'
                         .format(self.mapred_queue),
                         '-hiveconf',
                         'mapred.job.queue.name={}'
                         .format(self.mapred_queue),
                         '-hiveconf',
                         'tez.queue.name={}'
                         .format(self.mapred_queue)
                         ])
                if self.mapred_queue_priority:
                    hive_conf_params.extend(
                        ['-hiveconf',
                         'mapreduce.job.priority={}'
                         .format(self.mapred_queue_priority)])
                if self.mapred_job_name:
                    hive_conf_params.extend(
                        ['-hiveconf',
                         'mapred.job.name={}'
                         .format(self.mapred_job_name)])
                hive_cmd.extend(hive_conf_params)
                hive_cmd.extend(['-f', f.name])
                if verbose:
                    self.log.info("%s", " ".join(hive_cmd))
                sp = subprocess.Popen(
                    hive_cmd,
                    stdout=subprocess.PIPE,
                    stderr=subprocess.STDOUT,
                    cwd=tmp_dir,
                    close_fds=True)
                # Keep a handle so kill() can terminate this subprocess.
                self.sp = sp
                stdout = ''
                # Stream output line by line so long-running queries are
                # logged as they progress.
                while True:
                    line = sp.stdout.readline()
                    if not line:
                        break
                    stdout += line.decode('UTF-8')
                    if verbose:
                        self.log.info(line.decode('UTF-8').strip())
                sp.wait()
                if sp.returncode:
                    raise AirflowException(stdout)
                return stdout
    def test_hql(self, hql):
        """
        Test an hql statement using the hive cli and EXPLAIN

        :param hql: one or more ``;``-separated hql statements
        :type hql: str
        """
        create, insert, other = [], [], []
        for query in hql.split(';'):  # naive
            query_original = query
            query = query.lower().strip()
            # Bucket statements: CREATEs and INSERTs are EXPLAINed;
            # settings/jars/UDFs are re-run as a prefix for the INSERTs.
            if query.startswith('create table'):
                create.append(query_original)
            elif query.startswith(('set ',
                                   'add jar ',
                                   'create temporary function')):
                other.append(query_original)
            elif query.startswith('insert'):
                insert.append(query_original)
        other = ';'.join(other)
        for query_set in [create, insert]:
            for query in query_set:
                query_preview = ' '.join(query.split())[:50]
                self.log.info("Testing HQL [%s (...)]", query_preview)
                if query_set == insert:
                    # INSERTs may depend on the settings/UDFs collected
                    # above, so prepend them before EXPLAIN.
                    query = other + '; explain ' + query
                else:
                    query = 'explain ' + query
                try:
                    self.run_cli(query, verbose=False)
                except AirflowException as e:
                    message = e.args[0].split('\n')[-2]
                    self.log.info(message)
                    # Hive reports errors as "line:column"; show a few
                    # lines of context around the reported line.
                    error_loc = re.search(r'(\d+):(\d+)', message)
                    if error_loc and error_loc.group(1).isdigit():
                        lst = int(error_loc.group(1))
                        begin = max(lst - 2, 0)
                        end = min(lst + 3, len(query.split('\n')))
                        context = '\n'.join(query.split('\n')[begin:end])
                        self.log.info("Context :\n %s", context)
                else:
                    self.log.info("SUCCESS")
def load_df(
self,
df,
table,
field_dict=None,
delimiter=',',
encoding='utf8',
pandas_kwargs=None, **kwargs):
"""
Loads a pandas DataFrame into hive.
Hive data types will be inferred if not passed but column names will
not be sanitized.
:param df: DataFrame to load into a Hive table
:type df: pandas.DataFrame
:param table: target Hive table, use dot notation to target a
specific database
:type table: str
:param field_dict: mapping from column name to hive data type.
Note that it must be OrderedDict so as to keep columns' order.
:type field_dict: collections.OrderedDict
:param delimiter: field delimiter in the file
:type delimiter: str
:param encoding: str encoding to use when writing DataFrame to file
:type encoding: str
:param pandas_kwargs: passed to DataFrame.to_csv
:type pandas_kwargs: dict
:param kwargs: passed to self.load_file
"""
def _infer_field_types_from_df(df):
DTYPE_KIND_HIVE_TYPE = {
'b': 'BOOLEAN', # boolean
'i': 'BIGINT', # signed integer
'u': 'BIGINT', # unsigned integer
'f': 'DOUBLE', # floating-point
'c': 'STRING', # complex floating-point
'M': 'TIMESTAMP', # datetime
'O': 'STRING', # object
'S': 'STRING', # (byte-)string
'U': 'STRING', # Unicode
'V': 'STRING' # void
}
d = OrderedDict()
for col, dtype in df.dtypes.iteritems():
d[col] = DTYPE_KIND_HIVE_TYPE[dtype.kind]
return d
if pandas_kwargs is None:
pandas_kwargs = {}
with TemporaryDirectory(prefix='airflow_hiveop_') as tmp_dir:
with NamedTemporaryFile(dir=tmp_dir, mode="w") as f:
if field_dict is None:
field_dict = _infer_field_types_from_df(df)
df.to_csv(path_or_buf=f,
sep=delimiter,
header=False,
index=False,
encoding=encoding,
date_format="%Y-%m-%d %H:%M:%S",
**pandas_kwargs)
f.flush()
return self.load_file(filepath=f.name,
table=table,
delimiter=delimiter,
field_dict=field_dict,
**kwargs)
    def load_file(
            self,
            filepath,
            table,
            delimiter=",",
            field_dict=None,
            create=True,
            overwrite=True,
            partition=None,
            recreate=False,
            tblproperties=None):
        """
        Loads a local file into Hive

        Note that the table generated in Hive uses ``STORED AS textfile``
        which isn't the most efficient serialization format. If a
        large amount of data is loaded and/or if the tables gets
        queried considerably, you may want to use this operator only to
        stage the data into a temporary table before loading it into its
        final destination using a ``HiveOperator``.

        :param filepath: local filepath of the file to load
        :type filepath: str
        :param table: target Hive table, use dot notation to target a
            specific database
        :type table: str
        :param delimiter: field delimiter in the file
        :type delimiter: str
        :param field_dict: A dictionary of the fields name in the file
            as keys and their Hive types as values.
            Note that it must be OrderedDict so as to keep columns' order.
        :type field_dict: collections.OrderedDict
        :param create: whether to create the table if it doesn't exist
        :type create: bool
        :param overwrite: whether to overwrite the data in table or partition
        :type overwrite: bool
        :param partition: target partition as a dict of partition columns
            and values
        :type partition: dict
        :param recreate: whether to drop and recreate the table at every
            execution
        :type recreate: bool
        :param tblproperties: TBLPROPERTIES of the hive table being created
        :type tblproperties: dict
        """
        # First statement: (re)create the table if requested.
        hql = ''
        if recreate:
            hql += "DROP TABLE IF EXISTS {table};\n".format(table=table)
        if create or recreate:
            if field_dict is None:
                raise ValueError("Must provide a field dict when creating a table")
            # Column names are backtick-quoted; strip first to avoid
            # doubling existing backticks.
            fields = ",\n ".join(
                ['`{k}` {v}'.format(k=k.strip('`'), v=v) for k, v in field_dict.items()])
            hql += "CREATE TABLE IF NOT EXISTS {table} (\n{fields})\n".format(
                table=table, fields=fields)
            if partition:
                # Partition columns are always created as STRING.
                pfields = ",\n ".join(
                    [p + " STRING" for p in partition])
                hql += "PARTITIONED BY ({pfields})\n".format(pfields=pfields)
            hql += "ROW FORMAT DELIMITED\n"
            hql += "FIELDS TERMINATED BY '{delimiter}'\n".format(delimiter=delimiter)
            hql += "STORED AS textfile\n"
            if tblproperties is not None:
                tprops = ", ".join(
                    ["'{0}'='{1}'".format(k, v) for k, v in tblproperties.items()])
                hql += "TBLPROPERTIES({tprops})\n".format(tprops=tprops)
            hql += ";"
            self.log.info(hql)
            self.run_cli(hql)
        # Second statement: load the local file into the table/partition.
        hql = "LOAD DATA LOCAL INPATH '{filepath}' ".format(filepath=filepath)
        if overwrite:
            hql += "OVERWRITE "
        hql += "INTO TABLE {table} ".format(table=table)
        if partition:
            pvals = ", ".join(
                ["{0}='{1}'".format(k, v) for k, v in partition.items()])
            hql += "PARTITION ({pvals})".format(pvals=pvals)
        # As a workaround for HIVE-10541, add a newline character
        # at the end of hql (AIRFLOW-2412).
        hql += ';\n'
        self.log.info(hql)
        self.run_cli(hql)
def kill(self):
if hasattr(self, 'sp'):
if self.sp.poll() is None:
print("Killing the Hive job")
self.sp.terminate()
time.sleep(60)
self.sp.kill()
class HiveMetastoreHook(BaseHook):
""" Wrapper to interact with the Hive Metastore"""
# java short max val
MAX_PART_COUNT = 32767
    def __init__(self, metastore_conn_id='metastore_default'):
        """Open a thrift client for the given metastore connection id."""
        self.conn_id = metastore_conn_id
        self.metastore = self.get_metastore_client()
    def __getstate__(self):
        # This is for pickling to work despite the thrift hive client not
        # being picklable: drop the client before pickling ...
        d = dict(self.__dict__)
        del d['metastore']
        return d
    def __setstate__(self, d):
        # ... and recreate it on unpickling.
        self.__dict__.update(d)
        self.__dict__['metastore'] = self.get_metastore_client()
    def get_metastore_client(self):
        """
        Returns a Hive thrift client.
        """
        import hmsclient
        from thrift.transport import TSocket, TTransport
        from thrift.protocol import TBinaryProtocol
        # Pick the first reachable metastore server for this conn_id.
        ms = self._find_valid_server()
        if ms is None:
            raise AirflowException("Failed to locate the valid server.")
        auth_mechanism = ms.extra_dejson.get('authMechanism', 'NOSASL')
        if conf.get('core', 'security') == 'kerberos':
            # Under kerberos the default mechanism switches to GSSAPI.
            auth_mechanism = ms.extra_dejson.get('authMechanism', 'GSSAPI')
            kerberos_service_name = ms.extra_dejson.get('kerberos_service_name', 'hive')
        conn_socket = TSocket.TSocket(ms.host, ms.port)
        if conf.get('core', 'security') == 'kerberos' \
                and auth_mechanism == 'GSSAPI':
            # Prefer saslwrapper when available; fall back to plain sasl.
            try:
                import saslwrapper as sasl
            except ImportError:
                import sasl
            def sasl_factory():
                # Build a fresh SASL client bound to this host/service.
                sasl_client = sasl.Client()
                sasl_client.setAttr("host", ms.host)
                sasl_client.setAttr("service", kerberos_service_name)
                sasl_client.init()
                return sasl_client
            from thrift_sasl import TSaslClientTransport
            transport = TSaslClientTransport(sasl_factory, "GSSAPI", conn_socket)
        else:
            transport = TTransport.TBufferedTransport(conn_socket)
        protocol = TBinaryProtocol.TBinaryProtocol(transport)
        return hmsclient.HMSClient(iprot=protocol)
def _find_valid_server(self):
conns = self.get_connections(self.conn_id)
for conn in conns:
host_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.log.info("Trying to connect to %s:%s", conn.host, conn.port)
if host_socket.connect_ex((conn.host, conn.port)) == 0:
self.log.info("Connected to %s:%s", conn.host, conn.port)
host_socket.close()
return conn
else:
self.log.info("Could not connect to %s:%s", conn.host, conn.port)
    def get_conn(self):
        """Return the cached metastore thrift client."""
        return self.metastore
def check_for_partition(self, schema, table, partition):
"""
Checks whether a partition exists
:param schema: Name of hive schema (database) @table belongs to
:type schema: str
:param table: Name of hive table @partition belongs to
:type schema: str
:partition: Expression that matches the partitions to check for
(eg `a = 'b' AND c = 'd'`)
:type schema: str
:rtype: bool
>>> hh = HiveMetastoreHook()
>>> t = 'static_babynames_partitioned'
>>> hh.check_for_partition('airflow', t, "ds='2015-01-01'")
True
"""
with self.metastore as client:
partitions = client.get_partitions_by_filter(
schema, table, partition, 1)
if partitions:
return True
else:
return False
    def check_for_named_partition(self, schema, table, partition_name):
        """
        Checks whether a partition with a given name exists

        :param schema: Name of hive schema (database) @table belongs to
        :type schema: str
        :param table: Name of hive table @partition belongs to
        :type table: str
        :param partition_name: Name of the partitions to check for (eg `a=b/c=d`)
        :type partition_name: str
        :rtype: bool

        >>> hh = HiveMetastoreHook()
        >>> t = 'static_babynames_partitioned'
        >>> hh.check_for_named_partition('airflow', t, "ds=2015-01-01")
        True
        >>> hh.check_for_named_partition('airflow', t, "ds=xxx")
        False
        """
        # Delegate directly to the metastore client's exact-name check.
        with self.metastore as client:
            return client.check_for_named_partition(schema, table, partition_name)
    def get_table(self, table_name, db='default'):
        """Get a metastore table object

        >>> hh = HiveMetastoreHook()
        >>> t = hh.get_table(db='airflow', table_name='static_babynames')
        >>> t.tableName
        'static_babynames'
        >>> [col.name for col in t.sd.cols]
        ['state', 'year', 'name', 'gender', 'num']
        """
        # Support dot notation ("db.table") when no explicit db was given.
        if db == 'default' and '.' in table_name:
            db, table_name = table_name.split('.')[:2]
        with self.metastore as client:
            return client.get_table(dbname=db, tbl_name=table_name)
    def get_tables(self, db, pattern='*'):
        """
        Get metastore table objects for all tables in ``db`` matching ``pattern``
        """
        with self.metastore as client:
            tables = client.get_tables(db_name=db, pattern=pattern)
            return client.get_table_objects_by_name(db, tables)
    def get_databases(self, pattern='*'):
        """
        Get the names of all metastore databases matching ``pattern``
        """
        with self.metastore as client:
            return client.get_databases(pattern)
    def get_partitions(
            self, schema, table_name, filter=None):
        """
        Returns a list of all partitions in a table. Works only
        for tables with less than 32767 (java short max val).
        For subpartitioned table, the number might easily exceed this.

        >>> hh = HiveMetastoreHook()
        >>> t = 'static_babynames_partitioned'
        >>> parts = hh.get_partitions(schema='airflow', table_name=t)
        >>> len(parts)
        1
        >>> parts
        [{'ds': '2015-01-01'}]
        """
        # NOTE: the parameter name ``filter`` shadows the builtin but is
        # part of the public signature, so it cannot be renamed safely.
        with self.metastore as client:
            table = client.get_table(dbname=schema, tbl_name=table_name)
            if len(table.partitionKeys) == 0:
                raise AirflowException("The table isn't partitioned")
            else:
                if filter:
                    parts = client.get_partitions_by_filter(
                        db_name=schema, tbl_name=table_name,
                        filter=filter, max_parts=HiveMetastoreHook.MAX_PART_COUNT)
                else:
                    parts = client.get_partitions(
                        db_name=schema, tbl_name=table_name,
                        max_parts=HiveMetastoreHook.MAX_PART_COUNT)
                # Convert each thrift Partition into a {key: value} dict.
                pnames = [p.name for p in table.partitionKeys]
                return [dict(zip(pnames, p.values)) for p in parts]
@staticmethod
def _get_max_partition_from_part_specs(part_specs, partition_key, filter_map):
"""
Helper method to get max partition of partitions with partition_key
from part specs. key:value pair in filter_map will be used to
filter out partitions.
:param part_specs: list of partition specs.
:type part_specs: list
:param partition_key: partition key name.
:type partition_key: str
:param filter_map: partition_key:partition_value map used for partition filtering,
e.g. {'key1': 'value1', 'key2': 'value2'}.
Only partitions matching all partition_key:partition_value
pairs will be considered as candidates of max partition.
:type filter_map: map
:return: Max partition or None if part_specs is empty.
"""
if not part_specs:
return None
# Assuming all specs have the same keys.
if partition_key not in part_specs[0].keys():
raise AirflowException("Provided partition_key {} "
"is not in part_specs.".format(partition_key))
if filter_map:
is_subset = set(filter_map.keys()).issubset(set(part_specs[0].keys()))
if filter_map and not is_subset:
raise AirflowException("Keys in provided filter_map {} "
"are not subset of part_spec keys: {}"
.format(', '.join(filter_map.keys()),
', '.join(part_specs[0].keys())))
candidates = [p_dict[partition_key] for p_dict in part_specs
if filter_map is None or
all(item in p_dict.items() for item in filter_map.items())]
if not candidates:
return None
else:
return max(candidates).encode('utf-8')
def max_partition(self, schema, table_name, field=None, filter_map=None):
    """
    Returns the maximum value for all partitions with given field in a table.

    If only one partition key exist in the table, the key will be used as field.
    filter_map should be a partition_key:partition_value map and will be used to
    filter out partitions.

    :param schema: schema name.
    :type schema: str
    :param table_name: table name.
    :type table_name: str
    :param field: partition key to get max partition from.
    :type field: str
    :param filter_map: partition_key:partition_value map used for partition filtering.
    :type filter_map: map

    >>> hh = HiveMetastoreHook()
    >>> filter_map = {'ds': '2015-01-01', 'ds': '2014-01-01'}
    >>> t = 'static_babynames_partitioned'
    >>> hh.max_partition(schema='airflow',\
    ... table_name=t, field='ds', filter_map=filter_map)
    '2015-01-01'
    """
    with self.metastore as client:
        table = client.get_table(dbname=schema, tbl_name=table_name)
        key_name_set = {key.name for key in table.partitionKeys}
        # With a single partition key the field argument is redundant and
        # is inferred; otherwise it must be supplied and must be a real key.
        if len(table.partitionKeys) == 1:
            field = table.partitionKeys[0].name
        elif not field:
            raise AirflowException("Please specify the field you want the max "
                                   "value for.")
        elif field not in key_name_set:
            raise AirflowException("Provided field is not a partition key.")

        if filter_map and not set(filter_map.keys()).issubset(key_name_set):
            raise AirflowException("Provided filter_map contains keys "
                                   "that are not partition key.")

        part_names = \
            client.get_partition_names(schema,
                                       table_name,
                                       max_parts=HiveMetastoreHook.MAX_PART_COUNT)
        # Convert raw partition names (e.g. 'ds=2015-01-01') into spec dicts.
        part_specs = [client.partition_name_to_spec(part_name)
                      for part_name in part_names]

    # Delegate the filtering and max computation to the pure helper.
    return HiveMetastoreHook._get_max_partition_from_part_specs(part_specs,
                                                                field,
                                                                filter_map)
def table_exists(self, table_name, db='default'):
    """
    Check whether ``table_name`` exists in database ``db``.

    >>> hh = HiveMetastoreHook()
    >>> hh.table_exists(db='airflow', table_name='static_babynames')
    True
    >>> hh.table_exists(db='airflow', table_name='does_not_exist')
    False
    """
    try:
        self.get_table(table_name, db)
    except Exception:
        # get_table raises when the metastore does not know the table.
        return False
    return True
class HiveServer2Hook(BaseHook):
    """
    Wrapper around the pyhive library

    Notes:
    * the default authMechanism is PLAIN, to override it you
      can specify it in the ``extra`` of your connection in the UI
    * the default for run_set_variable_statements is true, if you
      are using impala you may need to set it to false in the
      ``extra`` of your connection in the UI
    """
    def __init__(self, hiveserver2_conn_id='hiveserver2_default'):
        # Airflow connection id pointing at the HiveServer2 instance.
        self.hiveserver2_conn_id = hiveserver2_conn_id

    def get_conn(self, schema=None):
        """
        Returns a Hive connection object.

        :param schema: database to connect to; falls back to the connection's
            configured schema and finally to 'default'.
        """
        db = self.get_connection(self.hiveserver2_conn_id)
        auth_mechanism = db.extra_dejson.get('authMechanism', 'NONE')
        if auth_mechanism == 'NONE' and db.login is None:
            # we need to give a username
            username = 'airflow'
        # NOTE(review): ``username`` is only bound in the branch above; the
        # ``db.login or username`` expression below relies on short-circuit
        # evaluation whenever a login is configured -- confirm this invariant.
        kerberos_service_name = None
        if conf.get('core', 'security') == 'kerberos':
            auth_mechanism = db.extra_dejson.get('authMechanism', 'KERBEROS')
            kerberos_service_name = db.extra_dejson.get('kerberos_service_name', 'hive')

        # pyhive uses GSSAPI instead of KERBEROS as a auth_mechanism identifier
        if auth_mechanism == 'GSSAPI':
            self.log.warning(
                "Detected deprecated 'GSSAPI' for authMechanism "
                "for %s. Please use 'KERBEROS' instead",
                self.hiveserver2_conn_id
            )
            auth_mechanism = 'KERBEROS'

        from pyhive.hive import connect
        return connect(
            host=db.host,
            port=db.port,
            auth=auth_mechanism,
            kerberos_service_name=kerberos_service_name,
            username=db.login or username,
            password=db.password,
            database=schema or db.schema or 'default')

    def _get_results(self, hql, schema='default', fetch_size=None, hive_conf=None):
        # Generator: yields the cursor description (header) once per
        # result-producing statement, then the rows themselves.
        from pyhive.exc import ProgrammingError
        if isinstance(hql, str):
            hql = [hql]
        previous_description = None
        with contextlib.closing(self.get_conn(schema)) as conn, \
                contextlib.closing(conn.cursor()) as cur:
            cur.arraysize = fetch_size or 1000

            # not all query services (e.g. impala AIRFLOW-4434) support the set command
            db = self.get_connection(self.hiveserver2_conn_id)
            if db.extra_dejson.get('run_set_variable_statements', True):
                env_context = get_context_from_env_var()
                if hive_conf:
                    env_context.update(hive_conf)
                for k, v in env_context.items():
                    cur.execute("set {}={}".format(k, v))

            for statement in hql:
                cur.execute(statement)

                # we only get results of statements that returns
                lowered_statement = statement.lower().strip()
                if (lowered_statement.startswith('select') or
                        lowered_statement.startswith('with') or
                        lowered_statement.startswith('show') or
                        (lowered_statement.startswith('set') and
                         '=' not in lowered_statement)):
                    description = [c for c in cur.description]
                    # All statements in a single call must share one schema.
                    if previous_description and previous_description != description:
                        message = '''The statements are producing different descriptions:
Current: {}
Previous: {}'''.format(repr(description),
                       repr(previous_description))
                        raise ValueError(message)
                    elif not previous_description:
                        previous_description = description
                    yield description
                    try:
                        # DB API 2 raises when no results are returned
                        # we're silencing here as some statements in the list
                        # may be `SET` or DDL
                        yield from cur
                    except ProgrammingError:
                        self.log.debug("get_results returned no records")

    def get_results(self, hql, schema='default', fetch_size=None, hive_conf=None):
        """
        Get results of the provided hql in target schema.

        :param hql: hql to be executed.
        :type hql: str or list
        :param schema: target schema, default to 'default'.
        :type schema: str
        :param fetch_size: max size of result to fetch.
        :type fetch_size: int
        :param hive_conf: hive_conf to execute alone with the hql.
        :type hive_conf: dict
        :return: results of hql execution, dict with data (list of results) and header
        :rtype: dict
        """
        results_iter = self._get_results(hql, schema,
                                         fetch_size=fetch_size, hive_conf=hive_conf)
        # The first item yielded by _get_results is the cursor description.
        header = next(results_iter)
        results = {
            'data': list(results_iter),
            'header': header
        }
        return results

    def to_csv(
            self,
            hql,
            csv_filepath,
            schema='default',
            delimiter=',',
            lineterminator='\r\n',
            output_header=True,
            fetch_size=1000,
            hive_conf=None):
        """
        Execute hql in target schema and write results to a csv file.

        :param hql: hql to be executed.
        :type hql: str or list
        :param csv_filepath: filepath of csv to write results into.
        :type csv_filepath: str
        :param schema: target schema, default to 'default'.
        :type schema: str
        :param delimiter: delimiter of the csv file, default to ','.
        :type delimiter: str
        :param lineterminator: lineterminator of the csv file.
        :type lineterminator: str
        :param output_header: header of the csv file, default to True.
        :type output_header: bool
        :param fetch_size: number of result rows to write into the csv file, default to 1000.
        :type fetch_size: int
        :param hive_conf: hive_conf to execute alone with the hql.
        :type hive_conf: dict
        """
        results_iter = self._get_results(hql, schema,
                                         fetch_size=fetch_size, hive_conf=hive_conf)
        header = next(results_iter)
        message = None

        i = 0
        with open(csv_filepath, 'wb') as file:
            # NOTE(review): stdlib csv.writer takes no ``encoding`` kwarg; this
            # presumably relies on ``unicodecsv`` being imported as ``csv`` at
            # the top of the file -- confirm.
            writer = csv.writer(file,
                                delimiter=delimiter,
                                lineterminator=lineterminator,
                                encoding='utf-8')
            try:
                if output_header:
                    self.log.debug('Cursor description is %s', header)
                    writer.writerow([c[0] for c in header])

                for i, row in enumerate(results_iter, 1):
                    writer.writerow(row)
                    if i % fetch_size == 0:
                        self.log.info("Written %s rows so far.", i)
            except ValueError as exception:
                message = str(exception)

        if message:
            # need to clean up the file first
            os.remove(csv_filepath)
            raise ValueError(message)

        self.log.info("Done. Loaded a total of %s rows.", i)

    def get_records(self, hql, schema='default', hive_conf=None):
        """
        Get a set of records from a Hive query.

        :param hql: hql to be executed.
        :type hql: str or list
        :param schema: target schema, default to 'default'.
        :type schema: str
        :param hive_conf: hive_conf to execute alone with the hql.
        :type hive_conf: dict
        :return: result of hive execution
        :rtype: list

        >>> hh = HiveServer2Hook()
        >>> sql = "SELECT * FROM airflow.static_babynames LIMIT 100"
        >>> len(hh.get_records(sql))
        100
        """
        return self.get_results(hql, schema=schema, hive_conf=hive_conf)['data']

    def get_pandas_df(self, hql, schema='default'):
        """
        Get a pandas dataframe from a Hive query

        :param hql: hql to be executed.
        :type hql: str or list
        :param schema: target schema, default to 'default'.
        :type schema: str
        :return: result of hql execution
        :rtype: pandas.DataFrame

        >>> hh = HiveServer2Hook()
        >>> sql = "SELECT * FROM airflow.static_babynames LIMIT 100"
        >>> df = hh.get_pandas_df(sql)
        >>> len(df.index)
        100
        """
        import pandas as pd
        res = self.get_results(hql, schema=schema)
        df = pd.DataFrame(res['data'])
        # Name the columns after the first element of each cursor-description tuple.
        df.columns = [c[0] for c in res['header']]
        return df
| apache-2.0 |
bartvm/GroundHog | experiments/nmt/tree.py | 17 | 6574 | #!/usr/bin/env python
import argparse
import cPickle
import traceback
import logging
import time
import copy
import networkx as nx
import numpy
import experiments.nmt
from experiments.nmt import RNNEncoderDecoder, parse_input
import theano
import theano.tensor as TT
import matplotlib.pyplot as plt
import matplotlib.colors as colors
import matplotlib.cm as cmx
logger = logging.getLogger(__name__)
class Timer(object):
    """Accumulates wall-clock time over repeated start()/finish() intervals."""

    def __init__(self):
        # Seconds accumulated across all completed intervals.
        self.total = 0

    def start(self):
        # Mark the beginning of the current interval.
        self.start_time = time.time()

    def finish(self):
        # Fold the just-finished interval into the running total.
        elapsed = time.time() - self.start_time
        self.total += elapsed
def indices_to_words(i2w, seq):
    """Translate a sequence of vocabulary indices into words.

    Stops at (and excludes) the first '<eol>' marker.

    :param i2w: mapping from integer index to word string.
    :param seq: iterable of vocabulary indices.
    :return: list of words preceding the first '<eol>'.
    """
    sen = []
    # Iterate the sequence directly instead of the original
    # Python-2-only ``xrange(len(seq))`` index loop; behaviour is
    # identical and the function now also runs under Python 3.
    for idx in seq:
        word = i2w[idx]
        if word == '<eol>':
            # Everything after the end-of-line marker is padding.
            break
        sen.append(word)
    return sen
def parse_args():
    """Assemble the command-line parser and parse ``sys.argv``."""
    p = argparse.ArgumentParser()
    p.add_argument("--state", help="State to use")
    p.add_argument("--state-fn", default="prototype_state",
                   help="Initialization function for state")
    p.add_argument("model_path", help="Path to the model")
    p.add_argument("changes", nargs="?", default="", help="Changes to state")
    return p.parse_args()
def main():
    """Interactively visualise the gating tree of a gated recursive
    convolutional encoder: read a sentence, run the encoder level by level,
    build a NetworkX graph of merge/aggregation decisions and save a figure.
    """
    args = parse_args()

    # Build the model state: prototype function, optional pickled state file,
    # then ad-hoc overrides given on the command line.
    state = getattr(experiments.nmt, args.state_fn)()
    if hasattr(args, 'state') and args.state:
        with open(args.state) as src:
            state.update(cPickle.load(src))
    # NOTE(review): eval of user-supplied "changes" -- trusted-input only.
    state.update(eval("dict({})".format(args.changes)))
    assert state['enc_rec_layer'] == "RecursiveConvolutionalLayer", "Only works with gated recursive convolutional encoder"

    logging.basicConfig(level=getattr(logging, state['level']), format="%(asctime)s: %(name)s: %(levelname)s: %(message)s")

    rng = numpy.random.RandomState(state['seed'])
    enc_dec = RNNEncoderDecoder(state, rng, skip_init=True)
    enc_dec.build()
    lm_model = enc_dec.create_lm_model()
    lm_model.load(args.model_path)

    # Word <-> index dictionaries for the source language.
    indx_word = cPickle.load(open(state['word_indx'],'rb'))
    idict_src = cPickle.load(open(state['indx_word'],'r'))

    # Compiled Theano functions: initial word embeddings and one step of the
    # recursive convolutional transition (returning the gate activations).
    x = TT.lvector()
    h = TT.tensor3()
    proj_x = theano.function([x], enc_dec.encoder.input_embedders[0](
        enc_dec.encoder.approx_embedder(x)).out, name='proj_x')
    new_h, gater = enc_dec.encoder.transitions[0].step_fprop(
        None, h, return_gates = True)
    step_up = theano.function([h], [new_h, gater], name='gater_step')

    while True:
        try:
            seqin = raw_input('Input Sequence: ')
            seq,parsed_in = parse_input(state, indx_word, seqin, idx2word=idict_src)
            print "Parsed Input:", parsed_in
        except Exception:
            print "Exception while parsing your input:"
            traceback.print_exc()
            continue

        # get the initial embedding
        new_h = proj_x(seq)
        new_h = new_h.reshape(new_h.shape[0], 1, new_h.shape[1])

        nodes = numpy.arange(len(seq)).tolist()
        node_idx = len(seq)-1
        rules = []  # NOTE(review): never used below -- leftover?
        nodes_level = copy.deepcopy(nodes)
        G = nx.DiGraph()
        input_nodes = []
        merge_nodes = []
        aggregate_nodes = []
        nidx = 0
        vpos = 0
        nodes_pos = {}      # node -> (x, y) layout position
        nodes_labels = {}   # node -> text label

        # input nodes
        for nn in nodes[:-1]:
            nidx += 1
            G.add_node(nn, pos=(nidx, 0), ndcolor="blue", label="%d"%nn)
            nodes_pos[nn] = (nidx, vpos)
            nodes_labels[nn] = idict_src[seq[nidx-1]]
            input_nodes.append(nn)
        node_idx = len(seq) - 1
        vpos += 6

        # One tree level per step of the recursive convolution.
        for dd in xrange(len(seq)-1):
            new_h, gater = step_up(new_h)
            decisions = numpy.argmax(gater, -1)  # NOTE(review): unused
            new_nodes_level = numpy.zeros(len(seq) - (dd+1))
            hpos = float(len(seq)+1) - 0.5 * (dd+1)
            last_node = True
            for nn in xrange(len(seq)-(dd+1)):
                hpos -= 1
                if not last_node:
                    # merge nodes
                    node_idx += 1
                    G.add_node(node_idx, ndcolor="red", label="m")
                    nodes_labels[node_idx] = ""
                    nodes_pos[node_idx] = (hpos, vpos)
                    G.add_edge(nodes_level[-(nn+1)], node_idx, weight=gater[-(nn+1),0,0])
                    G.add_edge(nodes_level[-(nn+2)], node_idx, weight=gater[-(nn+1),0,0])
                    merge_nodes.append(node_idx)
                    merge_node = node_idx
                    # linear aggregation nodes
                    node_idx += 1
                    G.add_node(node_idx, ndcolor="red", label="")
                    nodes_labels[node_idx] = "$+$"
                    nodes_pos[node_idx] = (hpos, vpos+6)
                    # Edge weights are the three gate components (merge/left/right).
                    G.add_edge(merge_node, node_idx, weight=gater[-(nn+1),0,0])
                    G.add_edge(nodes_level[-(nn+2)], node_idx, weight=gater[-(nn+1),0,1])
                    G.add_edge(nodes_level[-(nn+1)], node_idx, weight=gater[-(nn+1),0,2])
                    aggregate_nodes.append(node_idx)
                    new_nodes_level[-(nn+1)] = node_idx
                last_node = False
            nodes_level = copy.deepcopy(new_nodes_level)
            vpos += 12

        # TODO: Show only strong edges.
        threshold = float(raw_input('Threshold: '))
        edges = [(u,v,d) for (u,v,d) in G.edges(data=True) if d['weight'] > threshold]
        #edges = G.edges(data=True)

        use_weighting = raw_input('Color according to weight [Y/N]: ')
        if use_weighting == 'Y':
            # Map edge weights in [0, 1] onto a grayscale colormap.
            cm = plt.get_cmap('binary')
            cNorm = colors.Normalize(vmin=0., vmax=1.)
            scalarMap = cmx.ScalarMappable(norm=cNorm, cmap=cm)
            colorList = [scalarMap.to_rgba(d['weight']) for (u,v,d) in edges]
        else:
            colorList = 'k'

        nx.draw_networkx_nodes(G, pos=nodes_pos, nodelist=input_nodes, node_color='white', alpha=1., edge_color='white')
        nx.draw_networkx_nodes(G, pos=nodes_pos, nodelist=merge_nodes, node_color='blue', alpha=0.8, node_size=20)
        nx.draw_networkx_nodes(G, pos=nodes_pos, nodelist=aggregate_nodes, node_color='red', alpha=0.8, node_size=80)
        nx.draw_networkx_edges(G, pos=nodes_pos, edge_color=colorList, edgelist=edges)
        nx.draw_networkx_labels(G,pos=nodes_pos,labels=nodes_labels,font_family='sans-serif')

        plt.axis('off')
        figname = raw_input('Save to: ')
        if figname[-3:] == "pdf":
            plt.savefig(figname, type='pdf')
        else:
            plt.savefig(figname)
        plt.close()
        G.clear()
if __name__ == "__main__":
    # Run interactively: read sentences from stdin and render their gating trees.
    main()
| bsd-3-clause |
googleinterns/amt-xpub | buffling/signal_to_noise_ratio.py | 1 | 4384 | # Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Evaluate signal-to-noise ratio of the 'shuffling+flipping' algorithm."""
import numpy as np
import scipy as sp
import pandas as pd
def generate_transition_matrix(n_pub, p=0.1, first2=False, tol=1e-15):
    '''Generate the transition matrix of logos under independent bit flips.

    Entry (i, j) is the probability of logo i turning into logo j when each
    of the n_pub bits flips independently with probability p, i.e.
    p**d * (1-p)**(n_pub-d) where d is the Hamming distance between i and j.

    Args:
      n_pub: Integer, number of publishers.
      p: Flipping probablity.
      first2: Boolean, whether return only the first 2 rows.
      tol: A small Real number added to each row sum before row-normalizing.

    Return:
      A numpy.array of shape (2 or 2**n_pub, 2**n_pub) of transition
      probabilities.
    '''
    ## check input
    assert int(n_pub) == n_pub and n_pub > 0, "n_pub must be a postive integer."
    assert p > 0 and p < 1, "p can only range between (0,1)"

    n_logo = 2 ** n_pub
    n_row = 2 if first2 else n_logo

    ## Hamming distance between binary representations of row/column indices
    flips = np.array([[bin(i ^ j).count('1') for j in range(n_logo)]
                      for i in range(n_row)])
    transition_mat = p ** flips * (1. - p) ** (n_pub - flips)

    ## add a small number to the row sums, then normalize each row
    row_sums = transition_mat.sum(axis=1, keepdims=True) + tol
    return transition_mat / row_sums
def calculate_signal_to_noise_ratio_of_logo_counts(
        i, j, input_logo_wgt, transition_mat, maximizer=False):
    '''Calculate the SNR with fixed incremental change of two logo counts.

    This is a helper function to calculate_snr(). The two logo counts changed
    are indexed by i and j.

    Args:
      i, j: Integer, indices of the two logo-counts that are changed.
      input_logo_wgt: Input logo-counts weights.
      transition_mat: Transition matrix of logo-counts with blipping.
      maximizer: Boolean, whether return the linear coefficients.

    Return:
      A real number of SNR if maximizer is False. Otherwise,
      both the SNR and the maximizing logo-counts coefficients.
    '''
    ## check input
    assert i <= transition_mat.shape[0] and j <= transition_mat.shape[1], \
        "Illegal i or j."

    ## compute the B matrix (covariance-like term of the blipped counts)
    D = np.diag(input_logo_wgt.dot(transition_mat))
    F = np.diag(input_logo_wgt)
    B = D - (transition_mat.transpose()).dot(F.dot(transition_mat))
    # NOTE(review): when transition_mat rows sum to exactly 1, B annihilates
    # the all-ones vector and is singular; the ``tol`` jitter added in
    # generate_transition_matrix is what keeps this solvable -- confirm.

    ## incremental query from logo j to logo i
    a = transition_mat[j,:] - transition_mat[i,:]

    ## solve for one set of linear coefficients
    c = sp.linalg.solve(B, a)
    ## normalize to unite length (after centering)
    c -= np.mean(c)
    c /= np.linalg.norm(c)

    ## compute the maximizing SNR
    snr = sum((c * a) ** 2) / np.dot(c.transpose(), B.dot(c))
    if maximizer:
        return snr, c
    return snr
def calculate_signal_to_noise_ratio(input_logo_wgt, transition_mat):
    """Compute the SNR for every possible incremental change of logo counts.

    Args:
      input_logo_wgt: Input logo-counts weights.
      transition_mat: Transition matrix of logo-counts with blipping.

    Return:
      A numpy.array of shape transition_mat.shape; entry (src, dst) holds the
      SNR of an incremental change from logo src to logo dst (diagonal is 0).
    """
    n_logo = transition_mat.shape[0]
    snr_matrix = np.zeros(transition_mat.shape)
    # Brute-force every ordered pair of distinct logos.
    for src in range(n_logo):
        for dst in range(n_logo):
            if src == dst:
                continue
            snr_matrix[src, dst] = calculate_signal_to_noise_ratio_of_logo_counts(
                src, dst, input_logo_wgt, transition_mat)
    return snr_matrix
def calculate_max_signal_to_noise_ratio(input_logo_wgt, transition_mat):
    """Compute the max signal-to-noise ratio.

    Evaluates the SNR of the incremental change between the two extreme
    logos: index 0 (no bits set) and index n_logo - 1 (all bits set).

    Args:
      input_logo_wgt: Input logo-counts weights.
      transition_mat: Transition matrix of logo-counts with blipping.

    Return:
      A real number, the maximal signal-to-noise ratio.
    """
    last_logo = transition_mat.shape[0] - 1
    return calculate_signal_to_noise_ratio_of_logo_counts(
        0, last_logo, input_logo_wgt, transition_mat)
| apache-2.0 |
thientu/scikit-learn | examples/linear_model/lasso_dense_vs_sparse_data.py | 348 | 1862 | """
==============================
Lasso on dense and sparse data
==============================
We show that linear_model.Lasso provides the same results for dense and sparse
data and that in the case of sparse data the speed is improved.
"""
print(__doc__)

from time import time
from scipy import sparse
from scipy import linalg

from sklearn.datasets.samples_generator import make_regression
from sklearn.linear_model import Lasso


###############################################################################
# The two Lasso implementations on Dense data
print("--- Dense matrices")

# Random regression problem; X_sp is the same matrix in sparse COO format.
X, y = make_regression(n_samples=200, n_features=5000, random_state=0)
X_sp = sparse.coo_matrix(X)

alpha = 1
sparse_lasso = Lasso(alpha=alpha, fit_intercept=False, max_iter=1000)
dense_lasso = Lasso(alpha=alpha, fit_intercept=False, max_iter=1000)

t0 = time()
sparse_lasso.fit(X_sp, y)
print("Sparse Lasso done in %fs" % (time() - t0))

t0 = time()
dense_lasso.fit(X, y)
print("Dense Lasso done in %fs" % (time() - t0))

# Both fits should find (numerically) the same coefficients.
print("Distance between coefficients : %s"
      % linalg.norm(sparse_lasso.coef_ - dense_lasso.coef_))

###############################################################################
# The two Lasso implementations on Sparse data
print("--- Sparse matrices")

# Threshold the dense matrix so most entries are exactly zero, then convert.
Xs = X.copy()
Xs[Xs < 2.5] = 0.0
Xs = sparse.coo_matrix(Xs)
Xs = Xs.tocsc()

print("Matrix density : %s %%" % (Xs.nnz / float(X.size) * 100))

alpha = 0.1
sparse_lasso = Lasso(alpha=alpha, fit_intercept=False, max_iter=10000)
dense_lasso = Lasso(alpha=alpha, fit_intercept=False, max_iter=10000)

t0 = time()
sparse_lasso.fit(Xs, y)
print("Sparse Lasso done in %fs" % (time() - t0))

t0 = time()
dense_lasso.fit(Xs.toarray(), y)
print("Dense Lasso done in %fs" % (time() - t0))

print("Distance between coefficients : %s"
      % linalg.norm(sparse_lasso.coef_ - dense_lasso.coef_))
| bsd-3-clause |
eg-zhang/scikit-learn | benchmarks/bench_glm.py | 297 | 1493 | """
A comparison of different methods in GLM
Data comes from a random square matrix.
"""
from datetime import datetime
import numpy as np
from sklearn import linear_model
from sklearn.utils.bench import total_seconds
if __name__ == '__main__':

    import pylab as pl

    n_iter = 40

    # Fit time per iteration for each estimator.
    time_ridge = np.empty(n_iter)
    time_ols = np.empty(n_iter)
    time_lasso = np.empty(n_iter)

    dimensions = 500 * np.arange(1, n_iter + 1)

    for i in range(n_iter):

        print('Iteration %s of %s' % (i, n_iter))

        # Square random problem whose size grows with the iteration index.
        n_samples, n_features = 10 * i + 3, 10 * i + 3

        X = np.random.randn(n_samples, n_features)
        Y = np.random.randn(n_samples)

        start = datetime.now()
        ridge = linear_model.Ridge(alpha=1.)
        ridge.fit(X, Y)
        time_ridge[i] = total_seconds(datetime.now() - start)

        start = datetime.now()
        ols = linear_model.LinearRegression()
        ols.fit(X, Y)
        time_ols[i] = total_seconds(datetime.now() - start)

        start = datetime.now()
        lasso = linear_model.LassoLars()
        lasso.fit(X, Y)
        time_lasso[i] = total_seconds(datetime.now() - start)

    # Plot fit time against problem dimension for the three estimators.
    pl.figure('scikit-learn GLM benchmark results')
    pl.xlabel('Dimensions')
    pl.ylabel('Time (s)')
    pl.plot(dimensions, time_ridge, color='r')
    pl.plot(dimensions, time_ols, color='g')
    pl.plot(dimensions, time_lasso, color='b')

    pl.legend(['Ridge', 'OLS', 'LassoLars'], loc='upper left')
    pl.axis('tight')
    pl.show()
| bsd-3-clause |
ulisespereira/PereiraBrunel2016 | S2/fig5.py | 1 | 10899 | import numpy as np
import matplotlib.gridspec as gridspec
from scipy import sparse
from scipy.integrate import odeint
import matplotlib.pyplot as plt
import math as mt
from stimulus import *
from myintegrator import *
import cProfile
import json
from matplotlib.colors import LogNorm
from matplotlib.ticker import MultipleLocator
import cPickle as pickle
# this is the transfer function
def phi(x, theta, uc):
    """Piecewise-linear transfer function: zero below theta, slope nu in
    between, saturating at nu*(uc - theta) above uc."""
    return nu * np.clip(x - theta, 0., uc - theta)
def phi_tanh(x):
    """Smooth sigmoid in [0, 1] built from tanh with gain a1 and offset b1."""
    return (1 + np.tanh(a1 * (x + b1))) / 2.
def mytau(x):
    """Synaptic time-scale gate: effectively infinite (1e50) below the
    activity threshold ``thres``, ``tau_learning`` above it."""
    timescales = (1e50) * np.ones(len(x))
    timescales[x > thres] = tau_learning
    return timescales
def mytauInv(x):
    """Inverse of mytau: zero below the activity threshold, 1/tau_learning above."""
    rates = np.zeros(len(x))
    rates[x > thres] = 1 / tau_learning
    return rates
def winf(x_hist):
    """Steady-state weight matrix: outer product of post- and pre-synaptic
    sigmoidal gates of the delayed (oldest) and current (newest) rates."""
    rate_pre = phi(x_hist[0], theta, uc)
    rate_post = phi(x_hist[-1], theta, uc)
    n = len(rate_pre)
    gate_pre = 0.5 * (np.ones(n) + np.tanh(a_pre * (rate_pre - b_pre)))
    gate_post = np.ones(n) + np.tanh(a_post * (rate_post - b_post))
    return (wmax / 2.) * np.outer(gate_post, gate_pre)
#function for the field
#x_hist is the 'historic' of the x during the delay period the zero is the oldest and the -1 is the newest
def tauWinv(x_hist):
    """Element-wise inverse learning time constants for the weight update."""
    rate_pre = phi(x_hist[0], theta, uc)
    rate_post = phi(x_hist[-1], theta, uc)
    inv_tau = np.add.outer(1 / mytau(rate_post), 1 / mytau(rate_pre))
    # Where both pre and post are above threshold the two rates sum to
    # 2/tau_learning; collapse those entries back to 1/tau_learning.
    inv_tau[inv_tau == 2. / tau_learning] = 1. / tau_learning
    return inv_tau
def F(u):
    """Sigmoidal gain function with slope af and threshold bf."""
    return (1. + np.tanh(af * (u - bf))) / 2.
def field(t, a, x_hist, W, H):
    """Right-hand side of the delayed dynamics with *linear* homeostasis.

    x_hist holds the rate history over the delay window: x_hist[0] is the
    oldest (delayed) state, x_hist[-1] the current one.
    Returns the time derivatives (field_a, field_u, field_w, field_H).
    """
    pre_u = x_hist[0]
    post_u = x_hist[-1]
    n = len(pre_u)
    # Effective connectivity: each row of W scaled by its homeostatic factor H.
    conn_matrix = (W.T * H).T
    # Rate dynamics: stimulus + recurrent excitation - leak - global inhibition.
    field_u = (1 / tau) * (mystim.stim(t) + conn_matrix.dot(phi(x_hist[-1], theta, uc)) - x_hist[-1] - w_inh * np.dot(r1_matrix, phi(x_hist[-1], theta, uc)))  # -a
    field_a = 0.  # in the paper we are not using adaptation during learning
    # Linear homeostasis: H relaxes so the output rate approaches y0.
    field_H = (H * (1. - (phi(post_u, theta, uc) / y0))) / tau_H
    # Weight learning: relaxation towards winf at rate tauWinv (gated by activity).
    field_w = np.multiply(tauWinv(x_hist), winf(x_hist) - W)
    return field_a, field_u, field_w, field_H
def fieldQuadratic(t, a, x_hist, W, H):
    """Right-hand side of the delayed dynamics with *quadratic* homeostasis.

    Identical to field() except for the extra -H*H term in the homeostatic
    equation, which saturates the growth of H.
    """
    pre_u = x_hist[0]
    post_u = x_hist[-1]
    n = len(pre_u)
    conn_matrix = (W.T * H).T
    field_u = (1 / tau) * (mystim.stim(t) + conn_matrix.dot(phi(x_hist[-1], theta, uc)) - x_hist[-1] - w_inh * np.dot(r1_matrix, phi(x_hist[-1], theta, uc)))  # -a
    field_a = 0.  # in the paper we are not using adaptation during learning
    # Quadratic homeostasis: additional -H^2 damping term.
    field_H = (H * (1. - (phi(post_u, theta, uc) / y0)) - H * H) / tau_H
    field_w = np.multiply(tauWinv(x_hist), winf(x_hist) - W)
    return field_a, field_u, field_w, field_H
# ---------------------------------------------------------------------------
# Parameters of the simulation (open parameters of the model)
# ---------------------------------------------------------------------------
n=10 #n pop
delay=15.3
tau=10.   #timescale of populations
tau_H=10000.#10000
af=0.1
bf=0.
y0=.05*np.ones(n)# 0.12  target output rate for homeostasis
w_i=1.
w_inh=w_i/n
nu=1.
theta=0.
uc=1.
wmax=1.6
thres=0.6
beta=1.6
tau_a=10.
#parameters stimulation
dt=0.5
lagStim=100.
times=80#235
amp=2.5
delta=12.
period=13.
# NOTE(review): bf is re-assigned here (it was 0. above); F() and the
# pre/post gates below use this later value of 10. -- confirm intended.
bf=10.
xf=0.7
a_post=bf
b_post=xf
a_pre=bf
b_pre=xf
tau_learning=400.#30000.
a1=6.
b1=-0.25
#-------------------------------------------------------------------
#-----------------Stimulation of Populations------------------------
#-------------------------------------------------------------------
# setting up the simulation: sequential one-hot stimulation patterns
r1_matrix=np.ones((n,n))
patterns=np.identity(n)
patterns=[patterns[:,i] for i in range(n)]
mystim=stimulus(patterns,lagStim,delta,period,times)
mystim.inten=amp
#integrator
npts=int(np.floor(delay/dt)+1)    # points delay
tmax=times*(lagStim+n*(period+delta))+100.+mystim.delay_begin
thetmax=tmax + 20.5 * tau_H
#initial conditions (one entry per point in the delay window)
a0=np.zeros((npts,n))
x0=0.1*np.ones((npts,n))
W0=[0.1*np.ones((n,n)) for i in range(npts)]
H0=[np.array([0.1 for i in range(n)]) for i in range(npts)]
#H0=[np.array([19.52158144,13.31267976,13.35448593,13.35612847,13.35535822,13.35451532,13.35366458,13.35281449,13.35258073,13.35252602]) for i in range(npts)]
#H0=[0.5*np.ones(n) for i in range(npts)]
theintegrator=myintegrator(delay,dt,n,thetmax)
theintegrator.fast=False
# Run the same simulation with linear and quadratic homeostasis.
adapt,u,connectivity,W01,myH,t=theintegrator.DDE_Norm_Miller(field,a0,x0,W0,H0)
W0=[0.1*np.ones((n,n)) for i in range(npts)]
H0=[0.1*np.ones(n) for i in range(npts)]
adaptQ,uQ,connectivityQ,W01Q,myHQ,tQ=theintegrator.DDE_Norm_Miller(fieldQuadratic,a0,x0,W0,H0)
print 'Linear'
print myH[-1]
print connectivity[-1]
print 'Quadratic'
print myHQ[-1]
print connectivityQ[-1]
#----------------------------------------------------------------------
#------------Synaptic Weights------------------------------------------
#----------------------------------------------------------------------
rc={'axes.labelsize': 32, 'font.size': 30, 'legend.fontsize': 25, 'axes.titlesize': 30}
plt.rcParams.update(**rc)
plt.rcParams['image.cmap'] = 'jet'
# Linear-homeostasis weights: diagonal (cyan), first/second sub- and
# super-diagonals (yellow/green and red/blue).
for i in range(10):
    plt.plot(t,connectivity[:,i,i],'c',lw=1)
for i in range(0,9):
    plt.plot(t,connectivity[:,i+1,i],'y',lw=1)
for i in range(8):
    plt.plot(t,connectivity[:,i+2,i],'g',lw=1)
for i in range(9):
    plt.plot(t,connectivity[:,i,i+1],'r',lw=1)
for i in range(8):
    plt.plot(t,connectivity[:,i,i+2],'b',lw=1)
# Quadratic-homeostasis weights, same color code, thick translucent lines.
for i in range(10):
    plt.plot(t,connectivityQ[:,i,i],'c',lw=5,alpha=0.05)
for i in range(0,9):
    plt.plot(t,connectivityQ[:,i+1,i],'y',lw=5,alpha=0.05)
for i in range(8):
    plt.plot(t,connectivityQ[:,i+2,i],'g',lw=5, alpha = 0.05)
for i in range(9):
    plt.plot(t,connectivityQ[:,i,i+1],'r',lw=5, alpha=0.05)
for i in range(8):
    plt.plot(t,connectivityQ[:,i,i+2],'b', lw=5, alpha=0.05)
plt.xlim([0,thetmax])
#plt.xticks([0,100000,200000],[0,100,200])
plt.xticks([0,100000,200000],[0,100,200])
plt.ylim([0,1.8])
plt.yticks([0.5,1.,1.5])
plt.xlabel('Time (s)')
plt.ylabel('Synaptic Weights')
plt.savefig('connectivitystimulationH.pdf',transparent=True, bbox_inches='tight')
# Same figure, zoomed to the stimulation window.
plt.xlim([0,tmax])
plt.xticks([0,10000,20000],[0,10,20])
plt.ylim([0,1.8])
plt.yticks([0.5,1.,1.5])
plt.xlabel('Time (s)')
plt.ylabel('Synaptic Weights')
plt.savefig('connectivitystimulationHzoom.pdf', transparent=True,bbox_inches='tight')
plt.close()
#------------------------------------------------------------------------
#-------------Homeostatic Variable --------------------------------------
#------------------------------------------------------------------------
fig = plt.figure(figsize=(7, 6))
ax1 = fig.add_subplot(111)
colormap = plt.cm.tab20
plt.gca().set_color_cycle([colormap(i) for i in np.linspace(0, 1.,n)])
# Solid: linear homeostasis; dashed: quadratic.
ax1.semilogy(t,myH[:],lw=5)
ax1.semilogy(tQ,myHQ[:],'--',lw=5)
#plt.ylim([0,5.])
#plt.yticks([1,2,3,4,5])
ax1.set_xlim([0,thetmax])
#ax1.set_xticks([0,100000,200000],[0,100,200])
ax1.set_xticks([0,100000,200000])
ax1.set_xticklabels([0,100,200])
ax1.set_xlabel('Time (s)')
ax1.set_ylabel('H')
plt.savefig('HdynamicsLearning.pdf',transparent=True, bbox_inches='tight')
plt.close()
# Zoomed version of the homeostatic dynamics.
fig = plt.figure(figsize=(7, 6))
ax2 = fig.add_subplot(111)
plt.gca().set_color_cycle([colormap(i) for i in np.linspace(0, 1.,n)])
ax2.plot(t,myH[:],lw=5)
ax2.plot(tQ,myHQ[:],'--',lw=5)
#ax2.set_ylim([0,1.2])
ax2.set_ylim([0,.1])
ax2.set_yticks([0.1])
ax2.set_xlim([0,tmax])
#ax2.set_xticks([0,20000,40000,60000,80000],[0,20,40,60,80])
ax2.set_xticks([0,10000,20000])
ax2.set_xticklabels([0,10,20])
ax2.set_xlabel('Time (s)')
ax2.set_ylabel('H')
plt.savefig('HdynamicsLearningzoom.pdf',transparent=True, bbox_inches='tight')
#plt.show()
plt.close()
print 'HdynamicsLearningzoom.pdf',' is saved'
#------------Synaptic Weights------------------------------------------
#----------------------------------------------------------------------
rc={'axes.labelsize': 32, 'font.size': 30, 'legend.fontsize': 25, 'axes.titlesize': 30}
plt.rcParams.update(**rc)
plt.rcParams['image.cmap'] = 'jet'
# Effective weights W*H (row-wise product of connectivity and H) over time.
connectivityM_l = np.array([np.transpose(np.multiply(np.transpose(connectivity[l,:,:]),myH[l,:])) for l in range(len(t))])
connectivityM_Q = np.array([np.transpose(np.multiply(np.transpose(connectivityQ[l,:,:]),myHQ[l,:])) for l in range(len(t))])
for i in range(10):
    plt.semilogy(t,connectivityM_l[:,i,i],'c',lw=1)
for i in range(0,9):
    plt.semilogy(t,connectivityM_l[:,i+1,i],'y',lw=1)
for i in range(8):
    plt.semilogy(t,connectivityM_l[:,i+2,i],'g',lw=1)
for i in range(9):
    plt.semilogy(t,connectivityM_l[:,i,i+1],'r',lw=1)
for i in range(8):
    plt.semilogy(t,connectivityM_l[:,i,i+2],'b',lw=1)
for i in range(10):
    plt.semilogy(t,connectivityM_Q[:,i,i],'c',lw=5,alpha=0.05)
for i in range(0,9):
    plt.semilogy(t,connectivityM_Q[:,i+1,i],'y',lw=5,alpha=0.05)
for i in range(8):
    plt.semilogy(t,connectivityM_Q[:,i+2,i],'g',lw=5, alpha = 0.05)
for i in range(9):
    plt.semilogy(t,connectivityM_Q[:,i,i+1],'r',lw=5, alpha=0.05)
for i in range(8):
    plt.semilogy(t,connectivityM_Q[:,i,i+2],'b', lw=5, alpha=0.05)
plt.xlim([0,thetmax])
#plt.xticks([0,100000,200000],[0,100,200])
plt.xticks([0,100000,200000],[0,100,200])
#plt.ylim([0,1.8])
#plt.yticks([0.5,1.,1.5])
plt.xlabel('Time (s)')
plt.ylabel('Synaptic Weights')
plt.savefig('connectivitystimulationWH.pdf',transparent=True, bbox_inches='tight')
plt.xlim([0,tmax])
plt.xticks([0,10000,20000],[0,10,20])
plt.ylim([0,1.8])
plt.yticks([0.5,1.,1.5])
plt.xlabel('Time (s)')
plt.ylabel('Synaptic Weights')
plt.savefig('connectivitystimulationWHzoom.pdf', transparent=True,bbox_inches='tight')
plt.close()
#--------------------------------------------------------------------------
#-------------Printing Connectivity Matrices-------------------------------
#--------------------------------------------------------------------------
# matrix connectivity and homeostatic after stimulation
linearW=np.transpose(np.multiply(np.transpose(connectivity[-1,:,:]),myH[-1,:]))
linearWsep=np.transpose(connectivity[-1,:,:])
QW=np.transpose(np.multiply(np.transpose(connectivityQ[-1,:,:]),myHQ[-1,:]))
QWsep=connectivityQ[-1,:,:]
# 2x2 grid: top row linear homeostasis (W alone, W*H), bottom row quadratic.
fig = plt.figure(figsize=(12, 12))
gs0 = gridspec.GridSpec(2, 2)
gs0.update(wspace=0.1,hspace=0.1)
#gs0.update(wspace=0.1,hspace=0.1)
ax3a = plt.subplot(gs0[0,0])
ax3b = plt.subplot(gs0[0,1])
ax3c = plt.subplot(gs0[1,0])
ax3d = plt.subplot(gs0[1,1])
vmax=wmax
#titles=['Linear'+r' $\matbb{W}$','Linear'+r' $\matbf{W}$','Modified'+r' $\matbb{W}$','Modified'+r' $\matbf{W}$']
plt3a = ax3a.matshow(linearWsep, vmin=0, vmax = vmax)
ax3a.set_xticks([])
ax3a.set_yticks([])
plt3b = ax3b.matshow(linearW, vmin=0,vmax=vmax)
ax3b.set_xticks([])
ax3b.set_yticks([])
#
plt3c = ax3c.matshow(QWsep, vmin=0, vmax=vmax)
ax3c.set_xticks([])
ax3c.set_yticks([])
plt3d = ax3d.matshow(QW, vmin=0, vmax=vmax)
ax3d.set_xticks([])
ax3d.set_yticks([])
# Shared colorbar for all four matrices.
sm = plt.cm.ScalarMappable(cmap=plt.cm.jet, norm=plt.Normalize(vmin=0., vmax=vmax))
# fake up the array of the scalar mappable. Urgh...
sm._A = []
cax = fig.add_axes([0.95, 0.11, 0.05, 0.77]) # [left, bottom, width, height]
myticks=[0.,vmax/2.,vmax]
cbar=fig.colorbar(sm, cax=cax,ticks=myticks,alpha=1.)
cbar.ax.tick_params(labelsize=45.)
plt.savefig('QW.pdf', bbox_inches='tight')
plt.close()
| gpl-2.0 |
toobaz/pandas | pandas/tests/util/test_assert_numpy_array_equal.py | 2 | 5404 | import numpy as np
import pytest
from pandas import Timestamp
from pandas.util.testing import assert_numpy_array_equal
def test_assert_numpy_array_equal_shape_mismatch():
    """Arrays of different shapes raise with a shape-diff message."""
    left = np.array([1, 2])
    right = np.array([3, 4, 5])
    expected = """numpy array are different
numpy array shapes are different
\\[left\\]: \\(2L*,\\)
\\[right\\]: \\(3L*,\\)"""

    with pytest.raises(AssertionError, match=expected):
        assert_numpy_array_equal(left, right)
def test_assert_numpy_array_equal_bad_type():
    """Non-ndarray inputs are rejected with an 'Expected type' message."""
    with pytest.raises(AssertionError, match="Expected type"):
        assert_numpy_array_equal(1, 2)
@pytest.mark.parametrize(
    "a,b,klass1,klass2",
    [(np.array([1]), 1, "ndarray", "int"), (1, np.array([1]), "int", "ndarray")],
)
def test_assert_numpy_array_equal_class_mismatch(a, b, klass1, klass2):
    """Mismatched argument classes are both named in the error message."""
    template = """numpy array are different
numpy array classes are different
\\[left\\]: {klass1}
\\[right\\]: {klass2}"""
    expected = template.format(klass1=klass1, klass2=klass2)

    with pytest.raises(AssertionError, match=expected):
        assert_numpy_array_equal(a, b)
def test_assert_numpy_array_equal_value_mismatch1():
    """NaNs in differing positions count as value mismatches."""
    left = np.array([np.nan, 2, 3])
    right = np.array([1, np.nan, 3])
    expected = """numpy array are different
numpy array values are different \\(66\\.66667 %\\)
\\[left\\]: \\[nan, 2\\.0, 3\\.0\\]
\\[right\\]: \\[1\\.0, nan, 3\\.0\\]"""

    with pytest.raises(AssertionError, match=expected):
        assert_numpy_array_equal(left, right)
def test_assert_numpy_array_equal_value_mismatch2():
    """A single differing element in a 1-D array is reported as 50%."""
    left = np.array([1, 2])
    right = np.array([1, 3])
    expected = """numpy array are different
numpy array values are different \\(50\\.0 %\\)
\\[left\\]: \\[1, 2\\]
\\[right\\]: \\[1, 3\\]"""

    with pytest.raises(AssertionError, match=expected):
        assert_numpy_array_equal(left, right)
def test_assert_numpy_array_equal_value_mismatch3():
    """A single differing cell in a 3x2 array is reported as 1/6."""
    left = np.array([[1, 2], [3, 4], [5, 6]])
    right = np.array([[1, 3], [3, 4], [5, 6]])
    expected = """numpy array are different
numpy array values are different \\(16\\.66667 %\\)
\\[left\\]: \\[\\[1, 2\\], \\[3, 4\\], \\[5, 6\\]\\]
\\[right\\]: \\[\\[1, 3\\], \\[3, 4\\], \\[5, 6\\]\\]"""

    with pytest.raises(AssertionError, match=expected):
        assert_numpy_array_equal(left, right)
def test_assert_numpy_array_equal_value_mismatch4():
    """Nearly-equal floats still count as a strict value mismatch."""
    left = np.array([1.1, 2.000001])
    right = np.array([1.1, 2.0])
    expected = """numpy array are different
numpy array values are different \\(50\\.0 %\\)
\\[left\\]: \\[1\\.1, 2\\.000001\\]
\\[right\\]: \\[1\\.1, 2.0\\]"""

    with pytest.raises(AssertionError, match=expected):
        assert_numpy_array_equal(left, right)
def test_assert_numpy_array_equal_value_mismatch5():
    """2-D mismatch percentage (duplicate scenario of mismatch3)."""
    left = np.array([[1, 2], [3, 4], [5, 6]])
    right = np.array([[1, 3], [3, 4], [5, 6]])
    expected = """numpy array are different
numpy array values are different \\(16\\.66667 %\\)
\\[left\\]: \\[\\[1, 2\\], \\[3, 4\\], \\[5, 6\\]\\]
\\[right\\]: \\[\\[1, 3\\], \\[3, 4\\], \\[5, 6\\]\\]"""

    with pytest.raises(AssertionError, match=expected):
        assert_numpy_array_equal(left, right)
def test_assert_numpy_array_equal_value_mismatch6():
    """One differing cell in a 2x2 array is reported as 25%."""
    left = np.array([[1, 2], [3, 4]])
    right = np.array([[1, 3], [3, 4]])
    expected = """numpy array are different
numpy array values are different \\(25\\.0 %\\)
\\[left\\]: \\[\\[1, 2\\], \\[3, 4\\]\\]
\\[right\\]: \\[\\[1, 3\\], \\[3, 4\\]\\]"""

    with pytest.raises(AssertionError, match=expected):
        assert_numpy_array_equal(left, right)
def test_assert_numpy_array_equal_shape_mismatch_override():
    """The ``obj`` argument replaces the default object name in messages."""
    left = np.array([1, 2])
    right = np.array([3, 4, 5])
    expected = """Index are different
Index shapes are different
\\[left\\]: \\(2L*,\\)
\\[right\\]: \\(3L*,\\)"""

    with pytest.raises(AssertionError, match=expected):
        assert_numpy_array_equal(left, right, obj="Index")
def test_numpy_array_equal_unicode():
    # see gh-20503
    #
    # Differing unicode elements must raise a value-mismatch error rather
    # than a comparison failure.
    left = np.array(["á", "à", "ä"])
    right = np.array(["á", "à", "å"])
    expected = """numpy array are different
numpy array values are different \\(33\\.33333 %\\)
\\[left\\]: \\[á, à, ä\\]
\\[right\\]: \\[á, à, å\\]"""

    with pytest.raises(AssertionError, match=expected):
        assert_numpy_array_equal(left, right)
def test_numpy_array_equal_object():
    """Object arrays of Timestamps are compared element-wise."""
    left = np.array([Timestamp("2011-01-01"), Timestamp("2011-01-01")])
    right = np.array([Timestamp("2011-01-01"), Timestamp("2011-01-02")])
    expected = """numpy array are different
numpy array values are different \\(50\\.0 %\\)
\\[left\\]: \\[2011-01-01 00:00:00, 2011-01-01 00:00:00\\]
\\[right\\]: \\[2011-01-01 00:00:00, 2011-01-02 00:00:00\\]"""

    with pytest.raises(AssertionError, match=expected):
        assert_numpy_array_equal(left, right)
@pytest.mark.parametrize("other_type", ["same", "copy"])
@pytest.mark.parametrize("check_same", ["same", "copy"])
def test_numpy_array_equal_copy_flag(other_type, check_same):
    """``check_same`` must flag view/copy mismatches between the arrays."""
    a = np.array([1, 2, 3])
    other = a.view() if other_type == "same" else a.copy()

    if check_same == other_type:
        # Expectation matches reality: no error.
        assert_numpy_array_equal(a, other, check_same=check_same)
    else:
        if check_same == "same":
            msg = r"array\(\[1, 2, 3\]\) is not array\(\[1, 2, 3\]\)"
        else:
            msg = r"array\(\[1, 2, 3\]\) is array\(\[1, 2, 3\]\)"
        with pytest.raises(AssertionError, match=msg):
            assert_numpy_array_equal(a, other, check_same=check_same)
| bsd-3-clause |
colinbrislawn/scikit-bio | skbio/draw/tests/test_distributions.py | 7 | 27970 | # ----------------------------------------------------------------------------
# Copyright (c) 2013--, scikit-bio development team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file COPYING.txt, distributed with this software.
# ----------------------------------------------------------------------------
from __future__ import absolute_import, division, print_function
import warnings
from unittest import TestCase, main
import numpy as np
import numpy.testing as npt
import matplotlib.pyplot as plt
from skbio.draw import boxplots, grouped_distributions
from skbio.draw._distributions import (
_calc_data_point_locations, _calc_data_point_ticks, _color_box_plot,
_create_legend, _get_distribution_markers, _is_single_matplotlib_color,
_plot_bar_data, _plot_box_data, _plot_scatter_data, _set_axes_options,
_set_figure_size, _validate_input, _validate_x_values)
class DistributionsTests(TestCase):
    """Tests for ``skbio.draw`` distribution plotting.

    Covers the public ``boxplots`` and ``grouped_distributions`` functions
    and the private helpers imported from ``skbio.draw._distributions``.
    ``setUp`` builds a range of fixtures, from deliberately invalid inputs
    (None, empty, ragged) to typical valid bar/scatter/box data.
    """

    def setUp(self):
        # Test null data list.
        self.Null = None
        # Test empty data list.
        self.Empty = []
        # Test nested empty data list.
        self.EmptyNested = [[]]
        # Test nested empty data list (for bar/scatter plots).
        self.EmptyDeeplyNested = [[[]]]
        # Test invalid number of samples in data list (for bar/scatter plots).
        self.InvalidNumSamples = [[[1, 2, 3, 4, 5]],
                                  [[4, 5, 6, 7, 8], [2, 3, 2]],
                                  [[4, 7, 10, 33, 32, 6, 7, 8]]]
        # Test valid data with three samples and four data points
        # (for bar/scatter plots).
        self.ValidTypicalData = [[[1.0, 2, 3.5, 5], [2, 3, 5, 6], [2, 3, 8]],
                                 [[4, 7, 8], [8, 9, 10, 11], [9.0, 4, 1, 1]],
                                 [[4, 33, 32, 6, 8], [5, 4, 8, 13], [1, 1, 2]],
                                 [[2, 2, 2, 2], [3, 9, 8], [2, 1, 6, 7, 4, 5]]]
        # Test valid data with one sample (for bar/scatter plots).
        self.ValidSingleSampleData = [[[1, 2, 3, 4, 5]],
                                      [[4, 5, 6, 7, 8]],
                                      [[4, 7, 10, 33, 32, 6, 7, 8]]]
        # Test typical data to be plotted by the boxplot function.
        self.ValidTypicalBoxData = [[3.4, 10, 11.67, 12.0, 2, 2, 99.99],
                                    [2.3, 4, 5, 88, 9, 10, 11, 1, 0, 3, -8],
                                    [2, 9, 7, 5, 6]]

    def tearDown(self):
        # We get a warning from mpl if we don't clean up our figures.
        plt.close('all')

    # --- _validate_input: rejects malformed data structures -------------

    def test_validate_input_null(self):
        with npt.assert_raises(ValueError):
            _validate_input(self.Null, None, None, None)

    def test_validate_input_empty(self):
        with npt.assert_raises(ValueError):
            _validate_input(self.Empty, None, None, None)

    def test_validate_input_empty_nested(self):
        with npt.assert_raises(ValueError):
            _validate_input(self.EmptyNested, None, None, None)

    def test_validate_input_empty_deeply_nested(self):
        num_points, num_samples = _validate_input(self.EmptyDeeplyNested,
                                                  None, None, None)
        self.assertEqual(num_points, 1)
        self.assertEqual(num_samples, 1)

    def test_validate_input_empty_point(self):
        with npt.assert_raises(ValueError):
            _validate_input([[[1, 2, 3], [4, 5]], []], None, None, None)

    def test_validate_input_invalid_num_samples(self):
        with npt.assert_raises(ValueError):
            _validate_input(self.InvalidNumSamples, None, None, None)

    def test_validate_input_invalid_data_point_names(self):
        with npt.assert_raises(ValueError):
            _validate_input(self.ValidSingleSampleData, None, ["T0", "T1"],
                            None)

    def test_validate_input_invalid_sample_names(self):
        with npt.assert_raises(ValueError):
            _validate_input(self.ValidSingleSampleData, None, None,
                            ["Men", "Women"])

    def test_validate_input_all_valid_input(self):
        # Returns (num_points, num_samples) on success.
        self.assertEqual(_validate_input(self.ValidTypicalData, [1, 3, 4, 8],
                                         ["T0", "T1", "T2", "T3"],
                                         ["Infants", "Children", "Teens"]),
                         (4, 3))

    def test_validate_x_values_invalid_x_values(self):
        with npt.assert_raises(ValueError):
            _validate_x_values([1, 2, 3, 4], ["T0", "T1", "T2"],
                               len(self.ValidSingleSampleData))

    def test_validate_x_values_invalid_x_tick_labels(self):
        with npt.assert_raises(ValueError):
            _validate_x_values(None, ["T0"], len(self.ValidSingleSampleData))

    def test_validate_x_values_nonnumber_x_values(self):
        with npt.assert_raises(ValueError):
            _validate_x_values(["foo", 2, 3], None,
                               len(self.ValidSingleSampleData))

    def test_validate_x_values_valid_x_values(self):
        _validate_x_values([1, 2.0, 3], None, 3)

    # --- _get_distribution_markers: colour/symbol cycling ---------------

    def test_get_distribution_markers_null_marker_list(self):
        self.assertEqual(_get_distribution_markers('colors', None, 5),
                         ['b', 'g', 'r', 'c', 'm'])

    def test_get_distribution_markers_empty_marker_list(self):
        self.assertEqual(_get_distribution_markers('colors', None, 4),
                         ['b', 'g', 'r', 'c'])

    def test_get_distribution_markers_insufficient_markers(self):
        # Markers wrap around (with a RuntimeWarning) when more are
        # requested than are available.
        expected = ['b', 'g', 'r', 'c', 'm', 'y', 'w', 'b', 'g', 'r']
        # adapted from SO example here: http://stackoverflow.com/a/3892301
        with warnings.catch_warnings(record=True) as w:
            warnings.simplefilter("always")
            actual = _get_distribution_markers('colors', None, 10)
            self.assertTrue(issubclass(w[-1].category, RuntimeWarning))
        self.assertEqual(actual, expected)

        expected = ['^', '>', '<', '^', '>']
        with warnings.catch_warnings(record=True) as w:
            warnings.simplefilter("always")
            actual = _get_distribution_markers('symbols', ['^', '>', '<'], 5)
            self.assertTrue(issubclass(w[-1].category, RuntimeWarning))
        self.assertEqual(actual, expected)

    def test_get_distribution_markers_bad_marker_type(self):
        with npt.assert_raises(ValueError):
            _get_distribution_markers('shapes', [], 3)

    def test_get_distribution_markers_zero_markers(self):
        self.assertEqual(_get_distribution_markers('symbols', None, 0), [])
        self.assertEqual(_get_distribution_markers('symbols', ['^'], 0), [])

    def test_get_distribution_markers_negative_num_markers(self):
        with npt.assert_raises(ValueError):
            _get_distribution_markers('symbols', [], -1)

    # --- private per-distribution plotting helpers ----------------------

    def test_plot_bar_data(self):
        fig, ax = plt.subplots()
        result = _plot_bar_data(ax, [1, 2, 3], 'red', 0.5, 3.75, 1.5, 'stdv')
        self.assertEqual(result[0].__class__.__name__, "Rectangle")
        self.assertEqual(len(result), 1)
        self.assertAlmostEqual(result[0].get_width(), 0.5)
        self.assertAlmostEqual(result[0].get_facecolor(), (1.0, 0.0, 0.0, 1.0))
        self.assertAlmostEqual(result[0].get_height(), 2.0)

        fig, ax = plt.subplots()
        result = _plot_bar_data(ax, [1, 2, 3], 'red', 0.5, 3.75, 1.5, 'sem')
        self.assertEqual(result[0].__class__.__name__, "Rectangle")
        self.assertEqual(len(result), 1)
        self.assertAlmostEqual(result[0].get_width(), 0.5)
        self.assertAlmostEqual(result[0].get_facecolor(), (1.0, 0.0, 0.0, 1.0))
        self.assertAlmostEqual(result[0].get_height(), 2.0)

    def test_plot_bar_data_bad_error_bar_type(self):
        fig, ax = plt.subplots()
        with npt.assert_raises(ValueError):
            _plot_bar_data(ax, [1, 2, 3], 'red', 0.5, 3.75, 1.5, 'var')

    def test_plot_bar_data_empty(self):
        # Empty distributions are silently skipped (None returned).
        fig, ax = plt.subplots()
        result = _plot_bar_data(ax, [], 'red', 0.5, 3.75, 1.5, 'stdv')
        self.assertTrue(result is None)

        fig, ax = plt.subplots()
        result = _plot_bar_data(ax, [], 'red', 0.5, 3.75, 1.5, 'sem')
        self.assertTrue(result is None)

    def test_plot_scatter_data(self):
        fig, ax = plt.subplots()
        result = _plot_scatter_data(ax, [1, 2, 3], '^', 0.77, 1, 1.5, 'stdv')
        self.assertEqual(result.get_sizes(), 20)

    def test_plot_scatter_data_empty(self):
        fig, ax = plt.subplots()
        result = _plot_scatter_data(ax, [], '^', 0.77, 1, 1.5, 'stdv')
        self.assertTrue(result is None)

    def test_plot_box_data(self):
        fig, ax = plt.subplots()
        result = _plot_box_data(ax, [0, 0, 7, 8, -3, 44], 'blue', 0.33, 55,
                                1.5, 'stdv')
        self.assertEqual(result.__class__.__name__, "dict")
        self.assertEqual(len(result['boxes']), 1)
        self.assertEqual(len(result['medians']), 1)
        self.assertEqual(len(result['whiskers']), 2)
        # mpl < 1.4.0 creates two Line2D instances, mpl 1.4.0 creates one,
        # though the resulting plot looks identical between the two versions.
        # see:
        # https://github.com/pydata/pandas/issues/8382#issuecomment-56840974
        # https://github.com/matplotlib/matplotlib/issues/3544
        self.assertTrue(len(result['fliers']) == 1 or
                        len(result['fliers']) == 2)
        self.assertEqual(len(result['caps']), 2)

    def test_plot_box_data_empty(self):
        fig, ax = plt.subplots()
        result = _plot_box_data(ax, [], 'blue', 0.33, 55, 1.5, 'stdv')
        self.assertTrue(result is None)

    # --- data point location / tick computation -------------------------

    def test_calc_data_point_locations_invalid_x_values(self):
        with npt.assert_raises(ValueError):
            _calc_data_point_locations(3, [1, 10.5])

    def test_calc_data_point_locations_default_spacing(self):
        locs = _calc_data_point_locations(4)
        np.testing.assert_allclose(locs, [1, 2, 3, 4])

    def test_calc_data_point_locations_custom_spacing(self):
        # Scaling down from 3..12 to 1..4.
        locs = _calc_data_point_locations(4, [3, 4, 10, 12])
        np.testing.assert_allclose(locs,
                                   np.array([1, 1.33333333, 3.33333333, 4]))

        # Sorted order shouldn't affect scaling.
        locs = _calc_data_point_locations(4, [4, 3, 12, 10])
        np.testing.assert_allclose(locs,
                                   np.array([1.33333333, 1, 4, 3.33333333]))

        # Scaling up from 0.001..0.87 to 1..3.
        locs = _calc_data_point_locations(3, [0.001, 0.2543, 0.87])
        np.testing.assert_allclose(locs,
                                   np.array([1, 1.58296893, 3]))

    def test_calc_data_point_ticks(self):
        ticks = _calc_data_point_ticks(np.array([1, 5, 9, 11]), 1, 0.5, False)
        np.testing.assert_allclose(ticks, [1.25, 5.25, 9.25, 11.25])

        ticks = _calc_data_point_ticks(np.array([0]), 3, 0.5, False)
        np.testing.assert_allclose(ticks, [0.75])

    # --- axis decoration helpers ----------------------------------------

    def test_set_axes_options(self):
        fig, ax = plt.subplots()
        _set_axes_options(ax, "Plot Title", "x-axis label", "y-axis label",
                          x_tick_labels=["T0", "T1"])
        self.assertEqual(ax.get_title(), "Plot Title")
        self.assertEqual(ax.get_ylabel(), "y-axis label")
        self.assertEqual(ax.get_xticklabels()[0].get_text(), "T0")
        self.assertEqual(ax.get_xticklabels()[1].get_text(), "T1")

    def test_set_axes_options_ylim(self):
        fig, ax = plt.subplots()
        _set_axes_options(ax, "Plot Title", "x-axis label", "y-axis label",
                          x_tick_labels=["T0", "T1", "T2"], y_min=0, y_max=1)
        self.assertEqual(ax.get_title(), "Plot Title")
        self.assertEqual(ax.get_ylabel(), "y-axis label")
        self.assertEqual(ax.get_xticklabels()[0].get_text(), "T0")
        self.assertEqual(ax.get_xticklabels()[1].get_text(), "T1")
        self.assertEqual(ax.get_ylim(), (0.0, 1.0))

    def test_set_axes_options_x_values_as_tick_labels(self):
        fig, ax = plt.subplots()
        _set_axes_options(ax, "Plot Title", "x-axis label", "y-axis label",
                          x_values=[42, 45, 800])
        self.assertEqual(ax.get_title(), "Plot Title")
        self.assertEqual(ax.get_ylabel(), "y-axis label")
        self.assertEqual(ax.get_xticklabels()[0].get_text(), '42')
        self.assertEqual(ax.get_xticklabels()[1].get_text(), '45')
        self.assertEqual(ax.get_xticklabels()[2].get_text(), '800')

    def test_set_axes_options_bad_ylim(self):
        fig, ax = plt.subplots()
        with npt.assert_raises(ValueError):
            _set_axes_options(ax, "Plot Title", "x-axis label", "y-axis label",
                              x_tick_labels=["T0", "T1", "T2"], y_min='car',
                              y_max=30)

    def test_set_axes_options_invalid_x_tick_labels_orientation(self):
        fig, ax = plt.subplots()
        with npt.assert_raises(ValueError):
            _set_axes_options(ax, "Plot Title", "x-axis label", "y-axis label",
                              x_tick_labels=["T0", "T1"],
                              x_tick_labels_orientation='brofist')

    def test_create_legend(self):
        fig, ax = plt.subplots()
        _create_legend(ax, ['b', 'r'], ['dist1', 'dist2'], 'colors')
        self.assertEqual(len(ax.get_legend().get_texts()), 2)

        fig, ax = plt.subplots()
        _create_legend(ax, ['^', '<', '>'], ['dist1', 'dist2', 'dist3'],
                       'symbols')
        self.assertEqual(len(ax.get_legend().get_texts()), 3)

    def test_create_legend_invalid_input(self):
        fig, ax = plt.subplots()
        with npt.assert_raises(ValueError):
            _create_legend(ax, ['^', '<', '>'], ['dist1', 'dist2'], 'symbols')
        with npt.assert_raises(ValueError):
            _create_legend(ax, ['^', '<', '>'], ['dist1', 'dist2', 'dist3'],
                           'foo')

    # --- grouped_distributions: public grouped plot API -----------------

    def test_grouped_distributions_bar(self):
        fig = grouped_distributions('bar', self.ValidTypicalData,
                                    [1, 4, 10, 11], ["T0", "T1", "T2", "T3"],
                                    ["Infants", "Children", "Teens"],
                                    ['b', 'r', 'g'], "x-axis label",
                                    "y-axis label", "Test")
        ax = fig.get_axes()[0]
        self.assertEqual(ax.get_title(), "Test")
        self.assertEqual(ax.get_xlabel(), "x-axis label")
        self.assertEqual(ax.get_ylabel(), "y-axis label")
        self.assertEqual(len(ax.get_xticklabels()), 4)
        np.testing.assert_allclose(ax.get_xticks(),
                                   [1.1125, 2.0125, 3.8125, 4.1125])

    def test_grouped_distributions_insufficient_colors(self):
        args = ('bar', self.ValidTypicalData, [1, 4, 10, 11],
                ["T0", "T1", "T2", "T3"], ["Infants", "Children", "Teens"],
                ['b', 'r'], "x-axis label", "y-axis label", "Test")
        # adapted from SO example here: http://stackoverflow.com/a/3892301
        with warnings.catch_warnings(record=True) as w:
            warnings.simplefilter("always")
            grouped_distributions(*args)
            expected_warning_raised = False
            for warning in w:
                if issubclass(warning.category, RuntimeWarning):
                    expected_warning_raised = True
                    break
            self.assertTrue(expected_warning_raised)

    def test_grouped_distributions_scatter(self):
        fig = grouped_distributions('scatter', self.ValidTypicalData,
                                    [1, 4, 10, 11], ["T0", "T1", "T2", "T3"],
                                    ["Infants", "Children", "Teens"],
                                    ['^', '>', '<'], "x-axis label",
                                    "y-axis label", "Test")
        ax = fig.get_axes()[0]
        self.assertEqual(ax.get_title(), "Test")
        self.assertEqual(ax.get_xlabel(), "x-axis label")
        self.assertEqual(ax.get_ylabel(), "y-axis label")
        self.assertEqual(len(ax.get_xticklabels()), 4)
        np.testing.assert_allclose(ax.get_xticks(),
                                   [1.075, 1.975, 3.775, 4.075])

    def test_grouped_distributions_insufficient_symbols(self):
        args = ('scatter', self.ValidTypicalData, [1, 4, 10, 11],
                ["T0", "T1", "T2", "T3"], ["Infants", "Children", "Teens"],
                ['^'], "x-axis label", "y-axis label", "Test")
        # adapted from SO example here: http://stackoverflow.com/a/3892301
        with warnings.catch_warnings(record=True) as w:
            warnings.simplefilter("always")
            grouped_distributions(*args)
            expected_warning_raised = False
            for warning in w:
                if issubclass(warning.category, RuntimeWarning):
                    expected_warning_raised = True
                    break
            self.assertTrue(expected_warning_raised)

    def test_grouped_distributions_empty_marker_list(self):
        grouped_distributions('scatter', self.ValidTypicalData,
                              [1, 4, 10, 11], ["T0", "T1", "T2", "T3"],
                              ["Infants", "Children", "Teens"], [],
                              "x-axis label", "y-axis label", "Test")

    def test_grouped_distributions_box(self):
        fig = grouped_distributions('box', self.ValidTypicalData,
                                    [1, 4, 10, 11], ["T0", "T1", "T2", "T3"],
                                    ["Infants", "Children", "Teens"],
                                    ['b', 'g', 'y'], "x-axis label",
                                    "y-axis label", "Test")
        ax = fig.get_axes()[0]
        self.assertEqual(ax.get_title(), "Test")
        self.assertEqual(ax.get_xlabel(), "x-axis label")
        self.assertEqual(ax.get_ylabel(), "y-axis label")
        self.assertEqual(len(ax.get_xticklabels()), 4)
        np.testing.assert_allclose(ax.get_xticks(),
                                   [1.075, 1.975, 3.775, 4.075])

    def test_grouped_distributions_error(self):
        # 'pie' is not a supported plot type.
        with npt.assert_raises(ValueError):
            grouped_distributions('pie', self.ValidTypicalData,
                                  [1, 4, 10, 11], ["T0", "T1", "T2", "T3"],
                                  ["Infants", "Children", "Teens"],
                                  ['b', 'g', 'y'],
                                  "x-axis label", "y-axis label", "Test")

    def test_grouped_distributions_negative_distribution_width(self):
        args = ('box', self.ValidTypicalData, [1, 4, 10, 11],
                ["T0", "T1", "T2", "T3"], ["Infants", "Children", "Teens"],
                ['b', 'g', 'y'], "x-axis label", "y-axis label", "Test")

        with self.assertRaises(ValueError):
            grouped_distributions(*args, distribution_width=0)

        with self.assertRaises(ValueError):
            grouped_distributions(*args, distribution_width=-42)

    # --- boxplots: public boxplot API -----------------------------------

    def test_boxplots(self):
        fig = boxplots(self.ValidTypicalBoxData, [1, 4, 10],
                       ["Data 1", "Data 2", "Data 3"], "Test", "x-axis label",
                       "y-axis label",
                       legend=(('blue', 'red'), ('foo', 'bar')))
        ax = fig.get_axes()[0]
        self.assertEqual(ax.get_title(), "Test")
        self.assertEqual(ax.get_xlabel(), "x-axis label")
        self.assertEqual(ax.get_ylabel(), "y-axis label")
        self.assertEqual(len(ax.get_xticklabels()), 3)
        self.assertTrue(np.array_equal(ax.get_xticks(), [1, 4, 10]))

    def test_boxplots_empty_distributions(self):
        fig = boxplots([[1, 2, 3], [], [4, 5, 6]], [1, 4, 10],
                       ["Data 1", "Data 2", "Data 3"], "Test", "x-axis label",
                       "y-axis label")
        ax = fig.get_axes()[0]
        self.assertEqual(ax.get_title(), "Test")
        self.assertEqual(ax.get_xlabel(), "x-axis label")
        self.assertEqual(ax.get_ylabel(), "y-axis label")
        self.assertEqual(len(ax.get_xticklabels()), 3)
        self.assertTrue(np.array_equal(ax.get_xticks(), [1, 4, 10]))

        # second distribution (empty) should have nans since it is hidden.
        # boxplots in mpl < 1.4.0 have 8 lines per boxplot, while mpl 1.4.0 has
        # 7. in either case, the line at index 8 should have a nan for its y
        # value
        lines = ax.get_lines()
        self.assertTrue(np.isnan(lines[8].get_xydata()[0][1]))
        # line in first distribution should *not* have nan for its y value
        self.assertFalse(np.isnan(lines[0].get_xydata()[0][1]))

        # All distributions are empty.
        fig = boxplots([[], [], []], [1, 4, 10],
                       ["Data 1", "Data 2", "Data 3"], "Test", "x-axis label",
                       "y-axis label")
        ax = fig.get_axes()[0]
        self.assertEqual(ax.get_title(), "Test")
        self.assertEqual(ax.get_xlabel(), "x-axis label")
        self.assertEqual(ax.get_ylabel(), "y-axis label")
        self.assertEqual(len(ax.get_xticklabels()), 3)
        self.assertTrue(np.array_equal(ax.get_xticks(), [1, 4, 10]))

        lines = ax.get_lines()
        self.assertTrue(np.isnan(lines[0].get_xydata()[0][1]))
        self.assertTrue(np.isnan(lines[8].get_xydata()[0][1]))
        self.assertTrue(np.isnan(lines[16].get_xydata()[0][1]))

    def test_boxplots_box_colors(self):
        # Coloring works with all empty distributions.
        fig = boxplots([[], [], []], box_colors=['blue', 'red', 'yellow'])
        ax = fig.get_axes()[0]
        self.assertEqual(len(ax.get_xticklabels()), 3)

        # patch colors should match what we specified
        self.assertEqual(ax.patches[0].get_facecolor(), (0.0, 0.0, 1.0, 1.0))
        self.assertEqual(ax.patches[1].get_facecolor(), (1.0, 0.0, 0.0, 1.0))
        self.assertEqual(ax.patches[2].get_facecolor(), (1.0, 1.0, 0.0, 1.0))

        # patch location should include at least one nan since the distribution
        # is empty, and thus hidden
        for patch in ax.patches:
            self.assertTrue(np.isnan(patch.xy[0][1]))

        fig = boxplots([[], [], []], box_colors='pink')
        ax = fig.get_axes()[0]
        self.assertEqual(len(ax.get_xticklabels()), 3)

        for patch in ax.patches:
            npt.assert_almost_equal(
                patch.get_facecolor(),
                (1.0, 0.7529411764705882, 0.796078431372549, 1.0))
            self.assertTrue(np.isnan(patch.xy[0][1]))

        # Coloring works with some empty distributions.
        fig = boxplots([[], [1, 2, 3.5], []],
                       box_colors=['blue', 'red', 'yellow'])
        ax = fig.get_axes()[0]
        self.assertEqual(len(ax.get_xticklabels()), 3)

        self.assertEqual(ax.patches[0].get_facecolor(), (0.0, 0.0, 1.0, 1.0))
        self.assertEqual(ax.patches[1].get_facecolor(), (1.0, 0.0, 0.0, 1.0))
        self.assertEqual(ax.patches[2].get_facecolor(), (1.0, 1.0, 0.0, 1.0))

        self.assertTrue(np.isnan(ax.patches[0].xy[0][1]))
        self.assertFalse(np.isnan(ax.patches[1].xy[0][1]))
        self.assertTrue(np.isnan(ax.patches[2].xy[0][1]))

    def test_boxplots_invalid_input(self):
        # Non-numeric entries in distribution.
        with npt.assert_raises(ValueError):
            boxplots([[1, 'foo', 3]])

        # Number of colors doesn't match number of distributions.
        with npt.assert_raises(ValueError):
            boxplots([[1, 2, 3], [], [4, 5, 6]], box_colors=['blue', 'red'])

        # Invalid legend.
        with npt.assert_raises(ValueError):
            boxplots([[1, 2, 3]], legend=('foo', 'bar', 'baz'))

    def test_color_box_plot(self):
        fig, ax = plt.subplots()
        box_plot = plt.boxplot(self.ValidTypicalBoxData)
        _color_box_plot(ax, box_plot, ['blue', 'w', (1, 1, 0.9)])

        # Some colors are None.
        fig, ax = plt.subplots()
        box_plot = plt.boxplot(self.ValidTypicalBoxData)
        _color_box_plot(ax, box_plot, ['blue', None, (1, 1, 0.9)])

        # All colors are None.
        fig, ax = plt.subplots()
        box_plot = plt.boxplot(self.ValidTypicalBoxData)
        _color_box_plot(ax, box_plot, [None, None, None])

    def test_color_box_plot_invalid_input(self):
        # Invalid color.
        fig, ax = plt.subplots()
        box_plot = plt.boxplot(self.ValidTypicalBoxData)
        with npt.assert_raises(ValueError):
            _color_box_plot(ax, box_plot, ['red', 'foobarbaz', 'blue'])

        # Wrong number of colors.
        fig, ax = plt.subplots()
        box_plot = plt.boxplot(self.ValidTypicalBoxData)
        with npt.assert_raises(ValueError):
            _color_box_plot(ax, box_plot, ['blue', (1, 1, 0.9)])

    def test_is_single_matplotlib_color(self):
        self.assertTrue(_is_single_matplotlib_color('w'))
        self.assertTrue(_is_single_matplotlib_color('white'))
        self.assertTrue(_is_single_matplotlib_color([1, 1, 1]))
        self.assertTrue(_is_single_matplotlib_color([1, 1, 1, 1]))
        self.assertTrue(_is_single_matplotlib_color((1, 1, 1)))
        self.assertTrue(_is_single_matplotlib_color((1, 1, 1, 1)))
        self.assertTrue(_is_single_matplotlib_color((1.0, 1.0, 1.0, 1.0)))
        self.assertTrue(_is_single_matplotlib_color((1.0, 1, 1.0)))
        self.assertTrue(_is_single_matplotlib_color((2.0, 1, 1.0)))

        self.assertFalse(_is_single_matplotlib_color(['w', 'r']))
        self.assertFalse(_is_single_matplotlib_color(['w']))
        self.assertFalse(_is_single_matplotlib_color(('w',)))
        self.assertFalse(_is_single_matplotlib_color(((1.0, 1.0, 1),)))
        self.assertFalse(_is_single_matplotlib_color(((1.0, 1.0, 1),
                                                      (0.9, 0.9))))

    def test_set_figure_size(self):
        fig, ax = plt.subplots()
        _set_axes_options(ax, 'foo', 'x_foo', 'y_foo',
                          x_tick_labels=['foofoofoo', 'barbarbar'],
                          x_tick_labels_orientation='vertical')
        _set_figure_size(fig, 3, 4)
        self.assertTrue(np.array_equal(fig.get_size_inches(), (3, 4)))

    def test_set_figure_size_defaults(self):
        fig, ax = plt.subplots()
        _set_axes_options(ax, 'foo', 'x_foo', 'y_foo',
                          x_tick_labels=['foofoofoo', 'barbarbar'],
                          x_tick_labels_orientation='vertical')
        orig_fig_size = fig.get_size_inches()
        _set_figure_size(fig)
        self.assertTrue(np.array_equal(fig.get_size_inches(), orig_fig_size))

    def test_set_figure_size_invalid(self):
        # Non-positive dimensions leave the figure size untouched.
        fig, ax = plt.subplots()
        _set_axes_options(ax, 'foo', 'x_foo', 'y_foo',
                          x_tick_labels=['foofoofoo', 'barbarbar'],
                          x_tick_labels_orientation='vertical')
        orig_fig_size = fig.get_size_inches()
        _set_figure_size(fig, -1, 0)
        self.assertTrue(np.array_equal(fig.get_size_inches(), orig_fig_size))

    def test_set_figure_size_long_labels(self):
        fig, ax = plt.subplots()
        _set_axes_options(ax, 'foo', 'x_foo', 'y_foo',
                          x_tick_labels=['foofoofooooooooooooooooooooooooo'
                                         'oooooooooooooooooooooooooooooooo'
                                         'oooooooooooooooooooooooooooooooo'
                                         'oooo', 'barbarbar'],
                          x_tick_labels_orientation='vertical')
        # adapted from SO example here: http://stackoverflow.com/a/3892301
        with warnings.catch_warnings(record=True) as w:
            warnings.simplefilter("always")
            _set_figure_size(fig, 3, 3)
            self.assertTrue(issubclass(w[-1].category, RuntimeWarning))
        npt.assert_array_equal(fig.get_size_inches(), (3, 3))
if __name__ == '__main__':
    # Run the unittest test runner when this module is executed directly.
    main()
| bsd-3-clause |
karvenka/sp17-i524 | project/S17-IO-3012/code/bin/benchmark_version_mapreduce.py | 19 | 4617 | import matplotlib.pyplot as plt
import sys
import pandas as pd
def get_parm():
    """retrieves mandatory parameter to program

    Returns the first command-line argument (the benchmark CSV file
    name); prints a usage message and exits when it is missing.

    @param: none
    @type: n/a
    """
    try:
        return sys.argv[1]
    # Narrowed from a bare except: only a missing argument (IndexError)
    # can occur here, and a bare except would also swallow SystemExit and
    # KeyboardInterrupt.
    except IndexError:
        print('Must enter file name as parameter')
        exit()
def read_file(filename):
    """reads a file into a pandas dataframe

    Prints an error (including the underlying reason) and exits when the
    file cannot be read.

    @param: filename The name of the file to read
    @type: string
    """
    try:
        return pd.read_csv(filename)
    # Narrowed from a bare except, which hid the failure reason and also
    # swallowed SystemExit/KeyboardInterrupt.
    except Exception as error:
        print('Error retrieving file: {0}'.format(error))
        exit()
def select_data(benchmark_df, mongo_version, config_replicas, mongos_instances, shard_replicas, shards_per_replica):
    """Filter benchmark results and average them per cluster configuration.

    Rows are restricted to the chameleon cloud and the large test size;
    each remaining argument filters its column unless it is 'X' (wildcard).
    The result holds the mean of every numeric measurement column per
    configuration, sorted by shard_replicas ascending.

    @param: benchmark_df DataFrame with the raw benchmark results
    @type: pandas.DataFrame
    @param: mongo_version, config_replicas, mongos_instances,
            shard_replicas, shards_per_replica column value to match,
            or 'X' to skip that filter
    @type: int or string
    """
    benchmark_df = benchmark_df[benchmark_df.cloud == "chameleon"]
    benchmark_df = benchmark_df[benchmark_df.test_size == "large"]

    # Optional equality filters; 'X' means "keep every value".
    optional_filters = (('mongo_version', mongo_version),
                        ('config_replicas', config_replicas),
                        ('mongos_instances', mongos_instances),
                        ('shard_replicas', shard_replicas),
                        ('shards_per_replica', shards_per_replica))
    for column, value in optional_filters:
        if value != 'X':
            benchmark_df = benchmark_df[benchmark_df[column] == value]

    # Average the measurement columns per cluster configuration.
    # http://stackoverflow.com/questions/10373660/converting-a-pandas-groupby-object-to-dataframe
    # numeric_only=True keeps the historical behaviour of silently dropping
    # non-numeric columns (e.g. test_size); modern pandas would raise a
    # TypeError on them without it.
    benchmark_df = benchmark_df.groupby(
        ['cloud', 'config_replicas', 'mongos_instances', 'shard_replicas',
         'shards_per_replica'], as_index=False).mean(numeric_only=True)

    return benchmark_df.sort_values(by='shard_replicas', ascending=True)
def make_figure(mapreduce_seconds_32, shards_32, mapreduce_seconds_34, shards_34):
    """formats and creates a line chart comparing MongoDB 3.2 and 3.4

    Plots average mapreduce runtime against the number of shards, one
    line per MongoDB version, and saves the chart to
    ../report/version_mapreduce.png.

    (Docstring corrected: it previously described kilo/chameleon
    parameters copy-pasted from a sibling script.)

    @param1: mapreduce_seconds_32 Array with mapreduce runtimes for v3.2
    @type: numpy array
    @param2: shards_32 Array with shard counts for v3.2
    @type: numpy array
    @param3: mapreduce_seconds_34 Array with mapreduce runtimes for v3.4
    @type: numpy array
    @param4: shards_34 Array with shard counts for v3.4
    @type: numpy array
    """
    fig = plt.figure()
    #plt.title('Average MongoImport Runtime with Various Numbers of Shards')
    plt.ylabel('Runtime in Seconds')
    plt.xlabel('Number of Shards')

    # Make the chart
    plt.plot(shards_32, mapreduce_seconds_32, label='Version 3.2')
    plt.plot(shards_34, mapreduce_seconds_34, label='Version 3.4')

    #http://stackoverflow.com/questions/11744990/how-to-set-auto-for-upper-limit-but-keep-a-fixed-lower-limit-with-matplotlib
    plt.ylim(ymin=0)
    plt.legend(loc='best')

    # Show the chart (for testing)
    # plt.show()

    # Save the chart
    fig.savefig('../report/version_mapreduce.png')
# Run the program by calling the functions
if __name__ == "__main__":
    filename = get_parm()
    benchmark_df = read_file(filename)

    # MongoDB 3.2: fixed 1 config server / 1 mongos / 1 shard per replica,
    # varying number of shard replica sets ('X' = no filter).
    mongo_version = 32
    config_replicas = 1
    mongos_instances = 1
    shard_replicas = 'X'
    shards_per_replica = 1
    select_df = select_data(benchmark_df, mongo_version, config_replicas,
                            mongos_instances, shard_replicas,
                            shards_per_replica)
    # Column 8 is read as the mapreduce runtime, column 3 as the shard
    # count.  DataFrame.as_matrix() was removed in pandas 1.0; selecting
    # with [[col]].values yields the same (n, 1) ndarray on all versions.
    mapreduce_seconds_32 = select_df[[select_df.columns[8]]].values
    shards_32 = select_df[[select_df.columns[3]]].values

    # Same selection for MongoDB 3.4.
    mongo_version = 34
    config_replicas = 1
    mongos_instances = 1
    shard_replicas = 'X'
    shards_per_replica = 1
    select_df = select_data(benchmark_df, mongo_version, config_replicas,
                            mongos_instances, shard_replicas,
                            shards_per_replica)
    mapreduce_seconds_34 = select_df[[select_df.columns[8]]].values
    shards_34 = select_df[[select_df.columns[3]]].values

    make_figure(mapreduce_seconds_32, shards_32, mapreduce_seconds_34, shards_34)
JackKelly/neuralnilm_prototype | neuralnilm/net.py | 2 | 25235 | from __future__ import division, print_function
from functools import partial
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import csv
import h5py
from datetime import datetime, timedelta
import logging
from numpy.random import rand
from time import time
from os.path import exists, join
import theano
import theano.tensor as T
import lasagne
from lasagne.layers import (InputLayer, ReshapeLayer, Layer,
ConcatLayer, ElemwiseSumLayer, DenseLayer,
get_all_layers, Conv1DLayer, FeaturePoolLayer,
DimshuffleLayer, ConcatLayer)
# Build the list of layer classes that expect 3-D (batch, time, features)
# input.  add_layers() uses this to decide when to insert a ReshapeLayer.
# The recurrent layer classes are optional imports, so fall back to just
# DimshuffleLayer when they are unavailable.
try:
    from lasagne.layers import LSTMLayer, RecurrentLayer
    from neuralnilm.layers import BLSTMLayer
    from neuralnilm.layers import BidirectionalRecurrentLayer
except ImportError:
    RECURRENT_LAYERS = [DimshuffleLayer]
else:
    RECURRENT_LAYERS = [LSTMLayer, BLSTMLayer, DimshuffleLayer,
                        RecurrentLayer, BidirectionalRecurrentLayer]
from lasagne.nonlinearities import sigmoid, rectify
from lasagne.utils import floatX
from lasagne.updates import nesterov_momentum
from lasagne.objectives import squared_error
from .source import quantize
from .layers import MixtureDensityLayer
from .utils import sfloatX, none_to_dict, ndim_tensor
from .plot import Plotter
from .batch_norm import batch_norm
class ansi:
    """ANSI terminal escape sequences used to colour the training
    progress table printed by Net.print_and_save_training_progress().

    Adapted from dnouri/nolearn/nolearn/lasagne.py.
    """
    ENDC = '\033[0m'    # reset all attributes
    BLUE = '\033[94m'   # bright blue foreground
    GREEN = '\033[32m'  # green foreground
class TrainingError(Exception):
    """Raised when training cannot continue (e.g. the cost became NaN)."""
# ####################### Neural network class ########################
class Net(object):
    """A Theano/Lasagne neural network with a training loop, periodic
    validation, CSV cost logging and HDF5 checkpointing of parameters
    and activations.

    Much of this code is adapted from craffel/nntools/examples/lstm.py.
    """
    def __init__(self, source, layers_config,
                 updates_func=nesterov_momentum,
                 updates_kwargs=None,
                 learning_rate=0.1,
                 learning_rate_changes_by_iteration=None,
                 experiment_name="",
                 validation_interval=10,
                 save_plot_interval=100,
                 loss_function=lambda x, t: squared_error(x, t).mean(),
                 layer_changes=None,
                 seed=42,
                 epoch_callbacks=None,
                 do_save_activations=True,
                 plotter=Plotter(),
                 auto_reshape=True,
                 logger=None):
        """
        Parameters
        ----------
        source : object providing `validation_data()`, `get()`, `start()`,
            `stop()` and an `n_seq_per_batch` attribute.
        layers_config : list of dicts.  Keys are:
            'type' : BLSTMLayer or a subclass of lasagne.layers.Layer
            'num_units' : int
        learning_rate_changes_by_iteration : dict mapping iteration -> rate.
        layer_changes : dict mapping iteration -> architecture change spec.
        epoch_callbacks : dict mapping iteration -> callable(net, iteration).

        NOTE(review): `plotter=Plotter()` is a mutable default argument —
        every Net built without an explicit plotter shares one Plotter
        instance (and `self.plotter.net = self` rebinds it to the newest
        Net).  Left unchanged here; confirm before relying on multiple
        Net instances in one process.
        """
        if logger is None:
            self.logger = logging.getLogger(experiment_name)
        else:
            self.logger = logger
        self.logger.info("Initialising network...")
        if seed is not None:
            np.random.seed(seed)
        self.source = source
        self.updates_func = updates_func
        # Stored as a Theano shared variable so it can be changed
        # mid-training without recompiling the update functions.
        self._learning_rate = theano.shared(
            sfloatX(learning_rate), name='learning_rate')
        self.logger.info(
            "Learning rate initialised to {:.1E}".format(learning_rate))
        self.learning_rate_changes_by_iteration = none_to_dict(
            learning_rate_changes_by_iteration)
        self.updates_kwargs = none_to_dict(updates_kwargs)
        self.experiment_name = experiment_name
        self.validation_interval = validation_interval
        self.save_plot_interval = save_plot_interval
        self.loss_function = loss_function
        self.layer_changes = none_to_dict(layer_changes)
        self.epoch_callbacks = none_to_dict(epoch_callbacks)
        self.do_save_activations = do_save_activations
        self.plotter = plotter
        self.plotter.net = self
        self.auto_reshape = auto_reshape
        self.set_csv_filenames()
        self.generate_validation_data_and_set_shapes()
        self.validation_costs = []
        self.training_costs = []
        self.training_costs_metadata = []
        self.layers = []
        self.layer_labels = {}
        # Shape is (number of examples per batch,
        #           maximum number of time steps per example,
        #           number of features per example)
        input_layer = InputLayer(shape=self.input_shape)
        self.layer_labels['input'] = input_layer
        self.layers.append(input_layer)
        self.add_layers(layers_config)
        self.logger.info(
            "Done initialising network for " + self.experiment_name)
    def set_csv_filenames(self):
        # All log files are prefixed with the experiment name so several
        # experiments can share a working directory.
        self.csv_filenames = {
            'training_costs': self.experiment_name + "_training_costs.csv",
            'validation_costs': self.experiment_name + "_validation_costs.csv",
            'training_costs_metadata':
                self.experiment_name + "_training_costs_metadata.csv",
            'best_costs': self.experiment_name + "_best_costs.txt",
        }
    def generate_validation_data_and_set_shapes(self):
        """Fetch one validation batch and derive input/output shapes from it."""
        # Generate a "validation" sequence whose cost we will compute
        self.validation_batch = self.source.validation_data()
        self.X_val, self.y_val = self.validation_batch.data
        self.input_shape = self.X_val.shape
        self.n_seq_per_batch = self.input_shape[0]
        self.output_shape = self.y_val.shape
        self.n_outputs = self.output_shape[-1]
    def add_layers(self, layers_config):
        """Instantiate each layer described in `layers_config` and append it
        to `self.layers`, inserting ReshapeLayers between 2-D (dense) and
        3-D (recurrent) representations when `auto_reshape` is on.

        NOTE(review): this is Python-2-only code (`iteritems`, `basestring`),
        and the "ref:" mechanism resolves attribute references with `eval` —
        safe only because layers_config comes from trusted experiment
        scripts, never from external input.
        """
        for layer_config in layers_config:
            layer_type = layer_config.pop('type')
            layer_label = layer_config.pop('label', None)
            # Reshape if necessary
            if self.auto_reshape:
                prev_layer_output_shape = self.layers[-1].output_shape
                n_dims = len(prev_layer_output_shape)
                n_features = prev_layer_output_shape[-1]
                if layer_type in RECURRENT_LAYERS:
                    if n_dims == 2:
                        # Flattened (batch*time, features) -> 3-D for RNNs.
                        seq_length = int(prev_layer_output_shape[0] /
                                         self.source.n_seq_per_batch)
                        shape = (self.source.n_seq_per_batch,
                                 seq_length,
                                 n_features)
                        reshape_layer = ReshapeLayer(self.layers[-1], shape)
                        self.layers.append(reshape_layer)
                elif layer_type in [DenseLayer, MixtureDensityLayer]:
                    if n_dims == 3:
                        # The prev layer_config was a time-aware layer_config,
                        # so reshape to 2-dims.
                        seq_length = prev_layer_output_shape[1]
                        shape = (self.source.n_seq_per_batch * seq_length,
                                 n_features)
                        reshape_layer = ReshapeLayer(self.layers[-1], shape)
                        self.layers.append(reshape_layer)
            # Handle references:
            # a value of "ref:<label>.<attr>" is replaced by that attribute
            # of the previously-labelled layer (e.g. to share weights).
            for k, v in layer_config.iteritems():
                if isinstance(v, basestring) and v.startswith("ref:"):
                    v = v[4:]  # remove "ref:"
                    label, _, attr = v.partition('.')
                    target_layer = self.layer_labels[label]
                    # layer_config[k] = getattr(target_layer, attr)
                    layer_config[k] = eval("target_layer.{:s}".format(attr))
                    print(layer_config[k])
                    print(type(layer_config[k]))
            self.logger.info(
                'Initialising layer_config : {}'.format(layer_type))
            # Handle ConcatLayers
            if layer_type == ConcatLayer:
                incoming = [
                    self.layer_labels[ref]
                    for ref in layer_config.pop('incomings')]
            else:
                incoming = self.layers[-1]
            # Init new layer_config
            apply_batch_norm = layer_config.pop('batch_normalize', False)
            layer = layer_type(incoming, **layer_config)
            if apply_batch_norm:
                layer = batch_norm(layer)
            self.layers.append(layer)
            if layer_label is not None:
                self.layer_labels[layer_label] = layer
        # Reshape output if necessary...
        if (self.layers[-1].output_shape != self.output_shape and
                layer_type != MixtureDensityLayer):
            reshape_layer = ReshapeLayer(self.layers[-1], self.output_shape)
            self.layers.append(reshape_layer)
        self.logger.info("Total parameters = {}".format(
            sum([p.get_value().size for p in
                 lasagne.layers.get_all_params(self.layers[-1])])))
    def print_net(self):
        """Log every layer with its input and output shapes."""
        layers = get_all_layers(self.layers[-1])
        for layer in layers:
            self.logger.info(str(layer))
            # Not every layer exposes `input_shape` (e.g. InputLayer);
            # NOTE(review): bare `except` also hides unrelated errors here.
            try:
                input_shape = layer.input_shape
            except:
                pass
            else:
                self.logger.info(" Input shape: {}".format(input_shape))
            self.logger.info("Output shape: {}".format(layer.output_shape))
    def compile(self):
        """Build the Theano functions `self.train`, `self.y_pred` and
        `self.compute_cost` from the current layer stack."""
        self.logger.info("Compiling Theano functions...")
        target_output = ndim_tensor(name='target_output', ndim=self.y_val.ndim)
        network_input = ndim_tensor(name='network_input', ndim=self.X_val.ndim)
        output_layer = self.layers[-1]
        # Training
        network_output_train = lasagne.layers.get_output(
            output_layer, network_input)
        loss_train = self.loss_function(network_output_train, target_output)
        # Evaluation (test and validation)
        network_output_eval = lasagne.layers.get_output(
            output_layer, network_input, deterministic=True)
        loss_eval = self.loss_function(network_output_eval, target_output)
        # Updates
        all_params = lasagne.layers.get_all_params(
            output_layer, trainable=True)
        updates = self.updates_func(
            loss_train, all_params, learning_rate=self._learning_rate,
            **self.updates_kwargs)
        # Theano functions for training, getting output,
        # and computing loss_train
        self.train = theano.function(
            inputs=[network_input, target_output],
            outputs=loss_train,
            updates=updates,
            on_unused_input='warn',
            allow_input_downcast=True)
        deterministic_output = lasagne.layers.get_output(
            output_layer, network_input, deterministic=True)
        self.y_pred = theano.function(
            inputs=[network_input],
            outputs=deterministic_output,
            on_unused_input='warn',
            allow_input_downcast=True)
        self.compute_cost = theano.function(
            inputs=[network_input, target_output],
            outputs=[loss_eval, deterministic_output],
            on_unused_input='warn',
            allow_input_downcast=True)
        self.logger.info("Done compiling Theano functions.")
    def fit(self, n_iterations=None):
        """Run the training loop; always stop the data source on exit."""
        # Training loop. Need to wrap this in a try-except loop so
        # we can always call self.source.stop()
        self.source.start()
        try:
            self._training_loop(n_iterations)
        except:
            raise
        finally:
            self.source.stop()
    def _change_layers(self, epoch):
        """Apply the architecture change scheduled for `epoch` and recompile."""
        self.source.stop()
        self.source.empty_queue()
        self.logger.info("Changing layers...\nOld architecture:")
        self.print_net()
        layer_changes = self.layer_changes[epoch]
        # 'remove_from' is a negative index: pop layers from the tail.
        for layer_to_remove in range(layer_changes.get('remove_from', 0), 0):
            self.logger.info(
                "Removed {}".format(self.layers.pop(layer_to_remove)))
        if 'callback' in layer_changes:
            layer_changes['callback'](self, epoch)
        self.add_layers(layer_changes['new_layers'])
        self.logger.info("New architecture:")
        self.print_net()
        self.compile()
        self.source.start()
    def _save_training_costs_metadata(self):
        """Append the newest batch-metadata dict to its CSV file (header on
        the first iteration)."""
        if not self.training_costs_metadata:
            return
        keys = self.training_costs_metadata[-1].keys()
        n_iterations = self.n_iterations()
        if n_iterations == 0:
            mode = 'w'
        else:
            mode = 'a'
        with open(self.csv_filenames['training_costs_metadata'], mode) as fh:
            writer = csv.DictWriter(fh, fieldnames=keys)
            if n_iterations == 0:
                writer.writeheader()
            writer.writerow(self.training_costs_metadata[-1])
    def print_and_save_training_progress(self, duration):
        """Log the latest costs to CSV, rewrite the best-costs summary file
        and print one coloured progress row.  Raises TrainingError if the
        training cost is NaN."""
        iteration = self.n_iterations()
        train_cost = self.training_costs[-1]
        validation_cost = (self.validation_costs[-1] if self.validation_costs
                           else None)
        _write_csv_row(self.csv_filenames['training_costs'],
                       [iteration, train_cost, duration])
        self._save_training_costs_metadata()
        # NOTE(review): assumes validation_costs is non-empty by now; this
        # holds because iteration 0 always runs a validation pass.
        best_train_cost = min(self.training_costs)
        best_valid_cost = min(self.validation_costs)
        is_best_train = train_cost == best_train_cost
        is_best_valid = validation_cost == best_valid_cost
        # write bests to disk
        FMT = "{:14.10f}"
        N = 500
        K = 25
        txt = (
            "BEST COSTS\n" +
            ("best train cost =" + FMT + " at iteration{:6d}\n").format(
                best_train_cost, self.training_costs.index(best_train_cost)) +
            ("best valid cost =" + FMT + " at iteration{:6d}\n").format(
                best_valid_cost,
                self.validation_costs.index(best_valid_cost) *
                self.validation_interval) +
            "\n" +
            "AVERAGE FOR THE TOP {:d} ITERATIONS\n".format(K) +
            (" avg train cost =" + FMT + "\n").format(
                np.mean(np.sort(self.training_costs)[:K])) +
            (" avg valid cost =" + FMT + "\n").format(
                np.mean(np.sort(self.validation_costs)[:K])) +
            "\n" +
            "AVERAGE COSTS FOR THE LAST {:d} ITERATIONS\n".format(N) +
            (" avg train cost =" + FMT + "\n").format(
                np.mean(self.training_costs[-N:])) +
            (" avg valid cost =" + FMT + "\n").format(
                np.mean(self.validation_costs[-N:]))
        )
        with open(self.csv_filenames['best_costs'], mode='w') as fh:
            fh.write(txt)
        # print bests to screen
        print(" {:>5} | {}{:>10.6f}{} | {}{:>10.6f}{} |"
              " {:>11.6f} | {:>.3f}s".format(
                  iteration,
                  ansi.BLUE if is_best_train else "",
                  train_cost,
                  ansi.ENDC if is_best_train else "",
                  ansi.GREEN if is_best_valid else "",
                  validation_cost,
                  ansi.ENDC if is_best_valid else "",
                  train_cost / validation_cost,
                  duration
              ))
        if np.isnan(train_cost):
            msg = "training cost is NaN at iteration {}!".format(iteration)
            self.logger.error(msg)
            raise TrainingError(msg)
    def _write_csv_headers(self, key='all'):
        """(Re)create the cost CSV file(s) with just a header row."""
        if key in ['all', 'training_costs']:
            _write_csv_row(
                self.csv_filenames['training_costs'],
                row=['iteration', 'train_cost', 'duration'],
                mode='w')
        if key in ['all', 'validation_costs']:
            _write_csv_row(
                self.csv_filenames['validation_costs'],
                row=['iteration', 'validation_cost'],
                mode='w')
    @property
    def learning_rate(self):
        # Read back the value of the Theano shared variable.
        return self._learning_rate.get_value()
    @learning_rate.setter
    def learning_rate(self, rate):
        # Updating the shared variable takes effect without recompiling.
        rate = sfloatX(rate)
        self.logger.info(
            "Iteration {:d}: Change learning rate to {:.1E}"
            .format(self.n_iterations(), rate))
        self._learning_rate.set_value(rate)
    def _training_loop(self, n_iterations):
        """Main loop: fetch a batch, take one training step, and handle
        scheduled learning-rate changes, layer changes, callbacks,
        validation and checkpointing.  Runs until `len(training_costs)`
        reaches `n_iterations` (forever if None)."""
        # Adapted from dnouri/nolearn/nolearn/lasagne.py
        self.logger.info("Starting training for {} iterations."
                         .format(n_iterations))
        print("""
 Update | Train cost | Valid cost | Train / Val | Secs per update
--------|--------------|--------------|---------------|----------------\
""")
        iteration = self.n_iterations()
        if iteration == 0:
            self._write_csv_headers()
        while iteration != n_iterations:
            t0 = time()  # for calculating training duration
            iteration = len(self.training_costs)
            if iteration in self.learning_rate_changes_by_iteration:
                self.learning_rate = (
                    self.learning_rate_changes_by_iteration[iteration])
            if iteration in self.layer_changes:
                self._change_layers(iteration)
            if iteration in self.epoch_callbacks:
                self.epoch_callbacks[iteration](self, iteration)
            batch = self.source.get()
            X, y = batch.data
            train_cost = self.train(X, y).flatten()[0]
            self.training_costs.append(train_cost)
            if batch.metadata:
                self.training_costs_metadata.append(batch.metadata)
            # Validate every `validation_interval` iterations (incl. 0).
            if not iteration % self.validation_interval:
                validation_cost = self.compute_cost(self.X_val, self.y_val)[0]
                validation_cost = validation_cost.flatten()[0]
                self.validation_costs.append(validation_cost)
                _write_csv_row(
                    self.csv_filenames['validation_costs'],
                    row=[iteration, validation_cost])
            if not iteration % self.save_plot_interval:
                self.save()
            duration = time() - t0
            self.print_and_save_training_progress(duration)
        self.logger.info("Finished training")
    def save(self):
        """Save plots, parameters and activations, logging (but not
        propagating) any failure in each step."""
        self.logger.info("Saving plots...")
        try:
            self.plotter.plot_all()
        except:
            self.logger.exception("")
        self.logger.info("Saving params...")
        try:
            self.save_params()
        except:
            self.logger.exception("")
        self.logger.info("Saving activations...")
        try:
            self.save_activations()
        except:
            self.logger.exception("")
        self.logger.info("Finished saving.")
    def n_iterations(self):
        """Index of the most recent completed training iteration (>= 0)."""
        return max(len(self.training_costs) - 1, 0)
    def save_params(self, filename=None):
        """
        Save it to HDF in the following format:
            /epoch<N>/L<I>_<type>/P<I>_<name>
        """
        if filename is None:
            filename = self.experiment_name + ".hdf5"
        # First iteration starts a fresh file; later saves append epochs.
        mode = 'w' if self.n_iterations() == 0 else 'a'
        f = h5py.File(filename, mode=mode)
        epoch_name = 'epoch{:06d}'.format(self.n_iterations())
        try:
            epoch_group = f.create_group(epoch_name)
        except ValueError:
            # Group already exists (epoch saved before): skip, don't crash.
            self.logger.exception("Cannot save params!")
            f.close()
            return
        layers = get_all_layers(self.layers[-1])
        for layer_i, layer in enumerate(layers):
            params = layer.get_params()
            if not params:
                continue
            layer_name = 'L{:02d}_{}'.format(layer_i, layer.__class__.__name__)
            layer_group = epoch_group.create_group(layer_name)
            for param_i, param in enumerate(params):
                param_name = 'P{:02d}'.format(param_i)
                if param.name:
                    param_name += "_" + param.name
                data = param.get_value()
                layer_group.create_dataset(
                    param_name, data=data, compression="gzip")
        f.close()
    def load_params(self, iteration, path=None):
        """
        Load params from HDF in the following format:
            /epoch<N>/L<I>_<type>/P<I>_<name>

        Also restores the cost histories (truncated to `iteration`),
        re-applies the appropriate scheduled learning rate, and replays
        epoch callbacks scheduled before `iteration`.
        """
        # Process function parameters
        filename = self.experiment_name + ".hdf5"
        if path is not None:
            filename = join(path, filename)
        self.logger.info('Loading params from ' + filename + '...')
        f = h5py.File(filename, mode='r')
        epoch_name = 'epoch{:06d}'.format(iteration)
        epoch_group = f[epoch_name]
        layers = get_all_layers(self.layers[-1])
        for layer_i, layer in enumerate(layers):
            params = layer.get_params()
            if not params:
                continue
            layer_name = 'L{:02d}_{}'.format(layer_i, layer.__class__.__name__)
            layer_group = epoch_group[layer_name]
            for param_i, param in enumerate(params):
                param_name = 'P{:02d}'.format(param_i)
                if param.name:
                    param_name += "_" + param.name
                data = layer_group[param_name]
                param.set_value(data.value)
        f.close()
        self.logger.info('Done loading params from ' + filename + '.')
        # LOAD COSTS
        def load_csv(key, limit):
            # Read the cost history, truncate it at `limit` rows, and
            # rewrite the file so it matches the restored state.
            filename = self.csv_filenames[key]
            if path is not None:
                filename = join(path, filename)
            data = np.genfromtxt(filename, delimiter=',', skip_header=1)
            data = data[:limit, :]
            # overwrite costs file
            self._write_csv_headers(key)
            with open(filename, mode='a') as fh:
                np.savetxt(fh, data, delimiter=',')
            return list(data[:, 1])
        self.training_costs = load_csv('training_costs', iteration)
        self.validation_costs = load_csv(
            'validation_costs', iteration // self.validation_interval)
        # LOAD TRAINING COSTS METADATA
        metadata_fname = self.csv_filenames['training_costs_metadata']
        if path is not None:
            metadata_fname = join(path, metadata_fname)
        try:
            metadata_fh = open(metadata_fname, 'r')
        except IOError:
            # No metadata file was ever written: nothing to restore.
            pass
        else:
            reader = csv.DictReader(metadata_fh)
            training_costs_metadata = [row for row in reader]
            keys = training_costs_metadata[-1].keys()
            metadata_fh.close()
            self.training_costs_metadata = training_costs_metadata[:iteration]
            if len(training_costs_metadata) > iteration:
                # Overwrite old file
                with open(metadata_fname, 'w') as metadata_fh:
                    writer = csv.DictWriter(metadata_fh, keys)
                    writer.writeheader()
                    writer.writerows(self.training_costs_metadata)
        # set learning rate
        # (pick the most recent scheduled rate before `iteration`;
        #  NOTE(review): py2-only — dict.keys() must return a list here)
        if self.learning_rate_changes_by_iteration:
            keys = self.learning_rate_changes_by_iteration.keys()
            keys.sort(reverse=True)
            for key in keys:
                if key < iteration:
                    self.learning_rate = (
                        self.learning_rate_changes_by_iteration[key])
                    break
        # epoch_callbacks
        callbacks_to_call = [
            key for key in self.epoch_callbacks.keys() if key < iteration]
        for callback_iteration in callbacks_to_call:
            self.epoch_callbacks[callback_iteration](self, callback_iteration)
    def save_activations(self):
        """Save each parameterised layer's activations on the validation
        batch to <experiment>_activations.hdf5, one group per epoch."""
        if not self.do_save_activations:
            return
        filename = self.experiment_name + "_activations.hdf5"
        mode = 'w' if self.n_iterations() == 0 else 'a'
        f = h5py.File(filename, mode=mode)
        epoch_name = 'epoch{:06d}'.format(self.n_iterations())
        try:
            epoch_group = f.create_group(epoch_name)
        except ValueError:
            self.logger.exception("Cannot save params!")
            f.close()
            return
        layers = get_all_layers(self.layers[-1])
        for layer_i, layer in enumerate(layers):
            # We only care about layers with params
            if not (layer.get_params() or isinstance(layer, FeaturePoolLayer)):
                continue
            output = lasagne.layers.get_output(layer, self.X_val).eval()
            n_features = output.shape[-1]
            seq_length = int(output.shape[0] / self.source.n_seq_per_batch)
            if isinstance(layer, DenseLayer):
                # Un-flatten dense outputs back to (batch, time, features).
                shape = (self.source.n_seq_per_batch, seq_length, n_features)
                output = output.reshape(shape)
            elif isinstance(layer, Conv1DLayer):
                output = output.transpose(0, 2, 1)
            layer_name = 'L{:02d}_{}'.format(layer_i, layer.__class__.__name__)
            epoch_group.create_dataset(
                layer_name, data=output, compression="gzip")
        # save validation data
        if self.n_iterations() == 0:
            f.create_dataset(
                'validation_data', data=self.X_val, compression="gzip")
        f.close()
def _write_csv_row(filename, row, mode='a'):
with open(filename, mode=mode) as csvfile:
csv_writer = csv.writer(csvfile)
csv_writer.writerow(row)
"""
Emacs variables
Local Variables:
compile-command: "rsync -uvzr --progress --exclude '.git' --exclude '.ropeproject' --exclude '*/.ipynb_checkpoints' --exclude '*/flycheck_*.py' /home/jack/workspace/python/neuralnilm_prototype/ /mnt/sshfs/imperial/workspace/python/neuralnilm_prototype/"
End:
"""
| mit |
thesuperzapper/tensorflow | tensorflow/examples/learn/iris_custom_model.py | 50 | 2613 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Example of Estimator for Iris plant dataset."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from sklearn import cross_validation
from sklearn import datasets
from sklearn import metrics
import tensorflow as tf
layers = tf.contrib.layers
learn = tf.contrib.learn
def my_model(features, target):
  """DNN with three hidden layers, and dropout of 0.1 probability."""
  # One-hot encode the labels: 3 classes, on-value 1, off-value 0.
  target = tf.one_hot(target, 3, 1, 0)

  # Three fully connected layers of 10, 20 and 10 units, each normalised
  # with dropout that keeps 90% of activations.
  net = layers.stack(
      features,
      layers.fully_connected, [10, 20, 10],
      normalizer_fn=layers.dropout,
      normalizer_params={'keep_prob': 0.9})

  # Linear output layer: one logit per class, plus the training loss.
  logits = layers.fully_connected(net, 3, activation_fn=None)
  loss = tf.losses.softmax_cross_entropy(target, logits)

  # Adagrad optimiser advancing the global step.
  train_op = tf.contrib.layers.optimize_loss(
      loss,
      tf.contrib.framework.get_global_step(),
      optimizer='Adagrad',
      learning_rate=0.1)

  predictions = {
      'class': tf.argmax(logits, 1),
      'prob': tf.nn.softmax(logits),
  }
  return (predictions, loss, train_op)
def main(unused_argv):
  """Train the custom Estimator on Iris and print test accuracy."""
  iris = datasets.load_iris()
  x_train, x_test, y_train, y_test = cross_validation.train_test_split(
      iris.data, iris.target, test_size=0.2, random_state=42)

  classifier = learn.Estimator(model_fn=my_model)
  classifier.fit(x_train, y_train, steps=1000)

  predictions = classifier.predict(x_test, as_iterable=True)
  y_predicted = [p['class'] for p in predictions]

  score = metrics.accuracy_score(y_test, y_predicted)
  print('Accuracy: {0:f}'.format(score))


if __name__ == '__main__':
  tf.app.run()
| apache-2.0 |
DonBeo/scikit-learn | examples/cluster/plot_agglomerative_clustering_metrics.py | 402 | 4492 | """
Agglomerative clustering with different metrics
===============================================
Demonstrates the effect of different metrics on the hierarchical clustering.
The example is engineered to show the effect of the choice of different
metrics. It is applied to waveforms, which can be seen as
high-dimensional vector. Indeed, the difference between metrics is
usually more pronounced in high dimension (in particular for euclidean
and cityblock).
We generate data from three groups of waveforms. Two of the waveforms
(waveform 1 and waveform 2) are proportional one to the other. The cosine
distance is invariant to a scaling of the data, as a result, it cannot
distinguish these two waveforms. Thus even with no noise, clustering
using this distance will not separate out waveform 1 and 2.
We add observation noise to these waveforms. We generate very sparse
noise: only 6% of the time points contain noise. As a result, the
l1 norm of this noise (i.e. "cityblock" distance) is much smaller than its
l2 norm ("euclidean" distance). This can be seen on the inter-class
distance matrices: the values on the diagonal, that characterize the
spread of the class, are much bigger for the Euclidean distance than for
the cityblock distance.
When we apply clustering to the data, we find that the clustering
reflects what was in the distance matrices. Indeed, for the Euclidean
distance, the classes are ill-separated because of the noise, and thus
the clustering does not separate the waveforms. For the cityblock
distance, the separation is good and the waveform classes are recovered.
Finally, the cosine distance does not separate at all waveform 1 and 2,
thus the clustering puts them in the same cluster.
"""
# Author: Gael Varoquaux
# License: BSD 3-Clause or CC-0
import matplotlib.pyplot as plt
import numpy as np
from sklearn.cluster import AgglomerativeClustering
from sklearn.metrics import pairwise_distances
# Fixed seed so the generated waveforms (and the figures) are reproducible.
np.random.seed(0)
# Generate waveform data
n_features = 2000
# Sample positions spanning [0, pi]; each waveform is a 2000-D vector.
t = np.pi * np.linspace(0, 1, n_features)
def sqr(x):
    """Square wave: the sign of cos(x), element-wise."""
    cosine = np.cos(x)
    return np.sign(cosine)
# Build 30 noisy samples for each of the three waveform classes.
# (phi, a) are the phase offset and amplitude of each class; classes 0 and
# 1 share phi=.5, so they differ only by a scale factor.
X = list()
y = list()
for i, (phi, a) in enumerate([(.5, .15), (.5, .6), (.3, .2)]):
    for _ in range(30):
        phase_noise = .01 * np.random.normal()
        amplitude_noise = .04 * np.random.normal()
        additional_noise = 1 - 2 * np.random.rand(n_features)
        # Make the noise sparse
        additional_noise[np.abs(additional_noise) < .997] = 0
        X.append(12 * ((a + amplitude_noise)
                 * (sqr(6 * (t + phi + phase_noise)))
                 + additional_noise))
        y.append(i)
X = np.array(X)
y = np.array(y)
n_clusters = 3
labels = ('Waveform 1', 'Waveform 2', 'Waveform 3')
# Plot the ground-truth labelling: one colour per class, all 30 samples of
# each class drawn as semi-transparent curves.
plt.figure()
plt.axes([0, 0, 1, 1])
for l, c, n in zip(range(n_clusters), 'rgb',
                   labels):
    lines = plt.plot(X[y == l].T, c=c, alpha=.5)
    # Label only the first line of each class so the legend has 3 entries.
    lines[0].set_label(n)
plt.legend(loc='best')
plt.axis('tight')
plt.axis('off')
plt.suptitle("Ground truth", size=20)
# Plot the distances: for each metric, the 3x3 matrix of mean pairwise
# distances between (and within) the ground-truth classes, normalised so
# the largest entry is 1.
for index, metric in enumerate(["cosine", "euclidean", "cityblock"]):
    avg_dist = np.zeros((n_clusters, n_clusters))
    plt.figure(figsize=(5, 4.5))
    for i in range(n_clusters):
        for j in range(n_clusters):
            avg_dist[i, j] = pairwise_distances(X[y == i], X[y == j],
                                                metric=metric).mean()
    avg_dist /= avg_dist.max()
    # Overlay the numeric value on each cell of the heat map.
    for i in range(n_clusters):
        for j in range(n_clusters):
            plt.text(i, j, '%5.3f' % avg_dist[i, j],
                     verticalalignment='center',
                     horizontalalignment='center')
    plt.imshow(avg_dist, interpolation='nearest', cmap=plt.cm.gnuplot2,
               vmin=0)
    plt.xticks(range(n_clusters), labels, rotation=45)
    plt.yticks(range(n_clusters), labels)
    plt.colorbar()
    plt.suptitle("Interclass %s distances" % metric, size=18)
    plt.tight_layout()
# Plot clustering results: run average-linkage agglomerative clustering
# under each metric and colour the waveforms by the recovered cluster.
for index, metric in enumerate(["cosine", "euclidean", "cityblock"]):
    model = AgglomerativeClustering(n_clusters=n_clusters,
                                    linkage="average", affinity=metric)
    model.fit(X)
    plt.figure()
    plt.axes([0, 0, 1, 1])
    for l, c in zip(np.arange(model.n_clusters), 'rgbk'):
        plt.plot(X[model.labels_ == l].T, c=c, alpha=.5)
    plt.axis('tight')
    plt.axis('off')
    plt.suptitle("AgglomerativeClustering(affinity=%s)" % metric, size=20)
plt.show()
| bsd-3-clause |
jmschrei/scikit-learn | sklearn/cluster/tests/test_birch.py | 342 | 5603 | """
Tests for the birch clustering algorithm.
"""
from scipy import sparse
import numpy as np
from sklearn.cluster.tests.common import generate_clustered_data
from sklearn.cluster.birch import Birch
from sklearn.cluster.hierarchical import AgglomerativeClustering
from sklearn.datasets import make_blobs
from sklearn.linear_model import ElasticNet
from sklearn.metrics import pairwise_distances_argmin, v_measure_score
from sklearn.utils.testing import assert_greater_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_warns
def test_n_samples_leaves_roots():
    """Root and leaf subclusters must each account for every sample."""
    # Sanity check for the number of samples in leaves and roots
    X, y = make_blobs(n_samples=10)
    brc = Birch()
    brc.fit(X)
    n_samples_root = sum([sc.n_samples_ for sc in brc.root_.subclusters_])
    n_samples_leaves = sum([sc.n_samples_ for leaf in brc._get_leaves()
                            for sc in leaf.subclusters_])
    assert_equal(n_samples_leaves, X.shape[0])
    assert_equal(n_samples_root, X.shape[0])
def test_partial_fit():
    """fit() must equal two partial_fit() calls on the two data halves."""
    # Test that fit is equivalent to calling partial_fit multiple times
    X, y = make_blobs(n_samples=100)
    brc = Birch(n_clusters=3)
    brc.fit(X)
    brc_partial = Birch(n_clusters=None)
    brc_partial.partial_fit(X[:50])
    brc_partial.partial_fit(X[50:])
    assert_array_equal(brc_partial.subcluster_centers_,
                       brc.subcluster_centers_)
    # Test that same global labels are obtained after calling partial_fit
    # with None
    brc_partial.set_params(n_clusters=3)
    brc_partial.partial_fit(None)
    assert_array_equal(brc_partial.subcluster_labels_, brc.subcluster_labels_)
def test_birch_predict():
    """predict() must assign each sample to its nearest subcluster centroid."""
    # Test the predict method predicts the nearest centroid.
    rng = np.random.RandomState(0)
    X = generate_clustered_data(n_clusters=3, n_features=3,
                                n_samples_per_cluster=10)
    # n_samples * n_samples_per_cluster
    shuffle_indices = np.arange(30)
    rng.shuffle(shuffle_indices)
    X_shuffle = X[shuffle_indices, :]
    brc = Birch(n_clusters=4, threshold=1.)
    brc.fit(X_shuffle)
    centroids = brc.subcluster_centers_
    assert_array_equal(brc.labels_, brc.predict(X_shuffle))
    # Labels should agree perfectly with a nearest-centroid assignment.
    nearest_centroid = pairwise_distances_argmin(X_shuffle, centroids)
    assert_almost_equal(v_measure_score(nearest_centroid, brc.labels_), 1.0)
def test_n_clusters():
    """Exercise the `n_clusters` parameter: int, estimator, invalid, small."""
    # Test that n_clusters param works properly
    X, y = make_blobs(n_samples=100, centers=10)
    brc1 = Birch(n_clusters=10)
    brc1.fit(X)
    assert_greater(len(brc1.subcluster_centers_), 10)
    assert_equal(len(np.unique(brc1.labels_)), 10)
    # Test that n_clusters = Agglomerative Clustering gives
    # the same results.
    gc = AgglomerativeClustering(n_clusters=10)
    brc2 = Birch(n_clusters=gc)
    brc2.fit(X)
    assert_array_equal(brc1.subcluster_labels_, brc2.subcluster_labels_)
    assert_array_equal(brc1.labels_, brc2.labels_)
    # Test that the wrong global clustering step raises an Error.
    clf = ElasticNet()
    brc3 = Birch(n_clusters=clf)
    assert_raises(ValueError, brc3.fit, X)
    # Test that a small number of clusters raises a warning.
    brc4 = Birch(threshold=10000.)
    assert_warns(UserWarning, brc4.fit, X)
def test_sparse_X():
    """Fitting on a CSR matrix must match fitting on the dense array."""
    # Test that sparse and dense data give same results
    X, y = make_blobs(n_samples=100, centers=10)
    brc = Birch(n_clusters=10)
    brc.fit(X)
    csr = sparse.csr_matrix(X)
    brc_sparse = Birch(n_clusters=10)
    brc_sparse.fit(csr)
    assert_array_equal(brc.labels_, brc_sparse.labels_)
    assert_array_equal(brc.subcluster_centers_,
                       brc_sparse.subcluster_centers_)
def check_branching_factor(node, branching_factor):
    """Recursively assert that no CF-tree node has more than
    `branching_factor` subclusters."""
    children = node.subclusters_
    assert_greater_equal(branching_factor, len(children))
    for subcluster in children:
        child = subcluster.child_
        if child:
            check_branching_factor(child, branching_factor)
def test_branching_factor():
    """The CF-tree must respect `branching_factor` and reject factor 1."""
    # Test that nodes have at max branching_factor number of subclusters
    X, y = make_blobs()
    branching_factor = 9
    # Purposefully set a low threshold to maximize the subclusters.
    brc = Birch(n_clusters=None, branching_factor=branching_factor,
                threshold=0.01)
    brc.fit(X)
    check_branching_factor(brc.root_, branching_factor)
    brc = Birch(n_clusters=3, branching_factor=branching_factor,
                threshold=0.01)
    brc.fit(X)
    check_branching_factor(brc.root_, branching_factor)
    # Raises error when branching_factor is set to one.
    brc = Birch(n_clusters=None, branching_factor=1, threshold=0.01)
    assert_raises(ValueError, brc.fit, X)
def check_threshold(birch_instance, threshold):
    """Walk the linked list of leaves and assert every leaf subcluster's
    radius is within `threshold`."""
    leaf = birch_instance.dummy_leaf_.next_leaf_
    while leaf:
        for subcluster in leaf.subclusters_:
            assert_greater_equal(threshold, subcluster.radius)
        leaf = leaf.next_leaf_
def test_threshold():
    """Leaf subcluster radii must stay below the configured threshold."""
    # Test that the leaf subclusters have a threshold lesser than radius
    X, y = make_blobs(n_samples=80, centers=4)
    brc = Birch(threshold=0.5, n_clusters=None)
    brc.fit(X)
    check_threshold(brc, 0.5)
    brc = Birch(threshold=5.0, n_clusters=None)
    brc.fit(X)
    check_threshold(brc, 5.)
| bsd-3-clause |
amolkahat/pandas | pandas/tests/test_base.py | 2 | 46356 | # -*- coding: utf-8 -*-
from __future__ import print_function
import re
import sys
from datetime import datetime, timedelta
import pytest
import numpy as np
import pandas as pd
import pandas.compat as compat
from pandas.core.dtypes.common import (
is_object_dtype, is_datetimetz, is_datetime64_dtype,
needs_i8_conversion)
import pandas.util.testing as tm
from pandas import (Series, Index, DatetimeIndex, TimedeltaIndex,
PeriodIndex, Timedelta, IntervalIndex, Interval,
CategoricalIndex, Timestamp)
from pandas.compat import StringIO, PYPY, long
from pandas.compat.numpy import np_array_datetime64_compat
from pandas.core.accessor import PandasDelegate
from pandas.core.base import PandasObject, NoNewAttributesMixin
from pandas.core.indexes.datetimelike import DatetimeIndexOpsMixin
from pandas._libs.tslib import iNaT
class CheckStringMixin(object):
    """Mixin: smoke-test the string conversions of `self.container`
    (and optionally `self.unicode_container`) — they must not raise."""
    def test_string_methods_dont_fail(self):
        repr(self.container)
        str(self.container)
        bytes(self.container)
        # Python 2 only: also exercise unicode().
        if not compat.PY3:
            unicode(self.container)  # noqa
    def test_tricky_container(self):
        # Subclasses opt in by defining `unicode_container`.
        if not hasattr(self, 'unicode_container'):
            pytest.skip('Need unicode_container to test with this')
        repr(self.unicode_container)
        str(self.unicode_container)
        bytes(self.unicode_container)
        if not compat.PY3:
            unicode(self.unicode_container)  # noqa
class CheckImmutable(object):
    """Mixin: assert that `self.container` rejects all mutation with a
    TypeError whose message mentions mutable operations, and that slicing
    preserves the container type (`self.klass`)."""
    mutable_regex = re.compile('does not support mutable operations')
    def check_mutable_error(self, *args, **kwargs):
        # Pass whatever function you normally would to assert_raises_regex
        # (after the Exception kind).
        tm.assert_raises_regex(
            TypeError, self.mutable_regex, *args, **kwargs)
    def test_no_mutable_funcs(self):
        # Each closure attempts one mutation on the shared container.
        def setitem():
            self.container[0] = 5
        self.check_mutable_error(setitem)
        def setslice():
            self.container[1:2] = 3
        self.check_mutable_error(setslice)
        def delitem():
            del self.container[0]
        self.check_mutable_error(delitem)
        def delslice():
            del self.container[0:3]
        self.check_mutable_error(delslice)
        # Subclasses may list extra mutating method names to check.
        mutable_methods = getattr(self, "mutable_methods", [])
        for meth in mutable_methods:
            self.check_mutable_error(getattr(self.container, meth))
    def test_slicing_maintains_type(self):
        result = self.container[1:2]
        expected = self.lst[1:2]
        self.check_result(result, expected)
    def check_result(self, result, expected, klass=None):
        # Compare against `self.klass` unless an explicit class is given.
        klass = klass or self.klass
        assert isinstance(result, klass)
        assert result == expected
class TestPandasDelegate(object):
    """Tests for the ``PandasDelegate`` accessor-delegation machinery."""

    class Delegator(object):
        # Delegation target: declares which properties/methods are exposed.
        _properties = ['foo']
        _methods = ['bar']

        def _set_foo(self, value):
            self.foo = value

        def _get_foo(self):
            return self.foo

        foo = property(_get_foo, _set_foo, doc="foo property")

        def bar(self, *args, **kwargs):
            """ a test bar method """
            pass

    class Delegate(PandasDelegate, PandasObject):
        # Wrapper that delegates attribute access to ``obj``.
        def __init__(self, obj):
            self.obj = obj

    def setup_method(self, method):
        pass

    def test_invalid_delegation(self):
        # these show that in order for the delegation to work
        # the _delegate_* methods need to be overridden to not raise
        # a TypeError
        self.Delegate._add_delegate_accessors(
            delegate=self.Delegator,
            accessors=self.Delegator._properties,
            typ='property'
        )
        self.Delegate._add_delegate_accessors(
            delegate=self.Delegator,
            accessors=self.Delegator._methods,
            typ='method'
        )

        delegate = self.Delegate(self.Delegator())

        # Default _delegate_property_get raises TypeError.
        def f():
            delegate.foo

        pytest.raises(TypeError, f)

        # Default _delegate_property_set raises TypeError.
        def f():
            delegate.foo = 5

        pytest.raises(TypeError, f)

        # Default _delegate_method raises TypeError.
        def f():
            delegate.foo()

        pytest.raises(TypeError, f)

    @pytest.mark.skipif(PYPY, reason="not relevant for PyPy")
    def test_memory_usage(self):
        # Delegate does not implement memory_usage.
        # Check that we fall back to in-built `__sizeof__`
        # GH 12924
        delegate = self.Delegate(self.Delegator())
        sys.getsizeof(delegate)
class Ops(object):
    """Fixture base class: builds one Index and one Series per dtype
    flavour and exposes everything that exists as ``self.objs``."""

    def _allow_na_ops(self, obj):
        """Whether to skip test cases including NaN"""
        if (isinstance(obj, Index) and
                (obj.is_boolean() or not obj._can_hold_na)):
            # don't test boolean / int64 index
            return False
        return True

    def setup_method(self, method):
        # One index per dtype flavour, all named 'a'.
        self.bool_index = tm.makeBoolIndex(10, name='a')
        self.int_index = tm.makeIntIndex(10, name='a')
        self.float_index = tm.makeFloatIndex(10, name='a')
        self.dt_index = tm.makeDateIndex(10, name='a')
        self.dt_tz_index = tm.makeDateIndex(10, name='a').tz_localize(
            tz='US/Eastern')
        self.period_index = tm.makePeriodIndex(10, name='a')
        self.string_index = tm.makeStringIndex(10, name='a')
        self.unicode_index = tm.makeUnicodeIndex(10, name='a')

        # Series sharing one random data array across different indexes.
        arr = np.random.randn(10)
        self.int_series = Series(arr, index=self.int_index, name='a')
        self.float_series = Series(arr, index=self.float_index, name='a')
        self.dt_series = Series(arr, index=self.dt_index, name='a')
        self.dt_tz_series = self.dt_tz_index.to_series(keep_tz=True)
        self.period_series = Series(arr, index=self.period_index, name='a')
        self.string_series = Series(arr, index=self.string_index, name='a')

        types = ['bool', 'int', 'float', 'dt', 'dt_tz', 'period', 'string',
                 'unicode']
        fmts = ["{0}_{1}".format(t, f)
                for t in types for f in ['index', 'series']]
        # Collect only the attributes that were actually created above
        # (some combinations, e.g. bool_series, never exist).
        self.objs = [getattr(self, f)
                     for f in fmts if getattr(self, f, None) is not None]

    def check_ops_properties(self, props, filter=None, ignore_failures=False):
        # For each property, the object-level accessor must mirror the value
        # computed on the underlying index.
        for op in props:
            for o in self.is_valid_objs:

                # if a filter, skip if it doesn't match
                if filter is not None:
                    filt = o.index if isinstance(o, Series) else o
                    if not filter(filt):
                        continue

                try:
                    if isinstance(o, Series):
                        expected = Series(
                            getattr(o.index, op), index=o.index, name='a')
                    else:
                        expected = getattr(o, op)
                except (AttributeError):
                    if ignore_failures:
                        continue
                    # NOTE(review): when ignore_failures is False an
                    # AttributeError here leaves ``expected`` unbound, so the
                    # comparison below would raise NameError — presumably
                    # callers always pass existing props in that case; confirm.

                result = getattr(o, op)

                # these couuld be series, arrays or scalars
                if isinstance(result, Series) and isinstance(expected, Series):
                    tm.assert_series_equal(result, expected)
                elif isinstance(result, Index) and isinstance(expected, Index):
                    tm.assert_index_equal(result, expected)
                elif isinstance(result, np.ndarray) and isinstance(expected,
                                                                   np.ndarray):
                    tm.assert_numpy_array_equal(result, expected)
                else:
                    assert result == expected

            # freq raises AttributeError on an Int64Index because its not
            # defined we mostly care about Series here anyhow
            if not ignore_failures:
                for o in self.not_valid_objs:

                    # an object that is datetimelike will raise a TypeError,
                    # otherwise an AttributeError
                    if issubclass(type(o), DatetimeIndexOpsMixin):
                        pytest.raises(TypeError, lambda: getattr(o, op))
                    else:
                        pytest.raises(AttributeError,
                                      lambda: getattr(o, op))

    def test_binary_ops_docs(self):
        from pandas import DataFrame, Panel
        op_map = {'add': '+',
                  'sub': '-',
                  'mul': '*',
                  'mod': '%',
                  'pow': '**',
                  'truediv': '/',
                  'floordiv': '//'}
        for op_name in ['add', 'sub', 'mul', 'mod', 'pow', 'truediv',
                        'floordiv']:
            for klass in [Series, DataFrame, Panel]:
                operand1 = klass.__name__.lower()
                operand2 = 'other'
                op = op_map[op_name]
                # Docstring must show the canonical "a <op> b" example.
                expected_str = ' '.join([operand1, op, operand2])
                assert expected_str in getattr(klass, op_name).__doc__

                # reverse version of the binary ops
                expected_str = ' '.join([operand2, op, operand1])
                assert expected_str in getattr(klass, 'r' + op_name).__doc__
class TestIndexOps(Ops):
    """Shared Index/Series ops tests over all dtype flavours in ``self.objs``.

    Fixes relative to the previous revision:
    * ``test_nanops``: the non-monotonic DatetimeIndex check used
      ``assert x, y`` (a non-empty tuple — always true) instead of
      ``assert x == y``, so it never actually tested anything.
    * ``test_factorize``: removed a dead ``exp_uniques = o`` assignment that
      was immediately overwritten.
    * ``test_fillna``: removed an unused local.
    """

    def setup_method(self, method):
        super(TestIndexOps, self).setup_method(method)
        self.is_valid_objs = self.objs
        self.not_valid_objs = []

    def test_none_comparison(self):
        # bug brought up by #1079
        # changed from TypeError in 0.17.0
        for o in self.is_valid_objs:
            if isinstance(o, Series):
                o[0] = np.nan

                # noinspection PyComparisonWithNone
                result = o == None  # noqa
                assert not result.iat[0]
                assert not result.iat[1]

                # noinspection PyComparisonWithNone
                result = o != None  # noqa
                assert result.iat[0]
                assert result.iat[1]

                result = None == o  # noqa
                assert not result.iat[0]
                assert not result.iat[1]

                # this fails for numpy < 1.9
                # and oddly for *some* platforms
                # result = None != o  # noqa
                # assert result.iat[0]
                # assert result.iat[1]

                if (is_datetime64_dtype(o) or is_datetimetz(o)):
                    # Following DatetimeIndex (and Timestamp) convention,
                    # inequality comparisons with Series[datetime64] raise
                    with pytest.raises(TypeError):
                        None > o
                    with pytest.raises(TypeError):
                        o > None
                else:
                    result = None > o
                    assert not result.iat[0]
                    assert not result.iat[1]

                    result = o < None
                    assert not result.iat[0]
                    assert not result.iat[1]

    def test_ndarray_compat_properties(self):
        for o in self.objs:
            # Check that we work.
            for p in ['shape', 'dtype', 'T', 'nbytes']:
                assert getattr(o, p, None) is not None

            # deprecated properties
            for p in ['flags', 'strides', 'itemsize']:
                with tm.assert_produces_warning(FutureWarning):
                    assert getattr(o, p, None) is not None

            with tm.assert_produces_warning(FutureWarning):
                assert hasattr(o, 'base')

            # If we have a datetime-like dtype then needs a view to work
            # but the user is responsible for that
            try:
                with tm.assert_produces_warning(FutureWarning):
                    assert o.data is not None
            except ValueError:
                pass

            with pytest.raises(ValueError):
                o.item()  # len > 1

            assert o.ndim == 1
            assert o.size == len(o)

        # item() is only valid for length-1 containers.
        assert Index([1]).item() == 1
        assert Series([1]).item() == 1

    def test_ops(self):
        for op in ['max', 'min']:
            for o in self.objs:
                result = getattr(o, op)()
                if not isinstance(o, PeriodIndex):
                    expected = getattr(o.values, op)()
                else:
                    expected = pd.Period(
                        ordinal=getattr(o._ndarray_values, op)(),
                        freq=o.freq)
                try:
                    assert result == expected
                except TypeError:
                    # comparing tz-aware series with np.array results in
                    # TypeError
                    expected = expected.astype('M8[ns]').astype('int64')
                    assert result.value == expected

    def test_nanops(self):
        # GH 7261
        for op in ['max', 'min']:
            for klass in [Index, Series]:
                obj = klass([np.nan, 2.0])
                assert getattr(obj, op)() == 2.0

                obj = klass([np.nan])
                assert pd.isna(getattr(obj, op)())

                obj = klass([])
                assert pd.isna(getattr(obj, op)())

                obj = klass([pd.NaT, datetime(2011, 11, 1)])
                # check DatetimeIndex monotonic path
                assert getattr(obj, op)() == datetime(2011, 11, 1)

                obj = klass([pd.NaT, datetime(2011, 11, 1), pd.NaT])
                # check DatetimeIndex non-monotonic path
                # BUG FIX: this previously read
                # ``assert getattr(obj, op)(), datetime(2011, 11, 1)`` —
                # an assert on a 2-tuple, which is always true.
                assert getattr(obj, op)() == datetime(2011, 11, 1)

        # argmin/max
        obj = Index(np.arange(5, dtype='int64'))
        assert obj.argmin() == 0
        assert obj.argmax() == 4

        obj = Index([np.nan, 1, np.nan, 2])
        assert obj.argmin() == 1
        assert obj.argmax() == 3

        obj = Index([np.nan])
        assert obj.argmin() == -1
        assert obj.argmax() == -1

        obj = Index([pd.NaT, datetime(2011, 11, 1), datetime(2011, 11, 2),
                     pd.NaT])
        assert obj.argmin() == 1
        assert obj.argmax() == 2

        obj = Index([pd.NaT])
        assert obj.argmin() == -1
        assert obj.argmax() == -1

    def test_value_counts_unique_nunique(self):
        for orig in self.objs:
            o = orig.copy()
            klass = type(o)
            values = o._values

            if isinstance(values, Index):
                # reset name not to affect latter process
                values.name = None

            # create repeated values, 'n'th element is repeated by n+1 times
            # skip boolean, because it only has 2 values at most
            if isinstance(o, Index) and o.is_boolean():
                continue
            elif isinstance(o, Index):
                expected_index = Index(o[::-1])
                expected_index.name = None
                o = o.repeat(range(1, len(o) + 1))
                o.name = 'a'
            else:
                expected_index = Index(values[::-1])
                idx = o.index.repeat(range(1, len(o) + 1))
                rep = np.repeat(values, range(1, len(o) + 1))
                o = klass(rep, index=idx, name='a')

            # check values has the same dtype as the original
            assert o.dtype == orig.dtype

            expected_s = Series(range(10, 0, -1), index=expected_index,
                                dtype='int64', name='a')

            result = o.value_counts()
            tm.assert_series_equal(result, expected_s)
            assert result.index.name is None
            assert result.name == 'a'

            result = o.unique()
            if isinstance(o, Index):
                assert isinstance(result, o.__class__)
                tm.assert_index_equal(result, orig)
            elif is_datetimetz(o):
                # datetimetz Series returns array of Timestamp
                assert result[0] == orig[0]
                for r in result:
                    assert isinstance(r, Timestamp)
                tm.assert_numpy_array_equal(
                    result, orig._values.astype(object).values)
            else:
                tm.assert_numpy_array_equal(result, orig.values)

            assert o.nunique() == len(np.unique(o.values))

    def test_value_counts_unique_nunique_null(self):
        for null_obj in [np.nan, None]:
            for orig in self.objs:
                o = orig.copy()
                klass = type(o)
                values = o._ndarray_values

                if not self._allow_na_ops(o):
                    continue

                # special assign to the numpy array
                if is_datetimetz(o):
                    if isinstance(o, DatetimeIndex):
                        v = o.asi8
                        v[0:2] = iNaT
                        values = o._shallow_copy(v)
                    else:
                        o = o.copy()
                        o[0:2] = iNaT
                        values = o._values
                elif needs_i8_conversion(o):
                    values[0:2] = iNaT
                    values = o._shallow_copy(values)
                else:
                    values[0:2] = null_obj

                # check values has the same dtype as the original
                assert values.dtype == o.dtype

                # create repeated values, 'n'th element is repeated by n+1
                # times
                if isinstance(o, (DatetimeIndex, PeriodIndex)):
                    expected_index = o.copy()
                    expected_index.name = None

                    # attach name to klass
                    o = klass(values.repeat(range(1, len(o) + 1)))
                    o.name = 'a'
                else:
                    if is_datetimetz(o):
                        expected_index = orig._values._shallow_copy(values)
                    else:
                        expected_index = Index(values)
                    expected_index.name = None
                    o = o.repeat(range(1, len(o) + 1))
                    o.name = 'a'

                # check values has the same dtype as the original
                assert o.dtype == orig.dtype

                # check values correctly have NaN
                nanloc = np.zeros(len(o), dtype=np.bool)
                nanloc[:3] = True
                if isinstance(o, Index):
                    tm.assert_numpy_array_equal(pd.isna(o), nanloc)
                else:
                    exp = Series(nanloc, o.index, name='a')
                    tm.assert_series_equal(pd.isna(o), exp)

                expected_s_na = Series(list(range(10, 2, -1)) + [3],
                                       index=expected_index[9:0:-1],
                                       dtype='int64', name='a')
                expected_s = Series(list(range(10, 2, -1)),
                                    index=expected_index[9:1:-1],
                                    dtype='int64', name='a')

                result_s_na = o.value_counts(dropna=False)
                tm.assert_series_equal(result_s_na, expected_s_na)
                assert result_s_na.index.name is None
                assert result_s_na.name == 'a'
                result_s = o.value_counts()
                # consistency: compare the computed result, don't recompute
                tm.assert_series_equal(result_s, expected_s)
                assert result_s.index.name is None
                assert result_s.name == 'a'

                result = o.unique()
                if isinstance(o, Index):
                    tm.assert_index_equal(result,
                                          Index(values[1:], name='a'))
                elif is_datetimetz(o):
                    # unable to compare NaT / nan
                    vals = values[2:].astype(object).values
                    tm.assert_numpy_array_equal(result[1:], vals)
                    assert result[0] is pd.NaT
                else:
                    tm.assert_numpy_array_equal(result[1:], values[2:])
                    assert pd.isna(result[0])
                    assert result.dtype == orig.dtype

                assert o.nunique() == 8
                assert o.nunique(dropna=False) == 9

    def test_value_counts_inferred(self):
        klasses = [Index, Series]
        for klass in klasses:
            s_values = ['a', 'b', 'b', 'b', 'b', 'c', 'd', 'd', 'a', 'a']
            s = klass(s_values)
            expected = Series([4, 3, 2, 1], index=['b', 'a', 'd', 'c'])
            tm.assert_series_equal(s.value_counts(), expected)

            if isinstance(s, Index):
                exp = Index(np.unique(np.array(s_values, dtype=np.object_)))
                tm.assert_index_equal(s.unique(), exp)
            else:
                exp = np.unique(np.array(s_values, dtype=np.object_))
                tm.assert_numpy_array_equal(s.unique(), exp)

            assert s.nunique() == 4

            # don't sort, have to sort after the fact as not sorting is
            # platform-dep
            hist = s.value_counts(sort=False).sort_values()
            expected = Series([3, 1, 4, 2], index=list('acbd')).sort_values()
            tm.assert_series_equal(hist, expected)

            # sort ascending
            hist = s.value_counts(ascending=True)
            expected = Series([1, 2, 3, 4], index=list('cdab'))
            tm.assert_series_equal(hist, expected)

            # relative histogram.
            hist = s.value_counts(normalize=True)
            expected = Series([.4, .3, .2, .1], index=['b', 'a', 'd', 'c'])
            tm.assert_series_equal(hist, expected)

    def test_value_counts_bins(self):
        klasses = [Index, Series]
        for klass in klasses:
            s_values = ['a', 'b', 'b', 'b', 'b', 'c', 'd', 'd', 'a', 'a']
            s = klass(s_values)

            # bins: not supported for object values
            pytest.raises(TypeError,
                          lambda bins: s.value_counts(bins=bins), 1)

            s1 = Series([1, 1, 2, 3])
            res1 = s1.value_counts(bins=1)
            exp1 = Series({Interval(0.997, 3.0): 4})
            tm.assert_series_equal(res1, exp1)
            res1n = s1.value_counts(bins=1, normalize=True)
            exp1n = Series({Interval(0.997, 3.0): 1.0})
            tm.assert_series_equal(res1n, exp1n)

            if isinstance(s1, Index):
                tm.assert_index_equal(s1.unique(), Index([1, 2, 3]))
            else:
                exp = np.array([1, 2, 3], dtype=np.int64)
                tm.assert_numpy_array_equal(s1.unique(), exp)

            assert s1.nunique() == 3

            # these return the same
            res4 = s1.value_counts(bins=4, dropna=True)
            intervals = IntervalIndex.from_breaks([0.997, 1.5, 2.0, 2.5, 3.0])
            exp4 = Series([2, 1, 1, 0], index=intervals.take([0, 3, 1, 2]))
            tm.assert_series_equal(res4, exp4)

            res4 = s1.value_counts(bins=4, dropna=False)
            intervals = IntervalIndex.from_breaks([0.997, 1.5, 2.0, 2.5, 3.0])
            exp4 = Series([2, 1, 1, 0], index=intervals.take([0, 3, 1, 2]))
            tm.assert_series_equal(res4, exp4)

            res4n = s1.value_counts(bins=4, normalize=True)
            exp4n = Series([0.5, 0.25, 0.25, 0],
                           index=intervals.take([0, 3, 1, 2]))
            tm.assert_series_equal(res4n, exp4n)

            # handle NA's properly
            s_values = ['a', 'b', 'b', 'b', np.nan, np.nan,
                        'd', 'd', 'a', 'a', 'b']
            s = klass(s_values)
            expected = Series([4, 3, 2], index=['b', 'a', 'd'])
            tm.assert_series_equal(s.value_counts(), expected)

            if isinstance(s, Index):
                exp = Index(['a', 'b', np.nan, 'd'])
                tm.assert_index_equal(s.unique(), exp)
            else:
                exp = np.array(['a', 'b', np.nan, 'd'], dtype=object)
                tm.assert_numpy_array_equal(s.unique(), exp)
            assert s.nunique() == 3

            # empty container
            s = klass({})
            expected = Series([], dtype=np.int64)
            tm.assert_series_equal(s.value_counts(), expected,
                                   check_index_type=False)
            # returned dtype differs depending on original
            if isinstance(s, Index):
                tm.assert_index_equal(s.unique(), Index([]), exact=False)
            else:
                tm.assert_numpy_array_equal(s.unique(), np.array([]),
                                            check_dtype=False)

            assert s.nunique() == 0

    @pytest.mark.parametrize('klass', [Index, Series])
    def test_value_counts_datetime64(self, klass):
        # GH 3002, datetime64[ns]
        # don't test names though
        txt = "\n".join(['xxyyzz20100101PIE', 'xxyyzz20100101GUM',
                         'xxyyzz20100101EGG', 'xxyyww20090101EGG',
                         'foofoo20080909PIE', 'foofoo20080909GUM'])
        f = StringIO(txt)
        df = pd.read_fwf(f, widths=[6, 8, 3],
                         names=["person_id", "dt", "food"],
                         parse_dates=["dt"])

        s = klass(df['dt'].copy())
        s.name = None
        idx = pd.to_datetime(['2010-01-01 00:00:00',
                              '2008-09-09 00:00:00',
                              '2009-01-01 00:00:00'])
        expected_s = Series([3, 2, 1], index=idx)
        tm.assert_series_equal(s.value_counts(), expected_s)

        expected = np_array_datetime64_compat(['2010-01-01 00:00:00',
                                               '2009-01-01 00:00:00',
                                               '2008-09-09 00:00:00'],
                                              dtype='datetime64[ns]')
        if isinstance(s, Index):
            tm.assert_index_equal(s.unique(), DatetimeIndex(expected))
        else:
            tm.assert_numpy_array_equal(s.unique(), expected)

        assert s.nunique() == 3

        # with NaT
        s = df['dt'].copy()
        s = klass([v for v in s.values] + [pd.NaT])

        result = s.value_counts()
        assert result.index.dtype == 'datetime64[ns]'
        tm.assert_series_equal(result, expected_s)

        result = s.value_counts(dropna=False)
        expected_s[pd.NaT] = 1
        tm.assert_series_equal(result, expected_s)

        unique = s.unique()
        assert unique.dtype == 'datetime64[ns]'

        # numpy_array_equal cannot compare pd.NaT
        if isinstance(s, Index):
            exp_idx = DatetimeIndex(expected.tolist() + [pd.NaT])
            tm.assert_index_equal(unique, exp_idx)
        else:
            tm.assert_numpy_array_equal(unique[:3], expected)
            assert pd.isna(unique[3])

        assert s.nunique() == 3
        assert s.nunique(dropna=False) == 4

        # timedelta64[ns]
        td = df.dt - df.dt + timedelta(1)
        td = klass(td, name='dt')

        result = td.value_counts()
        expected_s = Series([6], index=[Timedelta('1day')], name='dt')
        tm.assert_series_equal(result, expected_s)

        expected = TimedeltaIndex(['1 days'], name='dt')
        if isinstance(td, Index):
            tm.assert_index_equal(td.unique(), expected)
        else:
            tm.assert_numpy_array_equal(td.unique(), expected.values)

        td2 = timedelta(1) + (df.dt - df.dt)
        td2 = klass(td2, name='dt')
        result2 = td2.value_counts()
        tm.assert_series_equal(result2, expected_s)

    def test_factorize(self):
        for orig in self.objs:
            o = orig.copy()

            if isinstance(o, Index) and o.is_boolean():
                exp_arr = np.array([0, 1] + [0] * 8, dtype=np.intp)
                # (dead ``exp_uniques = o`` assignment removed)
                exp_uniques = Index([False, True])
            else:
                exp_arr = np.array(range(len(o)), dtype=np.intp)
                exp_uniques = o
            labels, uniques = o.factorize()

            tm.assert_numpy_array_equal(labels, exp_arr)
            if isinstance(o, Series):
                tm.assert_index_equal(uniques, Index(orig),
                                      check_names=False)
            else:
                # factorize explicitly resets name
                tm.assert_index_equal(uniques, exp_uniques,
                                      check_names=False)

    def test_factorize_repeated(self):
        for orig in self.objs:
            o = orig.copy()

            # don't test boolean
            if isinstance(o, Index) and o.is_boolean():
                continue

            # sort by value, and create duplicates
            if isinstance(o, Series):
                o = o.sort_values()
                n = o.iloc[5:].append(o)
            else:
                indexer = o.argsort()
                o = o.take(indexer)
                n = o[5:].append(o)

            exp_arr = np.array([5, 6, 7, 8, 9, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9],
                               dtype=np.intp)
            labels, uniques = n.factorize(sort=True)

            tm.assert_numpy_array_equal(labels, exp_arr)
            if isinstance(o, Series):
                tm.assert_index_equal(uniques, Index(orig).sort_values(),
                                      check_names=False)
            else:
                tm.assert_index_equal(uniques, o, check_names=False)

            exp_arr = np.array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 0, 1, 2, 3, 4],
                               np.intp)
            labels, uniques = n.factorize(sort=False)
            tm.assert_numpy_array_equal(labels, exp_arr)

            if isinstance(o, Series):
                expected = Index(o.iloc[5:10].append(o.iloc[:5]))
                tm.assert_index_equal(uniques, expected, check_names=False)
            else:
                expected = o[5:10].append(o[:5])
                tm.assert_index_equal(uniques, expected, check_names=False)

    def test_duplicated_drop_duplicates_index(self):
        # GH 4060
        for original in self.objs:
            if isinstance(original, Index):

                # special case
                if original.is_boolean():
                    result = original.drop_duplicates()
                    expected = Index([False, True], name='a')
                    tm.assert_index_equal(result, expected)
                    continue

                # original doesn't have duplicates
                expected = np.array([False] * len(original), dtype=bool)
                duplicated = original.duplicated()
                tm.assert_numpy_array_equal(duplicated, expected)
                assert duplicated.dtype == bool
                result = original.drop_duplicates()
                tm.assert_index_equal(result, original)
                assert result is not original

                # has_duplicates
                assert not original.has_duplicates

                # create repeated values, 3rd and 5th values are duplicated
                idx = original[list(range(len(original))) + [5, 3]]
                expected = np.array([False] * len(original) + [True, True],
                                    dtype=bool)
                duplicated = idx.duplicated()
                tm.assert_numpy_array_equal(duplicated, expected)
                assert duplicated.dtype == bool
                tm.assert_index_equal(idx.drop_duplicates(), original)

                # keep='last' marks the *first* occurrence of each dup
                base = [False] * len(idx)
                base[3] = True
                base[5] = True
                expected = np.array(base)

                duplicated = idx.duplicated(keep='last')
                tm.assert_numpy_array_equal(duplicated, expected)
                assert duplicated.dtype == bool
                result = idx.drop_duplicates(keep='last')
                tm.assert_index_equal(result, idx[~expected])

                # keep=False marks every occurrence of each dup
                base = [False] * len(original) + [True, True]
                base[3] = True
                base[5] = True
                expected = np.array(base)

                duplicated = idx.duplicated(keep=False)
                tm.assert_numpy_array_equal(duplicated, expected)
                assert duplicated.dtype == bool
                result = idx.drop_duplicates(keep=False)
                tm.assert_index_equal(result, idx[~expected])

                with tm.assert_raises_regex(
                        TypeError, r"drop_duplicates\(\) got an unexpected "
                        "keyword argument"):
                    idx.drop_duplicates(inplace=True)

            else:
                expected = Series([False] * len(original),
                                  index=original.index, name='a')
                tm.assert_series_equal(original.duplicated(), expected)
                result = original.drop_duplicates()
                tm.assert_series_equal(result, original)
                assert result is not original

                idx = original.index[list(range(len(original))) + [5, 3]]
                values = original._values[list(range(len(original))) + [5, 3]]
                s = Series(values, index=idx, name='a')

                expected = Series([False] * len(original) + [True, True],
                                  index=idx, name='a')
                tm.assert_series_equal(s.duplicated(), expected)
                tm.assert_series_equal(s.drop_duplicates(), original)

                base = [False] * len(idx)
                base[3] = True
                base[5] = True
                expected = Series(base, index=idx, name='a')

                tm.assert_series_equal(s.duplicated(keep='last'), expected)
                tm.assert_series_equal(s.drop_duplicates(keep='last'),
                                       s[~np.array(base)])

                base = [False] * len(original) + [True, True]
                base[3] = True
                base[5] = True
                expected = Series(base, index=idx, name='a')

                tm.assert_series_equal(s.duplicated(keep=False), expected)
                tm.assert_series_equal(s.drop_duplicates(keep=False),
                                       s[~np.array(base)])

                s.drop_duplicates(inplace=True)
                tm.assert_series_equal(s, original)

    def test_drop_duplicates_series_vs_dataframe(self):
        # GH 14192
        df = pd.DataFrame({'a': [1, 1, 1, 'one', 'one'],
                           'b': [2, 2, np.nan, np.nan, np.nan],
                           'c': [3, 3, np.nan, np.nan, 'three'],
                           'd': [1, 2, 3, 4, 4],
                           'e': [datetime(2015, 1, 1), datetime(2015, 1, 1),
                                 datetime(2015, 2, 1), pd.NaT, pd.NaT]
                           })
        for column in df.columns:
            for keep in ['first', 'last', False]:
                dropped_frame = df[[column]].drop_duplicates(keep=keep)
                dropped_series = df[column].drop_duplicates(keep=keep)
                tm.assert_frame_equal(dropped_frame, dropped_series.to_frame())

    def test_fillna(self):
        # # GH 11343
        # though Index.fillna and Series.fillna has separate impl,
        # test here to confirm these works as the same
        for orig in self.objs:
            o = orig.copy()

            # values will not be changed
            result = o.fillna(o.astype(object).values[0])
            if isinstance(o, Index):
                tm.assert_index_equal(o, result)
            else:
                tm.assert_series_equal(o, result)
            # check shallow_copied
            assert o is not result

        for null_obj in [np.nan, None]:
            for orig in self.objs:
                o = orig.copy()
                klass = type(o)

                if not self._allow_na_ops(o):
                    continue

                if needs_i8_conversion(o):
                    values = o.astype(object).values
                    fill_value = values[0]
                    values[0:2] = pd.NaT
                else:
                    values = o.values.copy()
                    fill_value = o.values[0]
                    values[0:2] = null_obj

                expected = [fill_value] * 2 + list(values[2:])
                expected = klass(expected)
                o = klass(values)

                # check values has the same dtype as the original
                assert o.dtype == orig.dtype

                result = o.fillna(fill_value)
                if isinstance(o, Index):
                    tm.assert_index_equal(result, expected)
                else:
                    tm.assert_series_equal(result, expected)
                # check shallow_copied
                assert o is not result

    @pytest.mark.skipif(PYPY, reason="not relevant for PyPy")
    def test_memory_usage(self):
        for o in self.objs:
            res = o.memory_usage()
            res_deep = o.memory_usage(deep=True)

            if (is_object_dtype(o) or (isinstance(o, Series) and
                                       is_object_dtype(o.index))):
                # if there are objects, only deep will pick them up
                assert res_deep > res
            else:
                assert res == res_deep

            if isinstance(o, Series):
                assert ((o.memory_usage(index=False) +
                         o.index.memory_usage()) ==
                        o.memory_usage(index=True))

            # sys.getsizeof will call the .memory_usage with
            # deep=True, and add on some GC overhead
            diff = res_deep - sys.getsizeof(o)
            assert abs(diff) < 100

    def test_searchsorted(self):
        # See gh-12238
        for o in self.objs:
            index = np.searchsorted(o, max(o))
            assert 0 <= index <= len(o)

            index = np.searchsorted(o, max(o), sorter=range(len(o)))
            assert 0 <= index <= len(o)

    def test_validate_bool_args(self):
        # non-bool ``inplace`` values must be rejected
        invalid_values = [1, "True", [1, 2, 3], 5.0]

        for value in invalid_values:
            with pytest.raises(ValueError):
                self.int_series.drop_duplicates(inplace=value)
class TestTranspose(Ops):
    """Transpose on Series/Index is the identity; numpy's ``axes``
    argument is rejected."""

    errmsg = "the 'axes' parameter is not supported"

    @staticmethod
    def _assert_identical(left, right):
        # Dispatch to the container-appropriate equality assertion.
        if isinstance(left, Index):
            tm.assert_index_equal(left, right)
        else:
            tm.assert_series_equal(left, right)

    def test_transpose(self):
        for obj in self.objs:
            self._assert_identical(obj.transpose(), obj)

    def test_transpose_non_default_axes(self):
        for obj in self.objs:
            # Both positional and keyword forms must be rejected.
            tm.assert_raises_regex(ValueError, self.errmsg,
                                   obj.transpose, 1)
            tm.assert_raises_regex(ValueError, self.errmsg,
                                   obj.transpose, axes=1)

    def test_numpy_transpose(self):
        for obj in self.objs:
            self._assert_identical(np.transpose(obj), obj)
            tm.assert_raises_regex(ValueError, self.errmsg,
                                   np.transpose, obj, axes=1)
class TestNoNewAttributesMixin(object):
    """After ``_freeze()``, adding new attributes must raise."""

    def test_mixin(self):
        class Frozen(NoNewAttributesMixin):
            pass

        obj = Frozen()
        assert not hasattr(obj, "__frozen")

        # Attributes may be added freely before freezing.
        obj.a = "test"
        assert obj.a == "test"

        obj._freeze()
        assert "__frozen" in dir(obj)
        assert getattr(obj, "__frozen")

        def assign_new_attr():
            obj.b = "test"

        pytest.raises(AttributeError, assign_new_attr)
        assert not hasattr(obj, "b")
class TestToIterable(object):
    # test that we convert an iterable to python types

    # (pandas dtype string, expected boxed scalar type(s) when iterating)
    dtypes = [
        ('int8', (int, long)),
        ('int16', (int, long)),
        ('int32', (int, long)),
        ('int64', (int, long)),
        ('uint8', (int, long)),
        ('uint16', (int, long)),
        ('uint32', (int, long)),
        ('uint64', (int, long)),
        ('float16', float),
        ('float32', float),
        ('float64', float),
        ('datetime64[ns]', Timestamp),
        ('datetime64[ns, US/Eastern]', Timestamp),
        ('timedelta64[ns]', Timedelta)]

    @pytest.mark.parametrize(
        'dtype, rdtype', dtypes)
    @pytest.mark.parametrize(
        'method',
        [
            lambda x: x.tolist(),
            lambda x: list(x),
            lambda x: list(x.__iter__()),
        ], ids=['tolist', 'list', 'iter'])
    @pytest.mark.parametrize('typ', [Series, Index])
    def test_iterable(self, typ, method, dtype, rdtype):
        # gh-10904
        # gh-13258
        # coerce iteration to underlying python / pandas types
        s = typ([1], dtype=dtype)
        result = method(s)[0]
        assert isinstance(result, rdtype)

    @pytest.mark.parametrize(
        'dtype, rdtype, obj',
        [
            ('object', object, 'a'),
            ('object', (int, long), 1),
            ('category', object, 'a'),
            ('category', (int, long), 1)])
    @pytest.mark.parametrize(
        'method',
        [
            lambda x: x.tolist(),
            lambda x: list(x),
            lambda x: list(x.__iter__()),
        ], ids=['tolist', 'list', 'iter'])
    @pytest.mark.parametrize('typ', [Series, Index])
    def test_iterable_object_and_category(self, typ, method,
                                          dtype, rdtype, obj):
        # gh-10904
        # gh-13258
        # coerce iteration to underlying python / pandas types
        s = typ([obj], dtype=dtype)
        result = method(s)[0]
        assert isinstance(result, rdtype)

    @pytest.mark.parametrize(
        'dtype, rdtype', dtypes)
    def test_iterable_items(self, dtype, rdtype):
        # gh-13258
        # test items / iteritems yields the correct boxed scalars
        # this only applies to series
        s = Series([1], dtype=dtype)
        _, result = list(s.items())[0]
        assert isinstance(result, rdtype)

        _, result = list(s.iteritems())[0]
        assert isinstance(result, rdtype)

    @pytest.mark.parametrize(
        'dtype, rdtype',
        dtypes + [
            ('object', (int, long)),
            ('category', (int, long))])
    @pytest.mark.parametrize('typ', [Series, Index])
    def test_iterable_map(self, typ, dtype, rdtype):
        # gh-13236
        # coerce iteration to underlying python / pandas types
        s = typ([1], dtype=dtype)
        result = s.map(type)[0]
        if not isinstance(rdtype, tuple):
            rdtype = tuple([rdtype])
        assert result in rdtype

    @pytest.mark.parametrize(
        'method',
        [
            lambda x: x.tolist(),
            lambda x: list(x),
            lambda x: list(x.__iter__()),
        ], ids=['tolist', 'list', 'iter'])
    def test_categorial_datetimelike(self, method):
        # Categorical of Timestamps must still iterate as Timestamp.
        i = CategoricalIndex([Timestamp('1999-12-31'),
                              Timestamp('2000-12-31')])

        result = method(i)[0]
        assert isinstance(result, Timestamp)

    def test_iter_box(self):
        # naive datetime64 iterates as tz-less Timestamp
        vals = [Timestamp('2011-01-01'), Timestamp('2011-01-02')]
        s = Series(vals)
        assert s.dtype == 'datetime64[ns]'
        for res, exp in zip(s, vals):
            assert isinstance(res, Timestamp)
            assert res.tz is None
            assert res == exp

        # tz-aware datetime64 keeps its timezone on iteration
        vals = [Timestamp('2011-01-01', tz='US/Eastern'),
                Timestamp('2011-01-02', tz='US/Eastern')]
        s = Series(vals)

        assert s.dtype == 'datetime64[ns, US/Eastern]'
        for res, exp in zip(s, vals):
            assert isinstance(res, Timestamp)
            assert res.tz == exp.tz
            assert res == exp

        # timedelta
        vals = [Timedelta('1 days'), Timedelta('2 days')]
        s = Series(vals)
        assert s.dtype == 'timedelta64[ns]'
        for res, exp in zip(s, vals):
            assert isinstance(res, Timedelta)
            assert res == exp

        # period
        vals = [pd.Period('2011-01-01', freq='M'),
                pd.Period('2011-01-02', freq='M')]
        s = Series(vals)
        assert s.dtype == 'Period[M]'
        for res, exp in zip(s, vals):
            assert isinstance(res, pd.Period)
            assert res.freq == 'M'
            assert res == exp
@pytest.mark.parametrize('array, expected_type, dtype', [
    (np.array([0, 1], dtype=np.int64), np.ndarray, 'int64'),
    (np.array(['a', 'b']), np.ndarray, 'object'),
    (pd.Categorical(['a', 'b']), pd.Categorical, 'category'),
    (pd.DatetimeIndex(['2017', '2018']), np.ndarray, 'datetime64[ns]'),
    (pd.DatetimeIndex(['2017', '2018'], tz="US/Central"), pd.DatetimeIndex,
     'datetime64[ns, US/Central]'),
    (pd.TimedeltaIndex([10**10]), np.ndarray, 'm8[ns]'),
    (pd.PeriodIndex([2018, 2019], freq='A'), pd.core.arrays.PeriodArray,
     pd.core.dtypes.dtypes.PeriodDtype("A-DEC")),
    (pd.IntervalIndex.from_breaks([0, 1, 2]), pd.core.arrays.IntervalArray,
     'interval'),
])
def test_values_consistent(array, expected_type, dtype):
    """``._values`` must agree in type, content and dtype between the
    Series and Index wrappers of the same array."""
    l_values = pd.Series(array)._values
    r_values = pd.Index(array)._values
    assert type(l_values) is expected_type
    assert type(l_values) is type(r_values)

    # Pick the comparison utility matching the concrete _values type.
    if isinstance(l_values, np.ndarray):
        tm.assert_numpy_array_equal(l_values, r_values)
    elif isinstance(l_values, pd.Index):
        tm.assert_index_equal(l_values, r_values)
    elif pd.api.types.is_categorical(l_values):
        tm.assert_categorical_equal(l_values, r_values)
    elif pd.api.types.is_period_dtype(l_values):
        tm.assert_period_array_equal(l_values, r_values)
    elif pd.api.types.is_interval_dtype(l_values):
        tm.assert_interval_array_equal(l_values, r_values)
    else:
        raise TypeError("Unexpected type {}".format(type(l_values)))

    assert l_values.dtype == dtype
    assert r_values.dtype == dtype
@pytest.mark.parametrize('array, expected', [
    (np.array([0, 1], dtype=np.int64), np.array([0, 1], dtype=np.int64)),
    (np.array(['0', '1']), np.array(['0', '1'], dtype=object)),
    (pd.Categorical(['a', 'a']), np.array([0, 0], dtype='int8')),
    (pd.DatetimeIndex(['2017-01-01T00:00:00']),
     np.array(['2017-01-01T00:00:00'], dtype='M8[ns]')),
    (pd.DatetimeIndex(['2017-01-01T00:00:00'], tz="US/Eastern"),
     np.array(['2017-01-01T05:00:00'], dtype='M8[ns]')),
    (pd.TimedeltaIndex([10**10]), np.array([10**10], dtype='m8[ns]')),
    (pd.PeriodIndex(['2017', '2018'], freq='D'),
     np.array([17167, 17532], dtype=np.int64)),
])
def test_ndarray_values(array, expected):
    """``._ndarray_values`` must agree between the Series and Index
    wrappers and match the expected plain ndarray representation."""
    l_values = pd.Series(array)._ndarray_values
    r_values = pd.Index(array)._ndarray_values
    tm.assert_numpy_array_equal(l_values, r_values)
    tm.assert_numpy_array_equal(l_values, expected)
| bsd-3-clause |
boomsbloom/dtm-fmri | DTM/for_gensim/lib/python2.7/site-packages/pandas/sparse/tests/test_pivot.py | 7 | 2417 | import numpy as np
import pandas as pd
import pandas.util.testing as tm
class TestPivotTable(tm.TestCase):
    """pivot_table on a SparseDataFrame must match the dense result."""

    _multiprocess_can_split_ = True

    def setUp(self):
        # 'E' contains NaN to exercise missing-data aggregation paths.
        self.dense = pd.DataFrame({'A': ['foo', 'bar', 'foo', 'bar',
                                         'foo', 'bar', 'foo', 'foo'],
                                   'B': ['one', 'one', 'two', 'three',
                                         'two', 'two', 'one', 'three'],
                                   'C': np.random.randn(8),
                                   'D': np.random.randn(8),
                                   'E': [np.nan, np.nan, 1, 2,
                                         np.nan, 1, np.nan, np.nan]})
        self.sparse = self.dense.to_sparse()

    def test_pivot_table(self):
        # default aggregation, no-NaN column
        res_sparse = pd.pivot_table(self.sparse, index='A', columns='B',
                                    values='C')
        res_dense = pd.pivot_table(self.dense, index='A', columns='B',
                                   values='C')
        tm.assert_frame_equal(res_sparse, res_dense)

        # default aggregation, column with NaN
        res_sparse = pd.pivot_table(self.sparse, index='A', columns='B',
                                    values='E')
        res_dense = pd.pivot_table(self.dense, index='A', columns='B',
                                   values='E')
        tm.assert_frame_equal(res_sparse, res_dense)

        # explicit mean aggregation
        res_sparse = pd.pivot_table(self.sparse, index='A', columns='B',
                                    values='E', aggfunc='mean')
        res_dense = pd.pivot_table(self.dense, index='A', columns='B',
                                   values='E', aggfunc='mean')
        tm.assert_frame_equal(res_sparse, res_dense)

        # ToDo: sum doesn't handle nan properly
        # res_sparse = pd.pivot_table(self.sparse, index='A', columns='B',
        #                             values='E', aggfunc='sum')
        # res_dense = pd.pivot_table(self.dense, index='A', columns='B',
        #                             values='E', aggfunc='sum')
        # tm.assert_frame_equal(res_sparse, res_dense)

    def test_pivot_table_multi(self):
        # multiple value columns at once
        res_sparse = pd.pivot_table(self.sparse, index='A', columns='B',
                                    values=['D', 'E'])
        res_dense = pd.pivot_table(self.dense, index='A', columns='B',
                                   values=['D', 'E'])
        tm.assert_frame_equal(res_sparse, res_dense)
| mit |
shyamalschandra/scikit-learn | sklearn/svm/setup.py | 321 | 3157 | import os
from os.path import join
import numpy
from sklearn._build_utils import get_blas_info
def configuration(parent_package='', top_path=None):
    """Build configuration for the sklearn.svm subpackage.

    Registers the bundled libsvm/liblinear C and C++ sources so that
    numpy.distutils compiles the `libsvm`, `liblinear` and `libsvm_sparse`
    extension modules.
    """
    from numpy.distutils.misc_util import Configuration

    config = Configuration('svm', parent_package, top_path)

    config.add_subpackage('tests')

    # Section LibSVM

    # we compile both libsvm and libsvm_sparse
    config.add_library('libsvm-skl',
                       sources=[join('src', 'libsvm', 'libsvm_template.cpp')],
                       depends=[join('src', 'libsvm', 'svm.cpp'),
                                join('src', 'libsvm', 'svm.h')],
                       # Force C++ linking in case gcc is picked up instead
                       # of g++ under windows with some versions of MinGW
                       extra_link_args=['-lstdc++'],
                       )

    libsvm_sources = ['libsvm.c']
    libsvm_depends = [join('src', 'libsvm', 'libsvm_helper.c'),
                      join('src', 'libsvm', 'libsvm_template.cpp'),
                      join('src', 'libsvm', 'svm.cpp'),
                      join('src', 'libsvm', 'svm.h')]

    config.add_extension('libsvm',
                         sources=libsvm_sources,
                         include_dirs=[numpy.get_include(),
                                       join('src', 'libsvm')],
                         libraries=['libsvm-skl'],
                         depends=libsvm_depends,
                         )

    ### liblinear module
    cblas_libs, blas_info = get_blas_info()
    if os.name == 'posix':
        # Link libm on POSIX systems for the math routines.
        cblas_libs.append('m')

    liblinear_sources = ['liblinear.c',
                         join('src', 'liblinear', '*.cpp')]
    liblinear_depends = [join('src', 'liblinear', '*.h'),
                         join('src', 'liblinear', 'liblinear_helper.c')]

    config.add_extension('liblinear',
                         sources=liblinear_sources,
                         libraries=cblas_libs,
                         include_dirs=[join('..', 'src', 'cblas'),
                                       numpy.get_include(),
                                       blas_info.pop('include_dirs', [])],
                         extra_compile_args=blas_info.pop('extra_compile_args',
                                                          []),
                         depends=liblinear_depends,
                         # extra_compile_args=['-O0 -fno-inline'],
                         ** blas_info)

    ## end liblinear module

    # this should go *after* libsvm-skl
    libsvm_sparse_sources = ['libsvm_sparse.c']
    config.add_extension('libsvm_sparse', libraries=['libsvm-skl'],
                         sources=libsvm_sparse_sources,
                         include_dirs=[numpy.get_include(),
                                       join("src", "libsvm")],
                         depends=[join("src", "libsvm", "svm.h"),
                                  join("src", "libsvm",
                                       "libsvm_sparse_helper.c")])

    return config
if __name__ == '__main__':
    # Allow building this subpackage standalone with numpy.distutils.
    from numpy.distutils.core import setup
    setup(**configuration(top_path='').todict())
| bsd-3-clause |
andaag/scikit-learn | sklearn/metrics/pairwise.py | 104 | 42995 | # -*- coding: utf-8 -*-
# Authors: Alexandre Gramfort <alexandre.gramfort@inria.fr>
# Mathieu Blondel <mathieu@mblondel.org>
# Robert Layton <robertlayton@gmail.com>
# Andreas Mueller <amueller@ais.uni-bonn.de>
# Philippe Gervais <philippe.gervais@inria.fr>
# Lars Buitinck <larsmans@gmail.com>
# Joel Nothman <joel.nothman@gmail.com>
# License: BSD 3 clause
import itertools
import numpy as np
from scipy.spatial import distance
from scipy.sparse import csr_matrix
from scipy.sparse import issparse
from ..utils import check_array
from ..utils import gen_even_slices
from ..utils import gen_batches
from ..utils.fixes import partial
from ..utils.extmath import row_norms, safe_sparse_dot
from ..preprocessing import normalize
from ..externals.joblib import Parallel
from ..externals.joblib import delayed
from ..externals.joblib.parallel import cpu_count
from .pairwise_fast import _chi2_kernel_fast, _sparse_manhattan
# Utility Functions
def _return_float_dtype(X, Y):
"""
1. If dtype of X and Y is float32, then dtype float32 is returned.
2. Else dtype float is returned.
"""
if not issparse(X) and not isinstance(X, np.ndarray):
X = np.asarray(X)
if Y is None:
Y_dtype = X.dtype
elif not issparse(Y) and not isinstance(Y, np.ndarray):
Y = np.asarray(Y)
Y_dtype = Y.dtype
else:
Y_dtype = Y.dtype
if X.dtype == Y_dtype == np.float32:
dtype = np.float32
else:
dtype = np.float
return X, Y, dtype
def check_pairwise_arrays(X, Y):
    """Validate X and Y for pairwise distance/kernel computations.

    If Y is None it is aliased to X (no copy is made). Both inputs are
    converted to at-least-2D float arrays (CSR format is accepted for
    sparse input) and their numbers of features must match.

    Parameters
    ----------
    X : {array-like, sparse matrix}, shape (n_samples_a, n_features)

    Y : {array-like, sparse matrix}, shape (n_samples_b, n_features), or None

    Returns
    -------
    safe_X : {array-like, sparse matrix}, shape (n_samples_a, n_features)
        Validated version of X, guaranteed to be a numpy/sparse array.

    safe_Y : {array-like, sparse matrix}, shape (n_samples_b, n_features)
        Validated version of Y; aliases safe_X when Y was None or X itself.
    """
    X, Y, dtype = _return_float_dtype(X, Y)

    if Y is X or Y is None:
        # Validate once and share the result between both names.
        X = Y = check_array(X, accept_sparse='csr', dtype=dtype)
    else:
        X = check_array(X, accept_sparse='csr', dtype=dtype)
        Y = check_array(Y, accept_sparse='csr', dtype=dtype)

    n_features_X, n_features_Y = X.shape[1], Y.shape[1]
    if n_features_X != n_features_Y:
        raise ValueError("Incompatible dimension for X and Y matrices: "
                         "X.shape[1] == %d while Y.shape[1] == %d" % (
                             n_features_X, n_features_Y))

    return X, Y
def check_paired_arrays(X, Y):
    """Validate X and Y for paired (element-wise) distance computations.

    Performs the same conversion and validation as
    ``check_pairwise_arrays`` and additionally requires the two arrays
    to have identical shapes.

    Parameters
    ----------
    X : {array-like, sparse matrix}, shape (n_samples_a, n_features)

    Y : {array-like, sparse matrix}, shape (n_samples_b, n_features)

    Returns
    -------
    safe_X, safe_Y : validated arrays of identical shape; safe_Y aliases
        safe_X when Y was None.
    """
    X, Y = check_pairwise_arrays(X, Y)
    if X.shape == Y.shape:
        return X, Y
    raise ValueError("X and Y should be of same shape. They were "
                     "respectively %r and %r long." % (X.shape, Y.shape))
# Pairwise distances
def euclidean_distances(X, Y=None, Y_norm_squared=None, squared=False):
    """Compute the matrix of euclidean distances between rows of X and Y.

    Uses the expansion ``dist(x, y)^2 = dot(x, x) - 2 * dot(x, y) +
    dot(y, y)``, which is efficient for sparse data and lets the
    right-most term be pre-computed, but is not the most precise way of
    doing this computation: the returned matrix may not be exactly
    symmetric.

    Read more in the :ref:`User Guide <metrics>`.

    Parameters
    ----------
    X : {array-like, sparse matrix}, shape (n_samples_1, n_features)

    Y : {array-like, sparse matrix}, shape (n_samples_2, n_features)

    Y_norm_squared : array-like, shape (n_samples_2, ), optional
        Pre-computed dot-products of vectors in Y (e.g.,
        ``(Y**2).sum(axis=1)``)

    squared : boolean, optional
        Return squared Euclidean distances.

    Returns
    -------
    distances : {array, sparse matrix}, shape (n_samples_1, n_samples_2)

    Examples
    --------
    >>> from sklearn.metrics.pairwise import euclidean_distances
    >>> X = [[0, 1], [1, 1]]
    >>> euclidean_distances(X, X)
    array([[ 0.,  1.],
           [ 1.,  0.]])
    >>> euclidean_distances(X, [[0, 0]])
    array([[ 1.        ],
           [ 1.41421356]])

    See also
    --------
    paired_distances : distances betweens pairs of elements of X and Y.
    """
    X, Y = check_pairwise_arrays(X, Y)

    # Row norms of Y, laid out as a (1, n_samples_2) row vector.
    if Y_norm_squared is None:
        YY = row_norms(Y, squared=True)[np.newaxis, :]
    else:
        YY = check_array(Y_norm_squared)
        if YY.shape != (1, Y.shape[0]):
            raise ValueError(
                "Incompatible dimensions for Y and Y_norm_squared")

    # Row norms of X as a column vector; when X is Y, reuse YY transposed.
    XX = YY.T if X is Y else row_norms(X, squared=True)[:, np.newaxis]

    distances = safe_sparse_dot(X, Y.T, dense_output=True)
    distances *= -2
    distances += XX
    distances += YY
    # Floating point rounding can yield tiny negative values; clamp to 0.
    np.maximum(distances, 0, out=distances)

    if X is Y:
        # Distances between vectors and themselves are exactly zero.
        distances.flat[::distances.shape[0] + 1] = 0.0

    return distances if squared else np.sqrt(distances, out=distances)
def pairwise_distances_argmin_min(X, Y, axis=1, metric="euclidean",
                                  batch_size=500, metric_kwargs=None):
    """For each row of X, find the closest row of Y and its distance.

    Mostly equivalent to::

        (pairwise_distances(X, Y=Y, metric=metric).argmin(axis=axis),
         pairwise_distances(X, Y=Y, metric=metric).min(axis=axis))

    but processes the data in ``batch_size`` chunks of rows so far less
    memory is used, which is also faster for large arrays.

    Parameters
    ----------
    X, Y : {array-like, sparse matrix}
        Arrays containing points. Respective shapes (n_samples1,
        n_features) and (n_samples2, n_features).

    axis : int, optional, default 1
        Axis along which the argmin and distances are to be computed.

    metric : string or callable, default 'euclidean'
        Any metric from scikit-learn or scipy.spatial.distance, or a
        callable taking two arrays and returning one distance value.
        Distance matrices are not supported.

    batch_size : integer
        Rows of X and of Y processed per chunk. Larger values are faster
        but use more memory.

    metric_kwargs : dict, optional
        Keyword arguments to pass to the specified metric function.

    Returns
    -------
    argmin : numpy.ndarray
        Y[argmin[i], :] is the row in Y that is closest to X[i, :].

    distances : numpy.ndarray
        distances[i] is the distance between the i-th row in X and the
        argmin[i]-th row in Y.

    See also
    --------
    sklearn.metrics.pairwise_distances
    sklearn.metrics.pairwise_distances_argmin
    """
    dist_func = None
    if metric in PAIRWISE_DISTANCE_FUNCTIONS:
        dist_func = PAIRWISE_DISTANCE_FUNCTIONS[metric]
    elif not callable(metric) and not isinstance(metric, str):
        raise ValueError("'metric' must be a string or a callable")

    X, Y = check_pairwise_arrays(X, Y)

    if metric_kwargs is None:
        metric_kwargs = {}

    if axis == 0:
        X, Y = Y, X

    # Running minima over all Y-chunks seen so far.
    # NOTE: ``np.infty`` was removed in NumPy 2.0; ``np.inf`` is the
    # canonical spelling and behaves identically.
    indices = np.empty(X.shape[0], dtype=np.intp)
    values = np.empty(X.shape[0])
    values.fill(np.inf)

    for chunk_x in gen_batches(X.shape[0], batch_size):
        X_chunk = X[chunk_x, :]
        for chunk_y in gen_batches(Y.shape[0], batch_size):
            Y_chunk = Y[chunk_y, :]
            if dist_func is not None:
                if metric == 'euclidean':  # special case, for speed
                    # Squared distances via ||x-y||^2 =
                    # ||x||^2 - 2<x, y> + ||y||^2; sqrt deferred to the end.
                    d_chunk = safe_sparse_dot(X_chunk, Y_chunk.T,
                                              dense_output=True)
                    d_chunk *= -2
                    d_chunk += row_norms(X_chunk, squared=True)[:, np.newaxis]
                    d_chunk += row_norms(Y_chunk, squared=True)[np.newaxis, :]
                    np.maximum(d_chunk, 0, d_chunk)
                else:
                    d_chunk = dist_func(X_chunk, Y_chunk, **metric_kwargs)
            else:
                d_chunk = pairwise_distances(X_chunk, Y_chunk,
                                             metric=metric, **metric_kwargs)

            # Merge this chunk's minima into the running result.
            min_indices = d_chunk.argmin(axis=1)
            min_values = d_chunk[np.arange(chunk_x.stop - chunk_x.start),
                                 min_indices]

            flags = values[chunk_x] > min_values
            indices[chunk_x][flags] = min_indices[flags] + chunk_y.start
            values[chunk_x][flags] = min_values[flags]

    # The euclidean fast path accumulated squared distances; undo unless
    # the caller explicitly asked for squared output.
    if metric == "euclidean" and not metric_kwargs.get("squared", False):
        np.sqrt(values, values)
    return indices, values
def pairwise_distances_argmin(X, Y, axis=1, metric="euclidean",
                              batch_size=500, metric_kwargs=None):
    """For each row of X, return the index of the closest row of Y.

    Mostly equivalent to
    ``pairwise_distances(X, Y=Y, metric=metric).argmin(axis=axis)`` but
    uses much less memory and is faster for large arrays. Works with
    dense 2D arrays only.

    This is a thin wrapper over :func:`pairwise_distances_argmin_min`;
    see that function for the full description of the parameters.

    Parameters
    ----------
    X : array-like, shape (n_samples1, n_features)

    Y : array-like, shape (n_samples2, n_features)

    axis : int, optional, default 1
        Axis along which the argmin is computed.

    metric : string or callable
        Metric name understood by scikit-learn or
        scipy.spatial.distance, or a callable on pairs of rows.

    batch_size : integer
        Number of rows of X and of Y processed per chunk.

    metric_kwargs : dict
        Keyword arguments to pass to the specified metric function.

    Returns
    -------
    argmin : numpy.ndarray
        Y[argmin[i], :] is the row in Y that is closest to X[i, :].

    See also
    --------
    sklearn.metrics.pairwise_distances
    sklearn.metrics.pairwise_distances_argmin_min
    """
    kwargs = {} if metric_kwargs is None else metric_kwargs
    indices, _ = pairwise_distances_argmin_min(X, Y, axis, metric,
                                               batch_size, kwargs)
    return indices
def manhattan_distances(X, Y=None, sum_over_features=True,
                        size_threshold=5e8):
    """Compute the L1 (cityblock) distances between the vectors in X and Y.

    With ``sum_over_features=False`` the componentwise absolute
    differences are returned instead of their sums (dense input only).

    Read more in the :ref:`User Guide <metrics>`.

    Parameters
    ----------
    X : array_like
        An array with shape (n_samples_X, n_features).

    Y : array_like, optional
        An array with shape (n_samples_Y, n_features).

    sum_over_features : bool, default=True
        If True the function returns the pairwise distance matrix
        else it returns the componentwise L1 pairwise-distances.
        Not supported for sparse matrix inputs.

    size_threshold : int, default=5e8
        Unused parameter, kept for backward compatibility.

    Returns
    -------
    D : array
        Shape (n_samples_X, n_samples_Y) when summing over features,
        otherwise (n_samples_X * n_samples_Y, n_features) holding the
        componentwise absolute differences.
    """
    X, Y = check_pairwise_arrays(X, Y)

    if issparse(X) or issparse(Y):
        if not sum_over_features:
            raise TypeError("sum_over_features=%r not supported"
                            " for sparse matrices" % sum_over_features)

        # Accumulate into a dense output via the Cython helper.
        X = csr_matrix(X, copy=False)
        Y = csr_matrix(Y, copy=False)
        D = np.zeros((X.shape[0], Y.shape[0]))
        _sparse_manhattan(X.data, X.indices, X.indptr,
                          Y.data, Y.indices, Y.indptr,
                          X.shape[1], D)
        return D

    if sum_over_features:
        # Dense pairwise case: delegate to scipy's C implementation.
        return distance.cdist(X, Y, 'cityblock')

    # Componentwise case: broadcasted differences, abs taken in place.
    diffs = X[:, np.newaxis, :] - Y[np.newaxis, :, :]
    np.abs(diffs, out=diffs)
    return diffs.reshape((-1, X.shape[1]))
def cosine_distances(X, Y=None):
    """Compute cosine distance between samples in X and Y.

    Cosine distance is defined as 1.0 minus the cosine similarity.

    Read more in the :ref:`User Guide <metrics>`.

    Parameters
    ----------
    X : array_like, sparse matrix
        with shape (n_samples_X, n_features).

    Y : array_like, sparse matrix (optional)
        with shape (n_samples_Y, n_features).

    Returns
    -------
    distance matrix : array
        An array with shape (n_samples_X, n_samples_Y).

    See also
    --------
    sklearn.metrics.pairwise.cosine_similarity
    scipy.spatial.distance.cosine (dense matrices only)
    """
    # Evaluate 1.0 - cosine_similarity(X, Y) in place to avoid a copy.
    dist = cosine_similarity(X, Y)
    np.negative(dist, out=dist)
    dist += 1
    return dist
# Paired distances
def paired_euclidean_distances(X, Y):
    """
    Computes the paired euclidean distances between X and Y

    Read more in the :ref:`User Guide <metrics>`.

    Parameters
    ----------
    X : array-like, shape (n_samples, n_features)

    Y : array-like, shape (n_samples, n_features)

    Returns
    -------
    distances : ndarray (n_samples, )
    """
    X, Y = check_paired_arrays(X, Y)
    # Row-wise L2 norm of the differences: one distance per sample pair.
    return row_norms(X - Y)
def paired_manhattan_distances(X, Y):
    """Compute the L1 distances between corresponding rows of X and Y.

    Read more in the :ref:`User Guide <metrics>`.

    Parameters
    ----------
    X : array-like, shape (n_samples, n_features)

    Y : array-like, shape (n_samples, n_features)

    Returns
    -------
    distances : ndarray (n_samples, )
    """
    X, Y = check_paired_arrays(X, Y)
    diff = X - Y
    if not issparse(diff):
        return np.abs(diff).sum(axis=-1)
    # Sparse case: take |.| of the stored entries only, then row-sum and
    # flatten the resulting (n_samples, 1) matrix to a 1-D array.
    diff.data = np.abs(diff.data)
    return np.squeeze(np.array(diff.sum(axis=1)))
def paired_cosine_distances(X, Y):
    """
    Computes the paired cosine distances between X and Y

    Read more in the :ref:`User Guide <metrics>`.

    Parameters
    ----------
    X : array-like, shape (n_samples, n_features)

    Y : array-like, shape (n_samples, n_features)

    Returns
    -------
    distances : ndarray, shape (n_samples, )

    Notes
    ------
    The cosine distance is equivalent to the half the squared
    euclidean distance if each sample is normalized to unit norm
    """
    X, Y = check_paired_arrays(X, Y)
    # For unit vectors u, v: 1 - u.v == ||u - v||^2 / 2, so the cosine
    # distance is half the squared euclidean distance after normalizing.
    return .5 * row_norms(normalize(X) - normalize(Y), squared=True)
# Mapping from metric name to the paired-distance implementation; used by
# paired_distances() to resolve string-valued ``metric`` arguments.
PAIRED_DISTANCES = {
    'cosine': paired_cosine_distances,
    'euclidean': paired_euclidean_distances,
    'l2': paired_euclidean_distances,
    'l1': paired_manhattan_distances,
    'manhattan': paired_manhattan_distances,
    'cityblock': paired_manhattan_distances}
def paired_distances(X, Y, metric="euclidean", **kwds):
    """Compute distances between corresponding rows of X and Y.

    Computes the distances between (X[0], Y[0]), (X[1], Y[1]), etc...

    Read more in the :ref:`User Guide <metrics>`.

    Parameters
    ----------
    X : ndarray (n_samples, n_features)
        Array 1 for distance computation.

    Y : ndarray (n_samples, n_features)
        Array 2 for distance computation.

    metric : string or callable
        One of the keys of PAIRED_DISTANCES ("euclidean", "manhattan",
        "cosine", ...), or a callable taking two rows from X and
        returning a value indicating the distance between them.

    Returns
    -------
    distances : ndarray (n_samples, )

    Examples
    --------
    >>> from sklearn.metrics.pairwise import paired_distances
    >>> X = [[0, 1], [1, 1]]
    >>> Y = [[0, 1], [2, 1]]
    >>> paired_distances(X, Y)
    array([ 0.,  1.])

    See also
    --------
    pairwise_distances : pairwise distances.
    """
    if metric in PAIRED_DISTANCES:
        return PAIRED_DISTANCES[metric](X, Y)
    if callable(metric):
        # Check the matrix first (it is usually done by the metric).
        X, Y = check_paired_arrays(X, Y)
        distances = np.zeros(len(X))
        for i, (x, y) in enumerate(zip(X, Y)):
            distances[i] = metric(x, y)
        return distances
    raise ValueError('Unknown distance %s' % metric)
# Kernels
def linear_kernel(X, Y=None):
    """
    Compute the linear kernel between X and Y.

    Read more in the :ref:`User Guide <linear_kernel>`.

    Parameters
    ----------
    X : array of shape (n_samples_1, n_features)

    Y : array of shape (n_samples_2, n_features)

    Returns
    -------
    Gram matrix : array of shape (n_samples_1, n_samples_2)
    """
    X, Y = check_pairwise_arrays(X, Y)
    # The linear kernel is simply the matrix of pairwise dot products.
    return safe_sparse_dot(X, Y.T, dense_output=True)
def polynomial_kernel(X, Y=None, degree=3, gamma=None, coef0=1):
    """
    Compute the polynomial kernel between X and Y::

        K(X, Y) = (gamma <X, Y> + coef0)^degree

    Read more in the :ref:`User Guide <polynomial_kernel>`.

    Parameters
    ----------
    X : ndarray of shape (n_samples_1, n_features)

    Y : ndarray of shape (n_samples_2, n_features)

    gamma : float, default None
        If None, defaults to 1.0 / n_features.

    coef0 : int, default 1

    degree : int, default 3

    Returns
    -------
    Gram matrix : array of shape (n_samples_1, n_samples_2)
    """
    X, Y = check_pairwise_arrays(X, Y)
    if gamma is None:
        gamma = 1.0 / X.shape[1]

    # Evaluated in place to avoid intermediate copies of the Gram matrix.
    K = safe_sparse_dot(X, Y.T, dense_output=True)
    K *= gamma
    K += coef0
    K **= degree
    return K
def sigmoid_kernel(X, Y=None, gamma=None, coef0=1):
    """
    Compute the sigmoid kernel between X and Y::

        K(X, Y) = tanh(gamma <X, Y> + coef0)

    Read more in the :ref:`User Guide <sigmoid_kernel>`.

    Parameters
    ----------
    X : ndarray of shape (n_samples_1, n_features)

    Y : ndarray of shape (n_samples_2, n_features)

    gamma : float, default None
        If None, defaults to 1.0 / n_features.

    coef0 : int, default 1

    Returns
    -------
    Gram matrix: array of shape (n_samples_1, n_samples_2)
    """
    X, Y = check_pairwise_arrays(X, Y)
    if gamma is None:
        gamma = 1.0 / X.shape[1]

    # Evaluated in place to avoid intermediate copies of the Gram matrix.
    K = safe_sparse_dot(X, Y.T, dense_output=True)
    K *= gamma
    K += coef0
    np.tanh(K, K)   # compute tanh in-place
    return K
def rbf_kernel(X, Y=None, gamma=None):
    """
    Compute the rbf (gaussian) kernel between X and Y::

        K(x, y) = exp(-gamma ||x-y||^2)

    for each pair of rows x in X and y in Y.

    Read more in the :ref:`User Guide <rbf_kernel>`.

    Parameters
    ----------
    X : array of shape (n_samples_X, n_features)

    Y : array of shape (n_samples_Y, n_features)

    gamma : float, default None
        If None, defaults to 1.0 / n_features.

    Returns
    -------
    kernel_matrix : array of shape (n_samples_X, n_samples_Y)
    """
    X, Y = check_pairwise_arrays(X, Y)
    if gamma is None:
        gamma = 1.0 / X.shape[1]

    K = euclidean_distances(X, Y, squared=True)
    K *= -gamma
    np.exp(K, K)    # exponentiate K in-place
    return K
def cosine_similarity(X, Y=None, dense_output=True):
    """Compute cosine similarity between samples in X and Y.

    Cosine similarity, or the cosine kernel, computes similarity as the
    normalized dot product of X and Y:

        K(X, Y) = <X, Y> / (||X||*||Y||)

    On L2-normalized data, this function is equivalent to linear_kernel.

    Read more in the :ref:`User Guide <cosine_similarity>`.

    Parameters
    ----------
    X : ndarray or sparse array, shape: (n_samples_X, n_features)
        Input data.

    Y : ndarray or sparse array, shape: (n_samples_Y, n_features)
        Input data. If ``None``, the output will be the pairwise
        similarities between all samples in ``X``.

    dense_output : boolean (optional), default True
        Whether to return dense output even when the input is sparse. If
        ``False``, the output is sparse if both input arrays are sparse.

    Returns
    -------
    kernel matrix : array
        An array with shape (n_samples_X, n_samples_Y).
    """
    # to avoid recursive import

    X, Y = check_pairwise_arrays(X, Y)

    # L2-normalize the rows first: the dot product of unit vectors is
    # exactly the cosine of the angle between them.
    X_normalized = normalize(X, copy=True)
    if X is Y:
        # Reuse the normalized copy when both operands are the same array.
        Y_normalized = X_normalized
    else:
        Y_normalized = normalize(Y, copy=True)

    K = safe_sparse_dot(X_normalized, Y_normalized.T, dense_output=dense_output)

    return K
def additive_chi2_kernel(X, Y=None):
    """Computes the additive chi-squared kernel between observations in X and Y

    The chi-squared kernel is computed between each pair of rows in X and Y.  X
    and Y have to be non-negative. This kernel is most commonly applied to
    histograms.

    The chi-squared kernel is given by::

        k(x, y) = -Sum [(x - y)^2 / (x + y)]

    It can be interpreted as a weighted difference per entry.

    Read more in the :ref:`User Guide <chi2_kernel>`.

    Notes
    -----
    As the negative of a distance, this kernel is only conditionally positive
    definite.

    Parameters
    ----------
    X : array-like of shape (n_samples_X, n_features)

    Y : array of shape (n_samples_Y, n_features)

    Returns
    -------
    kernel_matrix : array of shape (n_samples_X, n_samples_Y)

    References
    ----------
    * Zhang, J. and Marszalek, M. and Lazebnik, S. and Schmid, C.
      Local features and kernels for classification of texture and object
      categories: A comprehensive study
      International Journal of Computer Vision 2007
      http://eprints.pascal-network.org/archive/00002309/01/Zhang06-IJCV.pdf

    See also
    --------
    chi2_kernel : The exponentiated version of the kernel, which is usually
        preferable.

    sklearn.kernel_approximation.AdditiveChi2Sampler : A Fourier approximation
        to this kernel.
    """
    if issparse(X) or issparse(Y):
        raise ValueError("additive_chi2 does not support sparse matrices.")
    X, Y = check_pairwise_arrays(X, Y)
    # Non-negativity is required for the kernel to be well-defined.
    if (X < 0).any():
        raise ValueError("X contains negative values.")
    if Y is not X and (Y < 0).any():
        raise ValueError("Y contains negative values.")

    # Output buffer filled in by the Cython helper; dtype follows X.
    result = np.zeros((X.shape[0], Y.shape[0]), dtype=X.dtype)
    _chi2_kernel_fast(X, Y, result)
    return result
def chi2_kernel(X, Y=None, gamma=1.):
    """Computes the exponential chi-squared kernel X and Y.

    The chi-squared kernel is computed between each pair of rows in X and Y.  X
    and Y have to be non-negative. This kernel is most commonly applied to
    histograms.

    The chi-squared kernel is given by::

        k(x, y) = exp(-gamma Sum [(x - y)^2 / (x + y)])

    It can be interpreted as a weighted difference per entry.

    Read more in the :ref:`User Guide <chi2_kernel>`.

    Parameters
    ----------
    X : array-like of shape (n_samples_X, n_features)

    Y : array of shape (n_samples_Y, n_features)

    gamma : float, default=1.
        Scaling parameter of the chi2 kernel.

    Returns
    -------
    kernel_matrix : array of shape (n_samples_X, n_samples_Y)

    References
    ----------
    * Zhang, J. and Marszalek, M. and Lazebnik, S. and Schmid, C.
      Local features and kernels for classification of texture and object
      categories: A comprehensive study
      International Journal of Computer Vision 2007
      http://eprints.pascal-network.org/archive/00002309/01/Zhang06-IJCV.pdf

    See also
    --------
    additive_chi2_kernel : The additive version of this kernel

    sklearn.kernel_approximation.AdditiveChi2Sampler : A Fourier approximation
        to the additive version of this kernel.
    """
    # Exponentiate the (negative) additive chi2 kernel in place.
    K = additive_chi2_kernel(X, Y)
    K *= gamma
    return np.exp(K, K)
# Helper functions - distance

# Mapping from metric name to the scikit-learn pairwise-distance
# implementation; consulted by pairwise_distances() and the argmin helpers.
PAIRWISE_DISTANCE_FUNCTIONS = {
    # If updating this dictionary, update the doc in both distance_metrics()
    # and also in pairwise_distances()!
    'cityblock': manhattan_distances,
    'cosine': cosine_distances,
    'euclidean': euclidean_distances,
    'l2': euclidean_distances,
    'l1': manhattan_distances,
    'manhattan': manhattan_distances, }
def distance_metrics():
    """Valid metrics for pairwise_distances.

    This function simply returns the valid pairwise distance metrics.
    It exists to allow for a description of the mapping for
    each of the valid strings.

    The valid distance metrics, and the function they map to, are:

    ============     ====================================
    metric           Function
    ============     ====================================
    'cityblock'      metrics.pairwise.manhattan_distances
    'cosine'         metrics.pairwise.cosine_distances
    'euclidean'      metrics.pairwise.euclidean_distances
    'l1'             metrics.pairwise.manhattan_distances
    'l2'             metrics.pairwise.euclidean_distances
    'manhattan'      metrics.pairwise.manhattan_distances
    ============     ====================================

    Read more in the :ref:`User Guide <metrics>`.
    """
    # Returns the shared module-level mapping (not a copy).
    return PAIRWISE_DISTANCE_FUNCTIONS
def _parallel_pairwise(X, Y, func, n_jobs, **kwds):
    """Break the pairwise matrix in n_jobs even slices
    and compute them in parallel"""
    if n_jobs < 0:
        # joblib convention: -1 means all CPUs, -2 all but one, etc.
        n_jobs = max(cpu_count() + 1 + n_jobs, 1)

    if Y is None:
        Y = X

    if n_jobs == 1:
        # Special case to avoid picklability checks in delayed
        return func(X, Y, **kwds)

    # TODO: in some cases, backend='threading' may be appropriate

    # Each worker computes all of X against one slice of Y's rows; the
    # resulting column blocks are stitched back together with hstack.
    fd = delayed(func)
    ret = Parallel(n_jobs=n_jobs, verbose=0)(
        fd(X, Y[s], **kwds)
        for s in gen_even_slices(Y.shape[0], n_jobs))

    return np.hstack(ret)
def _pairwise_callable(X, Y, metric, **kwds):
    """Handle the callable case for pairwise_{distances,kernels}
    """
    X, Y = check_pairwise_arrays(X, Y)

    if X is Y:
        # Only calculate metric for upper triangle
        out = np.zeros((X.shape[0], Y.shape[0]), dtype='float')
        iterator = itertools.combinations(range(X.shape[0]), 2)
        for i, j in iterator:
            out[i, j] = metric(X[i], Y[j], **kwds)

        # Make symmetric
        # NB: out += out.T will produce incorrect results
        out = out + out.T

        # Calculate diagonal
        # NB: nonzero diagonals are allowed for both metrics and kernels
        for i in range(X.shape[0]):
            x = X[i]
            out[i, i] = metric(x, x, **kwds)

    else:
        # Calculate all cells
        out = np.empty((X.shape[0], Y.shape[0]), dtype='float')
        iterator = itertools.product(range(X.shape[0]), range(Y.shape[0]))
        for i, j in iterator:
            out[i, j] = metric(X[i], Y[j], **kwds)

    return out
# Metric names accepted by pairwise_distances: the scikit-learn
# implementations plus the metrics handled by scipy.spatial.distance.
_VALID_METRICS = ['euclidean', 'l2', 'l1', 'manhattan', 'cityblock',
                  'braycurtis', 'canberra', 'chebyshev', 'correlation',
                  'cosine', 'dice', 'hamming', 'jaccard', 'kulsinski',
                  'mahalanobis', 'matching', 'minkowski', 'rogerstanimoto',
                  'russellrao', 'seuclidean', 'sokalmichener',
                  'sokalsneath', 'sqeuclidean', 'yule', "wminkowski"]
def pairwise_distances(X, Y=None, metric="euclidean", n_jobs=1, **kwds):
""" Compute the distance matrix from a vector array X and optional Y.
This method takes either a vector array or a distance matrix, and returns
a distance matrix. If the input is a vector array, the distances are
computed. If the input is a distances matrix, it is returned instead.
This method provides a safe way to take a distance matrix as input, while
preserving compatibility with many other algorithms that take a vector
array.
If Y is given (default is None), then the returned matrix is the pairwise
distance between the arrays from both X and Y.
Valid values for metric are:
- From scikit-learn: ['cityblock', 'cosine', 'euclidean', 'l1', 'l2',
'manhattan']. These metrics support sparse matrix inputs.
- From scipy.spatial.distance: ['braycurtis', 'canberra', 'chebyshev',
'correlation', 'dice', 'hamming', 'jaccard', 'kulsinski', 'mahalanobis',
'matching', 'minkowski', 'rogerstanimoto', 'russellrao', 'seuclidean',
'sokalmichener', 'sokalsneath', 'sqeuclidean', 'yule']
See the documentation for scipy.spatial.distance for details on these
metrics. These metrics do not support sparse matrix inputs.
Note that in the case of 'cityblock', 'cosine' and 'euclidean' (which are
valid scipy.spatial.distance metrics), the scikit-learn implementation
will be used, which is faster and has support for sparse matrices (except
for 'cityblock'). For a verbose description of the metrics from
scikit-learn, see the __doc__ of the sklearn.pairwise.distance_metrics
function.
Read more in the :ref:`User Guide <metrics>`.
Parameters
----------
X : array [n_samples_a, n_samples_a] if metric == "precomputed", or, \
[n_samples_a, n_features] otherwise
Array of pairwise distances between samples, or a feature array.
Y : array [n_samples_b, n_features], optional
An optional second feature array. Only allowed if metric != "precomputed".
metric : string, or callable
The metric to use when calculating distance between instances in a
feature array. If metric is a string, it must be one of the options
allowed by scipy.spatial.distance.pdist for its metric parameter, or
a metric listed in pairwise.PAIRWISE_DISTANCE_FUNCTIONS.
If metric is "precomputed", X is assumed to be a distance matrix.
Alternatively, if metric is a callable function, it is called on each
pair of instances (rows) and the resulting value recorded. The callable
should take two arrays from X as input and return a value indicating
the distance between them.
n_jobs : int
The number of jobs to use for the computation. This works by breaking
down the pairwise matrix into n_jobs even slices and computing them in
parallel.
If -1 all CPUs are used. If 1 is given, no parallel computing code is
used at all, which is useful for debugging. For n_jobs below -1,
(n_cpus + 1 + n_jobs) are used. Thus for n_jobs = -2, all CPUs but one
are used.
`**kwds` : optional keyword parameters
Any further parameters are passed directly to the distance function.
If using a scipy.spatial.distance metric, the parameters are still
metric dependent. See the scipy docs for usage examples.
Returns
-------
D : array [n_samples_a, n_samples_a] or [n_samples_a, n_samples_b]
A distance matrix D such that D_{i, j} is the distance between the
ith and jth vectors of the given matrix X, if Y is None.
If Y is not None, then D_{i, j} is the distance between the ith array
from X and the jth array from Y.
"""
if (metric not in _VALID_METRICS and
not callable(metric) and metric != "precomputed"):
raise ValueError("Unknown metric %s. "
"Valid metrics are %s, or 'precomputed', or a "
"callable" % (metric, _VALID_METRICS))
if metric == "precomputed":
return X
elif metric in PAIRWISE_DISTANCE_FUNCTIONS:
func = PAIRWISE_DISTANCE_FUNCTIONS[metric]
elif callable(metric):
func = partial(_pairwise_callable, metric=metric, **kwds)
else:
if issparse(X) or issparse(Y):
raise TypeError("scipy distance metrics do not"
" support sparse matrices.")
X, Y = check_pairwise_arrays(X, Y)
if n_jobs == 1 and X is Y:
return distance.squareform(distance.pdist(X, metric=metric,
**kwds))
func = partial(distance.cdist, metric=metric, **kwds)
return _parallel_pairwise(X, Y, func, n_jobs, **kwds)
# Helper functions - distance
# Canonical mapping from kernel-name strings to the functions that
# implement them; consumed by pairwise_kernels() and kernel_metrics().
PAIRWISE_KERNEL_FUNCTIONS = {
    # If updating this dictionary, update the doc in both distance_metrics()
    # and also in pairwise_distances()!
    'additive_chi2': additive_chi2_kernel,
    'chi2': chi2_kernel,
    'linear': linear_kernel,
    # 'poly' and 'polynomial' are aliases for the same kernel.
    'polynomial': polynomial_kernel,
    'poly': polynomial_kernel,
    'rbf': rbf_kernel,
    'sigmoid': sigmoid_kernel,
    'cosine': cosine_similarity, }
def kernel_metrics():
    """Return the mapping of valid pairwise-kernel names to functions.

    This simply exposes ``PAIRWISE_KERNEL_FUNCTIONS``; it exists to give
    a documented, verbose description of the mapping for each accepted
    string:

    =============== ========================================
    metric          Function
    =============== ========================================
    'additive_chi2' sklearn.pairwise.additive_chi2_kernel
    'chi2'          sklearn.pairwise.chi2_kernel
    'linear'        sklearn.pairwise.linear_kernel
    'poly'          sklearn.pairwise.polynomial_kernel
    'polynomial'    sklearn.pairwise.polynomial_kernel
    'rbf'           sklearn.pairwise.rbf_kernel
    'sigmoid'       sklearn.pairwise.sigmoid_kernel
    'cosine'        sklearn.pairwise.cosine_similarity
    =============== ========================================

    Read more in the :ref:`User Guide <metrics>`.
    """
    return PAIRWISE_KERNEL_FUNCTIONS
# For each kernel name, the set of keyword parameters that
# pairwise_kernels(..., filter_params=True) is allowed to forward to the
# kernel function; an empty tuple means the kernel takes no extra
# parameters.
KERNEL_PARAMS = {
    "additive_chi2": (),
    "chi2": (),
    "cosine": (),
    "exp_chi2": frozenset(["gamma"]),
    "linear": (),
    "poly": frozenset(["gamma", "degree", "coef0"]),
    "polynomial": frozenset(["gamma", "degree", "coef0"]),
    "rbf": frozenset(["gamma"]),
    "sigmoid": frozenset(["gamma", "coef0"]),
    }
def pairwise_kernels(X, Y=None, metric="linear", filter_params=False,
                     n_jobs=1, **kwds):
    """Compute the kernel between arrays X and optional array Y.

    Takes either a vector array or a precomputed kernel matrix and
    returns a kernel matrix.  If Y is given, the result holds the
    pairwise kernels between rows of X and rows of Y; otherwise the
    kernels between the rows of X.

    Valid values for metric are::
        ['rbf', 'sigmoid', 'polynomial', 'poly', 'linear', 'cosine']

    Read more in the :ref:`User Guide <metrics>`.

    Parameters
    ----------
    X : array [n_samples_a, n_samples_a] if metric == "precomputed", or, \
             [n_samples_a, n_features] otherwise
        Array of pairwise kernels between samples, or a feature array.
    Y : array [n_samples_b, n_features]
        A second feature array only if X has shape
        [n_samples_a, n_features].
    metric : string, or callable
        A name from ``PAIRWISE_KERNEL_FUNCTIONS``, "precomputed" (X is
        returned unchanged), or a callable taking two rows and returning
        their kernel value.
    filter_params: boolean
        Whether to filter invalid parameters or not.
    n_jobs : int
        Number of parallel jobs; 1 disables parallelism, -1 uses all
        CPUs, and values below -1 use (n_cpus + 1 + n_jobs) workers.
    `**kwds` : optional keyword parameters
        Passed through to the kernel function.

    Returns
    -------
    K : array [n_samples_a, n_samples_a] or [n_samples_a, n_samples_b]
        K[i, j] is the kernel between the ith row of X and the jth row
        of Y (or of X itself when Y is None).

    Notes
    -----
    If metric is 'precomputed', Y is ignored and X is returned.
    """
    if metric == "precomputed":
        return X

    if metric in PAIRWISE_KERNEL_FUNCTIONS:
        if filter_params:
            # Keep only the keyword arguments this kernel understands.
            allowed = KERNEL_PARAMS[metric]
            kwds = dict((k, kwds[k]) for k in kwds if k in allowed)
        func = PAIRWISE_KERNEL_FUNCTIONS[metric]
    elif callable(metric):
        func = partial(_pairwise_callable, metric=metric, **kwds)
    else:
        raise ValueError("Unknown kernel %r" % metric)

    return _parallel_pairwise(X, Y, func, n_jobs, **kwds)
| bsd-3-clause |
lshain-android-source/external-blktrace | btt/btt_plot.py | 43 | 11282 | #! /usr/bin/env python
#
# btt_plot.py: Generate matplotlib plots for BTT generate data files
#
# (C) Copyright 2009 Hewlett-Packard Development Company, L.P.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
"""
btt_plot.py: Generate matplotlib plots for BTT generated data files
Files handled:
AQD - Average Queue Depth Running average of queue depths
BNOS - Block numbers accessed Markers for each block
Q2D - Queue to Issue latencies Running averages
D2C - Issue to Complete latencies Running averages
Q2C - Queue to Complete latencies Running averages
Usage:
btt_plot_aqd.py equivalent to: btt_plot.py -t aqd <type>=aqd
btt_plot_bnos.py equivalent to: btt_plot.py -t bnos <type>=bnos
btt_plot_q2d.py equivalent to: btt_plot.py -t q2d <type>=q2d
btt_plot_d2c.py equivalent to: btt_plot.py -t d2c <type>=d2c
btt_plot_q2c.py equivalent to: btt_plot.py -t q2c <type>=q2c
Arguments:
[ -A | --generate-all ] Default: False
[ -L | --no-legend ] Default: Legend table produced
[ -o <file> | --output=<file> ] Default: <type>.png
[ -T <string> | --title=<string> ] Default: Based upon <type>
[ -v | --verbose ] Default: False
<data-files...>
The -A (--generate-all) argument is different: when this is specified,
an attempt is made to generate default plots for all 5 types (aqd, bnos,
q2d, d2c and q2c). It will find files with the appropriate suffix for
each type ('aqd.dat' for example). If such files are found, a plot for
that type will be made. The output file name will be the default for
each type. The -L (--no-legend) option will be obeyed for all plots,
but the -o (--output) and -T (--title) options will be ignored.
"""
__author__ = 'Alan D. Brunelle <alan.brunelle@hp.com>'
#------------------------------------------------------------------------------
import matplotlib
matplotlib.use('Agg')
import getopt, glob, os, sys
import matplotlib.pyplot as plt
# Module-wide plotting state; the option values below are overridden by
# parse_args() from the command line.
plot_size = [10.9, 8.4] # inches...
# -L/--no-legend clears add_legend; -A/--generate-all sets generate_all;
# -o, -T, -t and -v fill in output_file, title_str, type and verbose.
add_legend = True
generate_all = False
output_file = None
title_str = None
type = None
verbose = False
# Supported plot types and the per-type wrapper script names
# (btt_plot_aqd.py, ...), which select the type from argv[0].
types = [ 'aqd', 'q2d', 'd2c', 'q2c', 'bnos' ]
progs = [ 'btt_plot_%s.py' % t for t in types ]
# Extract the device/base portion between the first '_' and the last '_'
# of a data file name, e.g. 'sys_8,0_q2c.dat' -> '8,0'.
get_base = lambda file: file[file.find('_')+1:file.rfind('_')]
#------------------------------------------------------------------------------
def fatal(msg):
    """Print a FATAL error message to stderr and exit with status 1.

    (Python 2 print-statement syntax; this script targets Python 2.)
    """
    print >>sys.stderr, 'FATAL: %s' % msg
    sys.exit(1)
#----------------------------------------------------------------------
def get_data(files):
    """Retrieve data from files provided.
    Returns a database containing:
    'min_x', 'max_x' - Minimum and maximum X values found
    'min_y', 'max_y' - Minimum and maximum Y values found
    'x', 'y' - X & Y value arrays
    'ax', 'ay' - Running average over X & Y --
    if > 10 values provided...
    Each input file contributes one db[file] entry; the min/max keys are
    global over all files so every curve shares the same axis limits.
    """
    #--------------------------------------------------------------
    def check(mn, mx, v):
        """Returns new min, max, and float value for those passed in"""
        v = float(v)
        if mn == None or v < mn: mn = v
        if mx == None or v > mx: mx = v
        return mn, mx, v
    #--------------------------------------------------------------
    def avg(xs, ys):
        """Computes running average for Xs and Ys"""
        #------------------------------------------------------
        def _avg(vals):
            """Computes average for array of values passed"""
            total = 0.0
            for val in vals:
                total += val
            return total / len(vals)
        #------------------------------------------------------
        # Small data sets are returned unsmoothed.
        if len(xs) < 1000:
            return xs, ys
        axs = [xs[0]]
        ays = [ys[0]]
        _xs = [xs[0]]
        _ys = [ys[0]]
        # Bucket width: 1% of the total X span; each full bucket is
        # collapsed into a single averaged (x, y) point.
        x_range = (xs[-1] - xs[0]) / 100
        for idx in range(1, len(ys)):
            if (xs[idx] - _xs[0]) > x_range:
                axs.append(_avg(_xs))
                ays.append(_avg(_ys))
                del _xs, _ys
                _xs = [xs[idx]]
                _ys = [ys[idx]]
            else:
                _xs.append(xs[idx])
                _ys.append(ys[idx])
        # Flush the final partial bucket (only if it has >1 sample).
        if len(_xs) > 1:
            axs.append(_avg(_xs))
            ays.append(_avg(_ys))
        return axs, ays
    #--------------------------------------------------------------
    global verbose
    db = {}
    min_x = max_x = min_y = max_y = None
    for file in files:
        if not os.path.exists(file):
            fatal('%s not found' % file)
        elif verbose:
            print 'Processing %s' % file
        xs = []
        ys = []
        for line in open(file, 'r'):
            f = line.rstrip().split(None)
            # Skip comment lines (leading '#') and lines with fewer than
            # two whitespace-separated fields.
            if line.find('#') == 0 or len(f) < 2:
                continue
            (min_x, max_x, x) = check(min_x, max_x, f[0])
            (min_y, max_y, y) = check(min_y, max_y, f[1])
            xs.append(x)
            ys.append(y)
        db[file] = {'x':xs, 'y':ys}
        # Running averages only make sense with enough samples.
        if len(xs) > 10:
            db[file]['ax'], db[file]['ay'] = avg(xs, ys)
        else:
            db[file]['ax'] = db[file]['ay'] = None
    db['min_x'] = min_x
    db['max_x'] = max_x
    db['min_y'] = min_y
    db['max_y'] = max_y
    return db
#----------------------------------------------------------------------
def parse_args(args):
    """Parse command line arguments.
    Returns list of (data) files that need to be processed -- /unless/
    the -A (--generate-all) option is passed, in which case superfluous
    data files are ignored...
    Also derives the plot type from the invoked program name when run
    through one of the btt_plot_<type>.py wrapper names.
    """
    global add_legend, output_file, title_str, type, verbose
    global generate_all
    # Strip any leading directory from argv[0].
    prog = args[0][args[0].rfind('/')+1:]
    if prog == 'btt_plot.py':
        pass
    elif not prog in progs:
        fatal('%s not a valid command name' % prog)
    else:
        # btt_plot_<type>.py implies the type directly.
        type = prog[prog.rfind('_')+1:prog.rfind('.py')]
    s_opts = 'ALo:t:T:v'
    l_opts = [ 'generate-all', 'type', 'no-legend', 'output', 'title',
    'verbose' ]
    try:
        (opts, args) = getopt.getopt(args[1:], s_opts, l_opts)
    except getopt.error, msg:
        # Python 2 exception syntax; bad options abort with usage text.
        print >>sys.stderr, msg
        fatal(__doc__)
    for (o, a) in opts:
        if o in ('-A', '--generate-all'):
            generate_all = True
        elif o in ('-L', '--no-legend'):
            add_legend = False
        elif o in ('-o', '--output'):
            output_file = a
        elif o in ('-t', '--type'):
            if not a in types:
                fatal('Type %s not supported' % a)
            type = a
        elif o in ('-T', '--title'):
            title_str = a
        elif o in ('-v', '--verbose'):
            verbose = True
    # A type is mandatory unless -A will iterate over all of them.
    if type == None and not generate_all:
        fatal('Need type of data files to process - (-t <type>)')
    return args
#------------------------------------------------------------------------------
def gen_title(fig, type, title_str):
    """Place a centered title at the top of the figure.

    An explicit title_str wins; otherwise a default is picked from the
    plot type.
    """
    default_titles = {
        'aqd':  'Average Queue Depth',
        'bnos': 'Block Numbers Accessed',
        'q2d':  'Queue (Q) To Issue (D) Average Latencies',
        'd2c':  'Issue (D) To Complete (C) Average Latencies',
        'q2c':  'Queue (Q) To Complete (C) Average Latencies',
    }
    if title_str is None:
        title_str = default_titles.get(type)
    title = fig.text(.5, .95, title_str, horizontalalignment='center')
    title.set_fontsize('large')
#------------------------------------------------------------------------------
def gen_labels(db, ax, type):
    """Configure axis limits, labels and the grid for the plot.

    Limits are padded by 10% of the observed data range on each side;
    the Y-axis label depends on the plot type.
    """
    pad_x = 0.1 * (db['max_x'] - db['min_x'])
    pad_y = 0.1 * (db['max_y'] - db['min_y'])
    ax.set_xlim(db['min_x'] - pad_x, db['max_x'] + pad_x)
    ax.set_ylim(db['min_y'] - pad_y, db['max_y'] + pad_y)
    ax.set_xlabel('Runtime (seconds)')
    ax.grid(True)
    if type == 'aqd':
        ylabel = 'Number of Requests Queued'
    elif type == 'bnos':
        ylabel = 'Block Number'
    else:
        ylabel = 'Seconds'
    ax.set_ylabel(ylabel)
#------------------------------------------------------------------------------
def generate_output(type, db):
    """Generate the output plot based upon the type and database"""
    #----------------------------------------------------------------------
    def color(idx, style):
        """Returns a color/symbol type based upon the index passed."""
        colors = [ 'b', 'g', 'r', 'c', 'm', 'y', 'k' ]
        l_styles = [ '-', ':', '--', '-.' ]
        m_styles = [ 'o', '+', '.', ',', 's', 'v', 'x', '<', '>' ]
        color = colors[idx % len(colors)]
        # NOTE(review): idx / len(...) is Python 2 integer division; under
        # Python 3 this would produce a float index — confirm Python 2 only.
        if style == 'line':
            style = l_styles[(idx / len(l_styles)) % len(l_styles)]
        elif style == 'marker':
            style = m_styles[(idx / len(m_styles)) % len(m_styles)]
        return '%s%s' % (color, style)
    #----------------------------------------------------------------------
    def gen_legends(a, legends):
        # NOTE(review): parameter 'a' is unused; the closure reads 'ax'
        # from the enclosing scope instead.
        leg = ax.legend(legends, 'best', shadow=True)
        frame = leg.get_frame()
        frame.set_facecolor('0.80')
        for t in leg.get_texts():
            t.set_fontsize('xx-small')
    #----------------------------------------------------------------------
    global add_legend, output_file, title_str, verbose
    # Output file defaults to <type>.png unless -o supplied one.
    if output_file != None:
        ofile = output_file
    else:
        ofile = '%s.png' % type
    if verbose:
        print 'Generating plot into %s' % ofile
    fig = plt.figure(figsize=plot_size)
    ax = fig.add_subplot(111)
    gen_title(fig, type, title_str)
    gen_labels(db, ax, type)
    idx = 0
    if add_legend:
        legends = []
    else:
        legends = None
    # Collect and sort the per-file entries, skipping the global
    # min/max bookkeeping keys (db.iterkeys() is Python 2 only).
    keys = []
    for file in db.iterkeys():
        if not file in ['min_x', 'max_x', 'min_y', 'max_y']:
            keys.append(file)
    keys.sort()
    for file in keys:
        dat = db[file]
        # bnos plots raw points as markers; the latency/queue-depth types
        # plot the running averages as lines (skipping unsmoothed entries).
        if type == 'bnos':
            ax.plot(dat['x'], dat['y'], color(idx, 'marker'),
                    markersize=1)
        elif dat['ax'] == None:
            continue # Don't add legend
        else:
            ax.plot(dat['ax'], dat['ay'], color(idx, 'line'),
                    linewidth=1.0)
        if add_legend:
            legends.append(get_base(file))
        idx += 1
    if add_legend and len(legends) > 0:
        gen_legends(ax, legends)
    plt.savefig(ofile)
#------------------------------------------------------------------------------
def get_files(type):
    """Return the data files to process for -A (--generate-all) mode.

    For 'bnos' we take every *c.dat file that is not one of the latency
    outputs (q2q/d2d/q2c/d2c); for any other type the files simply match
    *<type>.dat in the current directory.
    """
    if type != 'bnos':
        return glob.glob('*%s.dat' % type)
    latency_tags = ('q2q', 'd2d', 'q2c', 'd2c')
    return [fn for fn in glob.glob('*c.dat')
            if not any(fn.find(t) >= 0 for t in latency_tags)]
#------------------------------------------------------------------------------
if __name__ == '__main__':
    files = parse_args(sys.argv)
    if generate_all:
        # -A mode: ignore any files given on the command line and plot
        # every type for which matching *.dat files exist, using default
        # output names and titles.
        output_file = title_str = type = None
        for t in types:
            files = get_files(t)
            if len(files) == 0:
                continue
            elif t != 'bnos':
                generate_output(t, get_data(files))
                continue
            # bnos gets one plot per data file, titled and named after
            # the device/base extracted from the file name.
            for file in files:
                base = get_base(file)
                title_str = 'Block Numbers Accessed: %s' % base
                output_file = 'bnos_%s.png' % base
                generate_output(t, get_data([file]))
    elif len(files) < 1:
        fatal('Need data files to process')
    else:
        # Normal mode: one plot of the selected type over all files.
        generate_output(type, get_data(files))
    sys.exit(0)
| gpl-2.0 |
Vishruit/DDP_models | code/create_label_tags.py | 1 | 2811 | import random
import numpy as np
import os
import shutil
import time
import sys
np.random.seed(1)
from PIL import Image
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
import scipy.misc
def getJPGFilePaths(directory, excludeFiles):
    """Recursively collect files under *directory* whose extension
    matches the module-level ``ext``, skipping any file name equal to
    *excludeFiles*.

    Returns three parallel lists: full file paths, bare file names, and
    the directories containing them.
    """
    global ext
    paths = []
    names = []
    locs = []
    suffix = "." + ext
    for root, _subdirs, files in os.walk(directory):
        for fname in files:
            if fname.endswith(suffix) and fname != excludeFiles:
                # Join the directory and name to form the full path.
                paths.append(os.path.join(root, fname))
                names.append(fname)
                locs.append(os.path.join(root))
        # break  # To do a deep search in all the sub-directories
    return paths, names, locs
def ensure_dir(f):
    """Create the directory portion of path *f* if it does not exist.

    Returns 1 when the directory was already present, 0 when it had to
    be created.
    """
    parent = os.path.dirname(f)
    if os.path.exists(parent):
        return 1
    os.makedirs(parent)
    return 0
def apply_label_tag(pixel):
    """Map a grayscale label pixel onto a class tag in 1..11.

    Intensity 0 is the background class (tag 1); every other intensity
    is bucketed in bands of 25 starting at tag 2, saturating at tag 11.
    """
    if pixel == 0:
        return 1
    return min(int(pixel / 25) + 2, 11)
def create_label_tags(imgfilepaths, imgfilenames, imgfilelocs):
    """Walk every unique image directory and re-map label-image pixel
    values into class tags via apply_label_tag().

    NOTE(review): this function appears broken as written — see the
    inline notes on ``fpath`` and ``temp`` below; confirm against the
    intended behavior before relying on it.
    """
    global datasetLocation, ext, height, width
    imgfilelocs = np.unique(imgfilelocs)
    num_vids = len(imgfilelocs)
    i = 1
    for fileloc in imgfilelocs:
        print fileloc
        # Rebuild the absolute directory path from its part relative to
        # datasetLocation.
        relative_folderpath = fileloc[len(datasetLocation):len(fileloc)]
        data_fileloc_actual = datasetLocation + relative_folderpath
        [file_paths, file_name, file_loc] = getJPGFilePaths(data_fileloc_actual,[])
        totalSize = len(file_name)
        print totalSize
        framesToLoad = range(1,totalSize+1,1)
        framesToLoad = np.sort(framesToLoad)
        # NOTE(review): ``fpath`` is never defined in this scope — this
        # line raises NameError; presumably it should iterate file_paths.
        src_file_name_data = fpath
        im = Image.open(src_file_name_data)
        temp = np.zeros(im.size)
        im_array = np.array(im)
        # Remap every pixel to its class tag (overwrites im_array).
        # NOTE(review): this inner loop also clobbers the outer counter i.
        for i in range(im_array.shape[0]):
            for j in range(im_array.shape[1]):
                im_array[i,j] = apply_label_tag(im_array[i,j])
        # NOTE(review): saves the all-zeros ``temp`` array, discarding the
        # remapped ``im_array`` — looks like a bug; confirm intent.
        scipy.misc.imsave(src_file_name_data, temp)
        print(i)
        i += 1
    pass
# Script configuration: no names are excluded, label images are PNGs,
# and the dataset root is hard-coded below.
excludeFiles = []
ext = 'png'
# height, width = 256,320
datasetLocation = '/partition1/vishruit/soft/DATA_caffe/DATA_mapped'
# Actual filepaths and filenames list
[file_paths_label, file_names_label, file_locs_label] = getJPGFilePaths(datasetLocation, excludeFiles)
# # Actual filepaths and filenames list
print file_paths_label[1]
# print file_paths_data[1]
# numInstances = len(file_paths)
print 'Start'
# Re-map every discovered label image in place.
create_label_tags(file_paths_label, file_names_label, file_locs_label)
print 'All is well !!! Finished.'
| gpl-3.0 |
JesseLivezey/pylearn2 | pylearn2/packaged_dependencies/theano_linear/unshared_conv/localdot.py | 39 | 5044 | """
WRITEME
"""
import logging
from ..linear import LinearTransform
from .unshared_conv import FilterActs, ImgActs
from theano.compat.six.moves import xrange
from theano.sandbox import cuda
if cuda.cuda_available:
import gpu_unshared_conv # register optimizations
import numpy as np
import warnings
try:
import matplotlib.pyplot as plt
except (RuntimeError, ImportError, TypeError) as matplotlib_exception:
warnings.warn("Unable to import matplotlib. Some features unavailable. "
"Original exception: " + str(matplotlib_exception))
logger = logging.getLogger(__name__)
class LocalDot(LinearTransform):
    """
    LocalDot is an linear operation computationally similar to
    convolution in the spatial domain, except that whereas convolution
    applying a single filter or set of filters across an image, the
    LocalDot has different filterbanks for different points in the image.
    Mathematically, this is a general linear transform except for a
    restriction that filters are 0 outside of a spatially localized patch
    within the image.
    Image shape is 5-tuple:
    color_groups
    colors_per_group
    rows
    cols
    images
    Filterbank shape is 7-tuple (!)
    0 row_positions
    1 col_positions
    2 colors_per_group
    3 height
    4 width
    5 color_groups
    6 filters_per_group
    The result of left-multiplication a 5-tuple with shape:
    filter_groups
    filters_per_group
    row_positions
    col_positions
    images
    Parameters
    ----------
    filters : WRITEME
        Shared variable holding the 7-tuple-shaped filterbank described
        above (shape taken from its value unless filters_shape is given).
    irows : WRITEME
        Image rows
    icols : WRITEME
        Image columns
    subsample : WRITEME
        (row, col) module stride; both entries must be equal.
    padding_start : WRITEME
    filters_shape : WRITEME
        Explicit 7-tuple filter shape; defaults to filters' value shape.
    message : WRITEME
        Label used by print_status(); defaults to filters.name.
    """
    def __init__(self, filters, irows, icols=None,
                 subsample=(1, 1),
                 padding_start=None,
                 filters_shape=None,
                 message=""):
        LinearTransform.__init__(self, [filters])
        self._filters = filters
        if filters_shape is None:
            self._filters_shape = tuple(filters.get_value(borrow=True).shape)
        else:
            self._filters_shape = tuple(filters_shape)
        self._irows = irows
        # icols defaults to irows; the GPU path requires square images.
        if icols is None:
            self._icols = irows
        else:
            self._icols = icols
        if self._icols != self._irows:
            raise NotImplementedError('GPU code at least needs square imgs')
        self._subsample = tuple(subsample)
        self._padding_start = padding_start
        if len(self._filters_shape) != 7:
            raise TypeError('need 7-tuple filter shape', self._filters_shape)
        if self._subsample[0] != self._subsample[1]:
            raise ValueError('subsampling must be same in rows and cols')
        # Forward and transpose ops share the same module stride.
        self._filter_acts = FilterActs(self._subsample[0])
        self._img_acts = ImgActs(module_stride=self._subsample[0])
        if message:
            self._message = message
        else:
            self._message = filters.name
    def rmul(self, x):
        """
        Apply the local filterbank to a 5-D image tensor x
        (right-multiplication by this transform) via FilterActs.
        """
        assert x.ndim == 5
        return self._filter_acts(x, self._filters)
    def rmul_T(self, x):
        """
        Apply the transpose of the transform to a 5-D tensor x via
        ImgActs, producing an image-shaped result of self._irows x
        self._icols.
        """
        return self._img_acts(self._filters, x, self._irows, self._icols)
    def col_shape(self):
        """
        Return the output (column-space) shape, inferred by running
        FilterActs shape inference on row_shape() with a dummy batch
        dimension of -99 that is stripped from the result.
        """
        ishape = self.row_shape() + (-99,)
        fshape = self._filters_shape
        hshape, = self._filter_acts.infer_shape(None, (ishape, fshape))
        assert hshape[-1] == -99
        return hshape[:-1]
    def row_shape(self):
        """
        Return the input (row-space) shape derived from the filter
        shape and image size: (color_groups, colors_per_group, irows,
        icols).
        """
        fshape = self._filters_shape
        fmodulesR, fmodulesC, fcolors, frows, fcols = fshape[:-2]
        fgroups, filters_per_group = fshape[-2:]
        return fgroups, fcolors, self._irows, self._icols
    def print_status(self):
        """
        Print a status summary of the filter values.

        Currently unimplemented: always raises NotImplementedError
        because the ndarray_status helper it relied on no longer exists.
        """
        raise NotImplementedError("TODO: fix dependence on non-existent "
                                  "ndarray_status function")
        """print ndarray_status(
                        self._filters.get_value(borrow=True),
                        msg='%s{%s}'% (self.__class__.__name__,
                            self._message))
        """
    def imshow_gray(self):
        """
        Tile the first color/group slice of every filter position into
        one 2-D array, display it with a grayscale colormap, and return
        the tiled array.
        """
        filters = self._filters.get_value()
        modR, modC, colors, rows, cols, grps, fs_per_grp = filters.shape
        logger.info(filters.shape)
        # One (rows x cols) tile per module position, separated by a
        # 1-pixel gutter.
        rval = np.zeros((
                modR * (rows + 1) - 1,
                modC * (cols + 1) - 1,
                ))
        for rr, modr in enumerate(xrange(0, rval.shape[0], rows + 1)):
            for cc, modc in enumerate(xrange(0, rval.shape[1], cols + 1)):
                rval[modr:modr + rows, modc:modc + cols] = filters[rr, cc, 0, :, :, 0, 0]
        plt.imshow(rval, cmap='gray')
        return rval
| bsd-3-clause |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.