repo_name stringlengths 6 67 | path stringlengths 5 185 | copies stringlengths 1 3 | size stringlengths 4 6 | content stringlengths 1.02k 962k | license stringclasses 15 values |
|---|---|---|---|---|---|
eranchetz/nupic | examples/audiostream/audiostream_tp.py | 32 | 9991 | #!/usr/bin/env python
# ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
"""
See README.md for details.
"""
"""
numpy - the language of pyaudio (& everything else)
pyaudio - access to the mic via the soundcard
pyplot - to plot the sound frequencies
bitmaparray - encodes an array of indices into an SDR
TP10X2 - the C++ optimized temporal pooler (TP)
"""
import numpy
import pyaudio
import matplotlib.pyplot as plt
from nupic.encoders.sparse_pass_through_encoder import SparsePassThroughEncoder
from nupic.research.TP10X2 import TP10X2 as TP
class Visualizations:
  """Scoring and console-printout helpers for temporal pooler predictions."""

  def calcAnomaly(self, actual, predicted):
    """
    Calculates the anomaly of two SDRs

    Uses the equation presented on the wiki:
    https://github.com/numenta/nupic/wiki/Anomaly-Score-Memo

    To put this in terms of the temporal pooler:
      A is the actual input array at a given timestep
      P is the predicted array that was produced from the previous timestep(s)
      [A - (A && P)] / [A]
    Rephrasing as questions:
      What bits are on in A that are not on in P?
      How does that compare to total on bits in A?

    Outputs 0 if there's no difference between P and A.
    Outputs 1 if P and A are totally distinct.

    Not a perfect metric - it doesn't credit proximity
    Next step: combine with a metric for a spatial pooler
    """
    combined = numpy.logical_and(actual, predicted)
    delta = numpy.logical_xor(actual, combined)
    delta_score = sum(delta)
    actual_score = float(sum(actual))
    if actual_score == 0:
      # No active bits in the actual input: nothing could have been
      # mispredicted. (The original divided by zero here, yielding NaN.)
      return 0.0
    return delta_score / actual_score

  def compareArray(self, actual, predicted):
    """
    Produce an array that compares the actual & predicted

    'A' - actual
    'P' - predicted
    'E' - expected (both actual & predicted)
    ' ' - neither an input nor predicted
    """
    compare = []
    for i in range(actual.size):
      if actual[i] and predicted[i]:
        compare.append('E')
      elif actual[i]:
        compare.append('A')
      elif predicted[i]:
        compare.append('P')
      else:
        compare.append(' ')
    return compare

  def hashtagAnomaly(self, anomaly):
    """
    Basic printout method to visualize the anomaly score (scale: 1 - 50 #'s)

    The bar always starts with one '#', so a zero anomaly is still visible;
    total bar length is a constant 51 characters.
    """
    hashcount = '#'
    for i in range(int(anomaly / 0.02)):
      hashcount += '#'
    for j in range(int((1 - anomaly) / 0.02)):
      hashcount += '.'
    return hashcount
class AudioStream:
  """Captures microphone audio, converts each sample window to an SDR, and
  feeds it to the temporal pooler, printing an anomaly bar per step.

  NOTE: constructing this class blocks forever -- __init__ ends in an
  infinite processAudio() loop.
  """

  def __init__(self):
    """
    Instantiate temporal pooler, encoder, audio sampler, filter, & freq plot
    """
    self.vis = Visualizations()

    """
    The number of columns in the input and therefore the TP
    2**9 = 512
    Trial and error pulled that out
    numCols should be tested during benchmarking
    """
    self.numCols = 2**9
    sparsity = 0.10
    self.numInput = int(self.numCols * sparsity)

    """
    Create a bit map encoder
    From the encoder's __init__ method:
    1st arg: the total bits in input
    2nd arg: the number of bits used to encode each input bit
    """
    self.e = SparsePassThroughEncoder(self.numCols, 1)

    """
    Sampling details
    rate: The sampling rate in Hz of my soundcard
    buffersize: The size of the array to which we will save audio segments (2^12 = 4096 is very good)
    secToRecord: The length of each sampling
    buffersToRecord: how many multiples of buffers are we recording?
    """
    rate=44100
    secToRecord=.1
    self.buffersize=2**12
    self.buffersToRecord=int(rate*secToRecord/self.buffersize)
    if not self.buffersToRecord:
      # Guarantee at least one buffer even when secToRecord is very short.
      self.buffersToRecord=1

    """
    Filters in Hertz
    highHertz: lower limit of the bandpass filter, in Hertz
    lowHertz: upper limit of the bandpass filter, in Hertz
    max lowHertz = (buffersize / 2 - 1) * rate / buffersize
    """
    highHertz = 500
    lowHertz = 10000

    """
    Convert filters from Hertz to bins
    highpass: convert the highHertz into a bin for the FFT
    lowpass: convert the lowHertz into a bin for the FFt
    NOTES:
    highpass is at least the 1st bin since most mics only pick up >=20Hz
    lowpass is no higher than buffersize/2 - 1 (highest array index)
    passband needs to be wider than size of numInput - not checking for that
    """
    self.highpass = max(int(highHertz * self.buffersize / rate),1)
    self.lowpass = min(int(lowHertz * self.buffersize / rate), self.buffersize/2 - 1)

    """
    The call to create the temporal pooler region
    """
    self.tp = TP(numberOfCols=self.numCols, cellsPerColumn=4,
                 initialPerm=0.5, connectedPerm=0.5,
                 minThreshold=10, newSynapseCount=10,
                 permanenceInc=0.1, permanenceDec=0.07,
                 activationThreshold=8,
                 globalDecay=0.02, burnIn=2,
                 checkSynapseConsistency=False,
                 pamLength=100)

    """
    Creating the audio stream from our mic
    """
    p = pyaudio.PyAudio()
    self.inStream = p.open(format=pyaudio.paInt32,channels=1,rate=rate,input=True,frames_per_buffer=self.buffersize)

    """
    Setting up the array that will handle the timeseries of audio data from our input
    """
    self.audio = numpy.empty((self.buffersToRecord*self.buffersize),dtype="uint32")

    """
    Print out the inputs
    """
    print "Number of columns:\t" + str(self.numCols)
    print "Max size of input:\t" + str(self.numInput)
    print "Sampling rate (Hz):\t" + str(rate)
    print "Passband filter (Hz):\t" + str(highHertz) + " - " + str(lowHertz)
    print "Passband filter (bin):\t" + str(self.highpass) + " - " + str(self.lowpass)
    print "Bin difference:\t\t" + str(self.lowpass - self.highpass)
    print "Buffersize:\t\t" + str(self.buffersize)

    """
    Setup the plot
    Use the bandpass filter frequency range as the x-axis
    Rescale the y-axis
    """
    plt.ion()
    bin = range(self.highpass,self.lowpass)
    xs = numpy.arange(len(bin))*rate/self.buffersize + highHertz
    self.freqPlot = plt.plot(xs,xs)[0]
    plt.ylim(0, 10**12)

    # Main loop: never returns; samples and processes audio until killed.
    while True:
      self.processAudio()

  def processAudio (self):
    """
    Sample audio, encode, send it to the TP

    Pulls the audio from the mic
    Conditions that audio as an SDR
    Computes a prediction via the TP
    Update the visualizations
    """

    """
    Cycle through the multiples of the buffers we're sampling
    Sample audio to store for each frame in buffersize
    Mic voltage-level timeseries is saved as 32-bit binary
    Convert that 32-bit binary into integers, and save to array for the FFT
    """
    for i in range(self.buffersToRecord):
      try:
        audioString = self.inStream.read(self.buffersize)
      except IOError:
        print "Overflow error from 'audiostring = inStream.read(buffersize)'. Try decreasing buffersize."
        quit()
      # Reinterpret the raw paInt32 byte string as unsigned 32-bit ints.
      # NOTE(review): numpy.fromstring is deprecated in modern numpy in
      # favor of numpy.frombuffer -- confirm before upgrading numpy.
      self.audio[i*self.buffersize:(i + 1)*self.buffersize] = numpy.fromstring(audioString,dtype = "uint32")

    """
    Get int array of strength for each bin of frequencies via fast fourier transform
    Get the indices of the strongest frequencies (the top 'numInput')
    Scale the indices so that the frequencies fit to within numCols
    Pick out the unique indices (we've reduced the mapping, so we likely have multiples)
    Encode those indices into an SDR via the SparsePassThroughEncoder
    Cast the SDR as a float for the TP
    """
    ys = self.fft(self.audio, self.highpass, self.lowpass)
    fs = numpy.sort(ys.argsort()[-self.numInput:])
    rfs = fs.astype(numpy.float32) / (self.lowpass - self.highpass) * self.numCols
    ufs = numpy.unique(rfs)
    actualInt = self.e.encode(ufs)
    actual = actualInt.astype(numpy.float32)

    """
    Pass the SDR to the TP
    Collect the prediction SDR from the TP
    Pass the prediction & actual SDRS to the anomaly calculator & array comparer
    Update the frequency plot
    """
    self.tp.compute(actual, enableLearn = True, computeInfOutput = True)
    predictedInt = self.tp.getPredictedState().max(axis=1)
    compare = self.vis.compareArray(actualInt, predictedInt)
    anomaly = self.vis.calcAnomaly(actualInt, predictedInt)
    print "." . join(compare)
    print self.vis.hashtagAnomaly(anomaly)
    self.freqPlot.set_ydata(ys)
    plt.show(block = False)
    plt.draw()

  def fft(self, audio, highpass, lowpass):
    """
    Fast fourier transform conditioning

    Output:
    'output' contains the strength of each frequency in the audio signal
    frequencies are marked by its position in 'output':
    frequency = index * rate / buffesize
    output.size = buffersize/2
    Method:
    Use numpy's FFT (numpy.fft.fft)
    Find the magnitude of the complex numbers returned (abs value)
    Split the FFT array in half, because we have mirror frequencies
    (they're the complex conjugates)
    Use just the first half to apply the bandpass filter
    Great info here: http://stackoverflow.com/questions/4364823/how-to-get-frequency-from-fft-result
    """
    # Only the first (non-mirrored) half is kept; `right` is discarded.
    left,right = numpy.split(numpy.abs(numpy.fft.fft(audio)),2)
    output = left[highpass:lowpass]
    return output
# Module entry point: constructing AudioStream starts the (infinite)
# capture/compute loop, so importing this module blocks forever.
audiostream = AudioStream()
| agpl-3.0 |
chenyyx/scikit-learn-doc-zh | examples/en/covariance/plot_outlier_detection.py | 15 | 5121 | """
==========================================
Outlier detection with several methods.
==========================================
When the amount of contamination is known, this example illustrates four
different ways of performing :ref:`outlier_detection`:
- based on a robust estimator of covariance, which assumes that the
data are Gaussian distributed, and performs better than the One-Class SVM
in that case.
- using the One-Class SVM and its ability to capture the shape of the
data set, hence performing better when the data is strongly
non-Gaussian, i.e. with two well-separated clusters;
- using the Isolation Forest algorithm, which is based on random forests and
hence more adapted to large-dimensional settings, even if it performs
quite well in the examples below.
- using the Local Outlier Factor to measure the local deviation of a given
data point with respect to its neighbors by comparing their local density.
The ground truth about inliers and outliers is given by the points colors
while the orange-filled area indicates which points are reported as inliers
by each method.
Here, we assume that we know the fraction of outliers in the datasets.
Thus rather than using the 'predict' method of the objects, we set the
threshold on the decision_function to separate out the corresponding
fraction.
"""
import numpy as np
from scipy import stats
import matplotlib.pyplot as plt
import matplotlib.font_manager
from sklearn import svm
from sklearn.covariance import EllipticEnvelope
from sklearn.ensemble import IsolationForest
from sklearn.neighbors import LocalOutlierFactor
print(__doc__)
rng = np.random.RandomState(42)
# Example settings
n_samples = 200
outliers_fraction = 0.25
clusters_separation = [0, 1, 2]
# define two outlier detection tools to be compared
classifiers = {
"One-Class SVM": svm.OneClassSVM(nu=0.95 * outliers_fraction + 0.05,
kernel="rbf", gamma=0.1),
"Robust covariance": EllipticEnvelope(contamination=outliers_fraction),
"Isolation Forest": IsolationForest(max_samples=n_samples,
contamination=outliers_fraction,
random_state=rng),
"Local Outlier Factor": LocalOutlierFactor(
n_neighbors=35,
contamination=outliers_fraction)}
# Compare given classifiers under given settings
xx, yy = np.meshgrid(np.linspace(-7, 7, 100), np.linspace(-7, 7, 100))
n_inliers = int((1. - outliers_fraction) * n_samples)
n_outliers = int(outliers_fraction * n_samples)
ground_truth = np.ones(n_samples, dtype=int)
ground_truth[-n_outliers:] = -1
# Fit the problem with varying cluster separation
for i, offset in enumerate(clusters_separation):
np.random.seed(42)
# Data generation
X1 = 0.3 * np.random.randn(n_inliers // 2, 2) - offset
X2 = 0.3 * np.random.randn(n_inliers // 2, 2) + offset
X = np.r_[X1, X2]
# Add outliers
X = np.r_[X, np.random.uniform(low=-6, high=6, size=(n_outliers, 2))]
# Fit the model
plt.figure(figsize=(9, 7))
for i, (clf_name, clf) in enumerate(classifiers.items()):
# fit the data and tag outliers
if clf_name == "Local Outlier Factor":
y_pred = clf.fit_predict(X)
scores_pred = clf.negative_outlier_factor_
else:
clf.fit(X)
scores_pred = clf.decision_function(X)
y_pred = clf.predict(X)
threshold = stats.scoreatpercentile(scores_pred,
100 * outliers_fraction)
n_errors = (y_pred != ground_truth).sum()
# plot the levels lines and the points
if clf_name == "Local Outlier Factor":
# decision_function is private for LOF
Z = clf._decision_function(np.c_[xx.ravel(), yy.ravel()])
else:
Z = clf.decision_function(np.c_[xx.ravel(), yy.ravel()])
Z = Z.reshape(xx.shape)
subplot = plt.subplot(2, 2, i + 1)
subplot.contourf(xx, yy, Z, levels=np.linspace(Z.min(), threshold, 7),
cmap=plt.cm.Blues_r)
a = subplot.contour(xx, yy, Z, levels=[threshold],
linewidths=2, colors='red')
subplot.contourf(xx, yy, Z, levels=[threshold, Z.max()],
colors='orange')
b = subplot.scatter(X[:-n_outliers, 0], X[:-n_outliers, 1], c='white',
s=20, edgecolor='k')
c = subplot.scatter(X[-n_outliers:, 0], X[-n_outliers:, 1], c='black',
s=20, edgecolor='k')
subplot.axis('tight')
subplot.legend(
[a.collections[0], b, c],
['learned decision function', 'true inliers', 'true outliers'],
prop=matplotlib.font_manager.FontProperties(size=10),
loc='lower right')
subplot.set_xlabel("%d. %s (errors: %d)" % (i + 1, clf_name, n_errors))
subplot.set_xlim((-7, 7))
subplot.set_ylim((-7, 7))
plt.subplots_adjust(0.04, 0.1, 0.96, 0.94, 0.1, 0.26)
plt.suptitle("Outlier detection")
plt.show()
| gpl-3.0 |
jjongbloets/julesTk | julesTk/view/plot.py | 1 | 3049 | """Implement a Frame with a matplotlib"""
from julesTk.view import *
import matplotlib
matplotlib.use("TkAgg")
from matplotlib.backends.backend_tkagg import FigureCanvasTkAgg, NavigationToolbar2TkAgg
from matplotlib.figure import Figure
class PlotFrame(Frame, object):
    """A julesTk Frame embedding a matplotlib Figure.

    Owns the Figure, its Tk canvas, the navigation toolbar, a single
    subplot (``axes``) and an optional legend. Call :meth:`setup` once
    after construction to build the widgets.
    """

    def __init__(self, parent):
        super(PlotFrame, self).__init__(parent)
        self._figure = None   # matplotlib.figure.Figure
        self._canvas = None   # FigureCanvasTkAgg bound to the figure
        self._toolbar = None  # NavigationToolbar2TkAgg widget
        self._legend = None   # legend handle created by add_legend()
        self._axes = None     # the single subplot (matplotlib.axes.Axes)

    def _setup_figure(self, size, dpi=100):
        """Create the Figure.

        :param size: figure size as a (width, height) tuple, in inches
        :param dpi: figure resolution
        :raises ValueError: if size is not a tuple of length 2
        """
        # BUG FIX: the original check used `and`, so it raised only when
        # size was neither a tuple nor of length 2 -- e.g. a 3-tuple
        # slipped through. Both requirements must hold.
        if not (isinstance(size, tuple) and len(size) == 2):
            raise ValueError("Invalid value for size (need tuple of length 2)")
        f = Figure(figsize=size, dpi=dpi)
        self._figure = f

    def _setup_canvas(self):
        """Wrap the figure in a Tk canvas and attach the toolbar."""
        if not isinstance(self.figure, Figure):
            raise ValueError("Invalid figure object")
        self._canvas = FigureCanvasTkAgg(self.figure, self)
        self._setup_toolbar()
        self.canvas.show()
        self._canvas.get_tk_widget().pack(side=tk.BOTTOM, fill=tk.BOTH, expand=True)

    def _setup_toolbar(self):
        """Create the matplotlib navigation toolbar and pack the canvas."""
        self._toolbar = NavigationToolbar2TkAgg(self.canvas, self)
        self.toolbar.update()
        self.canvas._tkcanvas.pack(side=tk.TOP, fill=tk.BOTH, expand=True)

    def _setup_subplot(self):
        """Create the single subplot used for all drawing."""
        self._axes = self.figure.add_subplot(111)

    @property
    def figure(self):
        """Returns the current add_plot figure of this Frame

        :rtype: matplotlib.figure.Figure
        """
        return self._figure

    @property
    def canvas(self):
        """Returns the current canvas of this frame

        :rtype: matplotlib.backends.backend_tkagg.FigureCanvasTkAgg
        """
        return self._canvas

    @property
    def axes(self):
        """Returns the current subplot in the figure

        :rtype: matplotlib.axes.Axes
        """
        return self._axes

    @property
    def legend(self):
        """Legend handle, or None until add_legend() is called."""
        return self._legend

    @property
    def toolbar(self):
        """The navigation toolbar widget."""
        return self._toolbar

    def setup(self, size=None, dpi=100):
        """Build figure, canvas and subplot; size defaults to (5, 5) inches."""
        if size is None:
            size = (5, 5)
        self._setup_figure(size, dpi)
        self._setup_canvas()
        self._setup_subplot()

    def add_legend(self):
        """Attach a 'best'-positioned legend to the subplot, if one exists."""
        if self.axes is not None:
            self._legend = self.axes.legend(loc='best')

    def draw(self):
        """Redraw the canvas."""
        self.canvas.draw()

    def clear(self):
        """Wipe the figure, recreate the subplot, and redraw."""
        self.figure.clear()
        self._setup_subplot()
        self.canvas.draw()
class PlotView(View):
    """A view that embeds a single PlotFrame."""

    def __init__(self, parent, controller):
        super(PlotView, self).__init__(parent, controller)
        self._plot = None  # created lazily by setup_plot()

    @property
    def plot(self):
        """The PlotFrame embedded in this view.

        :rtype: julesTk.view.plot.PlotFrame
        """
        return self._plot

    def body(self):
        # Lay out the grid first, then build the embedded plot frame.
        self.configure_grid(self)
        self.setup_plot()

    def setup_plot(self):
        """Create the PlotFrame child and initialise its figure/canvas."""
        frame = PlotFrame(self)
        self._plot = frame
        frame.setup()
| mit |
jiegzhan/multi-class-text-classification-cnn-rnn | train.py | 1 | 6685 | import os
import sys
import json
import time
import shutil
import pickle
import logging
import data_helper
import numpy as np
import pandas as pd
import tensorflow as tf
from text_cnn_rnn import TextCNNRNN
from sklearn.model_selection import train_test_split
logging.getLogger().setLevel(logging.INFO)
def train_cnn_rnn():
    """Train the CNN-RNN text classifier.

    Usage: python3 train.py <zipped_csv> <training_config.json>

    Loads the dataset and pre-trained word embeddings, trains with periodic
    dev-set evaluation, checkpoints the best model, evaluates it on the
    held-out test set, and writes the vocabulary/embeddings/labels/params
    artifacts that predict.py needs.
    """
    input_file = sys.argv[1]
    x_, y_, vocabulary, vocabulary_inv, df, labels = data_helper.load_data(input_file)

    training_config = sys.argv[2]
    params = json.loads(open(training_config).read())

    # Assign a 300 dimension vector to each word
    word_embeddings = data_helper.load_embeddings(vocabulary)
    embedding_mat = [word_embeddings[word] for index, word in enumerate(vocabulary_inv)]
    embedding_mat = np.array(embedding_mat, dtype = np.float32)

    # Split the original dataset into train set and test set
    x, x_test, y, y_test = train_test_split(x_, y_, test_size=0.1)
    # Split the train set into train set and dev set
    x_train, x_dev, y_train, y_dev = train_test_split(x, y, test_size=0.1)

    logging.info('x_train: {}, x_dev: {}, x_test: {}'.format(len(x_train), len(x_dev), len(x_test)))
    logging.info('y_train: {}, y_dev: {}, y_test: {}'.format(len(y_train), len(y_dev), len(y_test)))

    # Create a directory, everything related to the training will be saved in this directory
    timestamp = str(int(time.time()))
    trained_dir = './trained_results_' + timestamp + '/'
    if os.path.exists(trained_dir):
        shutil.rmtree(trained_dir)
    os.makedirs(trained_dir)

    graph = tf.Graph()
    with graph.as_default():
        session_conf = tf.ConfigProto(allow_soft_placement=True, log_device_placement=False)
        sess = tf.Session(config=session_conf)
        with sess.as_default():
            cnn_rnn = TextCNNRNN(
                embedding_mat=embedding_mat,
                sequence_length=x_train.shape[1],
                num_classes = y_train.shape[1],
                non_static=params['non_static'],
                hidden_unit=params['hidden_unit'],
                max_pool_size=params['max_pool_size'],
                # BUG FIX: on Python 3, map() returns a one-shot iterator;
                # materialize it so filter_sizes can be iterated repeatedly
                # when the graph is built.
                filter_sizes=list(map(int, params['filter_sizes'].split(","))),
                num_filters = params['num_filters'],
                embedding_size = params['embedding_dim'],
                l2_reg_lambda = params['l2_reg_lambda'])

            global_step = tf.Variable(0, name='global_step', trainable=False)
            optimizer = tf.train.RMSPropOptimizer(1e-3, decay=0.9)
            grads_and_vars = optimizer.compute_gradients(cnn_rnn.loss)
            train_op = optimizer.apply_gradients(grads_and_vars, global_step=global_step)

            # Checkpoint files will be saved in this directory during training
            checkpoint_dir = './checkpoints_' + timestamp + '/'
            if os.path.exists(checkpoint_dir):
                shutil.rmtree(checkpoint_dir)
            os.makedirs(checkpoint_dir)
            checkpoint_prefix = os.path.join(checkpoint_dir, 'model')

            def real_len(batches):
                # Non-padding length of each sequence, in max-pooled units.
                return [np.ceil(np.argmin(batch + [0]) * 1.0 / params['max_pool_size']) for batch in batches]

            def train_step(x_batch, y_batch):
                # One optimizer step with dropout enabled.
                feed_dict = {
                    cnn_rnn.input_x: x_batch,
                    cnn_rnn.input_y: y_batch,
                    cnn_rnn.dropout_keep_prob: params['dropout_keep_prob'],
                    cnn_rnn.batch_size: len(x_batch),
                    cnn_rnn.pad: np.zeros([len(x_batch), 1, params['embedding_dim'], 1]),
                    cnn_rnn.real_len: real_len(x_batch),
                }
                _, step, loss, accuracy = sess.run([train_op, global_step, cnn_rnn.loss, cnn_rnn.accuracy], feed_dict)

            def dev_step(x_batch, y_batch):
                # Forward pass only (dropout disabled); returns batch metrics.
                feed_dict = {
                    cnn_rnn.input_x: x_batch,
                    cnn_rnn.input_y: y_batch,
                    cnn_rnn.dropout_keep_prob: 1.0,
                    cnn_rnn.batch_size: len(x_batch),
                    cnn_rnn.pad: np.zeros([len(x_batch), 1, params['embedding_dim'], 1]),
                    cnn_rnn.real_len: real_len(x_batch),
                }
                step, loss, accuracy, num_correct, predictions = sess.run(
                    [global_step, cnn_rnn.loss, cnn_rnn.accuracy, cnn_rnn.num_correct, cnn_rnn.predictions], feed_dict)
                return accuracy, loss, num_correct, predictions

            saver = tf.train.Saver()
            sess.run(tf.global_variables_initializer())

            # Training starts here
            train_batches = data_helper.batch_iter(list(zip(x_train, y_train)), params['batch_size'], params['num_epochs'])
            best_accuracy, best_at_step = 0, 0

            # Train the model with x_train and y_train
            for train_batch in train_batches:
                x_train_batch, y_train_batch = zip(*train_batch)
                train_step(x_train_batch, y_train_batch)
                current_step = tf.train.global_step(sess, global_step)

                # Evaluate the model with x_dev and y_dev
                if current_step % params['evaluate_every'] == 0:
                    dev_batches = data_helper.batch_iter(list(zip(x_dev, y_dev)), params['batch_size'], 1)
                    total_dev_correct = 0
                    for dev_batch in dev_batches:
                        x_dev_batch, y_dev_batch = zip(*dev_batch)
                        acc, loss, num_dev_correct, predictions = dev_step(x_dev_batch, y_dev_batch)
                        total_dev_correct += num_dev_correct
                    accuracy = float(total_dev_correct) / len(y_dev)
                    logging.info('Accuracy on dev set: {}'.format(accuracy))

                    # Keep only checkpoints that match or beat the best dev accuracy.
                    if accuracy >= best_accuracy:
                        best_accuracy, best_at_step = accuracy, current_step
                        path = saver.save(sess, checkpoint_prefix, global_step=current_step)
                        logging.critical('Saved model {} at step {}'.format(path, best_at_step))
                        logging.critical('Best accuracy {} at step {}'.format(best_accuracy, best_at_step))
            logging.critical('Training is complete, testing the best model on x_test and y_test')

            # Save the model files to trained_dir. predict.py needs trained model files.
            saver.save(sess, trained_dir + "best_model.ckpt")

            # Evaluate x_test and y_test
            saver.restore(sess, checkpoint_prefix + '-' + str(best_at_step))
            test_batches = data_helper.batch_iter(list(zip(x_test, y_test)), params['batch_size'], 1, shuffle=False)
            total_test_correct = 0
            for test_batch in test_batches:
                x_test_batch, y_test_batch = zip(*test_batch)
                acc, loss, num_test_correct, predictions = dev_step(x_test_batch, y_test_batch)
                total_test_correct += int(num_test_correct)
            logging.critical('Accuracy on test set: {}'.format(float(total_test_correct) / len(y_test)))

    # Save trained parameters and files since predict.py needs them
    with open(trained_dir + 'words_index.json', 'w') as outfile:
        json.dump(vocabulary, outfile, indent=4, ensure_ascii=False)
    with open(trained_dir + 'embeddings.pickle', 'wb') as outfile:
        pickle.dump(embedding_mat, outfile, pickle.HIGHEST_PROTOCOL)
    with open(trained_dir + 'labels.json', 'w') as outfile:
        json.dump(labels, outfile, indent=4, ensure_ascii=False)

    params['sequence_length'] = x_train.shape[1]
    with open(trained_dir + 'trained_parameters.json', 'w') as outfile:
        json.dump(params, outfile, indent=4, sort_keys=True, ensure_ascii=False)
if __name__ == '__main__':
    # Example: python3 train.py ./data/train.csv.zip ./training_config.json
    train_cnn_rnn()
| apache-2.0 |
henridwyer/scikit-learn | sklearn/utils/tests/test_sparsefuncs.py | 57 | 13752 | import numpy as np
import scipy.sparse as sp
from scipy import linalg
from numpy.testing import assert_array_almost_equal, assert_array_equal
from sklearn.datasets import make_classification
from sklearn.utils.sparsefuncs import (mean_variance_axis,
inplace_column_scale,
inplace_row_scale,
inplace_swap_row, inplace_swap_column,
min_max_axis,
count_nonzero, csc_median_axis_0)
from sklearn.utils.sparsefuncs_fast import assign_rows_csr
from sklearn.utils.testing import assert_raises
def test_mean_variance_axis0():
    """mean_variance_axis(axis=0) matches dense np.mean/np.var for CSR and
    CSC, at float64 and float32, and rejects LIL input."""
    X, _ = make_classification(5, 4, random_state=0)
    # Sparsify the array a little bit
    X[0, 0] = 0
    X[2, 1] = 0
    X[4, 3] = 0
    X_lil = sp.lil_matrix(X)
    X_lil[1, 0] = 0
    X[1, 0] = 0

    X_csr = sp.csr_matrix(X_lil)
    X_means, X_vars = mean_variance_axis(X_csr, axis=0)
    assert_array_almost_equal(X_means, np.mean(X, axis=0))
    assert_array_almost_equal(X_vars, np.var(X, axis=0))

    X_csc = sp.csc_matrix(X_lil)
    X_means, X_vars = mean_variance_axis(X_csc, axis=0)
    assert_array_almost_equal(X_means, np.mean(X, axis=0))
    assert_array_almost_equal(X_vars, np.var(X, axis=0))
    assert_raises(TypeError, mean_variance_axis, X_lil, axis=0)

    X = X.astype(np.float32)
    X_csr = X_csr.astype(np.float32)
    # BUG FIX: the float32 CSC copy was derived from X_csr
    # ("X_csc = X_csr.astype(...)"), so the CSC code path was never
    # exercised at float32. Derive it from X_csc instead.
    X_csc = X_csc.astype(np.float32)
    X_means, X_vars = mean_variance_axis(X_csr, axis=0)
    assert_array_almost_equal(X_means, np.mean(X, axis=0))
    assert_array_almost_equal(X_vars, np.var(X, axis=0))
    X_means, X_vars = mean_variance_axis(X_csc, axis=0)
    assert_array_almost_equal(X_means, np.mean(X, axis=0))
    assert_array_almost_equal(X_vars, np.var(X, axis=0))
    assert_raises(TypeError, mean_variance_axis, X_lil, axis=0)
def test_mean_variance_illegal_axis():
    """mean_variance_axis raises ValueError for out-of-range axis values."""
    X, _ = make_classification(5, 4, random_state=0)
    # Sparsify a few entries so the CSR matrix is not fully dense.
    for row, col in ((0, 0), (2, 1), (4, 3)):
        X[row, col] = 0
    X_csr = sp.csr_matrix(X)

    for bad_axis in (-3, 2, -1):
        assert_raises(ValueError, mean_variance_axis, X_csr, axis=bad_axis)
def test_mean_variance_axis1():
    """mean_variance_axis(axis=1) matches dense np.mean/np.var for CSR and
    CSC, at float64 and float32, and rejects LIL input."""
    X, _ = make_classification(5, 4, random_state=0)
    # Sparsify the array a little bit
    X[0, 0] = 0
    X[2, 1] = 0
    X[4, 3] = 0
    X_lil = sp.lil_matrix(X)
    X_lil[1, 0] = 0
    X[1, 0] = 0

    X_csr = sp.csr_matrix(X_lil)
    X_means, X_vars = mean_variance_axis(X_csr, axis=1)
    assert_array_almost_equal(X_means, np.mean(X, axis=1))
    assert_array_almost_equal(X_vars, np.var(X, axis=1))

    X_csc = sp.csc_matrix(X_lil)
    X_means, X_vars = mean_variance_axis(X_csc, axis=1)
    assert_array_almost_equal(X_means, np.mean(X, axis=1))
    assert_array_almost_equal(X_vars, np.var(X, axis=1))
    assert_raises(TypeError, mean_variance_axis, X_lil, axis=1)

    X = X.astype(np.float32)
    X_csr = X_csr.astype(np.float32)
    # BUG FIX: the float32 CSC copy was derived from X_csr
    # ("X_csc = X_csr.astype(...)"), so the CSC code path was never
    # exercised at float32. Derive it from X_csc instead.
    X_csc = X_csc.astype(np.float32)
    X_means, X_vars = mean_variance_axis(X_csr, axis=1)
    assert_array_almost_equal(X_means, np.mean(X, axis=1))
    assert_array_almost_equal(X_vars, np.var(X, axis=1))
    X_means, X_vars = mean_variance_axis(X_csc, axis=1)
    assert_array_almost_equal(X_means, np.mean(X, axis=1))
    assert_array_almost_equal(X_vars, np.var(X, axis=1))
    assert_raises(TypeError, mean_variance_axis, X_lil, axis=1)
def test_densify_rows():
    """assign_rows_csr copies the picked CSR rows into `out`, here in
    reverse destination order."""
    data = sp.csr_matrix([[0, 3, 0],
                          [2, 4, 0],
                          [0, 0, 0],
                          [9, 8, 7],
                          [4, 0, 5]], dtype=np.float64)
    picked = np.array([0, 2, 3], dtype=np.intp)
    out = np.ones((picked.shape[0], data.shape[1]), dtype=np.float64)
    # Destination indices run backwards: the last slot gets the first row.
    dest = np.arange(out.shape[0], dtype=np.intp)[::-1]

    assign_rows_csr(data, picked, dest, out)
    assert_array_equal(out, data[picked].toarray()[::-1])
def test_inplace_column_scale():
    """inplace_column_scale matches dense column scaling for CSR and CSC,
    at float64 and float32, and rejects LIL input."""
    rng = np.random.RandomState(0)
    X = sp.rand(100, 200, 0.05)
    scale = rng.rand(200)

    for dtype in (None, np.float32):
        if dtype is not None:
            # Repeat the whole check at reduced precision.
            X = X.astype(dtype)
            scale = scale.astype(dtype)
        Xr = X.tocsr()
        Xc = X.tocsc()
        XA = X.toarray()
        XA *= scale

        inplace_column_scale(Xc, scale)
        inplace_column_scale(Xr, scale)
        assert_array_almost_equal(Xr.toarray(), Xc.toarray())
        assert_array_almost_equal(XA, Xc.toarray())
        assert_array_almost_equal(XA, Xr.toarray())
        assert_raises(TypeError, inplace_column_scale, X.tolil(), scale)
def test_inplace_row_scale():
    """inplace_row_scale matches dense row scaling for CSR and CSC, at
    float64 and float32, and rejects LIL input."""
    rng = np.random.RandomState(0)
    X = sp.rand(100, 200, 0.05)
    Xr = X.tocsr()
    Xc = X.tocsc()
    XA = X.toarray()
    scale = rng.rand(100)
    XA *= scale.reshape(-1, 1)

    inplace_row_scale(Xc, scale)
    inplace_row_scale(Xr, scale)
    assert_array_almost_equal(Xr.toarray(), Xc.toarray())
    assert_array_almost_equal(XA, Xc.toarray())
    assert_array_almost_equal(XA, Xr.toarray())
    # BUG FIX: this test previously asserted the TypeError from
    # inplace_column_scale (a copy-paste from test_inplace_column_scale);
    # it must exercise inplace_row_scale's own LIL rejection.
    assert_raises(TypeError, inplace_row_scale, X.tolil(), scale)

    X = X.astype(np.float32)
    scale = scale.astype(np.float32)
    Xr = X.tocsr()
    Xc = X.tocsc()
    XA = X.toarray()
    XA *= scale.reshape(-1, 1)
    inplace_row_scale(Xc, scale)
    inplace_row_scale(Xr, scale)
    assert_array_almost_equal(Xr.toarray(), Xc.toarray())
    assert_array_almost_equal(XA, Xc.toarray())
    assert_array_almost_equal(XA, Xr.toarray())
    # BUG FIX: same copy-paste error as above.
    assert_raises(TypeError, inplace_row_scale, X.tolil(), scale)
def test_inplace_swap_row():
    # In-place row swaps on CSR/CSC must match the same swaps applied to
    # the dense array via the BLAS `swap` routine, for float64 and float32.
    X = np.array([[0, 3, 0],
                  [2, 4, 0],
                  [0, 0, 0],
                  [9, 8, 7],
                  [4, 0, 5]], dtype=np.float64)
    X_csr = sp.csr_matrix(X)
    X_csc = sp.csc_matrix(X)

    # get_blas_funcs returns a tuple; take the single `swap` routine.
    swap = linalg.get_blas_funcs(('swap',), (X,))
    swap = swap[0]
    # First/last row swap (negative index must work too).
    X[0], X[-1] = swap(X[0], X[-1])
    inplace_swap_row(X_csr, 0, -1)
    inplace_swap_row(X_csc, 0, -1)
    assert_array_equal(X_csr.toarray(), X_csc.toarray())
    assert_array_equal(X, X_csc.toarray())
    assert_array_equal(X, X_csr.toarray())

    # Interior swap on the already-modified data (order matters).
    X[2], X[3] = swap(X[2], X[3])
    inplace_swap_row(X_csr, 2, 3)
    inplace_swap_row(X_csc, 2, 3)
    assert_array_equal(X_csr.toarray(), X_csc.toarray())
    assert_array_equal(X, X_csc.toarray())
    assert_array_equal(X, X_csr.toarray())
    # NOTE(review): the row indices are omitted here, so the TypeError may
    # come from the missing positional arguments rather than the LIL input
    # -- confirm intent.
    assert_raises(TypeError, inplace_swap_row, X_csr.tolil())

    # Same sequence at float32.
    X = np.array([[0, 3, 0],
                  [2, 4, 0],
                  [0, 0, 0],
                  [9, 8, 7],
                  [4, 0, 5]], dtype=np.float32)
    X_csr = sp.csr_matrix(X)
    X_csc = sp.csc_matrix(X)
    swap = linalg.get_blas_funcs(('swap',), (X,))
    swap = swap[0]
    X[0], X[-1] = swap(X[0], X[-1])
    inplace_swap_row(X_csr, 0, -1)
    inplace_swap_row(X_csc, 0, -1)
    assert_array_equal(X_csr.toarray(), X_csc.toarray())
    assert_array_equal(X, X_csc.toarray())
    assert_array_equal(X, X_csr.toarray())
    X[2], X[3] = swap(X[2], X[3])
    inplace_swap_row(X_csr, 2, 3)
    inplace_swap_row(X_csc, 2, 3)
    assert_array_equal(X_csr.toarray(), X_csc.toarray())
    assert_array_equal(X, X_csc.toarray())
    assert_array_equal(X, X_csr.toarray())
    assert_raises(TypeError, inplace_swap_row, X_csr.tolil())
def test_inplace_swap_column():
    # In-place column swaps on CSR/CSC must match the same swaps applied to
    # the dense array via the BLAS `swap` routine, for float64 and float32.
    X = np.array([[0, 3, 0],
                  [2, 4, 0],
                  [0, 0, 0],
                  [9, 8, 7],
                  [4, 0, 5]], dtype=np.float64)
    X_csr = sp.csr_matrix(X)
    X_csc = sp.csc_matrix(X)

    # get_blas_funcs returns a tuple; take the single `swap` routine.
    swap = linalg.get_blas_funcs(('swap',), (X,))
    swap = swap[0]
    # First/last column swap (negative index must work too).
    X[:, 0], X[:, -1] = swap(X[:, 0], X[:, -1])
    inplace_swap_column(X_csr, 0, -1)
    inplace_swap_column(X_csc, 0, -1)
    assert_array_equal(X_csr.toarray(), X_csc.toarray())
    assert_array_equal(X, X_csc.toarray())
    assert_array_equal(X, X_csr.toarray())

    # Adjacent-column swap on the already-modified data (order matters).
    X[:, 0], X[:, 1] = swap(X[:, 0], X[:, 1])
    inplace_swap_column(X_csr, 0, 1)
    inplace_swap_column(X_csc, 0, 1)
    assert_array_equal(X_csr.toarray(), X_csc.toarray())
    assert_array_equal(X, X_csc.toarray())
    assert_array_equal(X, X_csr.toarray())
    # NOTE(review): the column indices are omitted here, so the TypeError
    # may come from the missing positional arguments rather than the LIL
    # input -- confirm intent.
    assert_raises(TypeError, inplace_swap_column, X_csr.tolil())

    # Same sequence at float32.
    X = np.array([[0, 3, 0],
                  [2, 4, 0],
                  [0, 0, 0],
                  [9, 8, 7],
                  [4, 0, 5]], dtype=np.float32)
    X_csr = sp.csr_matrix(X)
    X_csc = sp.csc_matrix(X)
    swap = linalg.get_blas_funcs(('swap',), (X,))
    swap = swap[0]
    X[:, 0], X[:, -1] = swap(X[:, 0], X[:, -1])
    inplace_swap_column(X_csr, 0, -1)
    inplace_swap_column(X_csc, 0, -1)
    assert_array_equal(X_csr.toarray(), X_csc.toarray())
    assert_array_equal(X, X_csc.toarray())
    assert_array_equal(X, X_csr.toarray())
    X[:, 0], X[:, 1] = swap(X[:, 0], X[:, 1])
    inplace_swap_column(X_csr, 0, 1)
    inplace_swap_column(X_csc, 0, 1)
    assert_array_equal(X_csr.toarray(), X_csc.toarray())
    assert_array_equal(X, X_csc.toarray())
    assert_array_equal(X, X_csr.toarray())
    assert_raises(TypeError, inplace_swap_column, X_csr.tolil())
def test_min_max_axis0():
    """min_max_axis(axis=0) matches dense min/max for CSR and CSC, at
    float64 and float32."""
    base = np.array([[0, 3, 0],
                     [2, -1, 0],
                     [0, 0, 0],
                     [9, 8, 7],
                     [4, 0, 5]], dtype=np.float64)

    for dtype in (np.float64, np.float32):
        X = base.astype(dtype)
        for to_sparse in (sp.csr_matrix, sp.csc_matrix):
            mins, maxs = min_max_axis(to_sparse(X), axis=0)
            assert_array_equal(mins, X.min(axis=0))
            assert_array_equal(maxs, X.max(axis=0))
def test_min_max_axis1():
    """min_max_axis(axis=1) matches dense min/max for CSR and CSC, at
    float64 and float32."""
    base = np.array([[0, 3, 0],
                     [2, -1, 0],
                     [0, 0, 0],
                     [9, 8, 7],
                     [4, 0, 5]], dtype=np.float64)

    for dtype in (np.float64, np.float32):
        X = base.astype(dtype)
        for to_sparse in (sp.csr_matrix, sp.csc_matrix):
            mins, maxs = min_max_axis(to_sparse(X), axis=1)
            assert_array_equal(mins, X.min(axis=1))
            assert_array_equal(maxs, X.max(axis=1))
def test_min_max_axis_errors():
    """min_max_axis rejects non-CSR/CSC matrices and out-of-range axes."""
    dense = np.array([[0, 3, 0],
                      [2, -1, 0],
                      [0, 0, 0],
                      [9, 8, 7],
                      [4, 0, 5]], dtype=np.float64)
    csr = sp.csr_matrix(dense)
    csc = sp.csc_matrix(dense)
    # LIL input is unsupported.
    assert_raises(TypeError, min_max_axis, csr.tolil(), axis=0)
    # Only axes 0/1 (and their negative aliases) make sense for 2-D input.
    for matrix, bad_axis in ((csr, 2), (csc, -3)):
        assert_raises(ValueError, min_max_axis, matrix, axis=bad_axis)
def test_count_nonzero():
    """count_nonzero agrees with the dense nonzero mask, with and
    without sample weights, on every supported axis."""
    dense = np.array([[0, 3, 0],
                      [2, -1, 0],
                      [0, 0, 0],
                      [9, 8, 7],
                      [4, 0, 5]], dtype=np.float64)
    csr = sp.csr_matrix(dense)
    csc = sp.csc_matrix(dense)
    nonzero_mask = dense != 0
    weights = [.5, .2, .3, .1, .1]
    weighted_mask = nonzero_mask * np.array(weights)[:, None]
    for ax in (0, 1, -1, -2, None):
        assert_array_almost_equal(count_nonzero(csr, axis=ax),
                                  nonzero_mask.sum(axis=ax))
        assert_array_almost_equal(
            count_nonzero(csr, axis=ax, sample_weight=weights),
            weighted_mask.sum(axis=ax))
    # CSC input is unsupported; axis must be in range.
    assert_raises(TypeError, count_nonzero, csc)
    assert_raises(ValueError, count_nonzero, csr, axis=2)
def test_csc_row_median():
    """csc_median_axis_0 agrees with np.median along axis 0."""
    rng = np.random.RandomState(0)
    # Fully dense-valued input.
    values = rng.rand(100, 50)
    assert_array_equal(csc_median_axis_0(sp.csc_matrix(values)),
                       np.median(values, axis=0))
    # Genuinely sparse input with some rows negated.
    values = rng.rand(51, 100)
    values[values < 0.7] = 0.0
    flipped_rows = rng.randint(0, 50, 10)
    values[flipped_rows] = -values[flipped_rows]
    assert_array_equal(csc_median_axis_0(sp.csc_matrix(values)),
                       np.median(values, axis=0))
    # Hand-checked toy cases (even and odd row counts).
    toy = [[0, -2], [-1, -1], [1, 0], [2, 1]]
    assert_array_equal(csc_median_axis_0(sp.csc_matrix(toy)),
                       np.array([0.5, -0.5]))
    toy = [[0, -2], [-1, -5], [1, -3]]
    assert_array_equal(csc_median_axis_0(sp.csc_matrix(toy)),
                       np.array([0., -3]))
    # Only CSC matrices are accepted.
    assert_raises(TypeError, csc_median_axis_0, sp.csr_matrix(toy))
| bsd-3-clause |
LeeKamentsky/CellProfiler | cellprofiler/modules/measureobjectradialdistribution.py | 1 | 41744 | """<b>Measure Object Radial Distribution</b> measures the radial distribution
of intensities within each object.
<hr>
Given an image with objects identified, this module measures the
intensity distribution from each object's center to its boundary
within a user-controlled number of bins, i.e. rings.
<p>The distribution is measured from the center of the object, where
the center is defined as the point farthest from any edge. The numbering
is from 1 (innermost) to <i>N</i> (outermost), where <i>N</i> is the
number of bins specified by the user.
Alternatively, if primary objects exist within the object of interest
(e.g. nuclei within cells), you can choose the center of the primary
objects as the center from which to measure the radial distribution.
This might be useful in cytoplasm-to-nucleus translocation experiments,
for example. Note that the ring widths are normalized per-object,
i.e., not necessarily a constant width across objects.</p>
<h4>Available measurements</h4>
<ul>
<li><i>FracAtD:</i> Fraction of total stain in an object at a given radius.</li>
<li><i>MeanFrac:</i> Mean fractional intensity at a given radius; calculated
as fraction of total intensity normalized by fraction of pixels at a given radius.</li>
<li><i>RadialCV:</i> Coefficient of variation of intensity within a ring, calculated
over 8 slices.</li>
</ul>
See also <b>MeasureObjectIntensity</b>.
"""
# CellProfiler is distributed under the GNU General Public License.
# See the accompanying file LICENSE for details.
#
# Copyright (c) 2003-2009 Massachusetts Institute of Technology
# Copyright (c) 2009-2015 Broad Institute
#
# Please see the AUTHORS file for credits.
#
# Website: http://www.cellprofiler.org
import numpy as np
import matplotlib.cm
from numpy.ma import masked_array
from scipy.sparse import coo_matrix
import scipy.ndimage as scind
import sys
import cellprofiler.cpmodule as cpm
import cellprofiler.measurements as cpmeas
import cellprofiler.cpimage as cpi
import cellprofiler.objects as cpo
import cellprofiler.preferences as cpprefs
import cellprofiler.settings as cps
from cellprofiler.settings import YES, NO
import cellprofiler.workspace as cpw
from cellprofiler.cpmath.cpmorphology import distance_to_edge
from cellprofiler.cpmath.cpmorphology import centers_of_labels
from cellprofiler.cpmath.cpmorphology import maximum_position_of_labels
from cellprofiler.cpmath.cpmorphology import color_labels
from cellprofiler.cpmath.cpmorphology import fixup_scipy_ndimage_result as fix
from cellprofiler.cpmath.propagate import propagate
# Choices for the "Object to use as center?" setting.
C_SELF = 'These objects'
# Legacy wording from settings revision 2; mapped to C_CENTERS_OF_OTHER
# by upgrade_settings.
C_CENTERS_OF_OTHER_V2 = 'Other objects'
C_CENTERS_OF_OTHER = 'Centers of other objects'
C_EDGES_OF_OTHER = 'Edges of other objects'
C_ALL = [C_SELF, C_CENTERS_OF_OTHER, C_EDGES_OF_OTHER]
# Measurement category and per-feature name fragments.
M_CATEGORY = 'RadialDistribution'
F_FRAC_AT_D = 'FracAtD'
F_MEAN_FRAC = 'MeanFrac'
F_RADIAL_CV = 'RadialCV'
F_ALL = [F_FRAC_AT_D, F_MEAN_FRAC, F_RADIAL_CV]
# Scale suffix, e.g. "2of4"; "Overflow" collects pixels beyond the
# maximum radius when unscaled bins are used.
FF_SCALE = '%dof%d'
FF_OVERFLOW = 'Overflow'
# Generic template: "_<image>_<bin>of<bin_count>" appended to a feature.
FF_GENERIC = '_%s_' + FF_SCALE
FF_FRAC_AT_D = F_FRAC_AT_D + FF_GENERIC
FF_MEAN_FRAC = F_MEAN_FRAC + FF_GENERIC
FF_RADIAL_CV = F_RADIAL_CV + FF_GENERIC
# Fully-qualified measurement name templates (category prefix added).
MF_FRAC_AT_D = '_'.join((M_CATEGORY,FF_FRAC_AT_D))
MF_MEAN_FRAC = '_'.join((M_CATEGORY,FF_MEAN_FRAC))
MF_RADIAL_CV = '_'.join((M_CATEGORY,FF_RADIAL_CV))
# Overflow-bin measurement names (single "%s" slot for the image name).
OF_FRAC_AT_D = '_'.join((M_CATEGORY, F_FRAC_AT_D, "%s", FF_OVERFLOW))
OF_MEAN_FRAC = '_'.join((M_CATEGORY, F_MEAN_FRAC, "%s", FF_OVERFLOW))
OF_RADIAL_CV = '_'.join((M_CATEGORY, F_RADIAL_CV, "%s", FF_OVERFLOW))
# The offsets below are used by upgrade_settings to locate setting
# groups inside the flat setting_values list of older revisions.
'''# of settings aside from groups'''
SETTINGS_STATIC_COUNT = 3
'''# of settings in image group'''
SETTINGS_IMAGE_GROUP_COUNT = 1
'''# of settings in object group'''
SETTINGS_OBJECT_GROUP_COUNT = 3
'''# of settings in bin group, v1'''
SETTINGS_BIN_GROUP_COUNT_V1 = 1
'''# of settings in bin group, v2'''
SETTINGS_BIN_GROUP_COUNT_V2 = 3
SETTINGS_BIN_GROUP_COUNT = 3
'''# of settings in heatmap group, v4'''
SETTINGS_HEATMAP_GROUP_COUNT_V4 = 7
SETTINGS_HEATMAP_GROUP_COUNT = 7
'''Offset of center choice in object group'''
SETTINGS_CENTER_CHOICE_OFFSET = 1
# Human-readable names shown in the heatmap "Measurement" choice.
A_FRAC_AT_D = "Fraction at Distance"
A_MEAN_FRAC = "Mean Fraction"
A_RADIAL_CV = "Radial CV"
MEASUREMENT_CHOICES = [A_FRAC_AT_D, A_MEAN_FRAC, A_RADIAL_CV]
# Map display name -> fully-qualified measurement-name template.
MEASUREMENT_ALIASES = {
    A_FRAC_AT_D: MF_FRAC_AT_D,
    A_MEAN_FRAC: MF_MEAN_FRAC,
    A_RADIAL_CV: MF_RADIAL_CV }
class MeasureObjectRadialDistribution(cpm.CPModule):
    # CellProfiler module identification: name shown in the UI, menu
    # category, and the settings revision handled by upgrade_settings.
    module_name = "MeasureObjectRadialDistribution"
    category = "Measurement"
    variable_revision_number = 4
def create_settings(self):
    """Build the module's settings: image, object, bin and heatmap groups."""
    # One SettingsGroup per user-added image / object / bin set / heatmap.
    self.images = []
    self.objects = []
    self.bin_counts = []
    self.heatmaps = []
    # Hidden counts are persisted with the pipeline so prepare_settings
    # can rebuild the right number of groups on load.
    self.image_count = cps.HiddenCount(self.images)
    self.object_count = cps.HiddenCount(self.objects)
    self.bin_counts_count = cps.HiddenCount(self.bin_counts)
    self.heatmap_count = cps.HiddenCount(self.heatmaps)
    self.add_image_button = cps.DoSomething("", "Add another image", self.add_image)
    self.spacer_1 = cps.Divider()
    self.add_object_button = cps.DoSomething("", "Add another object",
                                             self.add_object)
    self.spacer_2 = cps.Divider()
    self.add_bin_count_button = cps.DoSomething(
        "", "Add another set of bins", self.add_bin_count)
    self.spacer_3 = cps.Divider()
    self.add_heatmap_button = cps.DoSomething(
        "", "Add another heatmap display", self.add_heatmap,
        doc = """
        Press this button to add a display of one of the radial distribution
        measurements. Each radial band of the object is colored using a
        heatmap according to the measurement value for that band.""")
    # Seed each mandatory group with one non-removable entry.
    self.add_image(can_remove = False)
    self.add_object(can_remove = False)
    self.add_bin_count(can_remove = False)
def add_image(self, can_remove = True):
    '''Add an image to be measured

    can_remove - True to include a divider and a remove button; the
                 first, mandatory group is created with False.
    '''
    group = cps.SettingsGroup()
    if can_remove:
        group.append("divider", cps.Divider(line=False))
    group.append("image_name", cps.ImageNameSubscriber(
        "Select an image to measure", cps.NONE, doc="""
        Select the image that you want to measure the intensity from."""))
    if can_remove:
        group.append("remover", cps.RemoveSettingButton("", "Remove this image", self.images, group))
    self.images.append(group)
def add_object(self, can_remove = True):
    '''Add an object to be measured (plus optional centers)

    can_remove - True to include a divider and a remove button; the
                 first, mandatory group is created with False.
    '''
    group = cps.SettingsGroup()
    if can_remove:
        group.append("divider", cps.Divider(line=False))
    group.append("object_name", cps.ObjectNameSubscriber(
        "Select objects to measure", cps.NONE,doc="""
        Select the objects that you want to measure the intensity from."""))
    group.append("center_choice", cps.Choice(
        "Object to use as center?", C_ALL,doc="""
        There are three ways to specify the center of the radial measurement:
        <ul>
        <li><i>%(C_SELF)s:</i> Use the centers of these objects for the
        radial measurement.</li>
        <li><i>%(C_CENTERS_OF_OTHER)s:</i> Use the centers of other objects
        for the radial measurement.</li>
        <li><i>%(C_EDGES_OF_OTHER)s:</i> Measure distances from the
        edge of the other object to each pixel outside of the
        centering object. Do not include pixels within the centering
        object in the radial measurement calculations.</li>
        </ul>
        For example, if measuring the radial distribution in a Cell
        object, you can use the center of the Cell objects (<i>%(C_SELF)s</i>)
        or you can use previously identified Nuclei objects as
        the centers (<i>%(C_CENTERS_OF_OTHER)s</i>)."""%globals()))
    group.append("center_object_name", cps.ObjectNameSubscriber(
        "Select objects to use as centers", cps.NONE, doc="""
        <i>(Used only if "%(C_CENTERS_OF_OTHER)s" are selected for centers)</i><br>
        Select the object to use as the center, or select <i>None</i> to
        use the input object centers (which is the same as selecting
        <i>%(C_SELF)s</i> for the object centers)."""%globals()))
    if can_remove:
        group.append("remover", cps.RemoveSettingButton("", "Remove this object", self.objects, group))
    self.objects.append(group)
def add_bin_count(self, can_remove = True):
    '''Add another radial bin count at which to measure

    can_remove - True to include a divider and a remove button; the
                 first, mandatory group is created with False.
    '''
    group = cps.SettingsGroup()
    if can_remove:
        group.append("divider", cps.Divider(line=False))
    group.append("wants_scaled", cps.Binary(
        "Scale the bins?", True,doc ="""
        <p>Select <i>%(YES)s</i> to divide the object radially into the number
        of bins that you specify. </p>
        <p>Select <i>%(NO)s</i> to create the number of bins you specify based
        on distance. For this option, the user will be
        asked to specify a maximum distance so that each object will have the
        same measurements (which might be zero for small objects) and so that
        the measurements can be taken without knowing the maximum object radius
        before the run starts.</p>"""%globals()))
    group.append("bin_count", cps.Integer(
        "Number of bins", 4, 2, doc="""
        Specify the number of bins that you want to use to measure
        the distribution. Radial distribution is measured with respect to a series
        of concentric rings starting from the object center (or
        more generally, between contours at a normalized distance
        from the object center). This number
        specifies the number of rings into which the distribution is to
        be divided. Additional ring counts can be specified
        by clicking the <i>Add another set of bins</i> button."""))
    group.append("maximum_radius", cps.Integer(
        "Maximum radius", 100, minval = 1,doc = """
        Specify the maximum radius for the unscaled bins. The unscaled binning
        method creates the number of bins that you
        specify and creates equally spaced bin boundaries up to the maximum
        radius. Parts of the object that are beyond this radius will be
        counted in an overflow bin. The radius is measured in pixels."""))
    # Remembered so visible_settings can decide whether to show a remover.
    group.can_remove = can_remove
    if can_remove:
        group.append("remover", cps.RemoveSettingButton("", "Remove this set of bins", self.bin_counts, group))
    self.bin_counts.append(group)
def get_bin_count_choices(self, pipeline=None):
    """Return the distinct bin counts, as strings, for the heatmap
    "Number of bins" choice setting.

    pipeline - unused; present to match the choices_fn signature.
    """
    choices = []
    for bin_count in self.bin_counts:
        nbins = str(bin_count.bin_count.value)
        # Bug fix: the original test was "nbins != choices", comparing a
        # string against the whole list, which is always True — so
        # duplicate bin counts were listed more than once. Use a
        # membership test to actually de-duplicate.
        if nbins not in choices:
            choices.append(nbins)
    return choices
def add_heatmap(self):
    """Add a settings group describing one heatmap display."""
    group = cps.SettingsGroup()
    if len(self.heatmaps) > 0:
        group.append("divider", cps.Divider(line=False))
    # Subscribers restricted to the images/objects chosen in this module.
    group.append("image_name", MORDImageNameSubscriber(
        "Image", doc="""
        The heatmap will be displayed with measurements taken using this image.
        The setting will let you choose from among the images you have
        specified in "Select image to measure".
        """))
    group.image_name.set_module(self)
    group.append("object_name", MORDObjectNameSubscriber(
        "Objects to display", doc = """
        The objects to display in the heatmap. You can select any of the
        objects chosen in "Select objects to measure".
        """))
    group.object_name.set_module(self)
    group.append("bin_count", cps.Choice(
        "Number of bins", self.get_bin_count_choices(),
        choices_fn = self.get_bin_count_choices))
    def get_number_of_bins(module = self, group=group):
        # With a single bin group the choice setting is hidden, so read
        # the bin count directly from that group.
        if len(module.bin_counts) == 1:
            return module.bin_counts[0].bin_count.value
        else:
            return int(group.bin_count.value)
    group.get_number_of_bins = get_number_of_bins
    group.append("measurement", cps.Choice(
        "Measurement", MEASUREMENT_CHOICES,
        doc="""The measurement to display."""))
    group.append("colormap", cps.Colormap(
        "Color map",
        doc="""
        The color map setting chooses the color palette that will be
        used to render the different values for your measurement. If you
        choose "gray", the image will label each of the bins with the
        actual image measurement.
        """))
    group.append("wants_to_save_display", cps.Binary(
        "Save display as image?", False,
        doc = """This setting allows you to save the heatmap display as
        an image that can be output using the <b>SaveImages</b> module.
        Choose <i>yes</i> to save the display or <i>no</i> if the
        display is not needed."""))
    group.append("display_name", cps.ImageNameProvider(
        "Output image name", "Heatmap",
        doc = """
        <i>(Only used if "Save display as image?" is "yes")</i><br>
        This setting names the heatmap image so that the name you enter
        here can be selected in a later <b>SaveImages</b> or other module.
        """))
    group.append("remover", cps.RemoveSettingButton(
        "", "Remove this heatmap display", self.heatmaps, group))
    self.heatmaps.append(group)
def validate_module(self, pipeline):
    """Make sure chosen objects, images and bins are selected only once"""
    # Each group list pairs with the attribute whose value must be unique.
    for group_list, setting_name in ((self.images, "image_name"),
                                     (self.objects, "object_name"),
                                     (self.bin_counts, "bin_count")):
        seen = set()
        for group in group_list:
            setting = getattr(group, setting_name)
            if setting.value in seen:
                raise cps.ValidationError(
                    "%s has already been selected" % setting.value,
                    setting)
            seen.add(setting.value)
def settings(self):
    """Return all settings in pipeline-save order: the four hidden
    counts first, then every group's settings."""
    result = [self.image_count, self.object_count,
              self.bin_counts_count, self.heatmap_count]
    for group_list in (self.images, self.objects,
                       self.bin_counts, self.heatmaps):
        for group in group_list:
            result.extend(group.pipeline_settings())
    return result
def visible_settings(self):
    """Return the settings shown in the UI, in display order."""
    result = []
    for settings in self.images:
        result += settings.visible_settings()
    result += [self.add_image_button, self.spacer_1]
    for settings in self.objects:
        temp = settings.visible_settings()
        # The center-object chooser only applies when centering on
        # another object.
        if settings.center_choice.value == C_SELF:
            temp.remove(settings.center_object_name)
        result += temp
    result += [self.add_object_button, self.spacer_2]
    for settings in self.bin_counts:
        result += [settings.wants_scaled, settings.bin_count]
        # Maximum radius is only meaningful for unscaled bins.
        if not settings.wants_scaled:
            result += [settings.maximum_radius]
        if settings.can_remove:
            result += [settings.remover]
    result += [self.add_bin_count_button, self.spacer_3]
    for settings in self.heatmaps:
        if hasattr(settings, "divider"):
            result.append(settings.divider)
        # Image/object choosers hide themselves when only one choice exists.
        if settings.image_name.is_visible():
            result.append(settings.image_name)
        if settings.object_name.is_visible():
            result.append(settings.object_name)
        if len(self.bin_counts) > 1:
            result.append(settings.bin_count)
        result += [settings.measurement, settings.colormap,
                   settings.wants_to_save_display]
        if settings.wants_to_save_display:
            result.append(settings.display_name)
        result.append(settings.remover)
    result += [self.add_heatmap_button]
    return result
def prepare_settings(self, setting_values):
    '''Adjust the numbers of images, objects and bin counts'''
    # The first four values are the hidden group counts.
    counts = [int(value) for value in setting_values[:4]]
    group_lists = (self.images, self.objects, self.bin_counts, self.heatmaps)
    adders = (self.add_image, self.add_object,
              self.add_bin_count, self.add_heatmap)
    for group_list, add_fn, target in zip(group_lists, adders, counts):
        # Trim surplus groups, then grow to the required count.
        del group_list[target:]
        while len(group_list) < target:
            add_fn()
def run(self, workspace):
    """Measure radial distribution for every image x object x bin-count
    combination and build any requested heatmap output images."""
    header = ("Image","Objects","Bin # (innermost=1)","Bin count","Fraction","Intensity","COV")
    stats = []
    # d caches per-combination partial results and, keyed by id(heatmap),
    # the heatmap images filled in by do_measurements.
    d = {}
    for image in self.images:
        for o in self.objects:
            for bin_count_settings in self.bin_counts:
                stats += \
                    self.do_measurements(workspace,
                                         image.image_name.value,
                                         o.object_name.value,
                                         o.center_object_name.value
                                         if o.center_choice != C_SELF
                                         else None,
                                         o.center_choice.value,
                                         bin_count_settings,
                                         d)
    if self.show_window:
        workspace.display_data.header = header
        workspace.display_data.stats = stats
        workspace.display_data.heatmaps = []
    for heatmap in self.heatmaps:
        heatmap_img = d.get(id(heatmap))
        if heatmap_img is not None:
            # Labels are needed both for masking the display and for
            # blanking the background of a saved image.
            if self.show_window or heatmap.wants_to_save_display:
                labels = workspace.object_set.get_objects(
                    heatmap.object_name.get_objects_name()).segmented
            if self.show_window:
                workspace.display_data.heatmaps.append(
                    (heatmap_img, labels != 0))
            if heatmap.wants_to_save_display:
                colormap = heatmap.colormap.value
                if colormap == matplotlib.cm.gray.name:
                    # Grayscale: save the raw measurement values.
                    output_pixels = heatmap_img
                else:
                    if colormap == cps.DEFAULT:
                        colormap = cpprefs.get_default_colormap()
                    # Render through the colormap, dropping alpha, and
                    # black out non-object pixels.
                    cm = matplotlib.cm.ScalarMappable(
                        cmap = colormap)
                    output_pixels = cm.to_rgba(heatmap_img)[:, :, :3]
                    output_pixels[labels == 0, :] = 0
                parent_image = workspace.image_set.get_image(
                    heatmap.image_name.get_image_name())
                output_img = cpi.Image(
                    output_pixels,
                    parent_image = parent_image)
                img_name = heatmap.display_name.value
                workspace.image_set.add(img_name, output_img)
def display(self, workspace, figure):
    """Show the statistics table plus one subplot per heatmap."""
    header = workspace.display_data.header
    stats = workspace.display_data.stats
    # One subplot for the table plus one per heatmap, laid out in a
    # near-square grid.
    n_plots = len(workspace.display_data.heatmaps) + 1
    n_vert = int(np.sqrt(n_plots))
    n_horiz = int(np.ceil(float(n_plots) / n_vert))
    figure.set_subplots((n_horiz, n_vert))
    figure.subplot_table(0, 0, stats, col_labels=header)
    idx = 1
    # All heatmap subplots share pan/zoom with the first one.
    sharexy = None
    for heatmap, (heatmap_img, mask) in zip(
        self.heatmaps, workspace.display_data.heatmaps):
        # Mask out background pixels so they are not colored.
        heatmap_img = np.ma.array(heatmap_img, mask=~mask)
        # NOTE(review): this None check is vacuous after the wrap above;
        # kept as-is to preserve the original control flow.
        if heatmap_img is not None:
            title = "%s %s %s" % (
                heatmap.image_name.get_image_name(),
                heatmap.object_name.get_objects_name(),
                heatmap.measurement.value)
            x = idx % n_horiz
            y = int(idx / n_horiz)
            colormap = heatmap.colormap.value
            if colormap == cps.DEFAULT:
                colormap = cpprefs.get_default_colormap()
            if sharexy is None:
                sharexy = figure.subplot_imshow(
                    x, y, heatmap_img,
                    title = title,
                    colormap = colormap,
                    normalize = False,
                    vmin = np.min(heatmap_img),
                    vmax = np.max(heatmap_img),
                    colorbar = True)
            else:
                figure.subplot_imshow(
                    x, y, heatmap_img,
                    title = title,
                    colormap = colormap,
                    colorbar = True,
                    normalize = False,
                    vmin = np.min(heatmap_img),
                    vmax = np.max(heatmap_img),
                    sharexy = sharexy)
            idx += 1
def do_measurements(self, workspace, image_name, object_name,
                    center_object_name, center_choice,
                    bin_count_settings, dd):
    '''Perform the radial measurements on the image set

    workspace - workspace that holds images / objects
    image_name - make measurements on this image
    object_name - make measurements on these objects
    center_object_name - use the centers of these related objects as
                the centers for radial measurements. None to use the
                objects themselves.
    center_choice - the user's center choice for this object:
                C_SELF, C_CENTERS_OF_OBJECTS or C_EDGES_OF_OBJECTS.
    bin_count_settings - the bin count settings group
    dd - a dictionary for saving reusable partial results
    returns one statistics tuple per ring.
    '''
    assert isinstance(workspace, cpw.Workspace)
    assert isinstance(workspace.object_set, cpo.ObjectSet)
    bin_count = bin_count_settings.bin_count.value
    wants_scaled = bin_count_settings.wants_scaled.value
    maximum_radius = bin_count_settings.maximum_radius.value
    image = workspace.image_set.get_image(image_name,
                                          must_be_grayscale=True)
    objects = workspace.object_set.get_objects(object_name)
    # Crop the label matrix and image to a common shape.
    labels, pixel_data = cpo.crop_labels_and_image(objects.segmented,
                                                   image.pixel_data)
    nobjects = np.max(objects.segmented)
    measurements = workspace.measurements
    assert isinstance(measurements, cpmeas.Measurements)
    # Pre-allocate a heatmap canvas for every heatmap display that
    # matches this image / objects / bin-count combination; also stash
    # it in dd under id(heatmap) so run() can retrieve it.
    heatmaps = {}
    for heatmap in self.heatmaps:
        if heatmap.object_name.get_objects_name() == object_name and \
           image_name == heatmap.image_name.get_image_name() and \
           heatmap.get_number_of_bins() == bin_count:
            dd[id(heatmap)] = \
                heatmaps[MEASUREMENT_ALIASES[heatmap.measurement.value]] = \
                np.zeros(labels.shape)
    if nobjects == 0:
        # No objects: record empty measurements for every bin/feature so
        # downstream modules still find the expected columns.
        for bin in range(1, bin_count+1):
            for feature in (F_FRAC_AT_D, F_MEAN_FRAC, F_RADIAL_CV):
                feature_name = (
                    (feature + FF_GENERIC) % (image_name, bin, bin_count))
                measurements.add_measurement(
                    object_name, "_".join([M_CATEGORY, feature_name]),
                    np.zeros(0))
                if not wants_scaled:
                    measurement_name = "_".join([M_CATEGORY, feature,
                                                 image_name, FF_OVERFLOW])
                    measurements.add_measurement(
                        object_name, measurement_name, np.zeros(0))
        return [(image_name, object_name, "no objects","-","-","-","-")]
    # The distance geometry only depends on the object/center pairing,
    # so it is cached in dd and reused across bin-count settings.
    name = (object_name if center_object_name is None
            else "%s_%s"%(object_name, center_object_name))
    if dd.has_key(name):
        normalized_distance, i_center, j_center, good_mask = dd[name]
    else:
        d_to_edge = distance_to_edge(labels)
        if center_object_name is not None:
            #
            # Use the center of the centering objects to assign a center
            # to each labeled pixel using propagation
            #
            center_objects=workspace.object_set.get_objects(center_object_name)
            center_labels, cmask = cpo.size_similarly(
                labels, center_objects.segmented)
            pixel_counts = fix(scind.sum(
                np.ones(center_labels.shape),
                center_labels,
                np.arange(1, np.max(center_labels)+1,dtype=np.int32)))
            good = pixel_counts > 0
            i,j = (centers_of_labels(center_labels) + .5).astype(int)
            ig = i[good]
            jg = j[good]
            lg = np.arange(1, len(i)+1)[good]
            if center_choice == C_CENTERS_OF_OTHER:
                #
                # Reduce the propagation labels to the centers of
                # the centering objects
                #
                center_labels = np.zeros(center_labels.shape, int)
                center_labels[ig,jg] = lg
            cl,d_from_center = propagate(np.zeros(center_labels.shape),
                                         center_labels,
                                         labels != 0, 1)
            #
            # Erase the centers that fall outside of labels
            #
            cl[labels == 0] = 0
            #
            # If objects are hollow or crescent-shaped, there may be
            # objects without center labels. As a backup, find the
            # center that is the closest to the center of mass.
            #
            missing_mask = (labels != 0) & (cl == 0)
            missing_labels = np.unique(labels[missing_mask])
            if len(missing_labels):
                all_centers = centers_of_labels(labels)
                missing_i_centers, missing_j_centers = \
                    all_centers[:, missing_labels-1]
                di = missing_i_centers[:, np.newaxis] - ig[np.newaxis, :]
                dj = missing_j_centers[:, np.newaxis] - jg[np.newaxis, :]
                missing_best = lg[np.argsort((di*di + dj*dj, ))[:, 0]]
                best = np.zeros(np.max(labels) + 1, int)
                best[missing_labels] = missing_best
                cl[missing_mask] = best[labels[missing_mask]]
                #
                # Now compute the crow-flies distance to the centers
                # of these pixels from whatever center was assigned to
                # the object.
                #
                iii, jjj = np.mgrid[0:labels.shape[0], 0:labels.shape[1]]
                di = iii[missing_mask] - i[cl[missing_mask] - 1]
                dj = jjj[missing_mask] - j[cl[missing_mask] - 1]
                d_from_center[missing_mask] = np.sqrt(di*di + dj*dj)
        else:
            # Find the point in each object farthest away from the edge.
            # This does better than the centroid:
            # * The center is within the object
            # * The center tends to be an interesting point, like the
            #   center of the nucleus or the center of one or the other
            #   of two touching cells.
            #
            i,j = maximum_position_of_labels(d_to_edge, labels, objects.indices)
            center_labels = np.zeros(labels.shape, int)
            center_labels[i,j] = labels[i,j]
            #
            # Use the coloring trick here to process touching objects
            # in separate operations
            #
            colors = color_labels(labels)
            ncolors = np.max(colors)
            d_from_center = np.zeros(labels.shape)
            cl = np.zeros(labels.shape, int)
            for color in range(1,ncolors+1):
                mask = colors == color
                l,d = propagate(np.zeros(center_labels.shape),
                                center_labels,
                                mask, 1)
                d_from_center[mask] = d[mask]
                cl[mask] = l[mask]
        good_mask = cl > 0
        if center_choice == C_EDGES_OF_OTHER:
            # Exclude pixels within the centering objects
            # when performing calculations from the centers
            good_mask = good_mask & (center_labels == 0)
        i_center = np.zeros(cl.shape)
        i_center[good_mask] = i[cl[good_mask]-1]
        j_center = np.zeros(cl.shape)
        j_center[good_mask] = j[cl[good_mask]-1]
        normalized_distance = np.zeros(labels.shape)
        if wants_scaled:
            # Fraction of the way from center to edge (epsilon avoids
            # division by zero at the boundary).
            total_distance = d_from_center + d_to_edge
            normalized_distance[good_mask] = (d_from_center[good_mask] /
                                              (total_distance[good_mask] + .001))
        else:
            normalized_distance[good_mask] = \
                d_from_center[good_mask] / maximum_radius
        dd[name] = [normalized_distance, i_center, j_center, good_mask]
    ngood_pixels = np.sum(good_mask)
    good_labels = labels[good_mask]
    # Bin index per pixel; everything past the last ring goes into the
    # overflow bin (index == bin_count).
    bin_indexes = (normalized_distance * bin_count).astype(int)
    bin_indexes[bin_indexes > bin_count] = bin_count
    labels_and_bins = (good_labels-1, bin_indexes[good_mask])
    # Per-object, per-bin intensity totals via sparse accumulation.
    histogram = coo_matrix((pixel_data[good_mask], labels_and_bins),
                           (nobjects, bin_count+1)).toarray()
    sum_by_object = np.sum(histogram, 1)
    sum_by_object_per_bin = np.dstack([sum_by_object]*(bin_count + 1))[0]
    fraction_at_distance = histogram / sum_by_object_per_bin
    # Per-object, per-bin pixel counts.
    number_at_distance = coo_matrix((np.ones(ngood_pixels),labels_and_bins),
                                    (nobjects, bin_count+1)).toarray()
    object_mask = number_at_distance > 0
    sum_by_object = np.sum(number_at_distance, 1)
    sum_by_object_per_bin = np.dstack([sum_by_object]*(bin_count+1))[0]
    fraction_at_bin = number_at_distance / sum_by_object_per_bin
    # MeanFrac: intensity fraction normalized by pixel fraction.
    mean_pixel_fraction = fraction_at_distance / (fraction_at_bin +
                                                  np.finfo(float).eps)
    masked_fraction_at_distance = masked_array(fraction_at_distance,
                                               ~object_mask)
    masked_mean_pixel_fraction = masked_array(mean_pixel_fraction,
                                              ~object_mask)
    # Anisotropy calculation. Split each cell into eight wedges, then
    # compute coefficient of variation of the wedges' mean intensities
    # in each ring.
    #
    # Compute each pixel's delta from the center object's centroid
    i,j = np.mgrid[0:labels.shape[0], 0:labels.shape[1]]
    imask = i[good_mask] > i_center[good_mask]
    jmask = j[good_mask] > j_center[good_mask]
    absmask = (abs(i[good_mask] - i_center[good_mask]) >
               abs(j[good_mask] - j_center[good_mask]))
    # Three bits -> wedge number 0..7.
    radial_index = (imask.astype(int) + jmask.astype(int)*2 +
                    absmask.astype(int)*4)
    statistics = []
    # One extra iteration for the overflow bin when bins are unscaled.
    for bin in range(bin_count + (0 if wants_scaled else 1)):
        bin_mask = (good_mask & (bin_indexes == bin))
        bin_pixels = np.sum(bin_mask)
        bin_labels = labels[bin_mask]
        bin_radial_index = radial_index[bin_indexes[good_mask] == bin]
        labels_and_radii = (bin_labels-1, bin_radial_index)
        # Per-object, per-wedge intensity sums and pixel counts.
        radial_values = coo_matrix((pixel_data[bin_mask],
                                    labels_and_radii),
                                   (nobjects, 8)).toarray()
        pixel_count = coo_matrix((np.ones(bin_pixels), labels_and_radii),
                                 (nobjects, 8)).toarray()
        mask = pixel_count==0
        radial_means = masked_array(radial_values / pixel_count, mask)
        radial_cv = np.std(radial_means,1) / np.mean(radial_means, 1)
        # Objects with no pixels in this ring get a CV of zero.
        radial_cv[np.sum(~mask,1)==0] = 0
        for measurement, feature, overflow_feature in (
            (fraction_at_distance[:,bin], MF_FRAC_AT_D, OF_FRAC_AT_D),
            (mean_pixel_fraction[:,bin], MF_MEAN_FRAC, OF_MEAN_FRAC),
            (np.array(radial_cv), MF_RADIAL_CV, OF_RADIAL_CV)):
            if bin == bin_count:
                measurement_name = overflow_feature % image_name
            else:
                measurement_name = feature % (image_name, bin+1, bin_count)
            measurements.add_measurement(object_name,
                                         measurement_name,
                                         measurement)
            # Paint the ring pixels with the per-object measurement value.
            if feature in heatmaps:
                heatmaps[feature][bin_mask] = measurement[bin_labels-1]
        radial_cv.mask = np.sum(~mask,1)==0
        bin_name = str(bin+1) if bin < bin_count else "Overflow"
        statistics += [(image_name, object_name, bin_name, str(bin_count),
                        round(np.mean(masked_fraction_at_distance[:,bin]),4),
                        round(np.mean(masked_mean_pixel_fraction[:, bin]),4),
                        round(np.mean(radial_cv),4))]
    return statistics
def get_measurement_columns(self, pipeline):
    """Return (object, feature, coltype) tuples for every measurement
    this module will make."""
    columns = []
    feature_pairs = ((MF_FRAC_AT_D, OF_FRAC_AT_D),
                     (MF_MEAN_FRAC, OF_MEAN_FRAC),
                     (MF_RADIAL_CV, OF_RADIAL_CV))
    for image_group in self.images:
        image_name = image_group.image_name.value
        for object_group in self.objects:
            object_name = object_group.object_name.value
            for bin_group in self.bin_counts:
                n_bins = bin_group.bin_count.value
                for scaled_feature, overflow_feature in feature_pairs:
                    columns += [
                        (object_name,
                         scaled_feature % (image_name, bin_index, n_bins),
                         cpmeas.COLTYPE_FLOAT)
                        for bin_index in range(1, n_bins + 1)]
                    # Unscaled bins also record an overflow column.
                    if not bin_group.wants_scaled.value:
                        columns.append(
                            (object_name,
                             overflow_feature % image_name,
                             cpmeas.COLTYPE_FLOAT))
    return columns
def get_categories(self, pipeline, object_name):
    """Return the measurement category if object_name is measured here."""
    measured_names = [group.object_name.value for group in self.objects]
    return [M_CATEGORY] if object_name in measured_names else []
def get_measurements(self, pipeline, object_name, category):
    """Return every feature name, provided the category applies."""
    if category not in self.get_categories(pipeline, object_name):
        return []
    return F_ALL
def get_measurement_images(self, pipeline, object_name, category, feature):
    """Return the images measured for the feature, or [] if inapplicable."""
    if feature not in self.get_measurements(pipeline, object_name, category):
        return []
    return [group.image_name.value for group in self.images]
def get_measurement_scales(self, pipeline, object_name, category, feature,
                           image_name):
    """Return the scale names: "<bin>of<total>" per bin of each bin
    group, plus "Overflow" when any group uses unscaled bins."""
    valid_images = self.get_measurement_images(
        pipeline, object_name, category, feature)
    if image_name not in valid_images:
        return []
    result = []
    for bin_group in self.bin_counts:
        total = bin_group.bin_count.value
        result += [FF_SCALE % (index, total)
                   for index in range(1, total + 1)]
    if any(not bin_group.wants_scaled.value
           for bin_group in self.bin_counts):
        result += [FF_OVERFLOW]
    return result
def upgrade_settings(self,setting_values,variable_revision_number,
                     module_name,from_matlab):
    """Migrate setting values saved by older module revisions up to
    revision 4. Each stage rewrites setting_values in place and bumps
    the revision, so a very old pipeline flows through every stage."""
    if from_matlab and variable_revision_number == 1:
        # Matlab pipelines stored one image/object/center/bin quadruple.
        image_name, object_name, center_name, bin_count = setting_values[:4]
        if center_name == cps.DO_NOT_USE:
            center_choice = C_SELF
        else:
            center_choice = C_CENTERS_OF_OTHER
        setting_values = ["1","1","1",image_name,
                          object_name, center_choice, center_name,
                          bin_count]
        variable_revision_number = 1
        from_matlab = False
    if variable_revision_number == 1:
        # v1 -> v2: bin groups gained "wants_scaled" and "maximum_radius".
        n_images, n_objects, n_bins = [
            int(setting) for setting in setting_values[:3]]
        off_bins = (SETTINGS_STATIC_COUNT +
                    n_images * SETTINGS_IMAGE_GROUP_COUNT +
                    n_objects * SETTINGS_OBJECT_GROUP_COUNT)
        new_setting_values = setting_values[:off_bins]
        for bin_count in setting_values[off_bins:]:
            new_setting_values += [ cps.YES, bin_count, "100"]
        setting_values = new_setting_values
        variable_revision_number = 2
    if variable_revision_number == 2:
        # v2 -> v3: center choice "Other objects" was renamed to
        # "Centers of other objects".
        n_images, n_objects = [
            int(setting) for setting in setting_values[:2]]
        off_objects = (SETTINGS_STATIC_COUNT +
                       n_images * SETTINGS_IMAGE_GROUP_COUNT)
        setting_values = list(setting_values)
        for i in range(n_objects):
            offset = (off_objects + i * SETTINGS_OBJECT_GROUP_COUNT +
                      SETTINGS_CENTER_CHOICE_OFFSET)
            if setting_values[offset] == C_CENTERS_OF_OTHER_V2:
                setting_values[offset] = C_CENTERS_OF_OTHER
        variable_revision_number = 3
    if variable_revision_number == 3:
        # v3 -> v4: heatmap groups were added; insert a heatmap_count
        # of zero after the three original hidden counts.
        setting_values = setting_values[:3] + ["0"] + setting_values[3:]
        variable_revision_number = 4
    return setting_values, variable_revision_number, from_matlab
class MORDObjectNameSubscriber(cps.ObjectNameSubscriber):
    '''An object name subscriber limited by the objects in the objects' group'''
    def set_module(self, module):
        # Remember the owning module so that choices can be restricted
        # to its object groups.
        assert isinstance(module, MeasureObjectRadialDistribution)
        self.__module = module
    def __is_valid_choice(self, choice_tuple):
        # Valid only if the name matches one of the module's
        # "Select objects to measure" settings.
        return any(choice_tuple[0] == object_group.object_name
                   for object_group in self.__module.objects)
    def get_choices(self, pipeline):
        all_choices = super(self.__class__, self).get_choices(pipeline)
        return filter(self.__is_valid_choice, all_choices)
    def is_visible(self):
        '''Return True if a choice should be displayed'''
        return len(self.__module.objects) > 1
    def get_objects_name(self):
        '''Return the name of the objects to use in the display'''
        object_groups = self.__module.objects
        if len(object_groups) == 1:
            # A single group's name is used implicitly.
            return object_groups[0].object_name.value
        return self.value
class MORDImageNameSubscriber(cps.ImageNameSubscriber):
    '''An image name subscriber limited by the images in the image group'''

    def set_module(self, module):
        # Remember the owning module so choices can be filtered by its groups.
        assert isinstance(module, MeasureObjectRadialDistribution)
        self.__module = module

    def __is_valid_choice(self, choice_tuple):
        # A choice is valid only if its name matches one of the module's images.
        candidate_name = choice_tuple[0]
        return any(candidate_name == image_group.image_name
                   for image_group in self.__module.images)

    def get_choices(self, pipeline):
        super_choices = super(self.__class__, self).get_choices(pipeline)
        return filter(self.__is_valid_choice, super_choices)

    def is_visible(self):
        '''Return True if a choice should be displayed'''
        return len(self.__module.images) > 1

    def get_image_name(self):
        '''Return the name of the image to use in the display'''
        if len(self.__module.images) == 1:
            return self.__module.images[0].image_name.value
        return self.value
| gpl-2.0 |
heroxbd/SHTOOLS | examples/python/TestLegendre/TestLegendre.py | 1 | 4995 | #!/usr/bin/env python
"""
This script tests and plots all Geodesy normalized Legendre functions.
Parameters can be changed in the main function.
"""
from __future__ import absolute_import, division, print_function
import os
import sys
import numpy as np
import matplotlib as mpl
import matplotlib.pyplot as plt
sys.path.append(os.path.join(os.path.dirname(__file__), "../../.."))
from pyshtools import shtools
# set shtools plot style:
sys.path.append(os.path.join(os.path.dirname(__file__), "../Common"))
from FigStyle import style_shtools
mpl.rcParams.update(style_shtools)
# ==== MAIN FUNCTION ====
def main():
    """Run both Legendre-function test suites with the parameters below."""
    # --- input parameters (change here) ---
    normalization = ''  # one of ['Bar', 'Schmidt', 'ON', '']
    lmax = 40                 # maximum degree
    mplot = min(lmax, 10)     # maximum plotting order (all degrees are plotted)

    # --- run tests ---
    test_associatedlegendre(lmax, mplot, normalization)
    test_legendre(lmax, normalization)
# ==== TEST LEGENDRE FUNCTIONS ====
def test_legendre(lmax, normalization):
    """Test the Legendre functions Pl* and their *_d1 derivative variants.

    Verifies that Pl and Pl_d1 return identical function values on a grid of
    colatitudes, then plots the functions and derivatives.

    :param lmax: maximum spherical-harmonic degree to test
    :param normalization: one of '', 'Bar', 'Schmidt' or 'ON'
    :raises Exception: if the two routines disagree (rtol > 1e-10)
    """
    print('testing Pl{0} and Pl{0}_d1...'.format(normalization))

    # --- import function from shtools ---
    if normalization == '':
        Pl = shtools.PLegendre
        Pl_d1 = shtools.PLegendre_d1
    else:
        Pl = getattr(shtools, 'Pl' + normalization)
        Pl_d1 = getattr(shtools, 'Pl' + normalization + '_d1')

    # --- derived parameters ---
    npoints = 5 * lmax
    cost = np.cos(np.linspace(np.pi / npoints, np.pi - np.pi / npoints,
                              npoints))

    # --- create arrays to store Legendre functions of degrees l at all
    # --- points cost
    Pl1 = np.zeros((npoints, lmax))
    Pl2 = np.zeros((npoints, lmax))
    dPl2 = np.zeros((npoints, lmax))

    for iz, z in enumerate(cost):
        Pl1_buf = Pl(lmax, z)
        Pl2_buf, dPl2_buf = Pl_d1(lmax, z)
        # copy degrees 0..lmax-1 in one vectorized assignment
        # (replaces the per-degree element copy loop)
        Pl1[iz, :] = Pl1_buf[:lmax]
        Pl2[iz, :] = Pl2_buf[:lmax]
        dPl2[iz, :] = dPl2_buf[:lmax]

    # ---- check if both subroutines computed the same Legendre functions ---
    if not np.allclose(Pl1, Pl2, rtol=1e-10):
        # bugfix: the message previously named PlmON/PlmON_d1 (copy-pasted
        # from the associated-Legendre test); name the routines actually used
        raise Exception('Legendre functions from Pl{0} and Pl{0}_d1 are '
                        'different (rtol>1e-10)'.format(normalization))

    # ---- plot the legendre functions and derivatives up to maximum
    # ---- order mplot
    fig, ax = plt.subplots(1, 2, sharey=True, figsize=(15, 6))
    fig.suptitle('orthonormalized Legendre functions (col1) and ' +
                 'derivatives (col2)')
    ax[0].imshow(Pl1[:, :], extent=(0., lmax, 0., np.pi), aspect='auto')
    ax[1].imshow(dPl2[:, :], extent=(0., lmax, 0., np.pi), aspect='auto')
    ax[1].set_xlabel('l')
    fig.savefig('legendre.png')
# ==== TEST ASSOCIATED LEGENDRE FUNCTIONS ====
def test_associatedlegendre(lmax, mplot, normalization):
    """Test the associated Legendre functions Plm* and their derivatives.

    Verifies that Plm and Plm_d1 return identical function values on a grid
    of colatitudes, then plots functions and derivatives up to order mplot.

    :param lmax: maximum spherical-harmonic degree to test
    :param mplot: maximum order to plot (all degrees are plotted)
    :param normalization: one of '', 'Bar', 'Schmidt' or 'ON'
    :raises Exception: if the two routines disagree (rtol > 1e-10)
    """
    print('testing Plm{0} and Plm{0}_d1...'.format(normalization))

    # --- import function from shtools ---
    if normalization == '':
        Plm = shtools.PLegendreA
        Plm_d1 = shtools.PLegendreA_d1
    else:
        Plm = getattr(shtools, 'Plm' + normalization)
        Plm_d1 = getattr(shtools, 'Plm' + normalization + '_d1')

    # --- derived parameters ---
    npoints = 5 * lmax
    ls = np.arange(lmax)
    cost = np.cos(np.linspace(np.pi / npoints, np.pi - np.pi / npoints,
                              npoints))

    # --- create arrays to store Legendre functions of degrees l and orders
    # --- m at all points cost
    Plm1 = np.zeros((npoints, lmax, lmax))
    Plm2 = np.zeros((npoints, lmax, lmax))
    dPlm2 = np.zeros((npoints, lmax, lmax))

    for iz, z in enumerate(cost):
        Plm1_buf = Plm(lmax, z)
        Plm2_buf, dPlm2_buf = Plm_d1(lmax, z)
        for l in ls:
            for m in np.arange(l):
                ind = shtools.PlmIndex(l, m) - 1  # Fortran indexing
                Plm1[iz, l, m] = Plm1_buf[ind]
                Plm2[iz, l, m] = Plm2_buf[ind]
                dPlm2[iz, l, m] = dPlm2_buf[ind]

    # ---- check if both subroutines computed the same Legendre functions ---
    # bugfix: compare the full accumulated arrays, not only the buffers left
    # over from the last grid point (previously Plm1_buf vs Plm2_buf)
    if not np.allclose(Plm1, Plm2, rtol=1e-10):
        raise Exception('Legendre functions from Plm{0} and Plm{0}_d1 are '
                        'different (rtol>1e-10)'.format(normalization))

    # ---- plot the legendre functions and derivatives up to maximum
    # ---- order mplot
    fig, ax = plt.subplots(2, mplot, sharey=True, sharex=True, figsize=(15, 6))
    fig.suptitle('orthonormalized associated Legendre functions (row1) and ' +
                 'derivatives (row2)')
    for m in range(mplot):
        ax[0, m].imshow(Plm1[:, :, m], extent=(0., lmax, 0., np.pi),
                        aspect='auto')
        ax[0, m].set_title('m=%d' % m)
        ax[1, m].imshow(dPlm2[:, :, m], extent=(0., lmax, 0., np.pi),
                        aspect='auto')
        ax[1, m].set_xlabel('l')
    fig.savefig('associatedlegendre.png')
# ==== EXECUTE SCRIPT ====
if __name__ == "__main__":
main()
| bsd-3-clause |
jeffery-do/Vizdoombot | doom/lib/python3.5/site-packages/matplotlib/testing/jpl_units/__init__.py | 8 | 3266 | #=======================================================================
"""
This is a sample set of units for use with testing unit conversion
of matplotlib routines. These are used because they use very strict
enforcement of unitized data which will test the entire spectrum of how
unitized data might be used (it is not always meaningful to convert to
a float without specific units given).
UnitDbl is essentially a unitized floating point number. It has a
minimal set of supported units (enough for testing purposes). All
of the mathematical operations are provided to fully test any behaviour
that might occur with unitized data. Remember that unitized data has
rules as to how it can be applied to one another (a value of distance
cannot be added to a value of time). Thus we need to guard against any
accidental "default" conversion that will strip away the meaning of the
data and render it neutered.
Epoch is different than a UnitDbl of time. Time is something that can be
measured where an Epoch is a specific moment in time. Epochs are typically
referenced as an offset from some predetermined epoch.
A difference of two epochs is a Duration. The distinction between a
Duration and a UnitDbl of time is made because an Epoch can have different
frames (or units). In the case of our test Epoch class the two allowed
frames are 'UTC' and 'ET' (Note that these are rough estimates provided for
testing purposes and should not be used in production code where accuracy
of time frames is desired). As such a Duration also has a frame of
reference and therefore needs to be called out as different that a simple
measurement of time since a delta-t in one frame may not be the same in another.
"""
#=======================================================================
from __future__ import (absolute_import, division, print_function,
unicode_literals)
from matplotlib.externals import six
from .Duration import Duration
from .Epoch import Epoch
from .UnitDbl import UnitDbl
from .Duration import Duration
from .Epoch import Epoch
from .UnitDbl import UnitDbl
from .StrConverter import StrConverter
from .EpochConverter import EpochConverter
from .UnitDblConverter import UnitDblConverter
from .UnitDblFormatter import UnitDblFormatter
#=======================================================================
# Package version of this testing-units helper.
__version__ = "1.0"

# Names exported by `from ... import *`.
__all__ = [
    'register',
    'Duration',
    'Epoch',
    'UnitDbl',
    'UnitDblFormatter',
]
#=======================================================================
def register():
    """Register the unit conversion classes with matplotlib."""
    # Imported lazily so importing this package does not pull in the
    # matplotlib units machinery until registration is requested.
    import matplotlib.units as mplU

    mplU.registry[str] = StrConverter()
    mplU.registry[Epoch] = EpochConverter()
    mplU.registry[UnitDbl] = UnitDblConverter()
#=======================================================================
# Some default unit instances

# Distances
m = UnitDbl( 1.0, "m" )
km = UnitDbl( 1.0, "km" )
mile = UnitDbl( 1.0, "mile" )
# Angles
deg = UnitDbl( 1.0, "deg" )
rad = UnitDbl( 1.0, "rad" )
# Time
sec = UnitDbl( 1.0, "sec" )
# NOTE(review): `min` shadows the builtin of the same name in this module.
min = UnitDbl( 1.0, "min" )
hr = UnitDbl( 1.0, "hour" )
day = UnitDbl( 24.0, "hour" )
# NOTE(review): duplicate re-assignment of `sec` (same value as above);
# one of the two assignments could be removed.
sec = UnitDbl( 1.0, "sec" )
| mit |
ragnarekker/Ice-modelling | utilities/getregobsdata.py | 1 | 50189 | # -*- coding: utf-8 -*-
import datetime as dt
import requests
import os as os
import copy as cp
from icemodelling import ice as ice, constants as const
from utilities import makepickle as mp, makelogs as ml, doconversions as dc
from utilities import getmisc as gm
import setenvironment as se
import pandas as pd
__author__ = 'ragnarekker'
def get_obs_location(LocationName):
    """Uses OData query to get the ObsLocation data for a given ObsLocation name.

    :param LocationName: [string] location name as registered in regObs
    :return: [dict] the first matching ObsLocation record
    """
    url = ("http://api.nve.no/hydrology/regobs/{0}/Odata.svc/ObsLocation/"
           "?$filter=LocationName eq '{1}'&$format=json"
           .format(se.odata_version, LocationName))
    response = requests.get(url).json()
    # The name is expected to be unique; take the first result row.
    return response['d']['results'][0]
def get_ice_cover(LocationName, fromDate, toDate):
    """Return a list of IceCover objects from regObs between fromDate and toDate.

    :param LocationName: [string/list] name(s) as given in regObs in the ObsLocation table
    :param fromDate: [string] the from date as 'YYYY-MM-DD'
    :param toDate: [string] the to date as 'YYYY-MM-DD'
    :return: [list of ice.IceCover]

    Example query:
    http://api.nve.no/hydrology/regobs/v0.9.4/Odata.svc/IceCoverObsV?$filter=
    DtObsTime%20gt%20datetime%272013-11-01%27%20and%20
    DtObsTime%20lt%20datetime%272014-06-01%27%20and%20
    LocationName%20eq%20%27Hakkloa%20nord%20372%20moh%27%20and%20
    LangKey%20eq%201
    """
    # A list of locations is handled by recursing over each single location.
    if isinstance(LocationName, list):
        covers = []
        for single_location in LocationName:
            covers += get_ice_cover(single_location, fromDate, toDate)
        return covers

    oDataQuery = "DtObsTime gt datetime'{0}' and " \
                 "DtObsTime lt datetime'{1}' and " \
                 "LocationName eq '{2}' and " \
                 "LangKey eq 1".format(fromDate, toDate, LocationName)

    # get data for current view and dates
    url = "http://api.nve.no/hydrology/regobs/{0}/Odata.svc/{2}?$filter={1}&$format=json".format(
        se.odata_version, oDataQuery, 'IceCoverObsV')
    results = requests.get(url).json()['d']['results']

    covers = []
    for record in results:
        cover = ice.IceCover(dc.unix_time_2_normal(record['DtObsTime']),
                             record['IceCoverName'],
                             record['IceCoverBeforeName'],
                             LocationName)
        cover.set_regid(record['RegID'])
        cover.set_utm(record['UTMNorth'], record['UTMEast'], record['UTMZone'])
        covers.append(cover)
    return covers
def get_first_ice_cover(LocationName, fromDate, toDate):
    """Return the first observation where ice can form on a lake, i.e. the first
    observation where the ice cover is partly or fully formed on the observation
    location or the whole lake.

    If no such observation is found, an "empty" ice cover at fromDate is returned.

    :param LocationName: [string/list] name as given in regObs in the ObsLocation table
    :param fromDate: [string] the from date as 'YYYY-MM-DD'
    :param toDate: [string] the to date as 'YYYY-MM-DD'
    :return: [ice.IceCover]
    """
    season = get_ice_cover(LocationName, fromDate, toDate)
    season.sort(key=lambda cover: cover.date)  # oldest observations first

    # Ice present now: 2) delvis islagt på målestedet,
    # 3) helt islagt på målestedet, 21) hele sjøen islagt
    ice_now_tids = (2, 3, 21)
    # No (complete) ice before: 1) isfritt på målestedet, 2) delvis islagt
    # på målestedet, 11) islegging langs land, 20) hele sjøen isfri
    ice_before_tids = (1, 2, 11, 20)

    for cover in season:
        # First ice is when ice is present now but was not (fully) there before.
        if cover.iceCoverTID in ice_now_tids and \
                cover.iceCoverBeforeTID in ice_before_tids:
            return cover

    # No first-ice observation found; return an "empty" ice cover at fromDate.
    from_date = dt.datetime.strptime(fromDate, "%Y-%m-%d")
    return ice.IceCover(from_date, "Ikke gitt", 'Ikke gitt', LocationName)
def get_last_ice_cover(LocationName, fromDate, toDate):
    """Return the observation confirming ice is gone for the season from a lake:
    the first observation without ice after an observation (or observations)
    with ice, looking backwards from toDate.

    If none is found, an "empty" ice cover object on the last date in the
    period is returned. Works best when the dates range over whole seasons.

    :param LocationName: [string/list] name as given in regObs in the ObsLocation table
    :param fromDate: [string] the from date as 'YYYY-MM-DD'
    :param toDate: [string] the to date as 'YYYY-MM-DD'
    :return: [ice.IceCover]
    """
    season = get_ice_cover(LocationName, fromDate, toDate)
    season.sort(key=lambda cover: cover.date, reverse=True)  # newest first

    # Default: nothing confirms the ice is gone; use the last date in the period.
    to_date = dt.datetime.strptime(toDate, "%Y-%m-%d")
    no_ice_cover = ice.IceCover(to_date, "Ikke gitt", 'Ikke gitt', LocationName)

    for cover in season:
        # "Isfritt på målestedet" (1) or "Hele sjøen isfri" (20): remember
        # this older "no ice" observation and keep looking for ice before it.
        if cover.iceCoverTID in (1, 20):
            no_ice_cover = cover
        # "Delvis islagt på målestedet" (2), "Helt islagt på målestedet" (3)
        # or "Hele sjøen islagt" (21): ice confirmed, so the remembered
        # "no ice" observation is the season's last ice cover.
        if cover.iceCoverTID in (2, 3, 21):
            return no_ice_cover

    return no_ice_cover
def get_ice_thickness_on_regid(regid):
    """Return the ice column of one single regObs registration (RegID).

    :param regid: [int] regObs registration ID
    :return: [ice.IceColumn]
    """
    oDataQuery = "RegID eq {0} and " \
                 "LangKey eq 1".format(regid)

    # get data for current view and dates
    url = "http://api.nve.no/hydrology/regobs/{0}/Odata.svc/{2}?$filter={1}&$format=json".format(
        se.odata_version, oDataQuery, 'IceThicknessV')
    results = requests.get(url).json()['d']['results']

    # Only one ice column pr regid
    return _parse_ice_column(results[0])
def get_ice_thickness_on_location(LocationName, fromDate, toDate):
    """Return a list of ice columns between two dates for a given regObs location.

    :param LocationName: [string/list] name(s) as given in regObs in the
                         ObsLocation table; multiple locations possible
    :param fromDate: [string] the from date as 'YYYY-MM-DD'
    :param toDate: [string] the to date as 'YYYY-MM-DD'
    :return: [list of ice.IceColumn]
    """
    # A list of locations is handled by recursing over each single location.
    if isinstance(LocationName, list):
        columns = []
        for single_location in LocationName:
            columns += get_ice_thickness_on_location(single_location, fromDate, toDate)
        return columns

    oDataQuery = "DtObsTime gt datetime'{0}' and " \
                 "DtObsTime lt datetime'{1}' and " \
                 "LocationName eq '{2}' and " \
                 "LangKey eq 1".format(fromDate, toDate, LocationName)

    # get data for current view and dates
    url = "http://api.nve.no/hydrology/regobs/{0}/Odata.svc/{2}?$filter={1}&$format=json".format(
        se.odata_version, oDataQuery, 'IceThicknessV')
    results = requests.get(url).json()['d']['results']

    columns = []
    for record in results:
        column = _parse_ice_column(record)
        # _parse_ice_column returns None when the layers could not be read.
        if column:
            columns.append(column)
    return columns
def _parse_ice_column(ic):
    """Build an ice.IceColumn from one regObs IceThicknessV record.

    Fetches the individual ice layers for the registration, stacks observed
    snow and slush snow on top, and positions the column relative to the
    water line.

    :param ic: [dict] one result row from the IceThicknessV oData view
    :return: [ice.IceColumn] or None if the layers could not be read
    """
    RegID = ic['RegID']
    layers = get_ice_thickness_layers(RegID)
    ice_column = None
    if layers is not None:
        date = dc.unix_time_2_normal(ic['DtObsTime'])
        # No layer details given: fall back to a single layer of unknown
        # type covering the total observed thickness.
        if len(layers) == 0:
            layers = [ice.IceLayer(float(ic['IceThicknessSum']), 'unknown')]
        ice_column = ice.IceColumn(date, layers)
        ice_column.add_metadata('RegID', RegID)
        ice_column.add_metadata('LocationName', ic['LocationName'])
        ice_column.add_metadata('UTMNorth', ic['UTMNorth'])
        ice_column.add_metadata('UTMEast', ic['UTMEast'])
        ice_column.add_metadata('UTMZone', ic['UTMZone'])
        # Snow and slush snow are observed separately and go on top of the
        # column (index 0); snow ends up above the slush.
        ice_column.add_layer_at_index(0, ice.IceLayer(ic['SlushSnow'], 'slush'))
        ice_column.add_layer_at_index(0, ice.IceLayer(ic['SnowDepth'], 'snow'))
        ice_column.merge_and_remove_excess_layers()
        ice_column.update_draft_thickness()
        ice_column.update_top_layer_is_slush()
        iha = ic['IceHeightAfter']
        # if ice height after is not given I make an estimate so that I know where to put it in the plot
        if iha is None:
            ice_column.update_water_line()
            ice_column.add_metadata('IceHeightAfter', 'Modeled')
            iha = ice_column.draft_thickness - ice_column.water_line
            # A slush top layer pulls the column deeper into the water.
            if ice_column.top_layer_is_slush:
                iha = iha + const.snow_pull_on_water
        ice_column.water_line = ice_column.draft_thickness - float(iha)
        if ice_column.top_layer_is_slush is True:
            ice_column.water_line -= ice_column.column[0].height
    return ice_column
def get_all_season_ice_on_location(LocationNames, fromDate, toDate):
    """Uses the odata-api. Return all ice columns in the period [fromDate, toDate].

    Index 0 holds the first ice (a date with no ice layers) and the last index
    (-1) holds the last ice, which is the date when there is no more ice on
    the lake. If no first or last ice is found in regObs, the first and/or
    last date of the request marks the start/end of the ice-cover season.

    :param LocationNames: [string/list] name(s) as given in regObs in the ObsLocation table
    :param fromDate: [string] the from date as 'YYYY-MM-DD'
    :param toDate: [string] the to date as 'YYYY-MM-DD'
    :return: [list of ice.IceColumn]
    """
    if not isinstance(LocationNames, list):
        LocationNames = [LocationNames]

    all_columns = []
    for LocationName in LocationNames:
        first = get_first_ice_cover(LocationName, fromDate, toDate)
        last = get_last_ice_cover(LocationName, fromDate, toDate)

        # Wrap the first/last ice cover observations as empty (zero-layer)
        # ice columns that bracket the season.
        season_brackets = []
        for cover in (first, last):
            bracket = ice.IceColumn(cover.date, 0)
            bracket.add_metadata('LocationName', cover.locationName)
            bracket.add_metadata('RegID', cover.RegID)
            bracket.add_metadata('UTMNorth', cover.UTMNorth)
            bracket.add_metadata('UTMEast', cover.UTMEast)
            bracket.add_metadata('UTMZone', cover.UTMZone)
            season_brackets.append(bracket)

        columns = get_ice_thickness_on_location(LocationName, fromDate, toDate)
        all_columns += [season_brackets[0]] + columns + [season_brackets[1]]

    return all_columns
def get_ice_thickness_layers(RegID):
    """Return the ice layers of a given regObs registration (RegID).

    Reads only what is below the first solid ice layer. Snow and slush on
    top of the ice are not covered here and are added separately by the
    caller (see _parse_ice_column). Internal method for getregobsdata.py.

    :param RegID: [int] regObs registration ID
    :return: [list of ice.IceLayer] the layers, or [] if any layer has a
             missing/zero thickness (the registration is then unusable)

    Example of an ice layer object in regObs:
    http://api.nve.no/hydrology/regobs/v0.9.5/Odata.svc/IceThicknessLayerV?$filter=RegID%20eq%2034801%20and%20LangKey%20eq%201&$format=json
    """
    view = 'IceThicknessLayerV'
    url = "http://api.nve.no/hydrology/regobs/{0}/Odata.svc/{1}?" \
          "$filter=RegID eq {2} and LangKey eq 1&$format=json"\
        .format(se.odata_version, view, RegID)
    data = requests.get(url).json()
    datalist = data['d']['results']

    layers = []
    for l in datalist:
        thickness = l['IceLayerThickness']
        # bugfix: use `is None` rather than `== None` for the missing-value test
        if thickness is None or float(thickness) == 0:
            ml.log_and_print('getregobsdata.py -> get_ice_thickness_layers: RegID {0} har icelayers of None thicness.'.format(RegID))
            # One missing/zero layer invalidates the whole registration.
            return []
        regobs_layer_name = l['IceLayerName']
        layer_type = get_tid_from_name('IceLayerKDV', regobs_layer_name)
        layer_name = get_ice_type_from_tid(layer_type)
        layers.append(ice.IceLayer(float(thickness), layer_name))

    return layers
def get_ice_type_from_tid(IceLayerTID):
    """Return the ice type used by the IceLayer class for a regObs IceLayerTID.

    List of layer types available in regObs:
    http://api.nve.no/hydrology/regobs/v0.9.4/OData.svc/IceLayerKDV?$filter=Langkey%20eq%201%20&$format=json

    :param IceLayerTID: [int] regObs IceLayerTID
    :return: [string] ice type; 'unknown' for unsupported TIDs
    """
    # 11 is 'Stålis i nedbrytning', 13 is 'Sørpeis i nedbrytning' and
    # 14 is 'Stavis (våris)' in regObs; they map to the closest model types.
    tid_to_type = {
        1: 'black_ice',
        3: 'slush_ice',
        5: 'slush',
        11: 'black_ice',
        13: 'slush_ice',
        14: 'slush_ice',
    }
    # bugfix: the original returned 'unknown'.format(IceLayerTID) — a no-op
    # .format() call since the string has no placeholders.
    return tid_to_type.get(IceLayerTID, 'unknown')
def get_tid_from_name(x_kdv, name):
    """Get the xTID for a given xName in a regObs xKDV element; in other
    words, get the ID belonging to a given name.

    :param x_kdv: [string] name of the KDV view
    :param name: [string] the xName to look up
    :return: [int] the matching xTID, or -1 if the name is not found
    """
    matches = [kdv_tid for kdv_tid, kdv_name in get_kdv(x_kdv).items()
               if kdv_name == name]
    # As before: if a name occurs more than once, the last match wins.
    return matches[-1] if matches else -1
def get_kdv(x_kdv, get_new=False):
    """Imports a x_kdv view from regObs and returns a dictionary with <key, value> = <ID, Name>
    An x_kdv is requested from the regObs api if a pickle file newer than a week exists.

    :param x_kdv: [string] x_kdv view
    :param get_new: [bool] force a fresh request to the regObs api, bypassing the pickle cache
    :return dict: {} x_kdv as a dictionary

    Ex of use: aval_cause_kdv = get_kdv('AvalCauseKDV')
    Ex of url for returning values for IceCoverKDV in norwegian:
    http://api.nve.no/hydrology/regobs/v0.9.4/OData.svc/ForecastRegionKDV?$filter=Langkey%20eq%201%20&$format=json
    """
    kdv_file = '{0}{1}.pickle'.format(se.kdv_elements_folder, x_kdv)
    # NOTE(review): `dict` shadows the builtin of the same name inside this function.
    dict = {}
    if get_new:
        url = 'http://api.nve.no/hydrology/regobs/{0}/OData.svc/{1}?$filter=Langkey%20eq%201%20&$format=json'\
            .format(se.odata_version, x_kdv)
        ml.log_and_print("getregobsdata -> get_kdv: Getting KDV from URL:{0}".format(url))
        kdv = requests.get(url).json()
        for a in kdv['d']['results']:
            try:
                # AvalCauseKDV ids 10..25 use the Description field instead of Name.
                if 'AvalCauseKDV' in url and a['ID'] > 9 and a['ID'] < 26:  # this table gets special treatment
                    dict[a["ID"]] = a["Description"]
                else:
                    dict[a["ID"]] = a["Name"]
            except (RuntimeError, TypeError, NameError):
                # Skip malformed rows; the rest of the table is still usable.
                pass
        # Cache the freshly requested table for later calls.
        mp.pickle_anything(dict, kdv_file)
    else:
        if os.path.exists(kdv_file):
            # Useful to test if the file is old and if so make a new one
            max_file_age = 7
            mtime = os.path.getmtime(kdv_file)
            last_modified_date = dt.datetime.fromtimestamp(mtime).date()
            date_limit = dt.datetime.now() - dt.timedelta(days=max_file_age)
            # If file older than date limit, request a new.
            if last_modified_date < date_limit.date():
                dict = get_kdv(x_kdv, get_new=True)
            else:
                # ml.log_and_print("getregobsdata -> get_kdv: Getting KDV from pickle:{0}".format(kdv_file))
                dict = mp.unpickle_anything(kdv_file, print_message=False)
        else:
            # No cache file yet; request and cache a new one.
            dict = get_kdv(x_kdv, get_new=True)
    return dict
# webapi
# START VARSOMDATA
def _stringtime_2_datetime(stringtime):
"""Takes in a date as string, both given as unix datetime or normal local time, as string.
Method returns a normal datetime object.
:param stringtime:
:return: The date as datetime object
"""
if '/Date(' in stringtime: # oData gives unix time. Unix date time in milliseconds from 1.1.1970
unix_date_time = int(stringtime[6:-2])
unix_datetime_in_seconds = unix_date_time/1000 # For some reason they are given in miliseconds
date = dt.datetime.fromtimestamp(int(unix_datetime_in_seconds))
else: # regobs api gives local time
if '.' in stringtime: # though sometimes with seconds given with decimal places
non_decimal_stringtime = stringtime[0:stringtime.index('.')]
stringtime = non_decimal_stringtime
date = dt.datetime.strptime(stringtime, '%Y-%m-%dT%H:%M:%S')
### DOES REGOBS API RETURN UT TIME??? ###
return date
def _make_data_frame(list):
"""Takes a list of objects and makes a Pandas data frame.
:param list: [list of objects]
:return: [data frame]
"""
if len(list) == 0:
data_frame = pd.DataFrame()
else:
observation_fields = list[0].__dict__.keys()
data_frame = pd.DataFrame(columns=observation_fields)
i = 0
for l in list:
observation_values = l.__dict__.values()
data_frame.loc[i] = observation_values
i += 1
return data_frame
def _reg_types_dict(registration_tids=None):
"""Method maps single RegistrationTID values to the query dictionary used in regObs webapi
:param registration_tids: [int or list of int] Definition given below
:return:
Registration IDs and names
10 Fritekst
11 Ulykke/hendelse
12 Bilde
13 Faretegn
-14 Skader
21 Vær
22 Snødekke
23 Snøprofil
-24 Skredfaretegn
25 Stabilitetstest
26 Skredhendelse
27 Observert skredaktivitet(2011)
28 Skredfarevurdering (2012)
-29 Svakt lag
30 Skredfarevurdering (2013)
31 Skredfarevurdering
32 Skredproblem
33 Skredaktivitet
40 Snøskredvarsel
50 Istykkelse
51 Isdekningsgrad
61 Vannstand (2017)
62 Vannstand
71 Skredhendelse
80 Hendelser Grupperings type - Hendelser
81 Skred og faretegn Grupperings type - Skred og faretegn
82 Snødekke og vær Grupperings type - Snødekke og vær
83 Vurderinger og problemer Grupperings type - Vurderinger og problemer
"""
# If resources isn't a list, make it so
if not isinstance(registration_tids, list):
registration_tids = [registration_tids]
registration_dicts = []
for registration_tid in registration_tids:
if registration_tid is None:
return None
elif registration_tid == 10: # Fritekst
registration_dicts.append({'Id': 10, 'SubTypes': []})
elif registration_tid == 11: # Ulykke/hendelse
registration_dicts.append({'Id': 80, 'SubTypes': [11]})
elif registration_tid == 13: # Faretegn
registration_dicts.append({'Id': 81, 'SubTypes': [13]})
elif registration_tid == 21: # Vær
registration_dicts.append({'Id': 82, 'SubTypes': [21]})
elif registration_tid == 22: # Snødekke
registration_dicts.append({'Id': 82, 'SubTypes': [22]})
elif registration_tid == 23: # Snøprofil
registration_dicts.append({'Id': 82, 'SubTypes': [23]})
elif registration_tid == 25: # Stabilitetstest
registration_dicts.append({'Id': 82, 'SubTypes': [25]})
elif registration_tid == 26: # Skredhendelse
registration_dicts.append({'Id': 81, 'SubTypes': [26]})
elif registration_tid == 27: # Skredaktivitet(2011)
registration_dicts.append({'Id': 81, 'SubTypes': [27]})
elif registration_tid == 28: # Skredfarevurdering (2012)
registration_dicts.append({'Id': 83, 'SubTypes': [28]})
elif registration_tid == 30: # Skredfarevurdering (2013)
registration_dicts.append({'Id': 83, 'SubTypes': [30]})
elif registration_tid == 31: # Skredfarevurdering
registration_dicts.append({'Id': 83, 'SubTypes': [31]})
elif registration_tid == 32: # Skredproblem
registration_dicts.append({'Id': 83, 'SubTypes': [32]})
elif registration_tid == 33: # Skredaktivitet
registration_dicts.append({'Id': 81, 'SubTypes': [33]})
elif registration_tid == 50: # Istykkelse
registration_dicts.append({'Id': 50, 'SubTypes': []})
elif registration_tid == 51: # Isdekningsgrad
registration_dicts.append({'Id': 51, 'SubTypes': []})
else:
ml.log_and_print('getobservations.py -> _reg_types_dict: RegistrationTID {0} not supported (yet).'.format(registration_tid))
return registration_dicts
def _make_one_request(from_date=None, to_date=None, reg_id=None, registration_types=None,
                      region_ids=None, location_id=None, observer_id=None, observer_nick=None, observer_competence=None,
                      group_id=None, output='List', geohazard_tids=None, lang_key=1, recursive_count=5):
    """Part of get_data method. Parameters the same except observer_id and reg_id can not be lists.

    Posts one query to the regObs webapi and pages through the results
    (the api returns 100 records at a time). On a failed request the call
    retries itself up to recursive_count times.
    """

    # Dates in the web-api request are strings
    if isinstance(from_date, dt.date):
        from_date = dt.date.strftime(from_date, '%Y-%m-%d')
    elif isinstance(from_date, dt.datetime):
        from_date = dt.datetime.strftime(from_date, '%Y-%m-%d')
    if isinstance(to_date, dt.date):
        to_date = dt.date.strftime(to_date, '%Y-%m-%d')
    elif isinstance(to_date, dt.datetime):
        to_date = dt.datetime.strftime(to_date, '%Y-%m-%d')

    data = []  # data from one query

    # query object posted in the request
    rssquery = {'LangKey': lang_key,
                'RegId': reg_id,
                'ObserverGuid': None,  # eg. '4d11f3cc-07c5-4f43-837a-6597d318143c',
                'SelectedRegistrationTypes': _reg_types_dict(registration_types),
                'SelectedRegions': region_ids,
                'SelectedGeoHazards': geohazard_tids,
                'ObserverId': observer_id,
                'ObserverNickName': observer_nick,
                'ObserverCompetence': observer_competence,
                'GroupId': group_id,
                'LocationId': location_id,
                'FromDate': from_date,
                'ToDate': to_date,
                'NumberOfRecords': None,  # int
                'Offset': 0}

    url = 'https://api.nve.no/hydrology/regobs/webapi_{0}/Search/Rss?geoHazard=0'.format(se.web_api_version)
    more_available = True

    # get data from regObs api. It returns 100 items at a time. If more, continue requesting with an offset. Paging.
    while more_available:

        # try or if there is an exception, try again.
        # NOTE(review): the bare `except:` swallows every exception type, and
        # if the very first request fails with retries exhausted, `responds`
        # below is referenced before assignment (NameError). The retry also
        # restarts from offset 0 while keeping already-collected items, so
        # `data` may contain duplicates after a mid-paging failure. Worth
        # confirming and fixing.
        try:
            r = requests.post(url, json=rssquery)
            responds = r.json()
            data += responds['Results']

            if output == 'Count nest':
                ml.log_and_print('getobservations.py -> _make_one_request: total matches {0}'.format(responds['TotalMatches']))
                return [responds['TotalMatches']]
        except:
            ml.log_and_print("getobservations.py -> _make_one_request: EXCEPTION. RECURSIVE COUNT {0}".format(recursive_count))
            if recursive_count > 1:
                recursive_count -= 1  # count down
                data += _make_one_request(from_date=from_date,
                                          to_date=to_date,
                                          reg_id=reg_id,
                                          registration_types=registration_types,
                                          region_ids=region_ids,
                                          location_id=location_id,
                                          observer_id=observer_id,
                                          observer_nick=observer_nick,
                                          observer_competence=observer_competence,
                                          group_id=group_id,
                                          output=output,
                                          geohazard_tids=geohazard_tids,
                                          lang_key=lang_key,
                                          recursive_count=recursive_count)

        # log request status
        if responds['TotalMatches'] == 0:
            ml.log_and_print("getobservations.py -> _make_one_request: no data")
        else:
            ml.log_and_print('getobservations.py -> _make_one_request: {0:.2f}%'.format(len(data) / responds['TotalMatches'] * 100))

        # if more get more by adding to the offset
        if len(data) < responds['TotalMatches']:
            rssquery["Offset"] += 100
        else:
            more_available = False

    return data
def _get_general(registration_class_type, registration_types, from_date, to_date, region_ids=None, location_id=None,
                 observer_ids=None, observer_nick=None, observer_competence=None, group_id=None,
                 output='List', geohazard_tids=None, lang_key=1):
    """Get observations of a requested type and map them to a class.

    :param registration_class_type: [class] class the observations are mapped to
    :param registration_types: [int] RegistrationTID for the requested observation type
    :param from_date: [date] a query returns [from_date, to_date]
    :param to_date: [date] a query returns [from_date, to_date]
    :param region_ids: [int or list of ints] None selects all regions
    :param location_id: [int]
    :param observer_ids: [int or list of ints] None selects all observers
    :param observer_nick: [string] part of an observer nick name
    :param observer_competence: [int or list of int] as given in CompetenceLevelKDV
    :param group_id: [int]
    :param output: [string] one of 'List', 'DataFrame' or 'Count'. Default 'List'.
    :param geohazard_tids: [int or list of ints] 10 is snow, 20/30/40 are dirt, 60 is water, 70 is ice
    :param lang_key: [int] 1 is Norwegian, 2 is English
    :return: [list, DataFrame or int] depending on the requested output
    """
    if output not in ['List', 'DataFrame', 'Count']:
        ml.log_and_print('getobservations.py -> _get_general: Illegal output option.')
        return None

    # In these methods 'Count' means counting the list of observations,
    # whereas get_data distinguishes counting a flat list from counting
    # nested full registrations. DataFrames are built from the flat list.
    output_for_get_data = {'Count': 'Count list', 'DataFrame': 'List'}.get(output, output)

    data_with_more = get_data(from_date=from_date, to_date=to_date, region_ids=region_ids, observer_ids=observer_ids,
                              observer_nick=observer_nick, observer_competence=observer_competence,
                              group_id=group_id, location_id=location_id, lang_key=lang_key,
                              output=output_for_get_data, registration_types=registration_types,
                              geohazard_tids=geohazard_tids)

    # wash out all other observation types
    if registration_types:
        data = [d for d in data_with_more if d['RegistrationTid'] == registration_types]
    else:
        # registration_types None means all registrations; nothing is filtered out.
        data = data_with_more

    if output == 'Count':
        return data

    mapped = [registration_class_type(d) for d in data]
    mapped.sort(key=lambda observation: observation.DtObsTime)
    if output == 'DataFrame':
        return _make_data_frame(mapped)
    return mapped
def get_data(from_date=None, to_date=None, registration_types=None, reg_ids=None, region_ids=None, location_id=None,
             observer_ids=None, observer_nick=None, observer_competence=None, group_id=None,
             output='List', geohazard_tids=None, lang_key=1):
    """Gets data from regObs webapi. Each observation returned as a dictionary in a list.

    :param from_date:           [string] 'yyyy-mm-dd'. Result includes from date.
    :param to_date:             [string] 'yyyy-mm-dd'. Result includes to date.
    :param registration_types:  [string or list of strings] Default None gives all.
    :param reg_ids:             [int or list of ints] Default None gives all.
    :param region_ids:          [int or list of ints]
    :param location_id:         [int]
    :param observer_ids:        [int or list of ints] Default None gives all.
    :param observer_nick:       [string] Part of an observer nick name.
    :param observer_competence: [int or list of ints] as given in ComtetanceLevelKDV.
    :param group_id:            [int]
    :param output:              [string] 'Nested' collects all observations on one regid in one entry (default for webapi).
                                'List' is a flat structure with one entry pr observation type.
                                'Count nest' makes one request and picks out info on total matches.
                                'Count list' counts every form in every observation.
    :param geohazard_tids:      [int or list of ints] Default None gives all.
    :param lang_key:            [int] Default 1 gives Norwegian.

    :return: [list or int] Depending on output requested.
    """
    # The webapi query expects lists; wrap plain values.
    if not isinstance(registration_types, list):
        registration_types = [registration_types]
    if not isinstance(region_ids, list):
        region_ids = [region_ids]
    if not isinstance(geohazard_tids, list):
        geohazard_tids = [geohazard_tids]
    # regObs webapi does not support multiple ObserverIDs and RegIDs.
    # Make one request pr combination instead.
    if not isinstance(observer_ids, list):
        observer_ids = [observer_ids]
    if not isinstance(reg_ids, list):
        reg_ids = [reg_ids]

    all_data = []
    for reg_id in reg_ids:
        for observer_id in observer_ids:
            data = _make_one_request(
                from_date=from_date, to_date=to_date, lang_key=lang_key, reg_id=reg_id,
                registration_types=registration_types, region_ids=region_ids, geohazard_tids=geohazard_tids,
                observer_id=observer_id, observer_nick=observer_nick, observer_competence=observer_competence,
                group_id=group_id, location_id=location_id, output=output)
            all_data += data

    # 'Count nest' requests return numbers pr request; sum them over all requests.
    if output == 'Count nest':
        return sum(all_data)

    # data sorted with ascending observation time
    all_data = sorted(all_data, key=lambda d: d['DtObsTime'])

    # Output 'Nested' is the structure returned from webapi. All observations on the same reg_id are grouped to one list item.
    # Output 'List' makes every observation element a separate item on the list.
    if output == 'Nested':
        return all_data
    elif output == 'List' or output == 'Count list':
        listed_data = []
        for d in all_data:
            for o in d['Registrations']:
                listed_data.append({**d, **o})
            for p in d['Pictures']:
                p['RegistrationName'] = 'Bilde'
                listed_data.append({**d, **p})
        if output == 'List':
            return listed_data
        if output == 'Count list':
            return len(listed_data)
    else:
        ml.log_and_print('getobservations.py -> get_data: Unsupported output type.')
        return None
# END VARSOMDATA
def _webapi_ice_col_to_ice_class(o):
    """This internal method maps an ice column object as given on webapi to the Ice.IceColumn class

    :param o:   [dict] one webapi observation carrying a 'FullObject' with ice-thickness data
    :return:    [ice.IceColumn or None] None when no total ice thickness is given
    """
    reg_id = o['RegId']
    layers = []
    # Sort layers on their id so the column is built top-down in observation order.
    ordered_layers = sorted(o['FullObject']['IceThicknessLayers'], key=lambda l: l['IceLayerID'])
    for layer in ordered_layers:
        ice_type = get_ice_type_from_tid(layer['IceLayerTID'])
        ice_layer_height = layer['IceLayerThickness']
        if ice_layer_height is not None:
            ice_layer = ice.IceLayer(ice_layer_height, ice_type)
            layers.append(ice_layer)
    date = dt.datetime.strptime(o['DtObsTime'][0:16], "%Y-%m-%dT%H:%M")
    if o['FullObject']['IceThicknessSum'] is not None:
        # Only a total thickness given: represent the whole column as one 'unknown' layer.
        if len(layers) == 0:
            layers = [ice.IceLayer(float(o['FullObject']['IceThicknessSum']), 'unknown')]
        ice_column = ice.IceColumn(date, layers)
        ice_column.add_metadata('OriginalObject', o)
        ice_column.add_metadata('RegID', reg_id)
        ice_column.add_metadata('LocationName', o['LocationName'])
        ice_column.add_metadata('LocationID', o['LocationId'])
        ice_column.add_metadata('UTMNorth', o['UtmNorth'])
        ice_column.add_metadata('UTMEast', o['UtmEast'])
        ice_column.add_metadata('UTMZone', o['UtmZone'])
        # Slush and snow go on top of the ice layers (index 0).
        ice_column.add_layer_at_index(0, ice.IceLayer(o['FullObject']['SlushSnow'], 'slush'))
        ice_column.add_layer_at_index(0, ice.IceLayer(o['FullObject']['SnowDepth'], 'snow'))
        ice_column.merge_and_remove_excess_layers()
        ice_column.update_draft_thickness()
        ice_column.update_top_layer_is_slush()
        # I tried to reference ice column to water surface given ice height after or slush snow, but then what if
        # ice height before is given. And what if there are combinations. To many possibilities in regObs..
        # Iv calculate a theoretical ice height and use that.
        # ice_column.update_water_line()
        iha = o['FullObject']['IceHeightAfter']
        # ihb = o['FullObject']['IceHeightBefore']
        # if ice height after is not given I make an estimate so that I know where to put it in the plot
        if iha is None:
            ice_column.update_water_line()
            ice_column.add_metadata('IceHeightAfter', 'Modelled')
            iha = ice_column.draft_thickness - ice_column.water_line
            # Probably dont need the test of topp layer is slush because it is includet in draft thickness
            # if ice_column.top_layer_is_slush:
            #     iha = iha + const.snow_pull_on_water
        ice_column.water_line = ice_column.draft_thickness - float(iha)
        # correct level if top layer was slush
        if ice_column.top_layer_is_slush is True:
            for layer in ice_column.column:
                if layer.get_enum() > 20:  # material types >= 20 are snow
                    continue
                elif layer.get_enum() == 2:  # slush
                    ice_column.water_line -= layer.height
                    break  # only the top most slush layer counts
        return ice_column
    else:
        return None
def get_ice_thickness_today():
    """Gets all the observed ice thickness from regObs for today (and 2 days back)

    :return: [dict] {RegId: ice.IceColumn} of all mappable thickness observations
    """
    today = dt.date.today()
    start = today - dt.timedelta(days=2)
    observations = get_data(from_date=start, to_date=today, registration_types=50, geohazard_tids=70)

    columns_by_regid = {}
    for obs in observations:
        if obs['RegistrationTid'] != 50:
            continue
        column = _webapi_ice_col_to_ice_class(obs)
        if column is not None:
            columns_by_regid[obs['RegId']] = column
    return columns_by_regid
def get_ice_thickness_observations(year, reset_and_get_new=False):
    """Gets all the observed ice thickness (RegistrationTID = 50) from regObs for one year.

    The inner workings of the method:
    1.   We have an option of resetting local storage (delete pickle) and thus forcing the get_new.
    2.1  Try opening a pickle, if it doesnt exist, an exception is thrown and we get new data.
    2.2  If the requested data is from a previous season, no changes are expected, so load the pickle
         without adding the last observations registered in regObs. Anyway, don't get new data.
    2.3  If the requested data is from this season, set request from_date to the last modified
         date of the pickle and 7 days past that. Add these last obs to the pickle data, and thus it is not
         necessary to get new.
    3.   If get new, it gets all new data for the season.
    4.   Else, load pickle and if some last obs are to be added, do so.

    :param year:              [string] Eg '2017-18'
    :param reset_and_get_new: [bool]
    :return: ice_thickeness_obs_dict  [dict] {RegId: ice.IceColumn}
    """
    log_referance = 'getregobsdata.py -> get_ice_thickness_observations'
    pickle_file_name = '{0}get_ice_thickness_observations_{1}.pickle'.format(se.local_storage, year)

    # 1. Remove pickle if it exists, forcing the get_new
    if reset_and_get_new:
        try:
            os.remove(pickle_file_name)
        except OSError:
            # Pickle did not exist; nothing to reset.
            pass

    from_date, to_date = gm.get_dates_from_year(year)
    add_last_obs = None
    get_new = None

    try:
        mtime = os.path.getmtime(pickle_file_name)
        last_modified_date = dt.datetime.fromtimestamp(mtime).date()
        # if file newer than the season (that is, if this is historical data), load it without requesting new.
        dt_to_date = dt.datetime.strptime(to_date, '%Y-%m-%d').date()
        if last_modified_date > dt_to_date:
            add_last_obs = False
        else:
            add_last_obs = True
            to_date = dt.date.today()
            # Overlap the request window 7 days back so nothing is missed.
            from_date = last_modified_date - dt.timedelta(days=7)
        get_new = False
    except OSError:
        # file does not exists, so get_new.
        ml.log_and_print("{0}: No matching pickle found, getting new data.".format(log_referance))
        get_new = True

    if get_new:
        ml.log_and_print('{0}: Getting new for year {1}.'.format(log_referance, year))
        ice_thickeness_obs = get_data(from_date=from_date, to_date=to_date, registration_types=50, geohazard_tids=70)
        ice_thickeness_obs_dict = {}
        for o in ice_thickeness_obs:
            if o['RegistrationTid'] == 50:
                ice_column = _webapi_ice_col_to_ice_class(o)
                if ice_column is not None:
                    ice_thickeness_obs_dict[o['RegId']] = ice_column
        mp.pickle_anything(ice_thickeness_obs_dict, pickle_file_name)
    else:
        ice_thickeness_obs_dict = mp.unpickle_anything(pickle_file_name)
        if add_last_obs:
            ml.log_and_print("{0}: Adding observations from {1} to {2}".format(log_referance, from_date, to_date))
            new_ice_thickeness_obs = get_data(from_date=from_date, to_date=to_date, registration_types=50, geohazard_tids=70)
            new_ice_thickeness_obs_dict = {}
            for o in new_ice_thickeness_obs:
                if o['RegistrationTid'] == 50:
                    ice_column = _webapi_ice_col_to_ice_class(o)
                    if ice_column is not None:
                        new_ice_thickeness_obs_dict[o['RegId']] = ice_column
            # Merge the fresh observations into the pickled data (new entries win).
            for k, v in new_ice_thickeness_obs_dict.items():
                ice_thickeness_obs_dict[k] = v
            mp.pickle_anything(ice_thickeness_obs_dict, pickle_file_name)
    return ice_thickeness_obs_dict
def get_all_season_ice(year, get_new=True):
    """Returns observed ice columns from regObs-webapi over a requested season. Ice covers representing
    first ice or ice cover lost are represented by an ice column of zero height.

    The workings of this routine:
    1. Get one season of data from regobs-api, spreads them out to a long list.
    2. Pick out only cover and column and group them on location_ids. We keep only locations with
       date for fist ice that season. All observations are mapped to the cover and column class in Ice.py.
    3. Map all covers where first_ice or ice_cover_lost is True to zero-height columns. Remove the rest.

    If get_new=True new data is retrieved. If get_new=false data is picked from pickle.

    :param year:    [string] Eg '2016-17'
    :param get_new: [bool]
    :return:        [dict] {location_id: [list of ice.IceColumn]}
    """
    file_name_and_path = '{0}get_all_season_ice_{1}.pickle'.format(se.local_storage, year)
    from_date, to_date = gm.get_dates_from_year(year)
    if get_new:
        all_observations = get_data(from_date=from_date, to_date=to_date, geohazard_tids=70)

        # Keep only ice thickness (50) and ice cover (51) observations, grouped on location id.
        all_locations = {}
        for o in all_observations:
            if o['RegistrationTid'] == 51 or o['RegistrationTid'] == 50:
                if o['LocationId'] in all_locations.keys():
                    all_locations[o['LocationId']].append(o)
                else:
                    all_locations[o['LocationId']] = [o]

        # sort oldest first on each location
        for l, obs in all_locations.items():
            sorted_list = sorted(obs, key=lambda d: d['DtObsTime'])
            all_locations[l] = sorted_list

        # Use only locations with verified "first ice cover" date.
        all_locations_with_first_ice = {}
        for l, obs in all_locations.items():
            for o in obs:
                if o['RegistrationTid'] == 51:
                    # if the ice cover is partly or fully formed on observation location or the lake
                    # 2) delvis islagt på målestedet
                    # 3) helt islagt på målestedet
                    # 21) hele sjøen islagt
                    if (o['FullObject']['IceCoverTID'] == 2) or (o['FullObject']['IceCoverTID'] == 3) or \
                            (o['FullObject']['IceCoverTID'] == 21):
                        # and if ice cover before was
                        # 1) isfritt på målestedet
                        # 2) delvis islagt på målestedet,
                        # 11) islegging langs land
                        # 20) hele sjøen isfri, this is fist ice
                        if (o['FullObject']['IceCoverBeforeTID'] == 1) or (o['FullObject']['IceCoverBeforeTID'] == 2) or \
                                (o['FullObject']['IceCoverBeforeTID'] == 11) or (o['FullObject']['IceCoverBeforeTID'] == 20):
                            all_locations_with_first_ice[l] = obs

        # Map all observations from regObs-webapi result structure to the classes in ice.py
        all_locations_with_classes = {}
        for l, obs in all_locations_with_first_ice.items():
            all_locations_with_classes[l] = []
            location_name = obs[0]['LocationName']
            previous_cover = ice.IceCover(dt.datetime.strptime(from_date, "%Y-%m-%d").date(), "Ikke gitt", 'Ikke gitt', location_name)
            for o in obs:
                if o['RegistrationTid'] == 51:
                    cover_date = dt.datetime.strptime(o['DtObsTime'][0:16], "%Y-%m-%dT%H:%M")
                    cover = o['FullObject']['IceCoverTName']
                    cover_before = o['FullObject']['IceCoverBeforeTName']
                    cover_after = o['FullObject']['IceCoverAfterTName']
                    cover_tid = o['FullObject']['IceCoverTID']
                    cover_before_tid = o['FullObject']['IceCoverBeforeTID']
                    cover_after_tid = o['FullObject']['IceCoverAfterTID']
                    this_cover = ice.IceCover(cover_date, cover, cover_before, location_name)
                    this_cover.set_regid(o['RegId'])
                    this_cover.set_locationid(o['LocationId'])
                    this_cover.set_utm(o['UtmNorth'], o['UtmEast'], o['UtmZone'])
                    this_cover.set_cover_after(cover_after, cover_after_tid)
                    this_cover.add_original_object(o)
                    # if the ice cover is partly or fully formed on observation location or the lake
                    # 2) delvis islagt på målestedet
                    # 3) helt islagt på målestedet
                    # 21) hele sjøen islagt
                    if cover_tid == 2 or cover_tid == 3 or cover_tid == 21:
                        # and if ice cover before was
                        # 1) isfritt, nå første is på målestedet på målestedet
                        # 2) isfritt, nå første is ved land
                        # 4) Gradvis islegging
                        if cover_before_tid == 1 or cover_before_tid == 2 or cover_before_tid == 4:
                            this_cover.mark_as_first_ice()
                    # if the ice cover is partly or fully gone on location and there was ice yesterday
                    # 1) Isfritt på målestedet
                    # 2) delvis islagt på målestedet
                    # 20) Hele sjøen isfri
                    if cover_tid == 1 or cover_tid == 2 or cover_tid == 20:
                        # 10) isfritt resten av vinteren
                        # Accepts also ice free observation after 15. March
                        to_year = this_cover.date.year
                        first_accepted_date = dt.datetime(to_year, 3, 15)
                        last_accepted_date = dt.datetime(to_year, 9, 1)
                        if cover_after_tid == 10 or (cover_date > first_accepted_date and cover_date < last_accepted_date):
                            this_cover.mark_as_ice_cover_lost()
                    # copy of this cover so that in next iteration I may look up previous cover.
                    previous_cover = cp.deepcopy(this_cover)
                    all_locations_with_classes[l].append(this_cover)
                if o['RegistrationTid'] == 50:
                    ice_column = _webapi_ice_col_to_ice_class(o)
                    if ice_column is not None:
                        all_locations_with_classes[l].append(ice_column)

        # Map all covers where first_ice or ice_cover_lost is True to zero-height columns. Remove all the rest.
        all_locations_with_columns = {}
        for k, v in all_locations_with_classes.items():
            new_v = []
            for o in v:
                if isinstance(o, ice.IceCover):
                    if o.first_ice or o.ice_cover_lost:
                        new_o = ice.IceColumn(o.date, [])
                        new_o.add_metadata('OriginalObject', o.metadata['OriginalObject'])
                        new_o.add_metadata('UTMEast', o.metadata['UTMEast'])
                        new_o.add_metadata('UTMNorth', o.metadata['UTMNorth'])
                        new_o.add_metadata('UTMZone', o.metadata['UTMZone'])
                        new_o.add_metadata('LocationName', o.locationName)
                        new_o.add_metadata('LocationID', o.LocationID)
                        new_v.append(new_o)
                else:
                    new_v.append(o)
            all_locations_with_columns[k] = new_v
        mp.pickle_anything(all_locations_with_columns, file_name_and_path)
    else:
        # if pickle file with all data for the season does not exist, get data anyway
        if not os.path.exists(file_name_and_path):
            all_locations_with_columns = get_all_season_ice(year, get_new=True)
        else:
            all_locations_with_columns = mp.unpickle_anything(file_name_and_path, print_message=False)
    return all_locations_with_columns
def get_observations_on_location_id(location_id, year, get_new=False):
    """Uses new or stored data from get_all_season_ice and picks out one requested location.
    First ice cover is mapped to Ice.IceColumn of zero height. Ice cover lost (mid season or last) the same.

    :param location_id: [int] location id as used in regObs
    :param year:        [string] Eg '2018-19'
    :param get_new:     [bool] if get_new, new data is requested from regObs
    :return: [list of IceThickness] empty when the location has no observations
    """
    all_locations = get_all_season_ice(year, get_new=get_new)
    # get_all_season_ice returns a dictionary with observations grouped by location_id.
    observations_on_location_for_modeling = []
    try:
        observations_on_location_for_modeling = all_locations[location_id]
    except KeyError:
        # Only a missing location id is expected here; narrower than the
        # original `except Exception` so real bugs are not swallowed.
        ml.log_and_print("getregobsdata.py -> get_observations_on_location_id: {0} not found probably..".format(location_id), print_it=True)
    return observations_on_location_for_modeling
def get_new_regobs_data():
    """Refresh local storage with season ice data for every supported season.

    Covers the seasons '2019-20' down to '2011-12' (newest first), replacing
    the previous hard-coded list of get_all_season_ice calls.
    """
    # Season strings are '<first year>-<last two digits of next year>', e.g. '2012-13'.
    for first_year in range(2019, 2010, -1):
        season = '{0}-{1:02d}'.format(first_year, (first_year + 1) % 100)
        get_all_season_ice(season)
if __name__ == "__main__":
    # Refresh local storage for all supported seasons when run as a script.
    get_new_regobs_data()

    # Examples of other entry points in this module:
    # ice_column = get_ice_thickness_on_regid(130979)
    # ice_thicks = get_ice_thickness_observations('2017-18')
    # ic = get_ice_cover(LocationNames, from_date, to_date)
    # first = get_first_ice_cover(LocationNames, from_date, to_date)
    # last = get_last_ice_cover(LocationNames, from_date, to_date)
    # ith = get_ice_thickness(LocationNames, from_date, to_date)
    # all_on_locations = get_all_season_ice_on_location(LocationNames, from_date, to_date)
    # all_in_all = get_all_season_ice('2016-17', get_new=True)

    pass
| mit |
ppp2006/runbot_number0 | qbo_stereo_anaglyph/hrl_lib/src/hrl_lib/matplotlib_util.py | 3 | 8282 | #
# Copyright (c) 2009, Georgia Tech Research Corporation
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the Georgia Tech Research Corporation nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY GEORGIA TECH RESEARCH CORPORATION ''AS IS'' AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL GEORGIA TECH BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA,
# OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
# OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
# ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# \author Advait Jain (Healthcare Robotics Lab, Georgia Tech.)
import pylab as pl
import math, numpy as np
from matplotlib.patches import Ellipse
## calls pylab.figure() and returns the figure
# other params can be added if required.
# @param fig_num - number of an existing figure to activate, or None for a new figure.
# @param dpi - changing this can change the size of the font.
def figure(fig_num=None, dpi=None):
    #mpu.pl.figure(num=None, figsize=(4, 4), dpi=100, facecolor='w', edgecolor='k')
    return pl.figure(fig_num, dpi=dpi, facecolor='w')
## calls pylab.savefig()
# @param fname - output file name; the extension decides the image format.
def savefig(fname):
    pl.savefig(fname)
## legend drawing helper
# @param loc - 'best', 'upper left' ...
# @param display_mode - 'normal', 'less_space'
def legend(loc='best', display_mode='normal', draw_frame=True,
           handlelength=0.003):
    pl.rcParams.update({'legend.fontsize': 10})
    if display_mode == 'normal':
        lgnd = pl.legend(loc=loc)
        lgnd.draw_frame(draw_frame)
    elif display_mode == 'less_space':
        lgnd = pl.legend(loc=loc, handletextpad=0.7,
                         handlelength=handlelength,
                         labelspacing=0.01, markerscale=0.5)
        lgnd.draw_frame(draw_frame)
##
# generate a random color.
# @return string of the form #xxxxxx
def random_color():
    # np.random.randint's upper bound is exclusive, so 256 (not 255) is needed
    # to allow 0xFF components; the original could never produce e.g. '#FFFFFF'.
    r = '%02X' % np.random.randint(0, 256)
    g = '%02X' % np.random.randint(0, 256)
    b = '%02X' % np.random.randint(0, 256)
    c = '#' + r + g + b
    return c
##
# @param fig_width - figure width in cm
# @param fig_height - figure height in cm
def set_figure_size(fig_width, fig_height):
    cm_to_inch = 1. / 2.54
    size_inches = [fig_width * cm_to_inch, fig_height * cm_to_inch]
    pl.rcParams.update({
        'backend': 'WXAgg',
        'axes.labelsize': 12,
        'text.fontsize': 12,
        'legend.fontsize': 12,
        'xtick.labelsize': 10,
        'ytick.labelsize': 10,
        'text.usetex': True,
        'figure.figsize': size_inches,
    })
## typical usage: ax = pp.gca(); mpu.flip_x_axis(ax)
# Reverses the current x-limits, mirroring the plot horizontally.
def flip_x_axis(ax):
    ax.set_xlim(ax.get_xlim()[::-1])
## typical usage: ax = pp.gca(); mpu.flip_y_axis(ax)
# Reverses the current y-limits, mirroring the plot vertically.
def flip_y_axis(ax):
    ax.set_ylim(ax.get_ylim()[::-1])
## plot an ellipse representing a 2D covariance matrix
# @param pos - center of ellipse. (2x1 np matrix)
# @param P - covariance matrix.
def plot_ellipse_cov(pos, P, edge_color, face_color='w', alpha=1.):
    # Principal axes and widths come from the SVD of the covariance.
    U, s, Vh = np.linalg.svd(P)
    angle = math.atan2(U[1, 0], U[0, 0])
    width = 2.0 * math.sqrt(s[0])
    height = 2.0 * math.sqrt(s[1])
    return plot_ellipse(pos, angle, width, height, edge_color,
                        face_color, alpha)
## plot an ellipse
# @param pos - center of ellipse. (2x1 np matrix)
# @param angle - angle of the ellipse (RADIANS)
def plot_ellipse(pos, angle, w1, w2, edge_color, face_color='w',
                 alpha=1.):
    degrees = math.degrees(angle)
    patch = Ellipse(xy=pos, width=w1, height=w2, angle=degrees,
                    facecolor=face_color, edgecolor=edge_color)
    patch.set_alpha(alpha)
    pl.gca().add_patch(patch)
    return patch
## plot circle (or an arc counterclockwise starting from the y-axis)
# @param cx - x coord of center of circle.
# @param cy - y coord of center of circle.
# @param rad - radius of the circle
# @param start_angle - starting angle for the arc in RADIANS. (0 is y axis)
# @param end_angle - ending angle for the arc in RADIANS. (0 is y axis)
# @param step - step size for the linear segments.
# @param color - color of the circle.
# @param label - legend label.
#
# circle plotted as bunch of linear segments. back to LOGO days.
def plot_circle(cx, cy, rad, start_angle, end_angle, step=math.radians(2),
                color='k', label='', alpha=1.0):
    if start_angle > end_angle:
        step = -step
    n_step = int((end_angle - start_angle) / step + 0.5)
    angles = [start_angle + i * step for i in range(n_step)]
    angles.append(end_angle)
    xs = [cx - rad * math.sin(a) for a in angles]
    ys = [cy + rad * math.cos(a) for a in angles]
    pl.axis('equal')
    return pl.plot(xs, ys, c=color, label=label, linewidth=2, alpha=alpha)
## plot radii at regular angular intervals.
# @param cx - x coord of center of circle.
# @param cy - y coord of center of circle.
# @param rad - radius of the circle
# @param start_angle - starting angle for the arc in RADIANS. (0 is y axis)
# @param end_angle - ending angle for the arc in RADIANS. (0 is y axis)
# @param interval - angular intervals for the radii
# @param color - color of the circle.
# @param label - legend label.
def plot_radii(cx, cy, rad, start_angle, end_angle, interval=math.radians(15),
               color='k', label='', alpha=1.0):
    # normalize negative angles into [0, 2*pi)
    if start_angle < 0.:
        start_angle = 2 * math.pi + start_angle
    if end_angle < 0.:
        end_angle = 2 * math.pi + end_angle
    if start_angle > end_angle:
        interval = -interval
    n_step = int((end_angle - start_angle) / interval + 0.5)
    xs, ys = [], []
    # each radius is drawn as center -> rim, so the pen returns to the center
    for i in range(n_step):
        ang = start_angle + i * interval
        xs.extend([cx, cx - rad * math.sin(ang)])
        ys.extend([cy, cy + rad * math.cos(ang)])
    xs.extend([cx, cx - rad * math.sin(end_angle)])
    ys.extend([cy, cy + rad * math.cos(end_angle)])
    pl.plot(xs, ys, c=color, label=label, linewidth=1, alpha=alpha)
    pl.axis('equal')
## plot a quiver
# @param y - list/1D array
# @param x - list/1D array
# @param v - 2xN or 3xN np matrix of vectors.
# @param width - thickness of the arrows.
# @param scale - decreasing the scale increases the length of the arrows.
def plot_quiver_yxv(y, x, v, color='k', width=0.005, scale=100.0):
    # .A1 flattens each matrix row to a 1D array as pylab.quiver expects.
    pl.quiver(x, y, v[0,:].A1, v[1,:].A1, width=width, color=color, scale=scale)
## plot a histogram.
# @param left - left edge of the bin. (array-like)
# @param height - height of each bin (array-like)
# @param width - width of each bar.
# @param label - legend label.
# @return the bar container returned by pylab.bar
def plot_histogram(left, height, width=0.8, label='',
                   align='center', color='b', alpha=1.):
    pb_obj = pl.bar(left, height, width=width, align=align,
                    color=color, alpha=alpha, label=label, linewidth=0)
    return pb_obj
# 3d in matplotlib is buggy. Using mayavi instead.
##---------- 3D plotting ---------------
#
###
## @param scatter_size - as of Aug 17, 2010 and matplotlib version 0.99.1.1, scatter_size does not work
#def plot_xyz(x, y, z, color='b', scatter_size=20, fig=None, xlabel='',
# ylabel='', zlabel=''):
# if fig == None:
# fig = pl.figure()
# ax = fig.gca(projection='3d')
## ax = Axes3D(fig)
## ax.plot(x, y, z, c=color)
# ax.scatter(x, y, z, s=scatter_size)
#
## ax.set_xlabel(xlabel)
## ax.set_ylabel(ylabel)
## ax.set_zlabel(zlabel)
## ax.legend()
| lgpl-2.1 |
cloudera/ibis | ibis/backends/clickhouse/tests/test_operators.py | 1 | 7561 | import operator
from datetime import date, datetime
import numpy as np
import pandas as pd
import pandas.testing as tm
import pytest
import ibis
import ibis.expr.datatypes as dt
from ibis import literal as L
pytest.importorskip('clickhouse_driver')
pytestmark = pytest.mark.clickhouse
@pytest.mark.parametrize(
    ('left', 'right', 'type'),
    [
        # string vs date and date vs string, both operand orders
        (L('2017-04-01'), date(2017, 4, 2), dt.date),
        (date(2017, 4, 2), L('2017-04-01'), dt.date),
        (
            L('2017-04-01 01:02:33'),
            datetime(2017, 4, 1, 1, 3, 34),
            dt.timestamp,
        ),
        (
            datetime(2017, 4, 1, 1, 3, 34),
            L('2017-04-01 01:02:33'),
            dt.timestamp,
        ),
    ],
)
@pytest.mark.parametrize(
    'op',
    [
        operator.eq,
        operator.ne,
        operator.lt,
        operator.le,
        operator.gt,
        operator.ge,
    ],
)
def test_string_temporal_compare(con, op, left, right, type):
    # Comparing a string literal to a date/datetime should behave exactly
    # as if both sides were first cast to the same temporal type.
    expr = op(left, right)
    result = con.execute(expr)
    left_raw = con.execute(L(left).cast(type))
    right_raw = con.execute(L(right).cast(type))
    expected = op(left_raw, right_raw)
    assert result == expected
@pytest.mark.parametrize(
    ('func', 'left', 'right', 'expected'),
    [
        (operator.add, L(3), L(4), 7),
        (operator.sub, L(3), L(4), -1),
        (operator.mul, L(3), L(4), 12),
        (operator.truediv, L(12), L(4), 3),
        (operator.pow, L(12), L(2), 144),
        (operator.mod, L(12), L(5), 2),
        (operator.truediv, L(7), L(2), 3.5),
        (operator.floordiv, L(7), L(2), 3),
        (lambda x, y: x.floordiv(y), L(7), 2, 3),
        (lambda x, y: x.rfloordiv(y), L(2), 7, 3),
    ],
)
def test_binary_arithmetic(con, func, left, right, expected):
    # Binary arithmetic on literals should execute to plain Python values.
    expr = func(left, right)
    result = con.execute(expr)
    assert result == expected
@pytest.mark.parametrize(
    ('op', 'expected'),
    [
        (lambda a, b: a + b, '`int_col` + `tinyint_col`'),
        (lambda a, b: a - b, '`int_col` - `tinyint_col`'),
        (lambda a, b: a * b, '`int_col` * `tinyint_col`'),
        (lambda a, b: a / b, '`int_col` / `tinyint_col`'),
        (lambda a, b: a ** b, 'pow(`int_col`, `tinyint_col`)'),
        (lambda a, b: a < b, '`int_col` < `tinyint_col`'),
        (lambda a, b: a <= b, '`int_col` <= `tinyint_col`'),
        (lambda a, b: a > b, '`int_col` > `tinyint_col`'),
        (lambda a, b: a >= b, '`int_col` >= `tinyint_col`'),
        (lambda a, b: a == b, '`int_col` = `tinyint_col`'),
        (lambda a, b: a != b, '`int_col` != `tinyint_col`'),
    ],
)
def test_binary_infix_operators(con, alltypes, translate, op, expected):
    # Each Python operator should translate to the expected ClickHouse SQL,
    # and the generated query should execute without error.
    a, b = alltypes.int_col, alltypes.tinyint_col
    expr = op(a, b)
    assert translate(expr) == expected
    assert len(con.execute(expr))
# TODO: test boolean operators
# (h & bool_col, '`h` AND (`a` > 0)'),
# (h | bool_col, '`h` OR (`a` > 0)'),
# (h ^ bool_col, 'xor(`h`, (`a` > 0))')
@pytest.mark.parametrize(
    ('op', 'expected'),
    [
        (
            lambda a, b, c: (a + b) + c,
            '(`int_col` + `tinyint_col`) + `double_col`',
        ),
        (lambda a, b, c: a.log() + c, 'log(`int_col`) + `double_col`'),
        (
            lambda a, b, c: (b + (-(a + c))),
            '`tinyint_col` + (-(`int_col` + `double_col`))',
        ),
    ],
)
def test_binary_infix_parenthesization(con, alltypes, translate, op, expected):
    # Compound expressions must keep parentheses only where grouping matters.
    a = alltypes.int_col
    b = alltypes.tinyint_col
    c = alltypes.double_col
    expr = op(a, b, c)
    assert translate(expr) == expected
    assert len(con.execute(expr))
def test_between(con, alltypes, translate):
    """between() should compile to SQL BETWEEN and execute cleanly."""
    between_expr = alltypes.int_col.between(0, 10)
    assert translate(between_expr) == '`int_col` BETWEEN 0 AND 10'
    assert len(con.execute(between_expr))
@pytest.mark.parametrize(
    ('left', 'right'),
    [
        # mixed string-literal and datetime.date bounds, both orders
        (L('2017-03-31').cast(dt.date), date(2017, 4, 2)),
        (date(2017, 3, 31), L('2017-04-02').cast(dt.date)),
    ],
)
def test_string_temporal_compare_between_dates(con, left, right):
    # 2017-04-01 lies inside [left, right], so the predicate must be true.
    expr = ibis.timestamp('2017-04-01').cast(dt.date).between(left, right)
    result = con.execute(expr)
    assert result
@pytest.mark.parametrize(
    ('left', 'right'),
    [
        # mixed string-literal and datetime.datetime bounds, both orders
        (
            L('2017-03-31 00:02:33').cast(dt.timestamp),
            datetime(2017, 4, 1, 1, 3, 34),
        ),
        (
            datetime(2017, 3, 31, 0, 2, 33),
            L('2017-04-01 01:03:34').cast(dt.timestamp),
        ),
    ],
)
def test_string_temporal_compare_between_datetimes(con, left, right):
    # 2017-04-01 00:02:34 lies inside [left, right], so the predicate is true.
    expr = ibis.timestamp('2017-04-01 00:02:34').between(left, right)
    result = con.execute(expr)
    assert result
@pytest.mark.parametrize('container', [list, tuple, set])
def test_field_in_literals(con, alltypes, translate, container):
    # isin/notin should accept any of list, tuple and set of literals.
    foobar = container(['foo', 'bar', 'baz'])
    # The translator renders the options in set order, so build the
    # expected tuple the same way.
    expected = tuple(set(foobar))
    expr = alltypes.string_col.isin(foobar)
    assert translate(expr) == "`string_col` IN {}".format(expected)
    assert len(con.execute(expr))
    expr = alltypes.string_col.notin(foobar)
    assert translate(expr) == "`string_col` NOT IN {}".format(expected)
    assert len(con.execute(expr))
@pytest.mark.parametrize('column', ['int_col', 'float_col', 'bool_col'])
def test_negate(con, alltypes, translate, column):
    # clickhouse represent boolean as UInt8, so negation works on bool_col too
    expr = -getattr(alltypes, column)
    assert translate(expr) == '-`{0}`'.format(column)
    assert len(con.execute(expr))
@pytest.mark.parametrize(
    'field',
    [
        'tinyint_col',
        'smallint_col',
        'int_col',
        'bigint_col',
        'float_col',
        'double_col',
        'year',
        'month',
    ],
)
def test_negate_non_boolean(con, alltypes, field, df):
    # Numeric negation should match pandas' elementwise negation.
    t = alltypes.limit(10)
    expr = t.projection([(-t[field]).name(field)])
    result = expr.execute()[field]
    expected = -df.head(10)[field]
    tm.assert_series_equal(result, expected)
def test_negate_literal(con):
    """Negating a float literal round-trips through execution."""
    negated = -L(5.245)
    value = con.execute(negated)
    assert round(value, 3) == -5.245
@pytest.mark.parametrize(
    ('op', 'pandas_op'),
    [
        (
            lambda t: (t.double_col > 20).ifelse(10, -20),
            lambda df: pd.Series(
                np.where(df.double_col > 20, 10, -20), dtype='int8'
            ),
        ),
        (
            lambda t: (t.double_col > 20).ifelse(10, -20).abs(),
            lambda df: (
                pd.Series(np.where(df.double_col > 20, 10, -20))
                .abs()
                .astype('int8')
            ),
        ),
    ],
)
def test_ifelse(alltypes, df, op, pandas_op, translate):
    # ifelse should produce the same values as the equivalent numpy where().
    expr = op(alltypes)
    result = expr.execute()
    # drop the series name so it compares equal to the unnamed pandas result
    result.name = None
    expected = pandas_op(df)
    tm.assert_series_equal(result, expected)
def test_simple_case(con, alltypes, translate):
    # simple CASE: CASE <expr> WHEN ... THEN ... ELSE ... END
    t = alltypes
    expr = (
        t.string_col.case()
        .when('foo', 'bar')
        .when('baz', 'qux')
        .else_('default')
        .end()
    )
    expected = """CASE `string_col`
WHEN 'foo' THEN 'bar'
WHEN 'baz' THEN 'qux'
ELSE 'default'
END"""
    assert translate(expr) == expected
    assert len(con.execute(expr))
def test_search_case(con, alltypes, translate):
    # searched CASE: CASE WHEN <cond> THEN ... ELSE ... END
    t = alltypes
    expr = (
        ibis.case()
        .when(t.float_col > 0, t.int_col * 2)
        .when(t.float_col < 0, t.int_col)
        .else_(0)
        .end()
    )
    expected = """CASE
WHEN `float_col` > 0 THEN `int_col` * 2
WHEN `float_col` < 0 THEN `int_col`
ELSE 0
END"""
    assert translate(expr) == expected
    assert len(con.execute(expr))
| apache-2.0 |
wanggang3333/scikit-learn | sklearn/feature_extraction/text.py | 110 | 50157 | # -*- coding: utf-8 -*-
# Authors: Olivier Grisel <olivier.grisel@ensta.org>
# Mathieu Blondel <mathieu@mblondel.org>
# Lars Buitinck <L.J.Buitinck@uva.nl>
# Robert Layton <robertlayton@gmail.com>
# Jochen Wersdörfer <jochen@wersdoerfer.de>
# Roman Sinayev <roman.sinayev@gmail.com>
#
# License: BSD 3 clause
"""
The :mod:`sklearn.feature_extraction.text` submodule gathers utilities to
build feature vectors from text documents.
"""
from __future__ import unicode_literals
import array
from collections import Mapping, defaultdict
import numbers
from operator import itemgetter
import re
import unicodedata
import numpy as np
import scipy.sparse as sp
from ..base import BaseEstimator, TransformerMixin
from ..externals import six
from ..externals.six.moves import xrange
from ..preprocessing import normalize
from .hashing import FeatureHasher
from .stop_words import ENGLISH_STOP_WORDS
from ..utils import deprecated
from ..utils.fixes import frombuffer_empty, bincount
from ..utils.validation import check_is_fitted
# Public API of this module.
__all__ = ['CountVectorizer',
           'ENGLISH_STOP_WORDS',
           'TfidfTransformer',
           'TfidfVectorizer',
           'strip_accents_ascii',
           'strip_accents_unicode',
           'strip_tags']
def strip_accents_unicode(s):
    """Strip accents from arbitrary unicode text.

    Decomposes every character (NFKD) and discards the combining marks,
    so it works for any unicode symbol.

    Warning: the python-level loop and join operations make this
    implementation 20 times slower than the strip_accents_ascii basic
    normalization.

    See also
    --------
    strip_accents_ascii
        Remove accentuated char for any unicode symbol that has a direct
        ASCII equivalent.
    """
    decomposed = unicodedata.normalize('NFKD', s)
    return ''.join(ch for ch in decomposed if not unicodedata.combining(ch))
def strip_accents_ascii(s):
    """Strip accents, keeping only characters with an ASCII equivalent.

    Characters that do not transliterate to ASCII are dropped entirely.

    Warning: this solution is only suited for languages that have a direct
    transliteration to ASCII symbols.

    See also
    --------
    strip_accents_unicode
        Remove accentuated char for any unicode symbol.
    """
    decomposed = unicodedata.normalize('NFKD', s)
    ascii_bytes = decomposed.encode('ASCII', 'ignore')
    return ascii_bytes.decode('ASCII')
def strip_tags(s):
    """Basic regexp based HTML / XML tag stripper function

    Each ``<...>`` tag is replaced by a single space.

    For serious HTML/XML preprocessing you should rather use an external
    library such as lxml or BeautifulSoup.
    """
    return re.sub(r"<([^>]+)>", " ", s, flags=re.UNICODE)
def _check_stop_list(stop):
    # None -> no stop words; "english" -> the built-in list; any other
    # string is an error; everything else is treated as a word collection.
    if stop is None:
        return None
    if stop == "english":
        return ENGLISH_STOP_WORDS
    if isinstance(stop, six.string_types):
        raise ValueError("not a built-in stop list: %s" % stop)
    return frozenset(stop)
class VectorizerMixin(object):
    """Provides common code for text vectorizers (tokenization logic)."""
    # Matches runs of 2+ whitespace characters, collapsed to a single space
    # before character n-gram extraction.
    _white_spaces = re.compile(r"\s\s+")
    def decode(self, doc):
        """Decode the input into a string of unicode symbols
        The decoding strategy depends on the vectorizer parameters.
        """
        # self.input selects how ``doc`` is interpreted: a path, a file-like
        # object, or the content itself.
        if self.input == 'filename':
            with open(doc, 'rb') as fh:
                doc = fh.read()
        elif self.input == 'file':
            doc = doc.read()
        if isinstance(doc, bytes):
            doc = doc.decode(self.encoding, self.decode_error)
        # pandas represents missing values as np.nan; reject it explicitly
        # with a clear message instead of failing later in the analyzer.
        if doc is np.nan:
            raise ValueError("np.nan is an invalid document, expected byte or "
                             "unicode string.")
        return doc
    def _word_ngrams(self, tokens, stop_words=None):
        """Turn tokens into a sequence of n-grams after stop words filtering"""
        # handle stop words
        if stop_words is not None:
            tokens = [w for w in tokens if w not in stop_words]
        # handle token n-grams
        min_n, max_n = self.ngram_range
        if max_n != 1:
            original_tokens = tokens
            tokens = []
            n_original_tokens = len(original_tokens)
            # slide a window of each size n over the token sequence and join
            # each window's tokens with single spaces
            for n in xrange(min_n,
                            min(max_n + 1, n_original_tokens + 1)):
                for i in xrange(n_original_tokens - n + 1):
                    tokens.append(" ".join(original_tokens[i: i + n]))
        return tokens
    def _char_ngrams(self, text_document):
        """Tokenize text_document into a sequence of character n-grams"""
        # normalize white spaces
        text_document = self._white_spaces.sub(" ", text_document)
        text_len = len(text_document)
        ngrams = []
        min_n, max_n = self.ngram_range
        # n-grams may span word boundaries (unlike _char_wb_ngrams)
        for n in xrange(min_n, min(max_n + 1, text_len + 1)):
            for i in xrange(text_len - n + 1):
                ngrams.append(text_document[i: i + n])
        return ngrams
    def _char_wb_ngrams(self, text_document):
        """Whitespace sensitive char-n-gram tokenization.
        Tokenize text_document into a sequence of character n-grams
        excluding any whitespace (operating only inside word boundaries)"""
        # normalize white spaces
        text_document = self._white_spaces.sub(" ", text_document)
        min_n, max_n = self.ngram_range
        ngrams = []
        for w in text_document.split():
            # pad each word with spaces so edge n-grams mark word boundaries
            w = ' ' + w + ' '
            w_len = len(w)
            for n in xrange(min_n, max_n + 1):
                offset = 0
                ngrams.append(w[offset:offset + n])
                while offset + n < w_len:
                    offset += 1
                    ngrams.append(w[offset:offset + n])
                if offset == 0:  # count a short word (w_len < n) only once
                    break
        return ngrams
    def build_preprocessor(self):
        """Return a function to preprocess the text before tokenization"""
        if self.preprocessor is not None:
            return self.preprocessor
        # unfortunately python functools package does not have an efficient
        # `compose` function that would have allowed us to chain a dynamic
        # number of functions. However the cost of a lambda call is a few
        # hundreds of nanoseconds which is negligible when compared to the
        # cost of tokenizing a string of 1000 chars for instance.
        noop = lambda x: x
        # accent stripping
        if not self.strip_accents:
            strip_accents = noop
        elif callable(self.strip_accents):
            strip_accents = self.strip_accents
        elif self.strip_accents == 'ascii':
            strip_accents = strip_accents_ascii
        elif self.strip_accents == 'unicode':
            strip_accents = strip_accents_unicode
        else:
            raise ValueError('Invalid value for "strip_accents": %s' %
                             self.strip_accents)
        if self.lowercase:
            return lambda x: strip_accents(x.lower())
        else:
            return strip_accents
    def build_tokenizer(self):
        """Return a function that splits a string into a sequence of tokens"""
        if self.tokenizer is not None:
            return self.tokenizer
        # compile once here so the returned closure reuses the pattern
        token_pattern = re.compile(self.token_pattern)
        return lambda doc: token_pattern.findall(doc)
    def get_stop_words(self):
        """Build or fetch the effective stop words list"""
        return _check_stop_list(self.stop_words)
    def build_analyzer(self):
        """Return a callable that handles preprocessing and tokenization"""
        if callable(self.analyzer):
            return self.analyzer
        preprocess = self.build_preprocessor()
        if self.analyzer == 'char':
            return lambda doc: self._char_ngrams(preprocess(self.decode(doc)))
        elif self.analyzer == 'char_wb':
            return lambda doc: self._char_wb_ngrams(
                preprocess(self.decode(doc)))
        elif self.analyzer == 'word':
            # stop words only apply to the word analyzer
            stop_words = self.get_stop_words()
            tokenize = self.build_tokenizer()
            return lambda doc: self._word_ngrams(
                tokenize(preprocess(self.decode(doc))), stop_words)
        else:
            raise ValueError('%s is not a valid tokenization scheme/analyzer' %
                             self.analyzer)
    def _validate_vocabulary(self):
        # Normalize a user-supplied ``vocabulary`` (mapping or iterable) into
        # ``self.vocabulary_`` and record whether it was user-fixed.
        vocabulary = self.vocabulary
        if vocabulary is not None:
            if not isinstance(vocabulary, Mapping):
                # iterable of terms: assign indices in iteration order,
                # rejecting duplicates
                vocab = {}
                for i, t in enumerate(vocabulary):
                    if vocab.setdefault(t, i) != i:
                        msg = "Duplicate term in vocabulary: %r" % t
                        raise ValueError(msg)
                vocabulary = vocab
            else:
                # mapping: indices must be exactly 0..len-1, no repeats, no gaps
                indices = set(six.itervalues(vocabulary))
                if len(indices) != len(vocabulary):
                    raise ValueError("Vocabulary contains repeated indices.")
                for i in xrange(len(vocabulary)):
                    if i not in indices:
                        msg = ("Vocabulary of size %d doesn't contain index "
                               "%d." % (len(vocabulary), i))
                        raise ValueError(msg)
            if not vocabulary:
                raise ValueError("empty vocabulary passed to fit")
            self.fixed_vocabulary_ = True
            self.vocabulary_ = dict(vocabulary)
        else:
            self.fixed_vocabulary_ = False
    def _check_vocabulary(self):
        """Check if vocabulary is empty or missing (not fit-ed)"""
        msg = "%(name)s - Vocabulary wasn't fitted."
        # NOTE(review): the trailing comma makes this a discarded 1-tuple
        # expression; the call itself still runs and raises if unfitted.
        check_is_fitted(self, 'vocabulary_', msg=msg),
        if len(self.vocabulary_) == 0:
            raise ValueError("Vocabulary is empty")
    @property
    @deprecated("The `fixed_vocabulary` attribute is deprecated and will be "
                "removed in 0.18. Please use `fixed_vocabulary_` instead.")
    def fixed_vocabulary(self):
        # backward-compatibility alias for fixed_vocabulary_
        return self.fixed_vocabulary_
class HashingVectorizer(BaseEstimator, VectorizerMixin):
    """Convert a collection of text documents to a matrix of token occurrences
    It turns a collection of text documents into a scipy.sparse matrix holding
    token occurrence counts (or binary occurrence information), possibly
    normalized as token frequencies if norm='l1' or projected on the euclidean
    unit sphere if norm='l2'.
    This text vectorizer implementation uses the hashing trick to find the
    token string name to feature integer index mapping.
    This strategy has several advantages:
    - it is very low memory scalable to large datasets as there is no need to
      store a vocabulary dictionary in memory
    - it is fast to pickle and un-pickle as it holds no state besides the
      constructor parameters
    - it can be used in a streaming (partial fit) or parallel pipeline as there
      is no state computed during fit.
    There are also a couple of cons (vs using a CountVectorizer with an
    in-memory vocabulary):
    - there is no way to compute the inverse transform (from feature indices to
      string feature names) which can be a problem when trying to introspect
      which features are most important to a model.
    - there can be collisions: distinct tokens can be mapped to the same
      feature index. However in practice this is rarely an issue if n_features
      is large enough (e.g. 2 ** 18 for text classification problems).
    - no IDF weighting as this would render the transformer stateful.
    The hash function employed is the signed 32-bit version of Murmurhash3.
    Read more in the :ref:`User Guide <text_feature_extraction>`.
    Parameters
    ----------
    input : string {'filename', 'file', 'content'}
        If 'filename', the sequence passed as an argument to fit is
        expected to be a list of filenames that need reading to fetch
        the raw content to analyze.
        If 'file', the sequence items must have a 'read' method (file-like
        object) that is called to fetch the bytes in memory.
        Otherwise the input is expected to be the sequence strings or
        bytes items are expected to be analyzed directly.
    encoding : string, default='utf-8'
        If bytes or files are given to analyze, this encoding is used to
        decode.
    decode_error : {'strict', 'ignore', 'replace'}
        Instruction on what to do if a byte sequence is given to analyze that
        contains characters not of the given `encoding`. By default, it is
        'strict', meaning that a UnicodeDecodeError will be raised. Other
        values are 'ignore' and 'replace'.
    strip_accents : {'ascii', 'unicode', None}
        Remove accents during the preprocessing step.
        'ascii' is a fast method that only works on characters that have
        an direct ASCII mapping.
        'unicode' is a slightly slower method that works on any characters.
        None (default) does nothing.
    analyzer : string, {'word', 'char', 'char_wb'} or callable
        Whether the feature should be made of word or character n-grams.
        Option 'char_wb' creates character n-grams only from text inside
        word boundaries.
        If a callable is passed it is used to extract the sequence of features
        out of the raw, unprocessed input.
    preprocessor : callable or None (default)
        Override the preprocessing (string transformation) stage while
        preserving the tokenizing and n-grams generation steps.
    tokenizer : callable or None (default)
        Override the string tokenization step while preserving the
        preprocessing and n-grams generation steps.
        Only applies if ``analyzer == 'word'``.
    ngram_range : tuple (min_n, max_n), default=(1, 1)
        The lower and upper boundary of the range of n-values for different
        n-grams to be extracted. All values of n such that min_n <= n <= max_n
        will be used.
    stop_words : string {'english'}, list, or None (default)
        If 'english', a built-in stop word list for English is used.
        If a list, that list is assumed to contain stop words, all of which
        will be removed from the resulting tokens.
        Only applies if ``analyzer == 'word'``.
    lowercase : boolean, default=True
        Convert all characters to lowercase before tokenizing.
    token_pattern : string
        Regular expression denoting what constitutes a "token", only used
        if ``analyzer == 'word'``. The default regexp selects tokens of 2
        or more alphanumeric characters (punctuation is completely ignored
        and always treated as a token separator).
    n_features : integer, default=(2 ** 20)
        The number of features (columns) in the output matrices. Small numbers
        of features are likely to cause hash collisions, but large numbers
        will cause larger coefficient dimensions in linear learners.
    norm : 'l1', 'l2' or None, optional
        Norm used to normalize term vectors. None for no normalization.
    binary: boolean, default=False.
        If True, all non zero counts are set to 1. This is useful for discrete
        probabilistic models that model binary events rather than integer
        counts.
    dtype: type, optional
        Type of the matrix returned by fit_transform() or transform().
    non_negative : boolean, default=False
        Whether output matrices should contain non-negative values only;
        effectively calls abs on the matrix prior to returning it.
        When True, output values can be interpreted as frequencies.
        When False, output values will have expected value zero.
    See also
    --------
    CountVectorizer, TfidfVectorizer
    """
    def __init__(self, input='content', encoding='utf-8',
                 decode_error='strict', strip_accents=None,
                 lowercase=True, preprocessor=None, tokenizer=None,
                 stop_words=None, token_pattern=r"(?u)\b\w\w+\b",
                 ngram_range=(1, 1), analyzer='word', n_features=(2 ** 20),
                 binary=False, norm='l2', non_negative=False,
                 dtype=np.float64):
        # parameters are only stored here; all work happens in transform()
        self.input = input
        self.encoding = encoding
        self.decode_error = decode_error
        self.strip_accents = strip_accents
        self.preprocessor = preprocessor
        self.tokenizer = tokenizer
        self.analyzer = analyzer
        self.lowercase = lowercase
        self.token_pattern = token_pattern
        self.stop_words = stop_words
        self.n_features = n_features
        self.ngram_range = ngram_range
        self.binary = binary
        self.norm = norm
        self.non_negative = non_negative
        self.dtype = dtype
    def partial_fit(self, X, y=None):
        """Does nothing: this transformer is stateless.
        This method is just there to mark the fact that this transformer
        can work in a streaming setup.
        """
        return self
    def fit(self, X, y=None):
        """Does nothing: this transformer is stateless."""
        # triggers a parameter validation
        self._get_hasher().fit(X, y=y)
        return self
    def transform(self, X, y=None):
        """Transform a sequence of documents to a document-term matrix.
        Parameters
        ----------
        X : iterable over raw text documents, length = n_samples
            Samples. Each sample must be a text document (either bytes or
            unicode strings, file name or file object depending on the
            constructor argument) which will be tokenized and hashed.
        y : (ignored)
        Returns
        -------
        X : scipy.sparse matrix, shape = (n_samples, self.n_features)
            Document-term matrix.
        """
        analyzer = self.build_analyzer()
        # lazily analyze each document; the hasher consumes the generator
        X = self._get_hasher().transform(analyzer(doc) for doc in X)
        if self.binary:
            # binary occurrence: every stored count becomes 1
            X.data.fill(1)
        if self.norm is not None:
            X = normalize(X, norm=self.norm, copy=False)
        return X
    # Alias transform to fit_transform for convenience
    fit_transform = transform
    def _get_hasher(self):
        # hasher is rebuilt on demand from the stored constructor parameters
        return FeatureHasher(n_features=self.n_features,
                             input_type='string', dtype=self.dtype,
                             non_negative=self.non_negative)
def _document_frequency(X):
"""Count the number of non-zero values for each feature in sparse X."""
if sp.isspmatrix_csr(X):
return bincount(X.indices, minlength=X.shape[1])
else:
return np.diff(sp.csc_matrix(X, copy=False).indptr)
class CountVectorizer(BaseEstimator, VectorizerMixin):
    """Convert a collection of text documents to a matrix of token counts
    This implementation produces a sparse representation of the counts using
    scipy.sparse.coo_matrix.
    If you do not provide an a-priori dictionary and you do not use an analyzer
    that does some kind of feature selection then the number of features will
    be equal to the vocabulary size found by analyzing the data.
    Read more in the :ref:`User Guide <text_feature_extraction>`.
    Parameters
    ----------
    input : string {'filename', 'file', 'content'}
        If 'filename', the sequence passed as an argument to fit is
        expected to be a list of filenames that need reading to fetch
        the raw content to analyze.
        If 'file', the sequence items must have a 'read' method (file-like
        object) that is called to fetch the bytes in memory.
        Otherwise the input is expected to be the sequence strings or
        bytes items are expected to be analyzed directly.
    encoding : string, 'utf-8' by default.
        If bytes or files are given to analyze, this encoding is used to
        decode.
    decode_error : {'strict', 'ignore', 'replace'}
        Instruction on what to do if a byte sequence is given to analyze that
        contains characters not of the given `encoding`. By default, it is
        'strict', meaning that a UnicodeDecodeError will be raised. Other
        values are 'ignore' and 'replace'.
    strip_accents : {'ascii', 'unicode', None}
        Remove accents during the preprocessing step.
        'ascii' is a fast method that only works on characters that have
        an direct ASCII mapping.
        'unicode' is a slightly slower method that works on any characters.
        None (default) does nothing.
    analyzer : string, {'word', 'char', 'char_wb'} or callable
        Whether the feature should be made of word or character n-grams.
        Option 'char_wb' creates character n-grams only from text inside
        word boundaries.
        If a callable is passed it is used to extract the sequence of features
        out of the raw, unprocessed input.
        Only applies if ``analyzer == 'word'``.
    preprocessor : callable or None (default)
        Override the preprocessing (string transformation) stage while
        preserving the tokenizing and n-grams generation steps.
    tokenizer : callable or None (default)
        Override the string tokenization step while preserving the
        preprocessing and n-grams generation steps.
        Only applies if ``analyzer == 'word'``.
    ngram_range : tuple (min_n, max_n)
        The lower and upper boundary of the range of n-values for different
        n-grams to be extracted. All values of n such that min_n <= n <= max_n
        will be used.
    stop_words : string {'english'}, list, or None (default)
        If 'english', a built-in stop word list for English is used.
        If a list, that list is assumed to contain stop words, all of which
        will be removed from the resulting tokens.
        Only applies if ``analyzer == 'word'``.
        If None, no stop words will be used. max_df can be set to a value
        in the range [0.7, 1.0) to automatically detect and filter stop
        words based on intra corpus document frequency of terms.
    lowercase : boolean, True by default
        Convert all characters to lowercase before tokenizing.
    token_pattern : string
        Regular expression denoting what constitutes a "token", only used
        if ``analyzer == 'word'``. The default regexp select tokens of 2
        or more alphanumeric characters (punctuation is completely ignored
        and always treated as a token separator).
    max_df : float in range [0.0, 1.0] or int, default=1.0
        When building the vocabulary ignore terms that have a document
        frequency strictly higher than the given threshold (corpus-specific
        stop words).
        If float, the parameter represents a proportion of documents, integer
        absolute counts.
        This parameter is ignored if vocabulary is not None.
    min_df : float in range [0.0, 1.0] or int, default=1
        When building the vocabulary ignore terms that have a document
        frequency strictly lower than the given threshold. This value is also
        called cut-off in the literature.
        If float, the parameter represents a proportion of documents, integer
        absolute counts.
        This parameter is ignored if vocabulary is not None.
    max_features : int or None, default=None
        If not None, build a vocabulary that only consider the top
        max_features ordered by term frequency across the corpus.
        This parameter is ignored if vocabulary is not None.
    vocabulary : Mapping or iterable, optional
        Either a Mapping (e.g., a dict) where keys are terms and values are
        indices in the feature matrix, or an iterable over terms. If not
        given, a vocabulary is determined from the input documents. Indices
        in the mapping should not be repeated and should not have any gap
        between 0 and the largest index.
    binary : boolean, default=False
        If True, all non zero counts are set to 1. This is useful for discrete
        probabilistic models that model binary events rather than integer
        counts.
    dtype : type, optional
        Type of the matrix returned by fit_transform() or transform().
    Attributes
    ----------
    vocabulary_ : dict
        A mapping of terms to feature indices.
    stop_words_ : set
        Terms that were ignored because they either:
          - occurred in too many documents (`max_df`)
          - occurred in too few documents (`min_df`)
          - were cut off by feature selection (`max_features`).
        This is only available if no vocabulary was given.
    See also
    --------
    HashingVectorizer, TfidfVectorizer
    Notes
    -----
    The ``stop_words_`` attribute can get large and increase the model size
    when pickling. This attribute is provided only for introspection and can
    be safely removed using delattr or set to None before pickling.
    """
    def __init__(self, input='content', encoding='utf-8',
                 decode_error='strict', strip_accents=None,
                 lowercase=True, preprocessor=None, tokenizer=None,
                 stop_words=None, token_pattern=r"(?u)\b\w\w+\b",
                 ngram_range=(1, 1), analyzer='word',
                 max_df=1.0, min_df=1, max_features=None,
                 vocabulary=None, binary=False, dtype=np.int64):
        self.input = input
        self.encoding = encoding
        self.decode_error = decode_error
        self.strip_accents = strip_accents
        self.preprocessor = preprocessor
        self.tokenizer = tokenizer
        self.analyzer = analyzer
        self.lowercase = lowercase
        self.token_pattern = token_pattern
        self.stop_words = stop_words
        self.max_df = max_df
        self.min_df = min_df
        # NOTE(review): error message has a typo ("of" should be "or");
        # changing it would alter a runtime string, left as-is here.
        if max_df < 0 or min_df < 0:
            raise ValueError("negative value for max_df of min_df")
        self.max_features = max_features
        if max_features is not None:
            if (not isinstance(max_features, numbers.Integral) or
                    max_features <= 0):
                raise ValueError(
                    "max_features=%r, neither a positive integer nor None"
                    % max_features)
        self.ngram_range = ngram_range
        self.vocabulary = vocabulary
        self.binary = binary
        self.dtype = dtype
    def _sort_features(self, X, vocabulary):
        """Sort features by name
        Returns a reordered matrix and modifies the vocabulary in place
        """
        sorted_features = sorted(six.iteritems(vocabulary))
        map_index = np.empty(len(sorted_features), dtype=np.int32)
        for new_val, (term, old_val) in enumerate(sorted_features):
            # map_index[new] = old lets fancy indexing reorder the columns
            map_index[new_val] = old_val
            vocabulary[term] = new_val
        return X[:, map_index]
    def _limit_features(self, X, vocabulary, high=None, low=None,
                        limit=None):
        """Remove too rare or too common features.
        Prune features that are non zero in more samples than high or less
        documents than low, modifying the vocabulary, and restricting it to
        at most the limit most frequent.
        This does not prune samples with zero features.
        """
        if high is None and low is None and limit is None:
            return X, set()
        # Calculate a mask based on document frequencies
        dfs = _document_frequency(X)
        tfs = np.asarray(X.sum(axis=0)).ravel()
        mask = np.ones(len(dfs), dtype=bool)
        if high is not None:
            mask &= dfs <= high
        if low is not None:
            mask &= dfs >= low
        if limit is not None and mask.sum() > limit:
            # keep only the `limit` highest-term-frequency surviving features
            mask_inds = (-tfs[mask]).argsort()[:limit]
            new_mask = np.zeros(len(dfs), dtype=bool)
            new_mask[np.where(mask)[0][mask_inds]] = True
            mask = new_mask
        new_indices = np.cumsum(mask) - 1  # maps old indices to new
        removed_terms = set()
        for term, old_index in list(six.iteritems(vocabulary)):
            if mask[old_index]:
                vocabulary[term] = new_indices[old_index]
            else:
                del vocabulary[term]
                removed_terms.add(term)
        kept_indices = np.where(mask)[0]
        if len(kept_indices) == 0:
            raise ValueError("After pruning, no terms remain. Try a lower"
                             " min_df or a higher max_df.")
        return X[:, kept_indices], removed_terms
    def _count_vocab(self, raw_documents, fixed_vocab):
        """Create sparse feature matrix, and vocabulary where fixed_vocab=False
        """
        if fixed_vocab:
            vocabulary = self.vocabulary_
        else:
            # Add a new value when a new vocabulary item is seen
            vocabulary = defaultdict()
            vocabulary.default_factory = vocabulary.__len__
        analyze = self.build_analyzer()
        # CSR construction buffers: column indices and per-row boundaries
        j_indices = _make_int_array()
        indptr = _make_int_array()
        indptr.append(0)
        for doc in raw_documents:
            for feature in analyze(doc):
                try:
                    j_indices.append(vocabulary[feature])
                except KeyError:
                    # Ignore out-of-vocabulary items for fixed_vocab=True
                    continue
            indptr.append(len(j_indices))
        if not fixed_vocab:
            # disable defaultdict behaviour
            vocabulary = dict(vocabulary)
            if not vocabulary:
                raise ValueError("empty vocabulary; perhaps the documents only"
                                 " contain stop words")
        j_indices = frombuffer_empty(j_indices, dtype=np.intc)
        indptr = np.frombuffer(indptr, dtype=np.intc)
        # every occurrence contributes 1; duplicates are summed below
        values = np.ones(len(j_indices))
        X = sp.csr_matrix((values, j_indices, indptr),
                          shape=(len(indptr) - 1, len(vocabulary)),
                          dtype=self.dtype)
        X.sum_duplicates()
        return vocabulary, X
    def fit(self, raw_documents, y=None):
        """Learn a vocabulary dictionary of all tokens in the raw documents.
        Parameters
        ----------
        raw_documents : iterable
            An iterable which yields either str, unicode or file objects.
        Returns
        -------
        self
        """
        self.fit_transform(raw_documents)
        return self
    def fit_transform(self, raw_documents, y=None):
        """Learn the vocabulary dictionary and return term-document matrix.
        This is equivalent to fit followed by transform, but more efficiently
        implemented.
        Parameters
        ----------
        raw_documents : iterable
            An iterable which yields either str, unicode or file objects.
        Returns
        -------
        X : array, [n_samples, n_features]
            Document-term matrix.
        """
        # We intentionally don't call the transform method to make
        # fit_transform overridable without unwanted side effects in
        # TfidfVectorizer.
        self._validate_vocabulary()
        max_df = self.max_df
        min_df = self.min_df
        max_features = self.max_features
        vocabulary, X = self._count_vocab(raw_documents,
                                          self.fixed_vocabulary_)
        if self.binary:
            X.data.fill(1)
        if not self.fixed_vocabulary_:
            X = self._sort_features(X, vocabulary)
            n_doc = X.shape[0]
            # float thresholds are proportions of the corpus; ints are counts
            max_doc_count = (max_df
                             if isinstance(max_df, numbers.Integral)
                             else max_df * n_doc)
            min_doc_count = (min_df
                             if isinstance(min_df, numbers.Integral)
                             else min_df * n_doc)
            if max_doc_count < min_doc_count:
                raise ValueError(
                    "max_df corresponds to < documents than min_df")
            X, self.stop_words_ = self._limit_features(X, vocabulary,
                                                       max_doc_count,
                                                       min_doc_count,
                                                       max_features)
            self.vocabulary_ = vocabulary
        return X
    def transform(self, raw_documents):
        """Transform documents to document-term matrix.
        Extract token counts out of raw text documents using the vocabulary
        fitted with fit or the one provided to the constructor.
        Parameters
        ----------
        raw_documents : iterable
            An iterable which yields either str, unicode or file objects.
        Returns
        -------
        X : sparse matrix, [n_samples, n_features]
            Document-term matrix.
        """
        if not hasattr(self, 'vocabulary_'):
            self._validate_vocabulary()
        self._check_vocabulary()
        # use the same matrix-building strategy as fit_transform
        _, X = self._count_vocab(raw_documents, fixed_vocab=True)
        if self.binary:
            X.data.fill(1)
        return X
    def inverse_transform(self, X):
        """Return terms per document with nonzero entries in X.
        Parameters
        ----------
        X : {array, sparse matrix}, shape = [n_samples, n_features]
        Returns
        -------
        X_inv : list of arrays, len = n_samples
            List of arrays of terms.
        """
        self._check_vocabulary()
        if sp.issparse(X):
            # We need CSR format for fast row manipulations.
            X = X.tocsr()
        else:
            # We need to convert X to a matrix, so that the indexing
            # returns 2D objects
            X = np.asmatrix(X)
        n_samples = X.shape[0]
        # invert vocabulary_: position i holds the term with feature index i
        terms = np.array(list(self.vocabulary_.keys()))
        indices = np.array(list(self.vocabulary_.values()))
        inverse_vocabulary = terms[np.argsort(indices)]
        return [inverse_vocabulary[X[i, :].nonzero()[1]].ravel()
                for i in range(n_samples)]
    def get_feature_names(self):
        """Array mapping from feature integer indices to feature name"""
        self._check_vocabulary()
        return [t for t, i in sorted(six.iteritems(self.vocabulary_),
                                     key=itemgetter(1))]
def _make_int_array():
    """Construct an array.array of a type suitable for scipy.sparse indices."""
    # str("i") keeps the typecode a native str under Python 2's
    # unicode_literals import (array.array rejects unicode typecodes there).
    return array.array(str("i"))
class TfidfTransformer(BaseEstimator, TransformerMixin):
    """Transform a count matrix to a normalized tf or tf-idf representation
    Tf means term-frequency while tf-idf means term-frequency times inverse
    document-frequency. This is a common term weighting scheme in information
    retrieval, that has also found good use in document classification.
    The goal of using tf-idf instead of the raw frequencies of occurrence of a
    token in a given document is to scale down the impact of tokens that occur
    very frequently in a given corpus and that are hence empirically less
    informative than features that occur in a small fraction of the training
    corpus.
    The actual formula used for tf-idf is tf * (idf + 1) = tf + tf * idf,
    instead of tf * idf. The effect of this is that terms with zero idf, i.e.
    that occur in all documents of a training set, will not be entirely
    ignored. The formulas used to compute tf and idf depend on parameter
    settings that correspond to the SMART notation used in IR, as follows:
    Tf is "n" (natural) by default, "l" (logarithmic) when sublinear_tf=True.
    Idf is "t" when use_idf is given, "n" (none) otherwise.
    Normalization is "c" (cosine) when norm='l2', "n" (none) when norm=None.
    Read more in the :ref:`User Guide <text_feature_extraction>`.
    Parameters
    ----------
    norm : 'l1', 'l2' or None, optional
        Norm used to normalize term vectors. None for no normalization.
    use_idf : boolean, default=True
        Enable inverse-document-frequency reweighting.
    smooth_idf : boolean, default=True
        Smooth idf weights by adding one to document frequencies, as if an
        extra document was seen containing every term in the collection
        exactly once. Prevents zero divisions.
    sublinear_tf : boolean, default=False
        Apply sublinear tf scaling, i.e. replace tf with 1 + log(tf).
    References
    ----------
    .. [Yates2011] `R. Baeza-Yates and B. Ribeiro-Neto (2011). Modern
                   Information Retrieval. Addison Wesley, pp. 68-74.`
    .. [MRS2008] `C.D. Manning, P. Raghavan and H. Schuetze  (2008).
                 Introduction to Information Retrieval. Cambridge University
                 Press, pp. 118-120.`
    """
    def __init__(self, norm='l2', use_idf=True, smooth_idf=True,
                 sublinear_tf=False):
        self.norm = norm
        self.use_idf = use_idf
        self.smooth_idf = smooth_idf
        self.sublinear_tf = sublinear_tf
    def fit(self, X, y=None):
        """Learn the idf vector (global term weights)
        Parameters
        ----------
        X : sparse matrix, [n_samples, n_features]
            a matrix of term/token counts
        """
        if not sp.issparse(X):
            X = sp.csc_matrix(X)
        if self.use_idf:
            n_samples, n_features = X.shape
            df = _document_frequency(X)
            # perform idf smoothing if required
            df += int(self.smooth_idf)
            n_samples += int(self.smooth_idf)
            # log+1 instead of log makes sure terms with zero idf don't get
            # suppressed entirely.
            idf = np.log(float(n_samples) / df) + 1.0
            # store idf as a diagonal matrix so transform() is a single
            # sparse matrix product
            self._idf_diag = sp.spdiags(idf,
                                        diags=0, m=n_features, n=n_features)
        return self
    def transform(self, X, copy=True):
        """Transform a count matrix to a tf or tf-idf representation
        Parameters
        ----------
        X : sparse matrix, [n_samples, n_features]
            a matrix of term/token counts
        copy : boolean, default True
            Whether to copy X and operate on the copy or perform in-place
            operations.
        Returns
        -------
        vectors : sparse matrix, [n_samples, n_features]
        """
        if hasattr(X, 'dtype') and np.issubdtype(X.dtype, np.float):
            # preserve float family dtype
            X = sp.csr_matrix(X, copy=copy)
        else:
            # convert counts or binary occurrences to floats
            X = sp.csr_matrix(X, dtype=np.float64, copy=copy)
        n_samples, n_features = X.shape
        if self.sublinear_tf:
            # sublinear scaling: tf -> 1 + log(tf), computed in place
            np.log(X.data, X.data)
            X.data += 1
        if self.use_idf:
            check_is_fitted(self, '_idf_diag', 'idf vector is not fitted')
            expected_n_features = self._idf_diag.shape[0]
            if n_features != expected_n_features:
                raise ValueError("Input has n_features=%d while the model"
                                 " has been trained with n_features=%d" % (
                                     n_features, expected_n_features))
            # *= doesn't work
            X = X * self._idf_diag
        if self.norm:
            X = normalize(X, norm=self.norm, copy=False)
        return X
    @property
    def idf_(self):
        # the per-feature idf weights, or None before fit / when use_idf=False
        if hasattr(self, "_idf_diag"):
            return np.ravel(self._idf_diag.sum(axis=0))
        else:
            return None
class TfidfVectorizer(CountVectorizer):
    """Convert a collection of raw documents to a matrix of TF-IDF features.

    Equivalent to :class:`CountVectorizer` followed by
    :class:`TfidfTransformer`: documents are tokenized and counted by the
    base class, then reweighted by an internal transformer.

    Read more in the :ref:`User Guide <text_feature_extraction>`.

    Parameters
    ----------
    input, encoding, decode_error, strip_accents, lowercase, preprocessor,
    tokenizer, analyzer, stop_words, token_pattern, ngram_range, max_df,
    min_df, max_features, vocabulary, binary, dtype :
        Tokenization and counting parameters, forwarded unchanged to
        :class:`CountVectorizer` (see that class for full descriptions).
    norm : 'l1', 'l2' or None, optional
        Norm used to normalize term vectors. None for no normalization.
    use_idf : boolean, default=True
        Enable inverse-document-frequency reweighting.
    smooth_idf : boolean, default=True
        Smooth idf weights by adding one to document frequencies, as if an
        extra document was seen containing every term in the collection
        exactly once. Prevents zero divisions.
    sublinear_tf : boolean, default=False
        Apply sublinear tf scaling, i.e. replace tf with 1 + log(tf).

    Attributes
    ----------
    idf_ : array, shape = [n_features], or None
        The learned idf vector (global term weights) when ``use_idf`` is
        set to True, None otherwise.
    stop_words_ : set
        Terms ignored because they occurred in too many documents
        (`max_df`), in too few documents (`min_df`), or were cut off by
        feature selection (`max_features`). Only available when no
        vocabulary was given.

    Notes
    -----
    The ``stop_words_`` attribute can get large and increase the model size
    when pickling. It is provided only for introspection and can be safely
    removed using delattr or set to None before pickling.

    See also
    --------
    CountVectorizer
        Tokenize the documents and count the occurrences of token and
        return them as a sparse matrix.
    TfidfTransformer
        Apply Term Frequency Inverse Document Frequency normalization to a
        sparse matrix of occurrence counts.
    """

    def __init__(self, input='content', encoding='utf-8',
                 decode_error='strict', strip_accents=None, lowercase=True,
                 preprocessor=None, tokenizer=None, analyzer='word',
                 stop_words=None, token_pattern=r"(?u)\b\w\w+\b",
                 ngram_range=(1, 1), max_df=1.0, min_df=1,
                 max_features=None, vocabulary=None, binary=False,
                 dtype=np.int64, norm='l2', use_idf=True, smooth_idf=True,
                 sublinear_tf=False):
        # Counting/tokenization options are handled by the base class.
        super(TfidfVectorizer, self).__init__(
            input=input, encoding=encoding, decode_error=decode_error,
            strip_accents=strip_accents, lowercase=lowercase,
            preprocessor=preprocessor, tokenizer=tokenizer, analyzer=analyzer,
            stop_words=stop_words, token_pattern=token_pattern,
            ngram_range=ngram_range, max_df=max_df, min_df=min_df,
            max_features=max_features, vocabulary=vocabulary, binary=binary,
            dtype=dtype)
        # All TF-IDF weighting is delegated to an internal transformer.
        self._tfidf = TfidfTransformer(norm=norm, use_idf=use_idf,
                                       smooth_idf=smooth_idf,
                                       sublinear_tf=sublinear_tf)

    # The TF-IDF parameters are mirrored onto the underlying transformer so
    # that grid search and repr see them as ordinary attributes.

    @property
    def norm(self):
        return self._tfidf.norm

    @norm.setter
    def norm(self, value):
        self._tfidf.norm = value

    @property
    def use_idf(self):
        return self._tfidf.use_idf

    @use_idf.setter
    def use_idf(self, value):
        self._tfidf.use_idf = value

    @property
    def smooth_idf(self):
        return self._tfidf.smooth_idf

    @smooth_idf.setter
    def smooth_idf(self, value):
        self._tfidf.smooth_idf = value

    @property
    def sublinear_tf(self):
        return self._tfidf.sublinear_tf

    @sublinear_tf.setter
    def sublinear_tf(self, value):
        self._tfidf.sublinear_tf = value

    @property
    def idf_(self):
        return self._tfidf.idf_

    def fit(self, raw_documents, y=None):
        """Learn vocabulary and idf from training set.

        Parameters
        ----------
        raw_documents : iterable
            an iterable which yields either str, unicode or file objects

        Returns
        -------
        self : TfidfVectorizer
        """
        counts = super(TfidfVectorizer, self).fit_transform(raw_documents)
        self._tfidf.fit(counts)
        return self

    def fit_transform(self, raw_documents, y=None):
        """Learn vocabulary and idf, return term-document matrix.

        This is equivalent to fit followed by transform, but more
        efficiently implemented.

        Parameters
        ----------
        raw_documents : iterable
            an iterable which yields either str, unicode or file objects

        Returns
        -------
        X : sparse matrix, [n_samples, n_features]
            Tf-idf-weighted document-term matrix.
        """
        counts = super(TfidfVectorizer, self).fit_transform(raw_documents)
        self._tfidf.fit(counts)
        # counts is a fresh matrix built from raw_documents, so the
        # transformer may safely operate in place (copy=False).
        return self._tfidf.transform(counts, copy=False)

    def transform(self, raw_documents, copy=True):
        """Transform documents to document-term matrix.

        Uses the vocabulary and document frequencies (df) learned by fit
        (or fit_transform).

        Parameters
        ----------
        raw_documents : iterable
            an iterable which yields either str, unicode or file objects
        copy : boolean, default True
            Whether to copy X and operate on the copy or perform in-place
            operations.

        Returns
        -------
        X : sparse matrix, [n_samples, n_features]
            Tf-idf-weighted document-term matrix.
        """
        check_is_fitted(self, '_tfidf', 'The tfidf vector is not fitted')
        counts = super(TfidfVectorizer, self).transform(raw_documents)
        return self._tfidf.transform(counts, copy=False)
| bsd-3-clause |
BiaDarkia/scikit-learn | sklearn/manifold/tests/test_mds.py | 99 | 1873 | import numpy as np
from numpy.testing import assert_array_almost_equal
from sklearn.manifold import mds
from sklearn.utils.testing import assert_raises
def test_smacof():
    # Metric SMACOF checked against the worked example in "Modern
    # Multidimensional Scaling", Borg & Groenen, p 154.
    dissimilarities = np.array([[0, 5, 3, 4],
                                [5, 0, 2, 2],
                                [3, 2, 0, 1],
                                [4, 2, 1, 0]])
    init_config = np.array([[-.266, -.539],
                            [.451, .252],
                            [.016, -.238],
                            [-.200, .524]])
    embedding, _ = mds.smacof(dissimilarities, init=init_config,
                              n_components=2, max_iter=1, n_init=1)
    expected = np.array([[-1.415, -2.471],
                        [1.633, 1.107],
                        [.249, -.067],
                        [-.468, 1.431]])
    assert_array_almost_equal(embedding, expected, decimal=3)
def test_smacof_error():
    # A non-symmetric similarity matrix must be rejected.
    asymmetric = np.array([[0, 5, 9, 4],
                           [5, 0, 2, 2],
                           [3, 2, 0, 1],
                           [4, 2, 1, 0]])
    assert_raises(ValueError, mds.smacof, asymmetric)
    # A non-square similarity matrix must be rejected.
    non_square = np.array([[0, 5, 9, 4],
                           [5, 0, 2, 2],
                           [4, 2, 1, 0]])
    assert_raises(ValueError, mds.smacof, non_square)
    # An init array with the wrong number of rows must be rejected.
    symmetric = np.array([[0, 5, 3, 4],
                          [5, 0, 2, 2],
                          [3, 2, 0, 1],
                          [4, 2, 1, 0]])
    bad_init = np.array([[-.266, -.539],
                         [.016, -.238],
                         [-.200, .524]])
    assert_raises(ValueError, mds.smacof, symmetric, init=bad_init, n_init=1)
def test_MDS():
    # Smoke test: non-metric MDS fits on a precomputed dissimilarity matrix.
    dissimilarities = np.array([[0, 5, 3, 4],
                                [5, 0, 2, 2],
                                [3, 2, 0, 1],
                                [4, 2, 1, 0]])
    estimator = mds.MDS(metric=False, n_jobs=3, dissimilarity="precomputed")
    estimator.fit(dissimilarities)
| bsd-3-clause |
oferb/OpenTrains | webserver/opentrain/algorithm/django_examples.py | 1 | 1925 | import gtfs.models
import analysis.models
import numpy as np
from scipy import spatial
import shelve
try:
import matplotlib.pyplot as plt
except ImportError:
pass
import simplekml
import config
import itertools
import os
def print_all(route_id):
results = gtfs.models.Trip.objects.filter(route_id=route_id)
for result in results:
print result.trip_id
def print_all_stop_names():
results = gtfs.models.Stop.objects.all().values_list('stop_name')
for result in results:
print result
def get_all_routes(do_print=True):
routes = gtfs.models.Route.objects.all().values_list('route_id', 'route_long_name')
if do_print:
for route in routes:
print route
return routes
def print_all_shape_long_lat(shape_id):
results = gtfs.models.Shape.objects.filter(shape_id=shape_id)
for result in results:
print '%s, %s, %s' % (result.shape_pt_sequence, result.shape_pt_lat, result.shape_pt_lon)
def print_all_shape_ids():
results = set(gtfs.models.Shape.objects.all().values_list('shape_id'))
for result in results:
print result
def print_device_id(device_id):
    """Print a 1-based counter and lat/lon for each report of a device."""
    reports = analysis.models.Report.objects.filter(device_id=device_id)
    for counter, report in enumerate(reports, 1):
        print("%d,%s,%s" % (counter, report.my_loc.lat, report.my_loc.lon))
def print_all_devices():
results = analysis.models.Report.objects.all().values_list('device_id')
for result in results:
print result
def print_device_wifis(device_id):
results = analysis.models.Report.objects.all()
for result in results:
print result, result.get_wifi_set_all()
#print_all_stop_names();
#print_device_id("02090d12")
#print_device_id("aaa")
#print_all_devices("aaa")
#print_device_wifis("aaa")
#print_all_route_long_name()
#print_all_shape_long_lat('51_00001')
#print_all_shape_ids() | bsd-3-clause |
LxMLS/lxmls-toolkit | labs/scripts/non_linear_sequence_classifiers/exercise_2.py | 1 | 3448 |
# coding: utf-8
# ### WSJ Data
# In[ ]:
# Load Part-of-Speech data (WSJ corpus wrapper from the lxmls toolkit).
from lxmls.readers.pos_corpus import PostagCorpusData
data = PostagCorpusData()
# ### Check Numpy and Pytorch Gradients match
# As we did with the feed-forward network, we will now implement a Recurrent
# Neural Network (RNN) in Pytorch. For this, complete the `forward()` method in
#
#     lxmls/deep_learning/pytorch_models/rnn.py
#
# Load the RNN model in numpy and Python for comparison
# In[ ]:
from lxmls.deep_learning.numpy_models.rnn import NumpyRNN
# Reference implementation: plain-numpy RNN with hand-written gradients.
numpy_model = NumpyRNN(
    input_size=data.input_size,
    embedding_size=50,
    hidden_size=20,
    output_size=data.output_size,
    learning_rate=0.1
)
# In[ ]:
from lxmls.deep_learning.pytorch_models.rnn import PytorchRNN
# Model under test: same architecture/hyper-parameters, Pytorch autograd.
model = PytorchRNN(
    input_size=data.input_size,
    embedding_size=50,
    hidden_size=20,
    output_size=data.output_size,
    learning_rate=0.1
)
# To debug your code you can compare the numpy and Pytorch gradients using
# In[ ]:
# Get gradients for both models on the same single-sentence batch.
batch = data.batches('train', batch_size=1)[0]
gradient_numpy = numpy_model.backpropagation(batch['input'], batch['output'])
gradient = model.backpropagation(batch['input'], batch['output'])
# In[ ]:
# Quick shape sanity check (notebook cell: value is just displayed).
gradient[0].shape, gradient_numpy[0].shape
# and then plotting them with matplotlib
# In[ ]:
import matplotlib.pyplot as plt
# Gradient for word embeddings in the example (numpy left, Pytorch right).
plt.subplot(2,2,1)
plt.imshow(gradient_numpy[0][batch['input'], :], aspect='auto', interpolation='nearest')
plt.colorbar()
plt.subplot(2,2,2)
plt.imshow(gradient[0].numpy()[batch['input'], :], aspect='auto', interpolation='nearest')
plt.colorbar()
# Gradient for the second parameter set -- presumably the recurrent layer
# weights (the original comment repeated "word embeddings"; confirm).
plt.subplot(2,2,3)
plt.imshow(gradient_numpy[1], aspect='auto', interpolation='nearest')
plt.colorbar()
plt.subplot(2,2,4)
plt.imshow(gradient[1].numpy(), aspect='auto', interpolation='nearest')
plt.colorbar()
plt.show()
# In[ ]:
# Alternative: native CuDNN implementation of RNNs (faster on GPU).
from lxmls.deep_learning.pytorch_models.rnn import FastPytorchRNN
fast_model = FastPytorchRNN(
    input_size=data.input_size,
    embedding_size=50,
    hidden_size=20,
    output_size=data.output_size,
    learning_rate=0.1
)
# ### Train model
# Once you are confident that your implementation is working correctly you can
# run it on the POS task using the Pytorch code from the Exercise 6.1.
# In[ ]:
num_epochs = 10
# In[ ]:
# No-op left over from the notebook (cell where one could swap in fast_model).
model = model
# In[ ]:
import numpy as np
import time
# Get batch iterators for train and test
train_batches = data.batches('train', batch_size=1)
dev_set = data.batches('dev', batch_size=1)
test_set = data.batches('test', batch_size=1)
# Epoch loop
start = time.time()
for epoch in range(num_epochs):
    # Batch loop: one SGD update per (single-sentence) batch.
    for batch in train_batches:
        model.update(input=batch['input'], output=batch['output'])
    # Evaluation on the dev set: per-token hit/miss accumulated over batches.
    is_hit = []
    for batch in dev_set:
        is_hit.extend(model.predict(input=batch['input']) == batch['output'])
    accuracy = 100*np.mean(is_hit)
    # Inform user
    print("Epoch %d: dev accuracy %2.2f %%" % (epoch+1, accuracy))
print("Training took %2.2f seconds per epoch" % ((time.time() - start)/num_epochs))
# Final evaluation on the held-out test set.
is_hit = []
for batch in test_set:
    is_hit.extend(model.predict(input=batch['input']) == batch['output'])
accuracy = 100*np.mean(is_hit)
# Inform user
print("Test accuracy %2.2f %%" % accuracy)
| mit |
Openergy/oplus | oplus/output_table.py | 1 | 4492 | import os
import pandas as pd
from oplus.configuration import CONF
def to_float_if_possible(s):
    """Coerce a CSV cell string to float when possible.

    Returns the float value if ``s`` parses as a number, ``None`` if it is
    blank (empty or whitespace only), and the original string otherwise.
    """
    try:
        return float(s)
    except ValueError:
        # Non-numeric cell: blank means "missing", anything else is text.
        return None if s.strip() == "" else s
class OutputTable:
    """Parser for a tabular output file (comma-separated report tables).

    The file is read once at construction time and parsed into a nested
    mapping of pandas DataFrames:
    ``{report_name: {table_name: DataFrame, ...}, ...}``.
    """

    def __init__(self, path):
        # Fail fast: the whole file is parsed eagerly below.
        if not os.path.isfile(path):
            raise FileNotFoundError("No file at given path: '%s'." % path)
        self._path = path
        self._reports_d = self._parse()  # {report_name: {table_name: df, ...}, ...}

    def _parse(self):
        """Parse the file into ``{report_name: {table_name: DataFrame}}``.

        Line-driven state machine: a "REPORT,<name>" line opens a report,
        a blank line closes the current table, the first non-blank line of
        a table is its name, the second its column headers, and subsequent
        lines are data rows. A line starting with "Note " ends parsing.
        """
        # constants (dict keys of the per-table accumulator)
        _name_ = "name"
        _columns_ = "columns"
        _values_ = "values"
        _index_ = "index"
        # variables
        raw_reports_d = {}  # {report_name: [tables_d, ...], ...}
        current_raw_tables_l = None
        current_raw_table_d = None  # {"name": "name", "columns": "columns", values: [[v1, ...], ...]}
        columns_nb = None
        # loop
        with open(self._path, "r", encoding=CONF.encoding) as f:
            while True:
                # next line
                try:
                    line_s = next(f).strip()
                except StopIteration:
                    break
                # use everything except table names and values
                if line_s[:6] == "REPORT":
                    # find report name
                    report_name = line_s.split(",")[1].strip()
                    # create new report
                    current_raw_tables_l = []
                    raw_reports_d[report_name] = current_raw_tables_l
                    # create empty current_raw_table_d to initialize
                    current_raw_table_d = {_index_: [], _values_: []}
                    # skip two next lines (report header boilerplate)
                    for i in range(2):
                        next(f)
                    continue
                elif current_raw_tables_l is None:
                    # first table not reached yet, nothing to do
                    continue
                elif line_s[:5] == "Note ":  # end notes terminate parsing
                    break
                # parse tables
                if line_s.strip() == "":
                    if _columns_ in current_raw_table_d:
                        # end of data, we create a new current_raw_table_d
                        current_raw_table_d = {_index_: [], _values_: []}
                elif _name_ not in current_raw_table_d:
                    # we know this table exists (we are not in end of file), so we name and append
                    current_raw_table_d[_name_] = line_s
                    current_raw_tables_l.append(current_raw_table_d)
                elif _columns_ not in current_raw_table_d:
                    # header row: first two cells are not column names
                    columns_l = line_s.split(",")[2:]
                    current_raw_table_d[_columns_] = columns_l
                    columns_nb = len(columns_l)
                else:
                    # data row: last columns_nb cells are values, the cells
                    # before them (minus the first) form the row index
                    line_l = line_s.split(",")
                    if len(line_l) <= 1:  # comments sometimes follow a table, without a whitespace
                        continue
                    current_raw_table_d[_index_].append(",".join(line_l[1:-columns_nb]))
                    current_raw_table_d[_values_].append([to_float_if_possible(s) for s in line_l[-columns_nb:]])
        # create dataframes from the accumulated raw tables
        reports_d = {}
        for report_name, raw_tables_l in raw_reports_d.items():
            tables_d = {}
            for raw_table_d in raw_tables_l:
                tables_d[raw_table_d[_name_]] = pd.DataFrame(data=raw_table_d[_values_], index=raw_table_d[_index_],
                                                             columns=raw_table_d[_columns_])
            reports_d[report_name] = tables_d
        return reports_d

    # ---------------------------------------- public api --------------------------------------------------------------
    def get_table(self, table_name, report_name=None):
        """Return the DataFrame for ``table_name``.

        When ``report_name`` is None, all reports are searched (first match
        wins). Raises KeyError if the table (or report) is not found.
        """
        if report_name is None:
            for rp_name, tables_d in self._reports_d.items():
                if table_name in tables_d:
                    return tables_d[table_name]
            raise KeyError("Table name '%s' not found." % table_name)
        if report_name not in self._reports_d:
            raise KeyError("Report name '%s' not found." % report_name)
        tables_d = self._reports_d[report_name]
        if table_name not in tables_d:
            raise KeyError("Table name '%s' not found in report '%s'." % (table_name, report_name))
        return tables_d[table_name]
| mpl-2.0 |
tdhopper/scikit-learn | sklearn/manifold/isomap.py | 229 | 7169 | """Isomap for manifold learning"""
# Author: Jake Vanderplas -- <vanderplas@astro.washington.edu>
# License: BSD 3 clause (C) 2011
import numpy as np
from ..base import BaseEstimator, TransformerMixin
from ..neighbors import NearestNeighbors, kneighbors_graph
from ..utils import check_array
from ..utils.graph import graph_shortest_path
from ..decomposition import KernelPCA
from ..preprocessing import KernelCenterer
class Isomap(BaseEstimator, TransformerMixin):
    """Isomap Embedding.

    Non-linear dimensionality reduction through Isometric Mapping: build a
    nearest-neighbor graph, compute geodesic (shortest-path) distances on
    it, and embed them with kernel PCA.

    Read more in the :ref:`User Guide <isomap>`.

    Parameters
    ----------
    n_neighbors : integer
        Number of neighbors to consider for each point.
    n_components : integer
        Number of coordinates for the manifold.
    eigen_solver : ['auto'|'arpack'|'dense']
        'auto' attempts to choose the most efficient solver for the given
        problem; 'arpack' uses an Arnoldi decomposition; 'dense' uses a
        direct (LAPACK) solver.
    tol : float
        Convergence tolerance passed to arpack or lobpcg
        (not used if ``eigen_solver == 'dense'``).
    max_iter : integer
        Maximum number of iterations for the arpack solver
        (not used if ``eigen_solver == 'dense'``).
    path_method : string ['auto'|'FW'|'D']
        Shortest-path algorithm: automatic choice, Floyd-Warshall, or
        Dijkstra.
    neighbors_algorithm : string ['auto'|'brute'|'kd_tree'|'ball_tree']
        Algorithm for the nearest-neighbors search, passed to the
        :class:`NearestNeighbors` instance.

    Attributes
    ----------
    embedding_ : array-like, shape (n_samples, n_components)
        Stores the embedding vectors.
    kernel_pca_ : object
        ``KernelPCA`` object used to implement the embedding.
    training_data_ : array-like, shape (n_samples, n_features)
        Stores the training data.
    nbrs_ : sklearn.neighbors.NearestNeighbors instance
        Fitted nearest-neighbors index, including BallTree or KDtree
        if applicable.
    dist_matrix_ : array-like, shape (n_samples, n_samples)
        Geodesic distance matrix of the training data.

    References
    ----------
    .. [1] Tenenbaum, J.B.; De Silva, V.; & Langford, J.C. A global geometric
           framework for nonlinear dimensionality reduction. Science 290 (5500)
    """

    def __init__(self, n_neighbors=5, n_components=2, eigen_solver='auto',
                 tol=0, max_iter=None, path_method='auto',
                 neighbors_algorithm='auto'):
        self.n_neighbors = n_neighbors
        self.n_components = n_components
        self.eigen_solver = eigen_solver
        self.tol = tol
        self.max_iter = max_iter
        self.path_method = path_method
        self.neighbors_algorithm = neighbors_algorithm
        self.nbrs_ = NearestNeighbors(n_neighbors=n_neighbors,
                                      algorithm=neighbors_algorithm)

    def _fit_transform(self, X):
        """Fit the neighbor graph on X and compute its embedding."""
        X = check_array(X)
        self.nbrs_.fit(X)
        self.training_data_ = self.nbrs_._fit_X
        self.kernel_pca_ = KernelPCA(n_components=self.n_components,
                                     kernel="precomputed",
                                     eigen_solver=self.eigen_solver,
                                     tol=self.tol, max_iter=self.max_iter)
        # Sparse graph holding the distance to each point's neighbors.
        neighbor_graph = kneighbors_graph(self.nbrs_, self.n_neighbors,
                                          mode='distance')
        # Geodesic distances = shortest paths through the neighbor graph.
        self.dist_matrix_ = graph_shortest_path(neighbor_graph,
                                                method=self.path_method,
                                                directed=False)
        # Isomap kernel: -0.5 * squared geodesic distances, embedded with
        # (internally centered) kernel PCA.
        kernel = -0.5 * self.dist_matrix_ ** 2
        self.embedding_ = self.kernel_pca_.fit_transform(kernel)

    def reconstruction_error(self):
        """Compute the reconstruction error for the embedding.

        Returns
        -------
        reconstruction_error : float

        Notes
        -----
        The cost function of an isomap embedding is

            ``E = frobenius_norm[K(D) - K(D_fit)] / n_samples``

        where D is the matrix of distances for the input data X, D_fit is
        the matrix of distances for the output embedding X_fit, and K is
        the isomap kernel:

            ``K(D) = -0.5 * (I - 1/n_samples) * D^2 * (I - 1/n_samples)``
        """
        kernel = -0.5 * self.dist_matrix_ ** 2
        centered = KernelCenterer().fit_transform(kernel)
        eigenvalues = self.kernel_pca_.lambdas_
        return np.sqrt(np.sum(centered ** 2) -
                       np.sum(eigenvalues ** 2)) / kernel.shape[0]

    def fit(self, X, y=None):
        """Compute the embedding vectors for data X.

        Parameters
        ----------
        X : {array-like, sparse matrix, BallTree, KDTree, NearestNeighbors}
            Sample data, shape = (n_samples, n_features), in the form of a
            numpy array, precomputed tree, or NearestNeighbors object.

        Returns
        -------
        self : returns an instance of self.
        """
        self._fit_transform(X)
        return self

    def fit_transform(self, X, y=None):
        """Fit the model from data in X and transform X.

        Parameters
        ----------
        X : {array-like, sparse matrix, BallTree, KDTree}
            Training vector, where n_samples is the number of samples
            and n_features is the number of features.

        Returns
        -------
        X_new : array-like, shape (n_samples, n_components)
        """
        self._fit_transform(X)
        return self.embedding_

    def transform(self, X):
        """Transform X into the embedding space.

        New points are linked into the geodesic-distance graph of the
        training data: the `n_neighbors` nearest training points of each
        query are found, shortest geodesic distances to every training
        point are derived from them, and the resulting kernel is projected
        onto the training embedding vectors.

        Parameters
        ----------
        X : array-like, shape (n_samples, n_features)

        Returns
        -------
        X_new : array-like, shape (n_samples, n_components)
        """
        X = check_array(X)
        distances, indices = self.nbrs_.kneighbors(X, return_distance=True)
        # Shortest geodesic from each query point to every training point,
        # routed through one of its nearest neighbors. Computing this as a
        # single array operation could take a lot of memory, so loop rows.
        n_queries = X.shape[0]
        n_train = self.training_data_.shape[0]
        geodesics = np.zeros((n_queries, n_train))
        for row in range(n_queries):
            geodesics[row] = np.min(self.dist_matrix_[indices[row]]
                                    + distances[row][:, None], 0)
        # Apply the isomap kernel and project on the training eigenvectors.
        return self.kernel_pca_.transform(-0.5 * geodesics ** 2)
| bsd-3-clause |
yanboliang/spark | python/pyspark/sql/session.py | 3 | 37286 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from __future__ import print_function
import sys
import warnings
from functools import reduce
from threading import RLock
if sys.version >= '3':
basestring = unicode = str
xrange = range
else:
from itertools import izip as zip, imap as map
from pyspark import since
from pyspark.rdd import RDD, ignore_unicode_prefix
from pyspark.sql.conf import RuntimeConfig
from pyspark.sql.dataframe import DataFrame
from pyspark.sql.readwriter import DataFrameReader
from pyspark.sql.streaming import DataStreamReader
from pyspark.sql.types import Row, DataType, StringType, StructType, TimestampType, \
_make_type_verifier, _infer_schema, _has_nulltype, _merge_type, _create_converter, \
_parse_datatype_string
from pyspark.sql.utils import install_exception_handler
__all__ = ["SparkSession"]
def _monkey_patch_RDD(sparkSession):
    # Attach a ``toDF`` convenience method to the RDD class, bound to the
    # given SparkSession (RDD itself is defined in pyspark core, hence the
    # monkey patching; called once per SparkSession from __init__).
    def toDF(self, schema=None, sampleRatio=None):
        """
        Converts current :class:`RDD` into a :class:`DataFrame`

        This is a shorthand for ``spark.createDataFrame(rdd, schema, sampleRatio)``

        :param schema: a :class:`pyspark.sql.types.StructType` or list of names of columns
        :param sampleRatio: the sample ratio of rows used for inferring
        :return: a DataFrame

        >>> rdd.toDF().collect()
        [Row(name=u'Alice', age=1)]
        """
        return sparkSession.createDataFrame(self, schema, sampleRatio)

    RDD.toDF = toDF
class SparkSession(object):
"""The entry point to programming Spark with the Dataset and DataFrame API.
A SparkSession can be used create :class:`DataFrame`, register :class:`DataFrame` as
tables, execute SQL over tables, cache tables, and read parquet files.
To create a SparkSession, use the following builder pattern:
>>> spark = SparkSession.builder \\
... .master("local") \\
... .appName("Word Count") \\
... .config("spark.some.config.option", "some-value") \\
... .getOrCreate()
.. autoattribute:: builder
:annotation:
"""
class Builder(object):
    """Builder for :class:`SparkSession`.
    """

    # NOTE(review): _lock and _options are class-level attributes, shared by
    # every Builder instance in the process -- confirm this sharing is
    # intended before relying on per-builder isolation of options.
    _lock = RLock()
    _options = {}
    _sc = None

    @since(2.0)
    def config(self, key=None, value=None, conf=None):
        """Sets a config option. Options set using this method are automatically propagated to
        both :class:`SparkConf` and :class:`SparkSession`'s own configuration.

        For an existing SparkConf, use `conf` parameter.

        >>> from pyspark.conf import SparkConf
        >>> SparkSession.builder.config(conf=SparkConf())
        <pyspark.sql.session...

        For a (key, value) pair, you can omit parameter names.

        >>> SparkSession.builder.config("spark.some.config.option", "some-value")
        <pyspark.sql.session...

        :param key: a key name string for configuration property
        :param value: a value for configuration property
        :param conf: an instance of :class:`SparkConf`
        """
        with self._lock:
            if conf is None:
                # Single (key, value) pair; values are stored as strings.
                self._options[key] = str(value)
            else:
                # Copy every entry of an existing SparkConf.
                for (k, v) in conf.getAll():
                    self._options[k] = v
            return self

    @since(2.0)
    def master(self, master):
        """Sets the Spark master URL to connect to, such as "local" to run locally, "local[4]"
        to run locally with 4 cores, or "spark://master:7077" to run on a Spark standalone
        cluster.

        :param master: a url for spark master
        """
        return self.config("spark.master", master)

    @since(2.0)
    def appName(self, name):
        """Sets a name for the application, which will be shown in the Spark web UI.

        If no application name is set, a randomly generated name will be used.

        :param name: an application name
        """
        return self.config("spark.app.name", name)

    @since(2.0)
    def enableHiveSupport(self):
        """Enables Hive support, including connectivity to a persistent Hive metastore, support
        for Hive serdes, and Hive user-defined functions.
        """
        return self.config("spark.sql.catalogImplementation", "hive")

    def _sparkContext(self, sc):
        # Internal hook: pin the builder to a pre-existing SparkContext.
        with self._lock:
            self._sc = sc
            return self

    @since(2.0)
    def getOrCreate(self):
        """Gets an existing :class:`SparkSession` or, if there is no existing one, creates a
        new one based on the options set in this builder.

        This method first checks whether there is a valid global default SparkSession, and if
        yes, return that one. If no valid global default SparkSession exists, the method
        creates a new SparkSession and assigns the newly created SparkSession as the global
        default.

        >>> s1 = SparkSession.builder.config("k1", "v1").getOrCreate()
        >>> s1.conf.get("k1") == "v1"
        True

        In case an existing SparkSession is returned, the config options specified
        in this builder will be applied to the existing SparkSession.

        >>> s2 = SparkSession.builder.config("k2", "v2").getOrCreate()
        >>> s1.conf.get("k1") == s2.conf.get("k1")
        True
        >>> s1.conf.get("k2") == s2.conf.get("k2")
        True
        """
        with self._lock:
            from pyspark.context import SparkContext
            from pyspark.conf import SparkConf
            session = SparkSession._instantiatedSession
            # Reuse the cached session only if its SparkContext is alive.
            if session is None or session._sc._jsc is None:
                if self._sc is not None:
                    sc = self._sc
                else:
                    sparkConf = SparkConf()
                    for key, value in self._options.items():
                        sparkConf.set(key, value)
                    # This SparkContext may be an existing one.
                    sc = SparkContext.getOrCreate(sparkConf)
                # Do not update `SparkConf` for existing `SparkContext`, as it's shared
                # by all sessions.
                session = SparkSession(sc)
            # Apply the builder's options to the (new or reused) session.
            for key, value in self._options.items():
                session._jsparkSession.sessionState().conf().setConfString(key, value)
            return session
builder = Builder()
"""A class attribute having a :class:`Builder` to construct :class:`SparkSession` instances"""

# Process-wide singletons tracked at class level: the first SparkSession
# ever created and the currently active one (both assigned in __init__).
_instantiatedSession = None
_activeSession = None
@ignore_unicode_prefix
def __init__(self, sparkContext, jsparkSession=None):
    """Creates a new SparkSession.

    >>> from datetime import datetime
    >>> spark = SparkSession(sc)
    >>> allTypes = sc.parallelize([Row(i=1, s="string", d=1.0, l=1,
    ...     b=True, list=[1, 2, 3], dict={"s": 0}, row=Row(a=1),
    ...     time=datetime(2014, 8, 1, 14, 1, 5))])
    >>> df = allTypes.toDF()
    >>> df.createOrReplaceTempView("allTypes")
    >>> spark.sql('select i+1, d+1, not b, list[1], dict["s"], time, row.a '
    ...     'from allTypes where b and i > 0').collect()
    [Row((i + CAST(1 AS BIGINT))=2, (d + CAST(1 AS DOUBLE))=2.0, (NOT b)=False, list[1]=2, \
        dict[s]=0, time=datetime.datetime(2014, 8, 1, 14, 1, 5), a=1)]
    >>> df.rdd.map(lambda x: (x.i, x.s, x.d, x.l, x.b, x.time, x.row.a, x.list)).collect()
    [(1, u'string', 1.0, 1, True, datetime.datetime(2014, 8, 1, 14, 1, 5), 1, [1, 2, 3])]
    """
    # Imported here (not at module level) to avoid a circular import.
    from pyspark.sql.context import SQLContext
    self._sc = sparkContext
    self._jsc = self._sc._jsc
    self._jvm = self._sc._jvm
    if jsparkSession is None:
        # Reuse the JVM-side default session if one exists and its
        # SparkContext has not been stopped; otherwise create a new one.
        if self._jvm.SparkSession.getDefaultSession().isDefined() \
                and not self._jvm.SparkSession.getDefaultSession().get() \
                    .sparkContext().isStopped():
            jsparkSession = self._jvm.SparkSession.getDefaultSession().get()
        else:
            jsparkSession = self._jvm.SparkSession(self._jsc.sc())
    self._jsparkSession = jsparkSession
    self._jwrapped = self._jsparkSession.sqlContext()
    self._wrapped = SQLContext(self._sc, self, self._jwrapped)
    # Give every RDD a session-bound toDF() method.
    _monkey_patch_RDD(self)
    install_exception_handler()
    # If we had an instantiated SparkSession attached with a SparkContext
    # which is stopped now, we need to renew the instantiated SparkSession.
    # Otherwise, we will use invalid SparkSession when we call Builder.getOrCreate.
    if SparkSession._instantiatedSession is None \
            or SparkSession._instantiatedSession._sc._jsc is None:
        SparkSession._instantiatedSession = self
        SparkSession._activeSession = self
        # Mirror the default/active session on the JVM side.
        self._jvm.SparkSession.setDefaultSession(self._jsparkSession)
        self._jvm.SparkSession.setActiveSession(self._jsparkSession)
    def _repr_html_(self):
        # Rich HTML rendering hook used by Jupyter/IPython notebooks: shows
        # the catalog implementation and delegates to the SparkContext's HTML.
        return """
            <div>
                <p><b>SparkSession - {catalogImplementation}</b></p>
                {sc_HTML}
            </div>
        """.format(
            catalogImplementation=self.conf.get("spark.sql.catalogImplementation"),
            sc_HTML=self.sparkContext._repr_html_()
        )
@since(2.0)
def newSession(self):
"""
Returns a new SparkSession as new session, that has separate SQLConf,
registered temporary views and UDFs, but shared SparkContext and
table cache.
"""
return self.__class__(self._sc, self._jsparkSession.newSession())
    @classmethod
    @since(3.0)
    def getActiveSession(cls):
        """
        Returns the active SparkSession for the current thread, returned by the builder.

        Returns ``None`` when there is no active :class:`SparkContext` or no
        JVM-side active session.

        >>> s = SparkSession.getActiveSession()
        >>> l = [('Alice', 1)]
        >>> rdd = s.sparkContext.parallelize(l)
        >>> df = s.createDataFrame(rdd, ['name', 'age'])
        >>> df.select("age").collect()
        [Row(age=1)]
        """
        from pyspark import SparkContext
        sc = SparkContext._active_spark_context
        if sc is None:
            return None
        else:
            if sc._jvm.SparkSession.getActiveSession().isDefined():
                # Wrap the JVM's active session; the constructor refreshes the
                # class-level bookkeeping that is returned on the next line.
                SparkSession(sc, sc._jvm.SparkSession.getActiveSession().get())
                return SparkSession._activeSession
            else:
                return None
    @property
    @since(2.0)
    def sparkContext(self):
        """Returns the underlying :class:`SparkContext`."""
        return self._sc

    @property
    @since(2.0)
    def version(self):
        """The version of Spark on which this application is running."""
        # Delegates to the JVM session so Python and JVM always agree.
        return self._jsparkSession.version()

    @property
    @since(2.0)
    def conf(self):
        """Runtime configuration interface for Spark.

        This is the interface through which the user can get and set all Spark and Hadoop
        configurations that are relevant to Spark SQL. When getting the value of a config,
        this defaults to the value set in the underlying :class:`SparkContext`, if any.
        """
        # Lazily create and cache the RuntimeConfig wrapper on first access.
        if not hasattr(self, "_conf"):
            self._conf = RuntimeConfig(self._jsparkSession.conf())
        return self._conf

    @property
    @since(2.0)
    def catalog(self):
        """Interface through which the user may create, drop, alter or query underlying
        databases, tables, functions etc.

        :return: :class:`Catalog`
        """
        from pyspark.sql.catalog import Catalog
        # Lazily create and cache the Catalog wrapper on first access.
        if not hasattr(self, "_catalog"):
            self._catalog = Catalog(self)
        return self._catalog

    @property
    @since(2.0)
    def udf(self):
        """Returns a :class:`UDFRegistration` for UDF registration.

        :return: :class:`UDFRegistration`
        """
        from pyspark.sql.udf import UDFRegistration
        return UDFRegistration(self)
@since(2.0)
def range(self, start, end=None, step=1, numPartitions=None):
"""
Create a :class:`DataFrame` with single :class:`pyspark.sql.types.LongType` column named
``id``, containing elements in a range from ``start`` to ``end`` (exclusive) with
step value ``step``.
:param start: the start value
:param end: the end value (exclusive)
:param step: the incremental step (default: 1)
:param numPartitions: the number of partitions of the DataFrame
:return: :class:`DataFrame`
>>> spark.range(1, 7, 2).collect()
[Row(id=1), Row(id=3), Row(id=5)]
If only one argument is specified, it will be used as the end value.
>>> spark.range(3).collect()
[Row(id=0), Row(id=1), Row(id=2)]
"""
if numPartitions is None:
numPartitions = self._sc.defaultParallelism
if end is None:
jdf = self._jsparkSession.range(0, int(start), int(step), int(numPartitions))
else:
jdf = self._jsparkSession.range(int(start), int(end), int(step), int(numPartitions))
return DataFrame(jdf, self._wrapped)
    def _inferSchemaFromList(self, data, names=None):
        """
        Infer schema from list of Row or tuple.

        :param data: list of Row or tuple
        :param names: list of column names
        :return: :class:`pyspark.sql.types.StructType`
        :raises ValueError: if ``data`` is empty or some field type remains
            undetermined (NullType) after merging all rows.
        """
        if not data:
            raise ValueError("can not infer schema from empty dataset")
        first = data[0]
        if type(first) is dict:
            warnings.warn("inferring schema from dict is deprecated,"
                          "please use pyspark.sql.Row instead")
        # Merge per-row inferred schemas so that None-valued fields widen to a
        # concrete type whenever any other row supplies a non-null value.
        schema = reduce(_merge_type, (_infer_schema(row, names) for row in data))
        if _has_nulltype(schema):
            raise ValueError("Some of types cannot be determined after inferring")
        return schema
    def _inferSchema(self, rdd, samplingRatio=None, names=None):
        """
        Infer schema from an RDD of Row or tuple.

        :param rdd: an RDD of Row or tuple
        :param samplingRatio: sampling ratio, or no sampling (default)
        :param names: optional list of column names passed through to
            ``_infer_schema``
        :return: :class:`pyspark.sql.types.StructType`
        :raises ValueError: if the first row is empty, or (without sampling)
            the first 100 rows leave some field type undetermined.
        """
        first = rdd.first()
        if not first:
            raise ValueError("The first row in RDD is empty, "
                             "can not infer schema")
        if type(first) is dict:
            warnings.warn("Using RDD of dict to inferSchema is deprecated. "
                          "Use pyspark.sql.Row instead")
        if samplingRatio is None:
            # No sampling: start from the first row, then merge in up to 99
            # more rows only while some field type is still unknown.  The
            # for/else raises when even 100 rows were not enough.
            schema = _infer_schema(first, names=names)
            if _has_nulltype(schema):
                for row in rdd.take(100)[1:]:
                    schema = _merge_type(schema, _infer_schema(row, names=names))
                    if not _has_nulltype(schema):
                        break
                else:
                    raise ValueError("Some of types cannot be determined by the "
                                     "first 100 rows, please try again with sampling")
        else:
            # Sampling requested: infer over a fraction of the RDD.  Ratios at
            # or above 0.99 scan the whole RDD instead of sampling.
            if samplingRatio < 0.99:
                rdd = rdd.sample(False, float(samplingRatio))
            schema = rdd.map(lambda row: _infer_schema(row, names)).reduce(_merge_type)
        return schema
    def _createFromRDD(self, rdd, schema, samplingRatio):
        """
        Create an RDD for DataFrame from an existing RDD, returns the RDD and schema.

        :param rdd: the source RDD of rows
        :param schema: ``None``, a list/tuple of column names, or a StructType
        :param samplingRatio: forwarded to :meth:`_inferSchema` when the schema
            must be inferred
        :return: tuple of (RDD in internal SQL representation, StructType)
        """
        if schema is None or isinstance(schema, (list, tuple)):
            struct = self._inferSchema(rdd, samplingRatio, names=schema)
            converter = _create_converter(struct)
            rdd = rdd.map(converter)
            if isinstance(schema, (list, tuple)):
                # Override the inferred field names with the caller's names.
                for i, name in enumerate(schema):
                    struct.fields[i].name = name
                    struct.names[i] = name
            schema = struct
        elif not isinstance(schema, StructType):
            raise TypeError("schema should be StructType or list or None, but got: %s" % schema)
        # convert python objects to sql data
        rdd = rdd.map(schema.toInternal)
        return rdd, schema
    def _createFromLocal(self, data, schema):
        """
        Create an RDD for DataFrame from a list or pandas.DataFrame, returns
        the RDD and schema.

        :param data: a list (or any iterable, which is materialized) of records
        :param schema: ``None``, a list/tuple of column names, or a StructType
        :return: tuple of (parallelized RDD in internal SQL form, StructType)
        """
        # make sure data could consumed multiple times
        if not isinstance(data, list):
            data = list(data)
        if schema is None or isinstance(schema, (list, tuple)):
            struct = self._inferSchemaFromList(data, names=schema)
            converter = _create_converter(struct)
            data = map(converter, data)
            if isinstance(schema, (list, tuple)):
                # Override the inferred field names with the caller's names.
                for i, name in enumerate(schema):
                    struct.fields[i].name = name
                    struct.names[i] = name
            schema = struct
        elif not isinstance(schema, StructType):
            raise TypeError("schema should be StructType or list or None, but got: %s" % schema)
        # convert python objects to sql data
        data = [schema.toInternal(row) for row in data]
        return self._sc.parallelize(data), schema
def _get_numpy_record_dtype(self, rec):
"""
Used when converting a pandas.DataFrame to Spark using to_records(), this will correct
the dtypes of fields in a record so they can be properly loaded into Spark.
:param rec: a numpy record to check field dtypes
:return corrected dtype for a numpy.record or None if no correction needed
"""
import numpy as np
cur_dtypes = rec.dtype
col_names = cur_dtypes.names
record_type_list = []
has_rec_fix = False
for i in xrange(len(cur_dtypes)):
curr_type = cur_dtypes[i]
# If type is a datetime64 timestamp, convert to microseconds
# NOTE: if dtype is datetime[ns] then np.record.tolist() will output values as longs,
# conversion from [us] or lower will lead to py datetime objects, see SPARK-22417
if curr_type == np.dtype('datetime64[ns]'):
curr_type = 'datetime64[us]'
has_rec_fix = True
record_type_list.append((str(col_names[i]), curr_type))
return np.dtype(record_type_list) if has_rec_fix else None
    def _convert_from_pandas(self, pdf, schema, timezone):
        """
        Convert a pandas.DataFrame to list of records that can be used to make a DataFrame

        :param pdf: the source pandas.DataFrame
        :param schema: a StructType (only its TimestampType fields are localized)
            or any other value (then every timestamp column is localized)
        :param timezone: session-local timezone name, or None to skip localization
        :return list of records
        """
        if timezone is not None:
            from pyspark.sql.types import _check_series_convert_timestamps_tz_local
            copied = False
            if isinstance(schema, StructType):
                for field in schema:
                    # TODO: handle nested timestamps, such as ArrayType(TimestampType())?
                    if isinstance(field.dataType, TimestampType):
                        s = _check_series_convert_timestamps_tz_local(pdf[field.name], timezone)
                        if s is not pdf[field.name]:
                            if not copied:
                                # Copy once if the series is modified to prevent the original
                                # Pandas DataFrame from being updated
                                pdf = pdf.copy()
                                copied = True
                            pdf[field.name] = s
            else:
                # No explicit schema: try localizing every column.
                # NOTE(review): `iteritems` is the pre-1.0 pandas spelling of
                # `items` -- this module targets that older pandas API.
                for column, series in pdf.iteritems():
                    s = _check_series_convert_timestamps_tz_local(series, timezone)
                    if s is not series:
                        if not copied:
                            # Copy once if the series is modified to prevent the original
                            # Pandas DataFrame from being updated
                            pdf = pdf.copy()
                            copied = True
                        pdf[column] = s
        # Convert pandas.DataFrame to list of numpy records
        np_records = pdf.to_records(index=False)
        # Check if any columns need to be fixed for Spark to infer properly
        if len(np_records) > 0:
            record_dtype = self._get_numpy_record_dtype(np_records[0])
            if record_dtype is not None:
                return [r.astype(record_dtype).tolist() for r in np_records]
        # Convert list of numpy records to python lists
        return [r.tolist() for r in np_records]
    def _create_from_pandas_with_arrow(self, pdf, schema, timezone):
        """
        Create a DataFrame from a given pandas.DataFrame by slicing it into partitions, converting
        to Arrow data, then sending to the JVM to parallelize. If a schema is passed in, the
        data types will be used to coerce the data in Pandas to Arrow conversion.

        :param pdf: the source pandas.DataFrame (assumed non-empty by the caller)
        :param schema: a StructType, a list/tuple of column names, or anything
            else (then types are taken from the pandas dtypes); a bare DataType
            is rejected.
        :param timezone: session-local timezone forwarded to batch creation
        :return: :class:`DataFrame`
        """
        from pyspark.serializers import ArrowStreamSerializer, _create_batch
        from pyspark.sql.types import from_arrow_schema, to_arrow_type, TimestampType
        from pyspark.sql.utils import require_minimum_pandas_version, \
            require_minimum_pyarrow_version
        require_minimum_pandas_version()
        require_minimum_pyarrow_version()
        from pandas.api.types import is_datetime64_dtype, is_datetime64tz_dtype
        # Determine arrow types to coerce data when creating batches
        if isinstance(schema, StructType):
            arrow_types = [to_arrow_type(f.dataType) for f in schema.fields]
        elif isinstance(schema, DataType):
            raise ValueError("Single data type %s is not supported with Arrow" % str(schema))
        else:
            # Any timestamps must be coerced to be compatible with Spark
            arrow_types = [to_arrow_type(TimestampType())
                           if is_datetime64_dtype(t) or is_datetime64tz_dtype(t) else None
                           for t in pdf.dtypes]
        # Slice the DataFrame to be batched
        step = -(-len(pdf) // self.sparkContext.defaultParallelism)  # round int up
        pdf_slices = (pdf[start:start + step] for start in xrange(0, len(pdf), step))
        # Create Arrow record batches
        safecheck = self._wrapped._conf.arrowSafeTypeConversion()
        batches = [_create_batch([(c, t) for (_, c), t in zip(pdf_slice.iteritems(), arrow_types)],
                                 timezone, safecheck)
                   for pdf_slice in pdf_slices]
        # Create the Spark schema from the first Arrow batch (always at least 1 batch after slicing)
        if isinstance(schema, (list, tuple)):
            struct = from_arrow_schema(batches[0].schema)
            for i, name in enumerate(schema):
                struct.fields[i].name = name
                struct.names[i] = name
            schema = struct
        jsqlContext = self._wrapped._jsqlContext
        # Callbacks handed to _serialize_to_jvm: one reads a temp Arrow file
        # back on the JVM side, the other creates an ArrowRDDServer.
        def reader_func(temp_filename):
            return self._jvm.PythonSQLUtils.readArrowStreamFromFile(jsqlContext, temp_filename)
        def create_RDD_server():
            return self._jvm.ArrowRDDServer(jsqlContext)
        # Create Spark DataFrame from Arrow stream file, using one batch per partition
        jrdd = self._sc._serialize_to_jvm(batches, ArrowStreamSerializer(), reader_func,
                                          create_RDD_server)
        jdf = self._jvm.PythonSQLUtils.toDataFrame(jrdd, schema.json(), jsqlContext)
        df = DataFrame(jdf, self._wrapped)
        df._schema = schema
        return df
    @staticmethod
    def _create_shell_session():
        """
        Initialize a SparkSession for a pyspark shell session. This is called from shell.py
        to make error handling simpler without needing to declare local variables in that
        script, which would expose those to users.
        """
        import py4j
        from pyspark.conf import SparkConf
        from pyspark.context import SparkContext
        try:
            # Try to access HiveConf, it will raise exception if Hive is not added
            conf = SparkConf()
            if conf.get('spark.sql.catalogImplementation', 'hive').lower() == 'hive':
                SparkContext._jvm.org.apache.hadoop.hive.conf.HiveConf()
                return SparkSession.builder\
                    .enableHiveSupport()\
                    .getOrCreate()
            else:
                return SparkSession.builder.getOrCreate()
        except (py4j.protocol.Py4JError, TypeError):
            # Hive classes are unavailable: warn only when the user explicitly
            # asked for the Hive catalog, then fall back below.
            if conf.get('spark.sql.catalogImplementation', '').lower() == 'hive':
                warnings.warn("Fall back to non-hive support because failing to access HiveConf, "
                              "please make sure you build spark with hive")
        # Fallback path: plain (non-Hive) session.
        return SparkSession.builder.getOrCreate()
    @since(2.0)
    @ignore_unicode_prefix
    def createDataFrame(self, data, schema=None, samplingRatio=None, verifySchema=True):
        """
        Creates a :class:`DataFrame` from an :class:`RDD`, a list or a :class:`pandas.DataFrame`.

        When ``schema`` is a list of column names, the type of each column
        will be inferred from ``data``.

        When ``schema`` is ``None``, it will try to infer the schema (column names and types)
        from ``data``, which should be an RDD of :class:`Row`,
        or :class:`namedtuple`, or :class:`dict`.

        When ``schema`` is :class:`pyspark.sql.types.DataType` or a datatype string, it must match
        the real data, or an exception will be thrown at runtime. If the given schema is not
        :class:`pyspark.sql.types.StructType`, it will be wrapped into a
        :class:`pyspark.sql.types.StructType` as its only field, and the field name will be "value",
        each record will also be wrapped into a tuple, which can be converted to row later.

        If schema inference is needed, ``samplingRatio`` is used to determined the ratio of
        rows used for schema inference. The first row will be used if ``samplingRatio`` is ``None``.

        :param data: an RDD of any kind of SQL data representation(e.g. row, tuple, int, boolean,
            etc.), or :class:`list`, or :class:`pandas.DataFrame`.
        :param schema: a :class:`pyspark.sql.types.DataType` or a datatype string or a list of
            column names, default is ``None``. The data type string format equals to
            :class:`pyspark.sql.types.DataType.simpleString`, except that top level struct type can
            omit the ``struct<>`` and atomic types use ``typeName()`` as their format, e.g. use
            ``byte`` instead of ``tinyint`` for :class:`pyspark.sql.types.ByteType`. We can also use
            ``int`` as a short name for ``IntegerType``.
        :param samplingRatio: the sample ratio of rows used for inferring
        :param verifySchema: verify data types of every row against schema.
        :return: :class:`DataFrame`

        .. versionchanged:: 2.1
           Added verifySchema.

        .. note:: Usage with spark.sql.execution.arrow.enabled=True is experimental.

        >>> l = [('Alice', 1)]
        >>> spark.createDataFrame(l).collect()
        [Row(_1=u'Alice', _2=1)]
        >>> spark.createDataFrame(l, ['name', 'age']).collect()
        [Row(name=u'Alice', age=1)]
        >>> d = [{'name': 'Alice', 'age': 1}]
        >>> spark.createDataFrame(d).collect()
        [Row(age=1, name=u'Alice')]
        >>> rdd = sc.parallelize(l)
        >>> spark.createDataFrame(rdd).collect()
        [Row(_1=u'Alice', _2=1)]
        >>> df = spark.createDataFrame(rdd, ['name', 'age'])
        >>> df.collect()
        [Row(name=u'Alice', age=1)]
        >>> from pyspark.sql import Row
        >>> Person = Row('name', 'age')
        >>> person = rdd.map(lambda r: Person(*r))
        >>> df2 = spark.createDataFrame(person)
        >>> df2.collect()
        [Row(name=u'Alice', age=1)]
        >>> from pyspark.sql.types import *
        >>> schema = StructType([
        ...    StructField("name", StringType(), True),
        ...    StructField("age", IntegerType(), True)])
        >>> df3 = spark.createDataFrame(rdd, schema)
        >>> df3.collect()
        [Row(name=u'Alice', age=1)]
        >>> spark.createDataFrame(df.toPandas()).collect()  # doctest: +SKIP
        [Row(name=u'Alice', age=1)]
        >>> spark.createDataFrame(pandas.DataFrame([[1, 2]])).collect()  # doctest: +SKIP
        [Row(0=1, 1=2)]
        >>> spark.createDataFrame(rdd, "a: string, b: int").collect()
        [Row(a=u'Alice', b=1)]
        >>> rdd = rdd.map(lambda row: row[1])
        >>> spark.createDataFrame(rdd, "int").collect()
        [Row(value=1)]
        >>> spark.createDataFrame(rdd, "boolean").collect() # doctest: +IGNORE_EXCEPTION_DETAIL
        Traceback (most recent call last):
            ...
        Py4JJavaError: ...
        """
        SparkSession._activeSession = self
        self._jvm.SparkSession.setActiveSession(self._jsparkSession)
        if isinstance(data, DataFrame):
            raise TypeError("data is already a DataFrame")
        if isinstance(schema, basestring):
            schema = _parse_datatype_string(schema)
        elif isinstance(schema, (list, tuple)):
            # Must re-encode any unicode strings to be consistent with StructField names
            schema = [x.encode('utf-8') if not isinstance(x, str) else x for x in schema]
        try:
            import pandas
            has_pandas = True
        except Exception:
            has_pandas = False
        if has_pandas and isinstance(data, pandas.DataFrame):
            from pyspark.sql.utils import require_minimum_pandas_version
            require_minimum_pandas_version()
            if self._wrapped._conf.pandasRespectSessionTimeZone():
                timezone = self._wrapped._conf.sessionLocalTimeZone()
            else:
                timezone = None
            # If no schema supplied by user then get the names of columns only
            if schema is None:
                schema = [str(x) if not isinstance(x, basestring) else
                          (x.encode('utf-8') if not isinstance(x, str) else x)
                          for x in data.columns]
            if self._wrapped._conf.arrowEnabled() and len(data) > 0:
                try:
                    return self._create_from_pandas_with_arrow(data, schema, timezone)
                except Exception as e:
                    from pyspark.util import _exception_message
                    if self._wrapped._conf.arrowFallbackEnabled():
                        msg = (
                            "createDataFrame attempted Arrow optimization because "
                            "'spark.sql.execution.arrow.enabled' is set to true; however, "
                            "failed by the reason below:\n %s\n"
                            "Attempting non-optimization as "
                            "'spark.sql.execution.arrow.fallback.enabled' is set to "
                            "true." % _exception_message(e))
                        warnings.warn(msg)
                    else:
                        msg = (
                            "createDataFrame attempted Arrow optimization because "
                            "'spark.sql.execution.arrow.enabled' is set to true, but has reached "
                            "the error below and will not continue because automatic fallback "
                            "with 'spark.sql.execution.arrow.fallback.enabled' has been set to "
                            "false.\n %s" % _exception_message(e))
                        warnings.warn(msg)
                        raise
            # Arrow disabled, empty frame, or fallback: use the plain
            # record-by-record conversion path.
            data = self._convert_from_pandas(data, schema, timezone)
        if isinstance(schema, StructType):
            verify_func = _make_type_verifier(schema) if verifySchema else lambda _: True

            def prepare(obj):
                verify_func(obj)
                return obj
        elif isinstance(schema, DataType):
            # Wrap a bare DataType into a single-field struct named "value";
            # each record is correspondingly wrapped in a 1-tuple.
            dataType = schema
            schema = StructType().add("value", schema)

            verify_func = _make_type_verifier(
                dataType, name="field value") if verifySchema else lambda _: True

            def prepare(obj):
                verify_func(obj)
                return obj,
        else:
            prepare = lambda obj: obj
        if isinstance(data, RDD):
            rdd, schema = self._createFromRDD(data.map(prepare), schema, samplingRatio)
        else:
            rdd, schema = self._createFromLocal(map(prepare, data), schema)
        jrdd = self._jvm.SerDeUtil.toJavaArray(rdd._to_java_object_rdd())
        jdf = self._jsparkSession.applySchemaToPythonRDD(jrdd.rdd(), schema.json())
        df = DataFrame(jdf, self._wrapped)
        df._schema = schema
        return df
    @ignore_unicode_prefix
    @since(2.0)
    def sql(self, sqlQuery):
        """Returns a :class:`DataFrame` representing the result of the given query.

        :return: :class:`DataFrame`

        >>> df.createOrReplaceTempView("table1")
        >>> df2 = spark.sql("SELECT field1 AS f1, field2 as f2 from table1")
        >>> df2.collect()
        [Row(f1=1, f2=u'row1'), Row(f1=2, f2=u'row2'), Row(f1=3, f2=u'row3')]
        """
        # Parsing/analysis and execution happen on the JVM side.
        return DataFrame(self._jsparkSession.sql(sqlQuery), self._wrapped)

    @since(2.0)
    def table(self, tableName):
        """Returns the specified table as a :class:`DataFrame`.

        :return: :class:`DataFrame`

        >>> df.createOrReplaceTempView("table1")
        >>> df2 = spark.table("table1")
        >>> sorted(df.collect()) == sorted(df2.collect())
        True
        """
        return DataFrame(self._jsparkSession.table(tableName), self._wrapped)

    @property
    @since(2.0)
    def read(self):
        """
        Returns a :class:`DataFrameReader` that can be used to read data
        in as a :class:`DataFrame`.

        :return: :class:`DataFrameReader`
        """
        return DataFrameReader(self._wrapped)

    @property
    @since(2.0)
    def readStream(self):
        """
        Returns a :class:`DataStreamReader` that can be used to read data streams
        as a streaming :class:`DataFrame`.

        .. note:: Evolving.

        :return: :class:`DataStreamReader`
        """
        return DataStreamReader(self._wrapped)

    @property
    @since(2.0)
    def streams(self):
        """Returns a :class:`StreamingQueryManager` that allows managing all the
        :class:`StreamingQuery` StreamingQueries active on `this` context.

        .. note:: Evolving.

        :return: :class:`StreamingQueryManager`
        """
        from pyspark.sql.streaming import StreamingQueryManager
        return StreamingQueryManager(self._jsparkSession.streams())

    @since(2.0)
    def stop(self):
        """Stop the underlying :class:`SparkContext`.
        """
        self._sc.stop()
        # We should clean the default session up. See SPARK-23228.
        self._jvm.SparkSession.clearDefaultSession()
        self._jvm.SparkSession.clearActiveSession()
        # Reset the class-level singletons so Builder.getOrCreate() will build
        # a fresh session instead of returning this stopped one.
        SparkSession._instantiatedSession = None
        SparkSession._activeSession = None
    @since(2.0)
    def __enter__(self):
        """
        Enable 'with SparkSession.builder.(...).getOrCreate() as session: app' syntax.
        """
        return self

    @since(2.0)
    def __exit__(self, exc_type, exc_val, exc_tb):
        """
        Enable 'with SparkSession.builder.(...).getOrCreate() as session: app' syntax.
        Specifically stop the SparkSession on exit of the with block.
        """
        # Returning None (implicitly) means exceptions are not suppressed.
        self.stop()
def _test():
    """Run this module's doctests against a local 4-core SparkContext."""
    import os
    import doctest
    from pyspark.context import SparkContext
    from pyspark.sql import Row
    import pyspark.sql.session
    os.chdir(os.environ["SPARK_HOME"])
    # Seed the doctest globals with the module namespace plus the sc/spark/
    # rdd/df fixtures that the docstring examples reference.
    globs = pyspark.sql.session.__dict__.copy()
    sc = SparkContext('local[4]', 'PythonTest')
    globs['sc'] = sc
    globs['spark'] = SparkSession(sc)
    globs['rdd'] = rdd = sc.parallelize(
        [Row(field1=1, field2="row1"),
         Row(field1=2, field2="row2"),
         Row(field1=3, field2="row3")])
    globs['df'] = rdd.toDF()
    (failure_count, test_count) = doctest.testmod(
        pyspark.sql.session, globs=globs,
        optionflags=doctest.ELLIPSIS | doctest.NORMALIZE_WHITESPACE)
    globs['sc'].stop()
    if failure_count:
        sys.exit(-1)


if __name__ == "__main__":
    # Run the doctest suite when executed directly.
    _test()
| apache-2.0 |
mholtrop/Phys605 | Python/Getting_Started/CSV_Multi_Plot.py | 1 | 3996 | #!/usr/bin/env python
#
# This example expands on the CSV_Plot.py. It will open all the
# csv files that are given on the command line, and plot the data found
# in the files in a single plot.
#
import sys
import argparse
#
import os.path as path
import csv
import numpy as np # This gives numpy the shorthand np
import matplotlib.pyplot as plt
#
#
def main(argv=None):
    ''' This is the main program that runs, but it is also callable from the python
    command line if you import this file into python.

    NOTE: this script uses Python 2 syntax (print statements, iterator .next()).
    '''
    # Parse the command line arguments.
    if argv is None:
        argv = sys.argv[1:]  # First item in sys.argv is progname
    parser = argparse.ArgumentParser(description='A program to plot CSV files from the Analog Discovery.')
    parser.add_argument('-b','--bode',action='store_true',help='Make bode plots, log x')
    parser.add_argument('-s','--signal',action='store_true',help='Make signal plots, lin x')
    parser.add_argument('files',type=str, nargs='+', help='input files')
    args = parser.parse_args(argv)
    if args.bode == False and args.signal == False:
        print "Please supply either --bode or --signal option"
        return
    # Collect (basename, data-array) pairs for every valid CSV input file.
    p_data = []
    for f in args.files:
        dirname,filename = path.split(f)
        basename, ext = path.splitext(filename)
        if ext.lower() != '.csv':
            print "File {} does not appear to be a CSV file. Skipped.".format(filename)
            continue
        infile = open(f)
        #
        # Pass the file to the csv parser
        #
        data = csv.reader(infile)
        line = data.next()
        # NOTE(review): a non-empty row whose first cell is the empty string
        # would raise IndexError on line[0][0] here.
        while len(line) == 0 or line[0][0]=='#':  # Skip comments and empty lines.
            line = data.next()
        # The row after the first non-comment row is read as the header row
        # (and then unused) -- presumably matching the Analog Discovery
        # export layout; verify against a sample file.
        headers = data.next()
        dat = np.array([ [float(z) for z in x] for x in data ])  # put the data in dat as floats.
        p_data.append( (basename,dat))
    #
    # Now plot the data. plt.plot returns a tuple (plot, )
    #
    plt.figure(figsize=(10,7))
    plots = []
    for name,dat in p_data:
        x_ar = [float(x[0]) for x in dat ]  # Selects 1st data column
        # NOTE(review): assumes the file name has at least three '_'-separated
        # parts; the third becomes the legend label.
        lab = name.split('_')[2]
        if args.bode:
            y_ar = [float(x[2]) for x in dat ]  # Select 3rd data column
            ph_ar = [float(x[3]) for x in dat ]
            for i in range(len(ph_ar)):
                if ph_ar[i]>0:
                    ph_ar[i]=ph_ar[i]-360.  # Fix the phase so the crossing plots better.
            plt.subplot(2,1,1)
            (p1,) = plt.plot(x_ar,y_ar,label=lab)
            plt.title('Bode Plot of Transistor Amplifier')
            plt.xlabel('F[Hz]',position=(0.9,1))
            plt.ylabel('Magnitude [dB]')
            plt.xscale('log')
            plt.grid(True)
            plt.subplot(2,1,2)
            (p2,) = plt.plot(x_ar,ph_ar,label=lab)
            plt.title('Phase Plot of Transistor Amplifier')
            plt.xlabel('F[Hz]',position=(0.9,1))
            plt.ylabel('Phase [degrees]')
            plt.xscale('log')
            plt.grid(True)
            plots.append(p1)
        if args.signal:
            y_ar = [float(x[1]) for x in dat ]
            (p1,) = plt.plot(x_ar,y_ar,label=lab)
            plt.title('Output Signal of Transistor Amplifier')
            plt.xlabel('Time[S]',position=(0.9,1))
            plt.ylabel('Signal [V]')
            plt.grid(True)
            ax = plt.gca()
            ax.set_xlim(-1.1e-4,1.1e-4)
            plots.append(p1)
    if args.bode:
        # Bode mode: a legend on each of the two stacked subplots.
        plt.subplot(2,1,1)
        plt.legend(handles=plots)  # make sure the legend is drawn
        plt.subplot(2,1,2)
        plt.legend(handles=plots)  # make sure the legend is drawn
    else:
        plt.legend(handles=plots)
    plt.savefig("csv_plot.pdf",orientation='landscape')
    plt.show()  # show the plot.


if __name__ == "__main__":  # This makes sure that main() is called when you
                            # run the script from the command line.
    sys.exit(main())
| gpl-3.0 |
kazemakase/scikit-learn | sklearn/datasets/tests/test_lfw.py | 230 | 7880 | """This test for the LFW require medium-size data dowloading and processing
If the data has not been already downloaded by running the examples,
the tests won't run (skipped).
If the test are run, the first execution will be long (typically a bit
more than a couple of minutes) but as the dataset loader is leveraging
joblib, successive runs will be fast (less than 200ms).
"""
import random
import os
import shutil
import tempfile
import numpy as np
from sklearn.externals import six
try:
try:
from scipy.misc import imsave
except ImportError:
from scipy.misc.pilutil import imsave
except ImportError:
imsave = None
from sklearn.datasets import load_lfw_pairs
from sklearn.datasets import load_lfw_people
from sklearn.datasets import fetch_lfw_pairs
from sklearn.datasets import fetch_lfw_people
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_warns_message
from sklearn.utils.testing import SkipTest
from sklearn.utils.testing import raises
SCIKIT_LEARN_DATA = tempfile.mkdtemp(prefix="scikit_learn_lfw_test_")
SCIKIT_LEARN_EMPTY_DATA = tempfile.mkdtemp(prefix="scikit_learn_empty_test_")
LFW_HOME = os.path.join(SCIKIT_LEARN_DATA, 'lfw_home')
FAKE_NAMES = [
'Abdelatif_Smith',
'Abhati_Kepler',
'Camara_Alvaro',
'Chen_Dupont',
'John_Lee',
'Lin_Bauman',
'Onur_Lopez',
]
def setup_module():
    """Test fixture run once and common to all tests of this module.

    Builds a fake, deterministic (seeded) LFW dataset layout under LFW_HOME:
    random JPEG "faces" for each fake person plus the pairing metadata files.
    """
    if imsave is None:
        raise SkipTest("PIL not installed.")

    if not os.path.exists(LFW_HOME):
        os.makedirs(LFW_HOME)

    random_state = random.Random(42)
    np_rng = np.random.RandomState(42)

    # generate some random jpeg files for each person
    counts = {}
    for name in FAKE_NAMES:
        folder_name = os.path.join(LFW_HOME, 'lfw_funneled', name)
        if not os.path.exists(folder_name):
            os.makedirs(folder_name)

        # Each fake person gets between 1 and 4 face images.
        n_faces = np_rng.randint(1, 5)
        counts[name] = n_faces
        for i in range(n_faces):
            file_path = os.path.join(folder_name, name + '_%04d.jpg' % i)
            uniface = np_rng.randint(0, 255, size=(250, 250, 3))
            try:
                imsave(file_path, uniface)
            except ImportError:
                raise SkipTest("PIL not installed")

    # add some random file pollution to test robustness
    with open(os.path.join(LFW_HOME, 'lfw_funneled', '.test.swp'), 'wb') as f:
        f.write(six.b('Text file to be ignored by the dataset loader.'))

    # generate some pairing metadata files using the same format as LFW
    with open(os.path.join(LFW_HOME, 'pairsDevTrain.txt'), 'wb') as f:
        f.write(six.b("10\n"))
        # Five "same person" pairs (needs people with at least two images)...
        more_than_two = [name for name, count in six.iteritems(counts)
                         if count >= 2]
        for i in range(5):
            name = random_state.choice(more_than_two)
            first, second = random_state.sample(range(counts[name]), 2)
            f.write(six.b('%s\t%d\t%d\n' % (name, first, second)))
        # ...followed by five "different persons" pairs.
        for i in range(5):
            first_name, second_name = random_state.sample(FAKE_NAMES, 2)
            first_index = random_state.choice(np.arange(counts[first_name]))
            second_index = random_state.choice(np.arange(counts[second_name]))
            f.write(six.b('%s\t%d\t%s\t%d\n' % (first_name, first_index,
                                                second_name, second_index)))

    with open(os.path.join(LFW_HOME, 'pairsDevTest.txt'), 'wb') as f:
        f.write(six.b("Fake place holder that won't be tested"))

    with open(os.path.join(LFW_HOME, 'pairs.txt'), 'wb') as f:
        f.write(six.b("Fake place holder that won't be tested"))
def teardown_module():
    """Test fixture (clean up) run once after all tests of this module."""
    # Remove both temporary dataset directories, tolerating ones that were
    # never created (e.g. when the module-level setup was skipped).
    for tmp_dir in (SCIKIT_LEARN_DATA, SCIKIT_LEARN_EMPTY_DATA):
        if os.path.isdir(tmp_dir):
            shutil.rmtree(tmp_dir)
@raises(IOError)
def test_load_empty_lfw_people():
    # With no data present and downloading disabled, fetching must raise
    # rather than silently return an empty dataset.
    fetch_lfw_people(data_home=SCIKIT_LEARN_EMPTY_DATA, download_if_missing=False)


def test_load_lfw_people_deprecation():
    # NOTE(review): the expected text joins "0.19." and "Use ..." without a
    # space -- presumably mirroring how the implementation builds the message;
    # keep the two strings in sync.
    msg = ("Function 'load_lfw_people' has been deprecated in 0.17 and will be "
           "removed in 0.19."
           "Use fetch_lfw_people(download_if_missing=False) instead.")
    assert_warns_message(DeprecationWarning, msg, load_lfw_people,
                         data_home=SCIKIT_LEARN_DATA)
def test_load_fake_lfw_people():
    lfw_people = fetch_lfw_people(data_home=SCIKIT_LEARN_DATA,
                                  min_faces_per_person=3, download_if_missing=False)

    # The data is cropped around the center as a rectangular bounding box
    # around the face. Colors are converted to gray levels:
    assert_equal(lfw_people.images.shape, (10, 62, 47))
    assert_equal(lfw_people.data.shape, (10, 2914))

    # the target is array of person integer ids
    assert_array_equal(lfw_people.target, [2, 0, 1, 0, 2, 0, 2, 1, 1, 2])

    # names of the persons can be found using the target_names array
    expected_classes = ['Abdelatif Smith', 'Abhati Kepler', 'Onur Lopez']
    assert_array_equal(lfw_people.target_names, expected_classes)

    # It is possible to ask for the original data without any cropping or color
    # conversion and not limit on the number of picture per person
    lfw_people = fetch_lfw_people(data_home=SCIKIT_LEARN_DATA,
                                  resize=None, slice_=None, color=True, download_if_missing=False)
    assert_equal(lfw_people.images.shape, (17, 250, 250, 3))

    # the ids and class names are the same as previously
    assert_array_equal(lfw_people.target,
                       [0, 0, 1, 6, 5, 6, 3, 6, 0, 3, 6, 1, 2, 4, 5, 1, 2])
    assert_array_equal(lfw_people.target_names,
                       ['Abdelatif Smith', 'Abhati Kepler', 'Camara Alvaro',
                        'Chen Dupont', 'John Lee', 'Lin Bauman', 'Onur Lopez'])
@raises(ValueError)
def test_load_fake_lfw_people_too_restrictive():
    # No fake person has 100 faces, so the filter must reject everything.
    fetch_lfw_people(data_home=SCIKIT_LEARN_DATA, min_faces_per_person=100, download_if_missing=False)


@raises(IOError)
def test_load_empty_lfw_pairs():
    # With no data present and downloading disabled, fetching must raise.
    fetch_lfw_pairs(data_home=SCIKIT_LEARN_EMPTY_DATA, download_if_missing=False)


def test_load_lfw_pairs_deprecation():
    # NOTE(review): no space between "0.19." and "Use ..." -- presumably
    # mirrors the implementation's message; keep the strings in sync.
    msg = ("Function 'load_lfw_pairs' has been deprecated in 0.17 and will be "
           "removed in 0.19."
           "Use fetch_lfw_pairs(download_if_missing=False) instead.")
    assert_warns_message(DeprecationWarning, msg, load_lfw_pairs,
                         data_home=SCIKIT_LEARN_DATA)
def test_load_fake_lfw_pairs():
    """Check shapes, targets and class names of the fake LFW pairs dataset."""
    lfw_pairs_train = fetch_lfw_pairs(data_home=SCIKIT_LEARN_DATA, download_if_missing=False)
    # The data is cropped around the center as a rectangular bounding box
    # around the face. Colors are converted to gray levels:
    assert_equal(lfw_pairs_train.pairs.shape, (10, 2, 62, 47))
    # the target is whether the person is the same or not
    assert_array_equal(lfw_pairs_train.target, [1, 1, 1, 1, 1, 0, 0, 0, 0, 0])
    # names of the persons can be found using the target_names array
    expected_classes = ['Different persons', 'Same person']
    assert_array_equal(lfw_pairs_train.target_names, expected_classes)
    # It is possible to ask for the original data without any cropping or color
    # conversion
    lfw_pairs_train = fetch_lfw_pairs(data_home=SCIKIT_LEARN_DATA,
                                      resize=None, slice_=None, color=True, download_if_missing=False)
    assert_equal(lfw_pairs_train.pairs.shape, (10, 2, 250, 250, 3))
    # the ids and class names are the same as previously
    assert_array_equal(lfw_pairs_train.target, [1, 1, 1, 1, 1, 0, 0, 0, 0, 0])
    assert_array_equal(lfw_pairs_train.target_names, expected_classes)
| bsd-3-clause |
jwcarr/flatlanders | analysis/plot.py | 1 | 16473 | from math import isinf, isnan
import matplotlib.pyplot as plt
from matplotlib import gridspec
import basics
# Colour palettes adapted from:
# http://wesandersonpalettes.tumblr.com
# https://github.com/karthik/wesanderson
# https://github.com/jiffyclub/palettable
Bottle_Rocket = ["#9B110E", "#3F5151", "#0C1707", "#550307"]
Cavalcanti = ['#D1AA00', '#083213', '#929460', '#6F9879', '#842111']
Chevalier = ['#355243', '#FECA49', '#C9D5D5', '#BBA289']
Darjeeling1 = ["#FF0000", "#F2AD00", "#F98400", "#5BBCD6"]
Darjeeling2 = ["#046C9A", "#D69C4E", "#ECCBAE", "#000000"]
Darjeeling3 = ['#9E9797', '#C28E00', '#836659', '#9C5A33']
Darjeeling4 = ['#D5E3D8', '#618A98', '#F9DA95', '#AE4B16', '#787064']
Fantastic_Fox1 = ["#E2D200", "#46ACC8", "#E58601", "#B40F20"]
Fantastic_Fox2 = ['#F9DB20', '#934B4E', '#42170D', '#C27922', '#E2C8A7']
Fantastic_Fox3 = ['#E4BF44', '#C65742', '#9AD0BB', '#332737', '#ABA18D']
Grand_Budapest1 = ['#EEAE65', '#FB4F55', '#481313', '#CC5F27']
Grand_Budapest2 = ["#E6A0C4", "#C6CDF7", "#D8A499", "#7294D4"]
Grand_Budapest3 = ['#FFA68E', '#FBCCB7', '#8C1108', '#290B04']
Grand_Budapest4 = ['#FFDCB6', '#253845', '#E7AD9D', '#66756E', '#8B3F31', '#966D35']
Life_Aquatic1 = ["#3B9AB2", "#F21A00", "#EBCC2A", "#78B7C5"]
Life_Aquatic2 = ['#0099E6', '#12255A', '#F23814', '#DFB78B', '#B6C3C5']
Life_Aquatic3 = ['#342419', '#1C4027', '#F1C90E', '#665899', '#B89382']
Moonrise1 = ["#F3DF6C", "#CEAB07", "#D5D5D3", "#24281A"]
Moonrise2 = ['#72CADD', '#F0A5B0', '#8C8536', '#C3B477', '#FAD063']
Moonrise3 = ['#667C74', '#B56A27', '#C2BA7C', '#1F1917']
Moonrise4 = ['#7B8761', '#C1A62E', '#4F8F6B', '#3B453C', '#9F3208']
Moonrise5 = ['#DF8C90', '#D8D28E', '#F5BE25', '#3D4A1C', '#D13060', '#A86B4C']
Royal1 = ['#897712', '#F3C2A4', '#F69F97', '#FED68C', '#629075']
Royal2 = ['#768B93', '#BC240F', '#F9ECC5', '#D47329']
Royal3 = ['#87A2A4', '#CAA065', '#D6CABF', '#D6A0A0']
Royal4 = ['#79A43A', '#F2D6AF', '#5E4829', '#181401']
Royal5 = ['#C2ABBA', '#8C3B49', '#B6ACA6', '#212053', '#D1D3D5']
Rushmore = ["#E1BD6D", "#F2300F", "#0B775E", "#35274A"]
# Globals
colours_by_experiment = [Life_Aquatic2, Grand_Budapest1, Darjeeling2]
markers_by_chain = ['s', 'o', 'p', '^']
class Plot:
    """Grid of subplots for the flatlanders experiment results.

    Maintains a shape_y x shape_x grid of dataset dictionaries (as produced
    by the experiment_results() functions) and renders them into a single
    multipanel PDF via make(). Positions passed to the public API are
    1-indexed; internally the grids are 0-indexed.

    NOTE(review): Python 2 code — uses a `print` statement (peek) and
    `raw_input` for confirmation prompts.
    """

    # Default styling; can be overridden per instance via the set_* methods.
    line_thickness = 1.0
    label_font_size = 8.0
    axis_font_size = 7.0
    legend_font_size = 8.0
    legend_height = 0.15

    def __init__(self, shape_x=1, shape_y=1, width=4.0, height=4.0):
        # width/height are the figure dimensions in inches
        self.shape_x = int(shape_x)
        self.shape_y = int(shape_y)
        self.height = float(height)
        self.width = float(width)
        # Parallel 2D grids: the data dict and the matplotlib axes per position
        self.datasets = [[None] * self.shape_x for y in range(self.shape_y)]
        self.subplots = [[None] * self.shape_x for y in range(self.shape_y)]

    #############################################
    # PUBLIC METHODS

    # Add a subplot or multiple subplots
    def add(self, data, position_x=False, position_y=False):
        # A tuple/list of datasets is placed into consecutive free positions;
        # a single dataset may be targeted at an explicit (1-indexed) position.
        if type(data) == tuple or type(data) == list:
            if self.__number_of_empty_positions() < len(data):
                print('Insufficient space to add %i subplots. Use PLOT.reshape() to reshape the plot or PLOT.peek() to review the layout.' % len(data))
                return
            for dataset in data:
                self.__add_subplot(dataset, False, False)
        else:
            self.__add_subplot(data, position_x, position_y)

    # Make the multipanel plot a reality and save as PDF
    def make(self, save_name=False, save_location=False, legend_in_gap=False, per_column_legend=False):
        # legend_in_gap places the legend in an unused grid cell; otherwise an
        # extra grid row is appended to hold it.
        if legend_in_gap == True and self.__number_of_empty_positions() == 0:
            legend_in_gap = False
        self.fig = plt.figure(figsize=(self.width, self.height))
        ratios = self.__determine_height_ratios(legend_in_gap, per_column_legend)
        self.grid = gridspec.GridSpec(nrows=self.shape_y+(1-int(legend_in_gap)), ncols=self.shape_x, height_ratios=ratios)
        subplot_i = 0
        for y in range(self.shape_y):
            one_y_label = self.__determine_one_y_label(y)
            for x in range(self.shape_x):
                if self.datasets[y][x] == None:
                    self.__make_empty_subplot(x, y)
                    continue
                self.__make_subplot(x, y, subplot_i, one_y_label)
                subplot_i += 1
        self.__add_legend(legend_in_gap, per_column_legend)
        self.grid.tight_layout(self.fig, pad=0.1, h_pad=0.0-(1-int(legend_in_gap)), w_pad=0.75)
        filename = self.__determine_filename(save_name, save_location)
        plt.savefig(filename)
        plt.clf()

    # Peek inside the current state of the multipanel plot
    def peek(self):
        # Prints an ASCII map: [x] = occupied position, [ ] = empty
        print ' ' + ''.join([' %i '%(x+1) for x in range(self.shape_x)])
        for y in range(self.shape_y):
            print_row = '%i '%(y+1)
            for x in range(self.shape_x):
                if self.datasets[y][x] == None:
                    print_row += '[ ] '
                else:
                    print_row += '[x] '
            print(print_row)

    # Change the dimensions of the plot (in inches)
    def resize(self, width, height):
        self.height = float(height)
        self.width = float(width)

    # Change the number of columns and rows
    def reshape(self, shape_x, shape_y):
        shape_x = int(shape_x)
        shape_y = int(shape_y)
        if shape_x > self.shape_x:
            if self.__add_columns(shape_x - self.shape_x) == True:
                self.shape_x = shape_x
        elif shape_x < self.shape_x:
            if self.__remove_columns(self.shape_x - shape_x) == True:
                self.shape_x = shape_x
        if shape_y > self.shape_y:
            if self.__add_rows(shape_y - self.shape_y) == True:
                self.shape_y = shape_y
        elif shape_y < self.shape_y:
            if self.__remove_rows(self.shape_y - shape_y) == True:
                self.shape_y = shape_y

    # Remove a specific subplot
    def remove(self, position_x, position_y):
        try:
            self.datasets[position_y-1][position_x-1] = None
            self.subplots[position_y-1][position_x-1] = None
        except IndexError:
            print('Unable to remove the plot at position %i,%i. Review the plot using PLOT.peek().' % (position_x, position_y))

    # Clear all subplots
    def clear(self):
        self.datasets = [[None] * self.shape_x for y in range(self.shape_y)]
        self.subplots = [[None] * self.shape_x for y in range(self.shape_y)]

    # Set the plot attributes
    def set_label_size(self, size):
        self.label_font_size = float(size)

    def set_axis_size(self, size):
        self.axis_font_size = float(size)

    def set_legend_size(self, size):
        self.legend_font_size = float(size)

    def set_legend_height(self, size):
        self.legend_height = float(size)

    def set_line_thickness(self, size):
        self.line_thickness = float(size)

    #############################################
    # PRIVATE METHODS

    # Add a subplot to the multipanel plot
    def __add_subplot(self, dataset, position_x, position_y):
        if type(dataset) != dict:
            print('Please pass a data dictionary generated from one of the experiment_results() functions.')
            return
        if (type(position_x) == bool and position_x == False) or (type(position_y) == bool and position_y == False):
            # No explicit position given: use the next free cell (0-indexed)
            position_x, position_y = self.__next_available_position()
            if type(position_x) == bool and position_x == False:
                print('No space left to add a new subplot. Use PLOT.reshape() to reshape the plot or specify a position to overwrite.')
                return
        else:
            if (position_x > self.shape_x) or (position_y > self.shape_y):
                print('Plot shape is %ix%i. Use PLOT.reshape() to reshape the plot or specify a different position.' % (self.shape_x, self.shape_y))
                return
            # Convert the public 1-indexed position to internal 0-indexed
            position_x, position_y = position_x-1, position_y-1
            if self.datasets[position_y][position_x] != None and raw_input('Position %i,%i is in use. Overwrite? (y/n) ' % (position_x+1, position_y+1)) != 'y':
                return
        self.datasets[position_y][position_x] = dataset

    # Make a subplot
    def __make_subplot(self, position_x, position_y, subplot_i, one_y_label):
        # Renders one dataset dictionary into its grid cell: one line per chain,
        # plus reference lines and the subplot label.
        dataset = self.datasets[position_y][position_x]
        matrix = self.__remove_NaN(dataset['data'])
        experiment = dataset['experiment']
        data_type = dataset['data_type']
        y_range = dataset['y_range']
        y_label = dataset['y_label']
        starting_generation = dataset['starting_generation']
        self.subplots[position_y][position_x] = self.fig.add_subplot(self.grid[position_y, position_x])
        colours = colours_by_experiment[experiment-1]
        chain_n = len(matrix)
        generation_n = len(matrix[0])
        if data_type in ['structure', 'sublexical_structure', 'sound_symbolism']:
            self.__add_confidence_intervals(y_range[0], generation_n)
        elif (data_type == 'expressivity_d' and experiment == 2) or (data_type == 'communicative_accuracy'):
            self.__add_chance_level(16, generation_n)
        for chain_i in range(0, chain_n):
            x_vals = range(starting_generation, len(matrix[chain_i]) + starting_generation)
            y_vals = [y for y in matrix[chain_i]]
            plt.plot(x_vals, y_vals, color=colours[chain_i], marker=markers_by_chain[chain_i], markersize=5.0, markeredgecolor=colours[chain_i], linewidth=self.line_thickness, label='Chain ' + basics.chain_codes[experiment-1][chain_i])
        plt.xlim(-0.5, generation_n + starting_generation - 0.5)
        plt.ylim(y_range[0], y_range[1])
        plt.xticks(range(0, 11), range(0, 11), fontsize=self.axis_font_size)
        plt.yticks(fontsize=self.axis_font_size)
        plt.tick_params(axis='x', which='both', bottom='off', top='off')
        # Only the leftmost column carries a y label when the row is homogeneous
        if position_x == 0 or one_y_label == False:
            plt.ylabel(y_label, fontsize=self.label_font_size)
        if position_x > 0 and one_y_label == True:
            self.subplots[position_y][position_x].set_yticklabels([])
        if data_type in ['expressivity_d', 'expressivity_s', 'expressivity_c', 'communicative_accuracy', 'communicative_error', 'transmission_error']:
            self.__add_subplot_label(subplot_i, y_range[0], y_range[1], 'bottom')
        else:
            self.__add_subplot_label(subplot_i, y_range[0], y_range[1], 'top')
        if position_y == self.shape_y-1:
            plt.xlabel('Generation number', fontsize=self.label_font_size)

    # Leave a position empty
    def __make_empty_subplot(self, position_x, position_y):
        self.subplots[position_y][position_x] = self.fig.add_subplot(self.grid[position_y, position_x])
        plt.axis('off')

    # Find out the next available position, working left to right, top to bottom
    def __next_available_position(self):
        for y in range(self.shape_y):
            for x in range(self.shape_x):
                if self.datasets[y][x] == None:
                    return x, y
        return False, False

    # Return the legend handles/labels of the first occupied subplot
    def __get_legend_handles(self):
        for y in range(self.shape_y):
            for x in range(self.shape_x):
                if self.datasets[y][x] != None:
                    return self.subplots[y][x].get_legend_handles_labels()
        return False, False

    # Count the number of empty positions in the plot
    def __number_of_empty_positions(self):
        n = 0
        for y in range(self.shape_y):
            for x in range(self.shape_x):
                if self.datasets[y][x] == None:
                    n += 1
        return n

    # Determine appropriate height ratios for the plots and legend
    def __determine_height_ratios(self, legend_in_gap, per_column_legend):
        if legend_in_gap == True:
            return [self.height / self.shape_y] * self.shape_y
        legend_height = self.legend_height
        if per_column_legend == True:
            legend_height *= 4.0
        row_height = (self.height - legend_height) / self.shape_y
        ratios = ([row_height] * self.shape_y) + [legend_height]
        return ratios

    # Determine if only one y-axis label is required (i.e. all plots on a row are the same type)
    def __determine_one_y_label(self, y):
        try:
            if len(set([self.datasets[y][x]['data_type'] for x in range(self.shape_x)])) == 1:
                return True
            return False
        except:
            # A None cell (no 'data_type' key) means the row is mixed/incomplete
            return False

    # Determine the appropriate filename for saving
    def __determine_filename(self, save_name, save_location):
        if type(save_name) == bool and save_name == False:
            save_name = 'plot'
        if type(save_location) == bool and save_location == False:
            save_location = basics.desktop_location
        return save_location + save_name + '.pdf'

    # Add dotted line "confidence intervals" at -1.96 and 1.96
    def __add_confidence_intervals(self, min_y, n):
        plt.plot(range(-1,n+2), [1.959964] * (n+3), color='gray', linestyle=':', linewidth=0.5)
        if min_y < -2:
            plt.plot(range(-1,n+2), [-1.959964] * (n+3), color='gray', linestyle=':', linewidth=0.5)

    # Add dotted line for indicating chance level
    def __add_chance_level(self, level, n):
        plt.plot(range(-1,n+2), [level] * (n+3), color='gray', linestyle=':', linewidth=0.5)

    # Add on one legend for entire plot or one legend per column
    def __add_legend(self, legend_in_gap, per_column_legend):
        if legend_in_gap == True:
            self.__add_gap_legend()
        elif per_column_legend == True:
            self.__add_per_column_legend()
        else:
            self.__add_normal_legend()

    # Single legend spanning the extra bottom row
    def __add_normal_legend(self):
        legend = self.fig.add_subplot(self.grid[self.shape_y, :])
        plt.axis('off')
        handles, labels = self.subplots[0][0].get_legend_handles_labels()
        plt.legend(handles, labels, loc='upper center', bbox_to_anchor=(0.5, -0.2), frameon=False, prop={'size':self.legend_font_size}, ncol=4, numpoints=1)

    # Legend placed inside the first unused grid cell
    def __add_gap_legend(self):
        x, y = self.__next_available_position()
        handles, labels = self.__get_legend_handles()
        self.subplots[y][x].legend(handles, labels, loc='center', bbox_to_anchor=(0.5, 0.5), frameon=False, prop={'size':self.legend_font_size}, ncol=1, numpoints=1)

    # One legend per column in the extra bottom row
    def __add_per_column_legend(self):
        for x in range(self.shape_x):
            legend = self.fig.add_subplot(self.grid[self.shape_y, x])
            plt.axis('off')
            legend.set_yticklabels([])
            legend.set_xticklabels([])
            handles, labels = self.subplots[0][x].get_legend_handles_labels()
            # Reorder handles/labels so the two legend columns read top-to-bottom.
            # NOTE(review): assumes exactly four chains per column — confirm.
            plt.legend([handles[0], handles[2], handles[1], handles[3]], [labels[0], labels[2], labels[1], labels[3]], loc='lower center', bbox_to_anchor=(0.5, -0.2), frameon=False, prop={'size':self.legend_font_size}, ncol=2, numpoints=1, handletextpad=0.2)

    # Add subplot labels: (A), (B), (C), etc...
    def __add_subplot_label(self, subplot_i, min_y, max_y, position):
        try:
            label = '(' + ('ABCDEFGHIJKLMNOPQRSTUVWXYZ'[subplot_i]) + ')'
        except IndexError:
            # More than 26 subplots: fall back to numeric labels
            label = '(' + str(subplot_i + 1) + ')'
        padding = abs(min_y - max_y) / 15.
        if position == 'top':
            plt.text(0.2, max_y - padding, label, {'fontsize':8}, fontweight='bold', ha='left', va='top')
        else:
            plt.text(0.2, min_y + padding, label, {'fontsize':8}, fontweight='bold', ha='left', va='bottom')

    # Remove NaN or infinity from data matrices and replace with None
    def __remove_NaN(self, matrix):
        # NOTE(review): cells that are already None are dropped entirely
        # (the row shortens), not replaced — confirm this is intended.
        new_matrix = []
        for row in matrix:
            new_row = []
            for cell in row:
                if cell != None:
                    if isnan(cell) == True:
                        new_row.append(None)
                    elif isinf(cell) == True:
                        new_row.append(None)
                    else:
                        new_row.append(cell)
            new_matrix.append(new_row)
        return new_matrix

    # Add new column(s) to the right of the plot
    def __add_columns(self, n):
        for y in range(self.shape_y):
            self.datasets[y] += [None] * n
            self.subplots[y] += [None] * n
        return True

    # Remove column(s) from the right, warning if this will lead to plot removal
    def __remove_columns(self, n):
        cells_in_use = 0
        for row in self.datasets:
            for i in range(1, n+1):
                if row[i*-1] != None:
                    cells_in_use += 1
        if cells_in_use > 0:
            plural = ''
            if cells_in_use > 1: plural = 's'
            if raw_input('This will erase %i plot%s. Continue? (y/n) ' % (cells_in_use, plural)) != 'y':
                return False
        for row in self.datasets:
            for i in range(n):
                del row[-1]
        for row in self.subplots:
            for i in range(n):
                del row[-1]
        return True

    # Add new row(s) to the bottom of the plot
    def __add_rows(self, n):
        self.datasets += [[None] * self.shape_x for i in range(n)]
        self.subplots += [[None] * self.shape_x for i in range(n)]
        return True

    # Remove row(s) from the bottom, warning if this will lead to plot removal
    def __remove_rows(self, n):
        cells_in_use = 0
        for i in range(1, n+1):
            for cell in self.datasets[i*-1]:
                if cell != None:
                    cells_in_use += 1
        if cells_in_use > 0:
            plural = ''
            if cells_in_use > 1: plural = 's'
            if raw_input('This will erase %i plot%s. Continue? (y/n) ' % (cells_in_use, plural)) != 'y':
                return False
        for i in range(n):
            del self.datasets[-1]
            del self.subplots[-1]
        return True
| mit |
NixaSoftware/CVis | venv/lib/python2.7/site-packages/pandas/tests/frame/test_join.py | 11 | 5226 | # -*- coding: utf-8 -*-
import pytest
import numpy as np
from pandas import DataFrame, Index, PeriodIndex
from pandas.tests.frame.common import TestData
import pandas.util.testing as tm
@pytest.fixture
def frame_with_period_index():
    # 4x5 frame of 0..19 with an annual PeriodIndex starting in 2000
    return DataFrame(
        data=np.arange(20).reshape(4, 5),
        columns=list('abcde'),
        index=PeriodIndex(start='2000', freq='A', periods=4))
@pytest.fixture
def frame():
    # Standard float test frame with columns A-D (see pandas TestData)
    return TestData().frame
@pytest.fixture
def left():
    # Deliberately unsorted index so `sort=` behaviour is observable
    return DataFrame({'a': [20, 10, 0]}, index=[2, 1, 0])
@pytest.fixture
def right():
    # Index overlaps `left` on {1, 2} only; 3 is right-exclusive, 0 left-exclusive
    return DataFrame({'b': [300, 100, 200]}, index=[3, 1, 2])
@pytest.mark.parametrize(
    "how, sort, expected",
    [('inner', False, DataFrame({'a': [20, 10],
                                 'b': [200, 100]},
                                index=[2, 1])),
     ('inner', True, DataFrame({'a': [10, 20],
                                'b': [100, 200]},
                               index=[1, 2])),
     ('left', False, DataFrame({'a': [20, 10, 0],
                                'b': [200, 100, np.nan]},
                               index=[2, 1, 0])),
     ('left', True, DataFrame({'a': [0, 10, 20],
                               'b': [np.nan, 100, 200]},
                              index=[0, 1, 2])),
     ('right', False, DataFrame({'a': [np.nan, 10, 20],
                                 'b': [300, 100, 200]},
                                index=[3, 1, 2])),
     ('right', True, DataFrame({'a': [10, 20, np.nan],
                                'b': [100, 200, 300]},
                               index=[1, 2, 3])),
     ('outer', False, DataFrame({'a': [0, 10, 20, np.nan],
                                 'b': [np.nan, 100, 200, 300]},
                                index=[0, 1, 2, 3])),
     ('outer', True, DataFrame({'a': [0, 10, 20, np.nan],
                                'b': [np.nan, 100, 200, 300]},
                               index=[0, 1, 2, 3]))])
def test_join(left, right, how, sort, expected):
    """DataFrame.join must honour every how/sort combination exactly."""
    result = left.join(right, how=how, sort=sort)
    tm.assert_frame_equal(result, expected)
def test_join_index(frame):
    """Index-on-index joins: resulting index/columns per `how`, bad-how error."""
    # left / right
    f = frame.loc[frame.index[:10], ['A', 'B']]
    f2 = frame.loc[frame.index[5:], ['C', 'D']].iloc[::-1]
    joined = f.join(f2)
    tm.assert_index_equal(f.index, joined.index)
    expected_columns = Index(['A', 'B', 'C', 'D'])
    tm.assert_index_equal(joined.columns, expected_columns)
    joined = f.join(f2, how='left')
    tm.assert_index_equal(joined.index, f.index)
    tm.assert_index_equal(joined.columns, expected_columns)
    joined = f.join(f2, how='right')
    tm.assert_index_equal(joined.index, f2.index)
    tm.assert_index_equal(joined.columns, expected_columns)
    # inner
    joined = f.join(f2, how='inner')
    tm.assert_index_equal(joined.index, f.index[5:10])
    tm.assert_index_equal(joined.columns, expected_columns)
    # outer
    joined = f.join(f2, how='outer')
    tm.assert_index_equal(joined.index, frame.index.sort_values())
    tm.assert_index_equal(joined.columns, expected_columns)
    # an unknown `how` must raise
    tm.assert_raises_regex(
        ValueError, 'join method', f.join, f2, how='foo')
    # corner case - overlapping columns
    for how in ('outer', 'left', 'inner'):
        with tm.assert_raises_regex(ValueError, 'columns overlap but '
                                    'no suffix'):
            frame.join(frame, how=how)
def test_join_index_more(frame):
    """Joining with a strided (every-other-row) frame aligns on the index."""
    af = frame.loc[:, ['A', 'B']]
    bf = frame.loc[::2, ['C', 'D']]
    expected = af.copy()
    expected['C'] = frame['C'][::2]
    expected['D'] = frame['D'][::2]
    result = af.join(bf)
    tm.assert_frame_equal(result, expected)
    result = af.join(bf, how='right')
    tm.assert_frame_equal(result, expected[::2])
    result = bf.join(af, how='right')
    tm.assert_frame_equal(result, expected.loc[:, result.columns])
def test_join_index_series(frame):
    """Joining a named Series restores the frame; an unnamed Series raises."""
    df = frame.copy()
    s = df.pop(frame.columns[-1])
    joined = df.join(s)
    # TODO should this check_names ?
    tm.assert_frame_equal(joined, frame, check_names=False)
    s.name = None
    tm.assert_raises_regex(ValueError, 'must have a name', df.join, s)
def test_join_overlap(frame):
    """Overlapping columns get lsuffix/rsuffix; non-overlapping stay unsuffixed."""
    df1 = frame.loc[:, ['A', 'B', 'C']]
    df2 = frame.loc[:, ['B', 'C', 'D']]
    joined = df1.join(df2, lsuffix='_df1', rsuffix='_df2')
    df1_suf = df1.loc[:, ['B', 'C']].add_suffix('_df1')
    df2_suf = df2.loc[:, ['B', 'C']].add_suffix('_df2')
    no_overlap = frame.loc[:, ['A', 'D']]
    expected = df1_suf.join(df2_suf).join(no_overlap)
    # column order not necessarily sorted
    tm.assert_frame_equal(joined, expected.loc[:, joined.columns])
def test_join_period_index(frame_with_period_index):
    """Joining two frames that share a PeriodIndex concatenates their columns."""
    other = frame_with_period_index.rename(
        columns=lambda x: '{key}{key}'.format(key=x))
    joined_values = np.concatenate(
        [frame_with_period_index.values] * 2, axis=1)
    joined_cols = frame_with_period_index.columns.append(other.columns)
    joined = frame_with_period_index.join(other)
    expected = DataFrame(
        data=joined_values,
        columns=joined_cols,
        index=frame_with_period_index.index)
    tm.assert_frame_equal(joined, expected)
| apache-2.0 |
COHRINT/cops_and_robots | src/cops_and_robots/helpers/visualizations.py | 1 | 3212 | from mpl_toolkits.mplot3d import Axes3D
import matplotlib.pyplot as plt
import numpy as np
def plot_multisurface(X, Y, Z, ax, cmaps=None, min_alpha=0.6, **kwargs):
    """Plot the surfaces Z[:, :, i] on one 3D axis with correct z-ordering.

    Consecutive surfaces are stitched together through fully transparent
    "bridge" strips so matplotlib renders them as a single surface and
    depth-sorts them properly. Requires Z.shape[2] >= 2.

    Args:
        X, Y: 2D coordinate grids (as produced by np.meshgrid).
        Z: 3D array; Z[:, :, i] is the i-th surface's height field.
        ax: an mpl_toolkits.mplot3d Axes3D instance.
        cmaps: optional list of colormap names, one per surface; repeated
            as needed when shorter than the number of surfaces.
        min_alpha: lower bound on per-vertex opacity.
        **kwargs: forwarded to Axes3D.plot_surface (e.g. cstride, rstride).

    Returns:
        The Poly3DCollection returned by plot_surface.
    """
    num_surfs = Z.shape[2]
    if cmaps is None:
        cmaps = ['Greys', 'Reds', 'Purples', 'Oranges', 'Greens', 'Blues',
                 'RdPu']
    # Repeat the palette until it covers every surface. Use `+` rather than
    # `+=`: the original `cmaps += cmaps` mutated a caller-supplied list
    # in place.
    while num_surfs > len(cmaps):
        cmaps = cmaps + cmaps
    # <>TODO: Include customizable c_max and c_min for color
    # Per-surface value range (ignoring NaNs) for colour normalisation
    z_max = np.zeros(num_surfs)
    for i in range(num_surfs):
        z_max[i] = np.nanmax(Z[:, :, i])
    z_min = np.zeros(num_surfs)
    for i in range(num_surfs):
        z_min[i] = np.nanmin(Z[:, :, i])
    # Set color values: normalised height -> colormap colour; higher points
    # are more opaque, clamped below by min_alpha
    C = np.zeros_like(Z, dtype=object)
    for z_i in range(num_surfs):
        for i in range(X.shape[0]):
            for j in range(X.shape[1]):
                cmap = cmaps[z_i]
                z_norm = (Z[i, j, z_i] - z_min[z_i]) / (z_max[z_i] - z_min[z_i])
                color = list(plt.get_cmap(cmap)(z_norm * 0.6 + 0.2))
                color[3] = np.max([z_norm * 0.7 + 0.2, min_alpha])  # set alpha
                C[i, j, z_i] = color
    # Create a transparent bridge region between consecutive surfaces
    X_bridge = np.vstack([X[-1, :], X[0, :]])
    Y_bridge = np.vstack([Y[-1, :], Y[0, :]])
    Z_bridge = np.zeros((X_bridge.shape[0], Y_bridge.shape[1], num_surfs - 1))
    C_bridge = np.empty_like(Z_bridge, dtype=object)
    for z_i in range(num_surfs - 1):
        Z_bridge[:, :, z_i] = np.vstack([Z[-1, :, z_i], Z[0, :, z_i + 1]])
    C_bridge.fill((1, 1, 1, 0))  # RGBA colour; only alpha=0 matters
    # Join the first two surfaces (using also the bridge)
    X_full = np.vstack([X, X_bridge, X])
    Y_full = np.vstack([Y, Y_bridge, Y])
    Z_full = np.vstack([Z[:, :, 0], Z_bridge[:, :, 0], Z[:, :, 1]])
    C_full = np.vstack([C[:, :, 0], C_bridge[:, :, 0], C[:, :, 1]])
    # Join any additional surfaces
    z_i = 1
    while z_i + 1 < num_surfs:
        X_full = np.vstack([X_full, X_bridge, X])
        Y_full = np.vstack([Y_full, Y_bridge, Y])
        Z_full = np.vstack([Z_full, Z_bridge[:, :, z_i], Z[:, :, z_i + 1]])
        C_full = np.vstack([C_full, C_bridge[:, :, z_i], C[:, :, z_i + 1]])
        z_i += 1
    surf_full = ax.plot_surface(X_full, Y_full, Z_full, linewidth=0,
                                facecolors=C_full, antialiased=True, **kwargs)
    return surf_full
if __name__ == '__main__':
    # Demo: four shifted error-function "ramp" surfaces on one 3D axis
    from scipy.special import erf
    X = np.arange(-5, 5, 0.3)
    Y = np.arange(-5, 5, 0.3)
    X, Y = np.meshgrid(X, Y)
    Z1 = np.empty_like(X)
    Z2 = np.empty_like(X)
    Z3 = np.empty_like(X)
    Z4 = np.empty_like(X)
    # Each surface is 0.5*(erf(...) + 1), i.e. a sigmoid in [0, 1] ramping
    # along one of the four diagonal directions
    for i in range(len(X)):
        for j in range(len(X[0])):
            z1 = 0.5*(erf((+X[i,j]+Y[i,j] - 2)*0.5) +1)
            z2 = 0.5*(erf((+X[i,j]-Y[i,j] - 2)*0.5) +1)
            z3 = 0.5*(erf((-X[i,j]+Y[i,j] - 2)*0.5) +1)
            z4 = 0.5*(erf((-X[i,j]-Y[i,j] - 2)*0.5) +1)
            Z1[i,j] = z1
            Z2[i,j] = z2
            Z3[i,j] = z3
            Z4[i,j] = z4
    # Stack into the (rows, cols, num_surfs) layout plot_multisurface expects
    Z = np.dstack((Z1, Z2, Z3, Z4))
    fig = plt.figure(figsize=(10,8))
    ax = fig.gca(projection='3d')
    surf = plot_multisurface(X, Y, Z, ax, cstride=1, rstride=1)
plt.show() | apache-2.0 |
mjgrav2001/scikit-learn | examples/linear_model/plot_sgd_penalties.py | 249 | 1563 | """
==============
SGD: Penalties
==============
Plot the contours of the three penalties.
All of the above are supported by
:class:`sklearn.linear_model.stochastic_gradient`.
"""
from __future__ import division
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
def l1(xs):
    """Contour of the L1 penalty: for each x, return |1 - |x||.

    The absolute values are taken as sqrt(v**2) so the curve traces the
    unit L1 ball boundary |w0| + |w1| = 1.
    """
    values = []
    for x in xs:
        abs_x = np.sqrt(x ** 2.0)
        values.append(np.sqrt((1 - abs_x) ** 2.0))
    return np.array(values)
def l2(xs):
    """Contour of the L2 penalty: the upper unit circle w1 = sqrt(1 - w0**2)."""
    out = []
    for value in xs:
        out.append(np.sqrt(1.0 - value ** 2.0))
    return np.array(out)
def el(xs, z):
    """Contour of the elastic-net penalty for mixing parameter z.

    Solves the elastic-net unit-ball boundary for w1 at each w0 in xs.
    Note: z == 0.5 makes the denominator (2 - 4z) zero.
    """
    def _w1(x):
        # discriminant of the quadratic in w1
        root = (4 * z ** 2
                - 8 * x * z ** 2
                + 8 * x ** 2 * z ** 2
                - 16 * x ** 2 * z ** 3
                + 8 * x * z ** 3
                + 4 * x ** 2 * z ** 4) ** (1. / 2)
        return (2 - 2 * x - 2 * z + 4 * x * z - root - 2 * x * z ** 2) / (2 - 4 * z)

    return np.array([_w1(x) for x in xs])
def cross(ext):
    """Draw black x and y axis lines through the origin out to +/- ext."""
    span = [-ext, ext]
    origin = [0, 0]
    plt.plot(span, origin, "k-")
    plt.plot(origin, span, "k-")
# Plot each penalty contour in all four quadrants by mirroring the
# first-quadrant curve
xs = np.linspace(0, 1, 100)
alpha = 0.501  # exactly 0.5 would cause division through zero in el()
cross(1.2)
plt.plot(xs, l1(xs), "r-", label="L1")
plt.plot(xs, -1.0 * l1(xs), "r-")
plt.plot(-1 * xs, l1(xs), "r-")
plt.plot(-1 * xs, -1.0 * l1(xs), "r-")
plt.plot(xs, l2(xs), "b-", label="L2")
plt.plot(xs, -1.0 * l2(xs), "b-")
plt.plot(-1 * xs, l2(xs), "b-")
plt.plot(-1 * xs, -1.0 * l2(xs), "b-")
plt.plot(xs, el(xs, alpha), "y-", label="Elastic Net")
plt.plot(xs, -1.0 * el(xs, alpha), "y-")
plt.plot(-1 * xs, el(xs, alpha), "y-")
plt.plot(-1 * xs, -1.0 * el(xs, alpha), "y-")
plt.xlabel(r"$w_0$")
plt.ylabel(r"$w_1$")
plt.legend()
plt.axis("equal")
plt.show()
| bsd-3-clause |
lukeshingles/artistools | artistools/nonthermal.py | 1 | 13926 | #!/usr/bin/env python3
import argparse
# import glob
import math
# import re
import multiprocessing
import os
from collections import namedtuple
from functools import lru_cache
from pathlib import Path
import matplotlib.pyplot as plt
import numpy as np
# import matplotlib.ticker as ticker
import pandas as pd
from astropy import units as u
import artistools as at
import artistools.spencerfano
DEFAULTSPECPATH = '../example_run/spec.out'
defaultoutputfile = 'plotnonthermal_cell{0:03d}_timestep{1:03d}.pdf'
@lru_cache(maxsize=4)
def read_files(modelpath, timestep=-1, modelgridindex=-1):
    """Read ARTIS non-thermal spectrum data into a pandas DataFrame.

    Scans every run folder / MPI rank output file (optionally gzipped) and
    filters to the requested timestep and/or model grid cell (-1 = all).
    Cached because the same model is typically queried repeatedly.
    """
    nonthermaldata = pd.DataFrame()
    mpiranklist = at.get_mpiranklist(modelpath, modelgridindex=modelgridindex)
    for folderpath in at.get_runfolders(modelpath, timestep=timestep):
        for mpirank in mpiranklist:
            nonthermalfile = f'nonthermalspec_{mpirank:04d}.out'
            filepath = Path(folderpath, nonthermalfile)
            if not filepath.is_file():
                # fall back to the gzip-compressed variant
                filepath = Path(folderpath, nonthermalfile + '.gz')
                if not filepath.is_file():
                    print(f'Warning: Could not find {filepath.relative_to(modelpath.parent)}')
                    continue
            if modelgridindex > -1:
                filesize = Path(filepath).stat().st_size / 1024 / 1024
                print(f'Reading {Path(filepath).relative_to(modelpath.parent)} ({filesize:.2f} MiB)')
            nonthermaldata_thisfile = pd.read_csv(filepath, delim_whitespace=True, error_bad_lines=False)
            # radfielddata_thisfile[['modelgridindex', 'timestep']].apply(pd.to_numeric)
            if timestep >= 0:
                nonthermaldata_thisfile.query('timestep==@timestep', inplace=True)
            if modelgridindex >= 0:
                nonthermaldata_thisfile.query('modelgridindex==@modelgridindex', inplace=True)
            if not nonthermaldata_thisfile.empty:
                if timestep >= 0 and modelgridindex >= 0:
                    # fully specified: the first matching file is sufficient
                    return nonthermaldata_thisfile
                else:
                    nonthermaldata = nonthermaldata.append(nonthermaldata_thisfile.copy(), ignore_index=True)
    return nonthermaldata
def ar_xs(energy_ev, ionpot_ev, A, B, C, D):
    """Arnaud & Rothenflug fit for one shell's ionisation cross section (cm^2).

    A, B, C, D are the shell's fit coefficients; the result is zero below
    the ionisation threshold (u = E/I <= 1).
    """
    u = energy_ev / ionpot_ev
    if u <= 1:
        return 0
    inv_u = 1 / u
    bracket = (A * (1 - inv_u)
               + B * (1 - inv_u) ** 2
               + C * math.log(u)
               + D * math.log(u) / u)
    return 1e-14 * bracket / (u * ionpot_ev ** 2)
def xs_fe2_old(energy):
    """Legacy Fe II cross section: sum of three AR1985 shell fits."""
    shell_params = [
        (16.2, 90.0, -60.0, 0.2, -86),    # shell a
        (17.5, 18.6, -5.9, 0.6, -9),      # shell b
        (81, 69.9, -23.7, 9.5, -51.7),    # shell c
    ]
    return sum(ar_xs(energy, *params) for params in shell_params)
def get_arxs_array_shell(arr_enev, shell):
    """Evaluate one shell's AR cross section at every energy in arr_enev."""
    xs_values = [ar_xs(en_ev, shell.ionpot_ev, shell.A, shell.B, shell.C, shell.D)
                 for en_ev in arr_enev]
    return np.array(xs_values)
def get_arxs_array_ion(arr_enev, dfcollion, Z, ionstage):
    """Total AR cross section of one ion: the sum over all of its shells."""
    shells = dfcollion.query('Z == @Z and ionstage == @ionstage')
    total_xs = np.zeros(len(arr_enev))
    for _, shell in shells.iterrows():
        total_xs = total_xs + get_arxs_array_shell(arr_enev, shell)
    return total_xs
def read_colliondata(collionfilename='collion.txt', modelpath=Path(at.PYDIR, 'data')):
    """Read the collisional ionisation (AR fit) shell table into a DataFrame.

    The first line of the file is a row count (printed, not enforced); each
    remaining row holds one shell's Z, electron count, quantum numbers,
    ionisation potential and A-D fit coefficients. An `ionstage` column is
    derived as Z - nelec + 1.
    """
    collionrow = namedtuple('collionrow', ['Z', 'nelec', 'n', 'l', 'ionpot_ev', 'A', 'B', 'C', 'D'])
    with open(Path(modelpath, collionfilename), 'r') as collionfile:
        # first line holds the expected number of rows
        print(f'Collionfile: expecting {collionfile.readline().strip()} rows')
        dfcollion = pd.read_csv(
            collionfile, delim_whitespace=True, header=None, names=collionrow._fields)
    dfcollion.eval('ionstage = Z - nelec + 1', inplace=True)
    return dfcollion
def make_xs_plot(axis, nonthermaldata, args):
    """Plot Fe II and Ni II total ionisation cross sections vs energy on `axis`."""
    dfcollion = read_colliondata()
    # Use the energy grid of the non-thermal spectrum as the x axis
    arr_en = nonthermaldata['energy_ev'].unique()
    # arr_xs_old = [xs_fe2_old(en) for en in arr_en]
    # arr_xs_times_y = [xs_fe1(en) * y for en, y in zip(nonthermaldata['energy_ev'], nonthermaldata['y'])]
    axis.plot(arr_en, get_arxs_array_ion(arr_en, dfcollion, 26, 2), linewidth=2.0, label='Fe II')
    axis.plot(arr_en, get_arxs_array_ion(arr_en, dfcollion, 28, 2), linewidth=2.0, label='Ni II')
    axis.set_ylabel(r'cross section (cm2)')
    if not args.nolegend:
        axis.legend(loc='upper center', handlelength=2, frameon=False, numpoints=1, prop={'size': 13})
def inteuler(x, y):
    """Left-rectangle (Euler) approximation of the integral of y over x.

    Returns sum(y[i] * (x[i+1] - x[i])), i.e. a left Riemann sum — the
    rectangle-rule counterpart of the np.trapz calls used elsewhere here.
    Fix: the original computed dx from `y` and dotted it with `x`, so the
    x/y roles were swapped (the integral of a constant came out as 0).
    """
    dx = x[1:] - x[:-1]  # widths of the integration intervals
    return np.dot(y[:-1], dx)
def plot_contributions(axis, modelpath, timestep, modelgridindex, nonthermaldata, args):
    """Plot the fractional energy-deposition channels versus electron energy.

    For each sufficiently abundant element, plots the fraction of the total
    deposition going into collisional ionisation (summed over ions/shells),
    plus the fraction going into heating of thermal electrons. Fractions are
    normalised by the cell's total deposition rate (eV / cm3 / s).
    """
    estimators = at.estimators.read_estimators(modelpath, get_ion_values=True, get_heatingcooling=True,
                                               modelgridindex=modelgridindex, timestep=timestep)
    # print(estimators[(timestep, modelgridindex)].keys())
    # convert the deposition rate from erg to eV
    total_depev = (estimators[(timestep, modelgridindex)]['total_dep'] * u.erg.to('eV'))
    print(f"Deposition: {total_depev:.1f} eV / cm3 / s")
    arr_enev = nonthermaldata['energy_ev'].values
    arr_y = nonthermaldata['y'].values  # non-thermal electron spectrum y(E)
    frac_ionisation = 0.
    dfcollion = read_colliondata()
    elementlist = at.get_composition_data(modelpath)
    totalpop = estimators[(timestep, modelgridindex)]['populations']['total']
    nelements = len(elementlist)
    for element in range(nelements):
        Z = elementlist.Z[element]
        elpop = estimators[(timestep, modelgridindex)]['populations'][Z]
        if elpop <= 1e-4 * totalpop:
            # skip trace elements
            continue
        arr_ionisation_element = np.zeros(len(arr_enev), dtype=float)
        frac_ionisation_element = 0.
        nions = elementlist.nions[element]
        for ion in range(nions):
            ionstage = ion + elementlist.lowermost_ionstage[element]
            ionpop = estimators[(timestep, modelgridindex)]['populations'][(Z, ionstage)]
            dfcollion_thision = dfcollion.query('Z == @Z and ionstage == @ionstage')
            # print(at.get_ionstring(Z, ionstage), ionpop)
            arr_ionisation_ion = np.zeros(len(arr_enev), dtype=float)
            frac_ionisation_ion = 0.
            for index, row in dfcollion_thision.iterrows():
                # ionisation-rate integrand for this shell, as a fraction of
                # the total deposition: n_ion * y(E) * sigma(E) * I / D
                arr_xs = get_arxs_array_shell(arr_enev, row)
                arr_ionisation_shell = ionpop * arr_y * arr_xs * row.ionpot_ev / total_depev
                arr_ionisation_ion += arr_ionisation_shell
                frac_ionisation_shell = np.trapz(x=arr_enev, y=arr_ionisation_shell)
                frac_ionisation_ion += frac_ionisation_shell
            arr_ionisation_element += arr_ionisation_ion
            frac_ionisation_element += frac_ionisation_ion
        frac_ionisation += frac_ionisation_element
        if frac_ionisation_element > 1e-5:
            axis.plot(arr_enev, arr_ionisation_element, label=f'Ionisation Z={Z}')
    # heating fraction via the electron loss function at the cell's nne
    nne = estimators[(timestep, modelgridindex)]['nne']
    arr_heating = np.array([at.spencerfano.lossfunction(enev, nne) / total_depev for enev in arr_enev])
    frac_heating = np.trapz(x=arr_enev, y=arr_heating)
    print(f' frac_heating: {frac_heating}')
    print(f'frac_ionisation: {frac_ionisation}')
    axis.plot(arr_enev, arr_heating, label='Heating')
    axis.legend(loc='best', handlelength=2, frameon=False, numpoints=1, prop={'size': 11})
def make_plot(modelpaths, args):
"""Plot the ARTIS non-thermal electron energy spectrum for each model path and save the combined figure as a PDF."""
nplots = 1
if args.xsplot:
nplots += 1
if args.showcontributions:
nplots += 1
fig, axes = plt.subplots(nrows=nplots, ncols=1, sharex=True,
figsize=(args.figscale * at.figwidth, args.figscale * at.figwidth * 0.7 * nplots),
tight_layout={"pad": 0.2, "w_pad": 0.0, "h_pad": 0.0})
# plt.subplots returns a bare Axes (not a sequence) when nrows == 1
if nplots == 1:
axes = [axes]
if args.kf1992spec:
# Overlay the digitised Kozma & Fransson (1992) Figure 1 spectrum read from the first model folder
kf92spec = pd.read_csv(Path(modelpaths[0], 'KF1992spec-fig1.txt'), header=None, names=['e_kev', 'log10_y'])
kf92spec['energy_ev'] = kf92spec['e_kev'] * 1000.
kf92spec.eval('y = 10 ** log10_y', inplace=True)
axes[0].plot(kf92spec['energy_ev'], kf92spec['log10_y'],
linewidth=2.0, color='red', label='Kozma & Fransson (1992)')
for index, modelpath in enumerate(modelpaths):
modelname = at.get_model_name(modelpath)
# Select the model grid cell either by velocity or by explicit index
if args.velocity >= 0.:
modelgridindex = at.get_mgi_of_velocity_kms(modelpath, args.velocity)
else:
modelgridindex = args.modelgridindex
# Select the timestep either by time in days or by explicit number
if args.timedays:
timestep = at.get_timestep_of_timedays(modelpath, args.timedays)
else:
timestep = args.timestep
nonthermaldata = read_files(
modelpath=Path(modelpath),
modelgridindex=modelgridindex, timestep=timestep)
if args.xmin:
nonthermaldata.query('energy_ev >= @args.xmin', inplace=True)
if nonthermaldata.empty:
print(f'No data for timestep {timestep:d}')
continue
# Prefer a caller-supplied label for this model index when given
if index < len(args.modellabels):
model_label = args.modellabels[index]
else:
model_label = f'{modelname} cell {modelgridindex} at timestep {timestep}'
try:
time_days = float(at.get_timestep_time('.', timestep))
except FileNotFoundError:
time_days = 0
else:
# Only append the time to the label when it could be read
model_label += f' ({time_days:.2f}d)'
# NOTE(review): outputfile is assigned inside the loop but used after
# it, so the last plotted model determines the saved filename
outputfile = str(args.outputfile).format(modelgridindex, timestep)
print(f'Plotting timestep {timestep:d}')
# ymax = max(nonthermaldata['y'])
# nonthermaldata.plot(x='energy_ev', y='y', linewidth=1.5, ax=axis, color='blue', legend=False)
axes[0].plot((nonthermaldata['energy_ev']), np.log10(nonthermaldata['y']), label=model_label,
linewidth=2.0, color='black' if index == 0 else None, alpha=0.95)
axes[0].set_ylabel(r'log [y (e$^-$ / cm$^2$ / s / eV)]')
if args.showcontributions:
plot_contributions(axes[1], modelpath, timestep, modelgridindex, nonthermaldata, args)
if args.xsplot:
make_xs_plot(axes[-1], nonthermaldata, args)
if not args.nolegend:
axes[0].legend(loc='best', handlelength=2, frameon=False, numpoints=1)
axes[-1].set_xlabel(r'Energy (eV)')
# axis.yaxis.set_minor_locator(ticker.MultipleLocator(base=0.1))
# axis.set_yscale("log", nonposy='clip')
for ax in axes:
if args.xmin is not None:
ax.set_xlim(left=args.xmin)
if args.xmax:
ax.set_xlim(right=args.xmax)
# axis.set_ylim(bottom=0.0, top=ymax)
# axis.legend(loc='upper center', handlelength=2,
# frameon=False, numpoints=1, prop={'size': 13})
print(f'Saving to {outputfile:s}')
fig.savefig(outputfile, format='pdf')
plt.close()
def addargs(parser):
"""Register this module's command-line options on the given argparse parser."""
parser.add_argument('-modelpath', default=[], nargs='*', action=at.AppendPath,
help='Paths to ARTIS folders with spec.out or packets files')
parser.add_argument('-modellabels', default=[], nargs='*',
help='Model name overrides')
parser.add_argument('-listtimesteps', action='store_true',
help='Show the times at each timestep')
parser.add_argument('-xsplot', action='store_true',
help='Show the cross section plot')
parser.add_argument('-timedays', '-time', '-t',
help='Time in days to plot')
parser.add_argument('-timestep', '-ts', type=int, default=-1,
help='Timestep number to plot')
parser.add_argument('-modelgridindex', '-cell', type=int, default=0,
help='Modelgridindex to plot')
parser.add_argument('-velocity', '-v', type=float, default=-1,
help='Specify cell by velocity')
parser.add_argument('-xmin', type=float, default=0.,
help='Plot range: minimum energy in eV')
parser.add_argument('-xmax', type=float,
help='Plot range: maximum energy in eV')
parser.add_argument('--nolegend', action='store_true',
help='Suppress the legend from the plot')
parser.add_argument('--showcontributions', action='store_true',
help='Plot the NT contributions to ionisation and heating energy')
parser.add_argument('--kf1992spec', action='store_true',
help='Show the pure-oxygen result form Figure 1 of Kozma & Fransson 1992')
parser.add_argument('-figscale', type=float, default=1.,
help='Scale factor for plot area. 1.0 is for single-column')
parser.add_argument('-o', action='store', dest='outputfile', type=Path,
default=defaultoutputfile,
help='Filename for PDF file')
def main(args=None, argsraw=None, **kwargs):
"""Plot the electron energy distribution.

When called without a pre-parsed *args* namespace, builds a parser,
applies **kwargs as defaults, and parses *argsraw* (or sys.argv).
"""
if args is None:
parser = argparse.ArgumentParser(
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
description='Plot ARTIS non-thermal electron energy spectrum.')
addargs(parser)
parser.set_defaults(**kwargs)
args = parser.parse_args(argsraw)
# Default to the current directory; normalise a bare str/Path to a list
if not args.modelpath:
args.modelpath = [Path('.')]
elif isinstance(args.modelpath, (str, Path)):
args.modelpath = [args.modelpath]
# flatten the list
modelpaths = []
for elem in args.modelpath:
if isinstance(elem, list):
modelpaths.extend(elem)
else:
modelpaths.append(elem)
# If the output path is a directory, append the default filename to it
if os.path.isdir(args.outputfile):
args.outputfile = os.path.join(args.outputfile, defaultoutputfile)
if args.listtimesteps:
at.showtimesteptimes()
else:
make_plot(modelpaths, args)
if __name__ == "__main__":
# freeze_support() is a no-op everywhere except frozen Windows
# executables, where it lets multiprocessing children start correctly.
multiprocessing.freeze_support()
main()
| mit |
sanketloke/scikit-learn | examples/cluster/plot_color_quantization.py | 297 | 3443 | # -*- coding: utf-8 -*-
"""
==================================
Color Quantization using K-Means
==================================
Performs a pixel-wise Vector Quantization (VQ) of an image of the summer palace
(China), reducing the number of colors required to show the image from 96,615
unique colors to 64, while preserving the overall appearance quality.
In this example, pixels are represented in a 3D-space and K-means is used to
find 64 color clusters. In the image processing literature, the codebook
obtained from K-means (the cluster centers) is called the color palette. Using
a single byte, up to 256 colors can be addressed, whereas an RGB encoding
requires 3 bytes per pixel. The GIF file format, for example, uses such a
palette.
For comparison, a quantized image using a random codebook (colors picked up
randomly) is also shown.
"""
# Authors: Robert Layton <robertlayton@gmail.com>
# Olivier Grisel <olivier.grisel@ensta.org>
# Mathieu Blondel <mathieu@mblondel.org>
#
# License: BSD 3 clause
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn.cluster import KMeans
from sklearn.metrics import pairwise_distances_argmin
from sklearn.datasets import load_sample_image
from sklearn.utils import shuffle
from time import time
n_colors = 64
# Load the Summer Palace photo
china = load_sample_image("china.jpg")
# Convert to floats instead of the default 8 bits integer coding. Dividing by
# 255 is important so that plt.imshow behaves works well on float data (need to
# be in the range [0-1]
china = np.array(china, dtype=np.float64) / 255
# Load Image and transform to a 2D numpy array.
w, h, d = original_shape = tuple(china.shape)
assert d == 3
image_array = np.reshape(china, (w * h, d))
print("Fitting model on a small sub-sample of the data")
t0 = time()
# Fit k-means on a 1000-pixel random subsample to keep the fit fast
image_array_sample = shuffle(image_array, random_state=0)[:1000]
kmeans = KMeans(n_clusters=n_colors, random_state=0).fit(image_array_sample)
print("done in %0.3fs." % (time() - t0))
# Get labels for all points
print("Predicting color indices on the full image (k-means)")
t0 = time()
labels = kmeans.predict(image_array)
print("done in %0.3fs." % (time() - t0))
# Baseline for comparison: a codebook of colors picked at random
codebook_random = shuffle(image_array, random_state=0)[:n_colors + 1]
print("Predicting color indices on the full image (random)")
t0 = time()
labels_random = pairwise_distances_argmin(codebook_random,
image_array,
axis=0)
print("done in %0.3fs." % (time() - t0))
def recreate_image(codebook, labels, w, h):
    """Recreate the (compressed) image from the code book & labels.

    Parameters
    ----------
    codebook : array-like of shape (n_colors, d)
        The palette: one d-dimensional color per row.
    labels : array-like of shape (w * h,)
        Index into `codebook` for each pixel, in row-major order.
    w, h : int
        Width and height (first two axes) of the output image.

    Returns
    -------
    image : ndarray of shape (w, h, d), dtype float64
    """
    # Cast to float64 to match the original implementation, which filled
    # an np.zeros((w, h, d)) array (float64) element by element.
    palette = np.asarray(codebook, dtype=np.float64)
    # Fancy indexing maps every label to its palette color in one
    # vectorized step (replacing an O(w*h) Python-level double loop);
    # reshape restores the 2-D pixel grid.
    return palette[np.asarray(labels)].reshape(w, h, palette.shape[1])
# Display all results, alongside original image
plt.figure(1)
plt.clf()
ax = plt.axes([0, 0, 1, 1])
plt.axis('off')
plt.title('Original image (96,615 colors)')
plt.imshow(china)
# Reconstruction from the k-means palette
plt.figure(2)
plt.clf()
ax = plt.axes([0, 0, 1, 1])
plt.axis('off')
plt.title('Quantized image (64 colors, K-Means)')
plt.imshow(recreate_image(kmeans.cluster_centers_, labels, w, h))
# Reconstruction from the randomly chosen palette, for comparison
plt.figure(3)
plt.clf()
ax = plt.axes([0, 0, 1, 1])
plt.axis('off')
plt.title('Quantized image (64 colors, Random)')
plt.imshow(recreate_image(codebook_random, labels_random, w, h))
plt.show()
| bsd-3-clause |
bert9bert/statsmodels | examples/python/glm.py | 5 | 3979 |
## Generalized Linear Models
from __future__ import print_function
import numpy as np
import statsmodels.api as sm
from scipy import stats
from matplotlib import pyplot as plt
# ## GLM: Binomial response data
#
# ### Load data
#
# In this example, we use the Star98 dataset which was taken with permission
# from Jeff Gill (2000) Generalized linear models: A unified approach. Codebook
# information can be obtained by typing:
print(sm.datasets.star98.NOTE)
# Load the data and add a constant to the exogenous (independent) variables:
data = sm.datasets.star98.load()
data.exog = sm.add_constant(data.exog, prepend=False)
# The dependent variable is N by 2 (Success: NABOVE, Failure: NBELOW):
print(data.endog[:5,:])
# The independent variables include all the other variables described above, as
# well as the interaction terms:
print(data.exog[:2,:])
# ### Fit and summary
glm_binom = sm.GLM(data.endog, data.exog, family=sm.families.Binomial())
res = glm_binom.fit()
print(res.summary())
# ### Quantities of interest
print('Total number of trials:', data.endog[0].sum())
print('Parameters: ', res.params)
print('T-values: ', res.tvalues)
# First differences: We hold all explanatory variables constant at their means and manipulate the percentage of low income households to assess its impact on the response variables:
means = data.exog.mean(axis=0)
means25 = means.copy()
means25[0] = stats.scoreatpercentile(data.exog[:,0], 25)
means75 = means.copy()
means75[0] = lowinc_75per = stats.scoreatpercentile(data.exog[:,0], 75)
resp_25 = res.predict(means25)
resp_75 = res.predict(means75)
diff = resp_75 - resp_25
# The interquartile first difference for the percentage of low income households in a school district is:
print("%2.4f%%" % (diff*100))
# ### Plots
#
# We extract information that will be used to draw some interesting plots:
nobs = res.nobs
y = data.endog[:,0]/data.endog.sum(1)
yhat = res.mu
# Plot yhat vs y:
from statsmodels.graphics.api import abline_plot
fig, ax = plt.subplots()
ax.scatter(yhat, y)
line_fit = sm.OLS(y, sm.add_constant(yhat, prepend=True)).fit()
abline_plot(model_results=line_fit, ax=ax)
ax.set_title('Model Fit Plot')
ax.set_ylabel('Observed values')
ax.set_xlabel('Fitted values');
# Plot yhat vs. Pearson residuals:
fig, ax = plt.subplots()
ax.scatter(yhat, res.resid_pearson)
ax.hlines(0, 0, 1)
ax.set_xlim(0, 1)
ax.set_title('Residual Dependence Plot')
ax.set_ylabel('Pearson Residuals')
ax.set_xlabel('Fitted values')
# Histogram of standardized deviance residuals:
from scipy import stats
fig, ax = plt.subplots()
resid = res.resid_deviance.copy()
resid_std = stats.zscore(resid)
ax.hist(resid_std, bins=25)
ax.set_title('Histogram of standardized deviance residuals');
# QQ Plot of Deviance Residuals:
from statsmodels import graphics
graphics.gofplots.qqplot(resid, line='r')
# ## GLM: Gamma for proportional count response
#
# ### Load data
#
# In the example above, we printed the ``NOTE`` attribute to learn about the
# Star98 dataset. Statsmodels datasets ships with other useful information. For
# example:
print(sm.datasets.scotland.DESCRLONG)
# Load the data and add a constant to the exogenous variables:
data2 = sm.datasets.scotland.load()
data2.exog = sm.add_constant(data2.exog, prepend=False)
print(data2.exog[:5,:])
print(data2.endog[:5])
# ### Fit and summary
glm_gamma = sm.GLM(data2.endog, data2.exog, family=sm.families.Gamma())
glm_results = glm_gamma.fit()
print(glm_results.summary())
# ## GLM: Gaussian distribution with a noncanonical link
#
# ### Artificial data
nobs2 = 100
x = np.arange(nobs2)
np.random.seed(54321)
X = np.column_stack((x,x**2))
X = sm.add_constant(X, prepend=False)
lny = np.exp(-(.03*x + .0001*x**2 - 1.0)) + .001 * np.random.rand(nobs2)
# ### Fit and summary
gauss_log = sm.GLM(lny, X, family=sm.families.Gaussian(sm.families.links.log()))
gauss_log_results = gauss_log.fit()
print(gauss_log_results.summary())
| bsd-3-clause |
CJ-Wright/scikit-beam | doc/sphinxext/tests/test_docscrape.py | 12 | 14257 | # -*- encoding:utf-8 -*-
import sys
import os
sys.path.append(os.path.join(os.path.dirname(__file__), '..'))
from docscrape import NumpyDocString, FunctionDoc, ClassDoc
from docscrape_sphinx import SphinxDocString, SphinxClassDoc
from nose.tools import *
doc_txt = '''\
numpy.multivariate_normal(mean, cov, shape=None)
Draw values from a multivariate normal distribution with specified
mean and covariance.
The multivariate normal or Gaussian distribution is a generalisation
of the one-dimensional normal distribution to higher dimensions.
Parameters
----------
mean : (N,) ndarray
Mean of the N-dimensional distribution.
.. math::
(1+2+3)/3
cov : (N,N) ndarray
Covariance matrix of the distribution.
shape : tuple of ints
Given a shape of, for example, (m,n,k), m*n*k samples are
generated, and packed in an m-by-n-by-k arrangement. Because
each sample is N-dimensional, the output shape is (m,n,k,N).
Returns
-------
out : ndarray
The drawn samples, arranged according to `shape`. If the
shape given is (m,n,...), then the shape of `out` is is
(m,n,...,N).
In other words, each entry ``out[i,j,...,:]`` is an N-dimensional
value drawn from the distribution.
Warnings
--------
Certain warnings apply.
Notes
-----
Instead of specifying the full covariance matrix, popular
approximations include:
- Spherical covariance (`cov` is a multiple of the identity matrix)
- Diagonal covariance (`cov` has non-negative elements only on the diagonal)
This geometrical property can be seen in two dimensions by plotting
generated data-points:
>>> mean = [0,0]
>>> cov = [[1,0],[0,100]] # diagonal covariance, points lie on x or y-axis
>>> x,y = multivariate_normal(mean,cov,5000).T
>>> plt.plot(x,y,'x'); plt.axis('equal'); plt.show()
Note that the covariance matrix must be symmetric and non-negative
definite.
References
----------
.. [1] A. Papoulis, "Probability, Random Variables, and Stochastic
Processes," 3rd ed., McGraw-Hill Companies, 1991
.. [2] R.O. Duda, P.E. Hart, and D.G. Stork, "Pattern Classification,"
2nd ed., Wiley, 2001.
See Also
--------
some, other, funcs
otherfunc : relationship
Examples
--------
>>> mean = (1,2)
>>> cov = [[1,0],[1,0]]
>>> x = multivariate_normal(mean,cov,(3,3))
>>> print(x.shape)
(3, 3, 2)
The following is probably true, given that 0.6 is roughly twice the
standard deviation:
>>> print(list( (x[0,0,:] - mean) < 0.6 ))
[True, True]
.. index:: random
:refguide: random;distributions, random;gauss
'''
doc = NumpyDocString(doc_txt)
def test_signature():
assert doc['Signature'].startswith('numpy.multivariate_normal(')
assert doc['Signature'].endswith('shape=None)')
def test_summary():
assert doc['Summary'][0].startswith('Draw values')
assert doc['Summary'][-1].endswith('covariance.')
def test_extended_summary():
assert doc['Extended Summary'][0].startswith('The multivariate normal')
def test_parameters():
assert_equal(len(doc['Parameters']), 3)
assert_equal(
[n for n, _, _ in doc['Parameters']], ['mean', 'cov', 'shape'])
arg, arg_type, desc = doc['Parameters'][1]
assert_equal(arg_type, '(N,N) ndarray')
assert desc[0].startswith('Covariance matrix')
assert doc['Parameters'][0][-1][-2] == ' (1+2+3)/3'
def test_returns():
assert_equal(len(doc['Returns']), 1)
arg, arg_type, desc = doc['Returns'][0]
assert_equal(arg, 'out')
assert_equal(arg_type, 'ndarray')
assert desc[0].startswith('The drawn samples')
assert desc[-1].endswith('distribution.')
def test_notes():
assert doc['Notes'][0].startswith('Instead')
assert doc['Notes'][-1].endswith('definite.')
assert_equal(len(doc['Notes']), 17)
def test_references():
assert doc['References'][0].startswith('..')
assert doc['References'][-1].endswith('2001.')
def test_examples():
assert doc['Examples'][0].startswith('>>>')
assert doc['Examples'][-1].endswith('True]')
def test_index():
assert_equal(doc['index']['default'], 'random')
print(doc['index'])
assert_equal(len(doc['index']), 2)
assert_equal(len(doc['index']['refguide']), 2)
def non_blank_line_by_line_compare(a, b):
    """Assert that the non-blank lines of strings *a* and *b* match.

    Whitespace-only lines are ignored on both sides.  Raises
    AssertionError naming the first differing line.  Extra trailing
    non-blank lines in *b* are ignored (historical behaviour preserved).
    """
    a_lines = [l for l in a.split('\n') if l.strip()]
    b_lines = [l for l in b.split('\n') if l.strip()]
    for n, line in enumerate(a_lines):
        if n >= len(b_lines):
            # BUGFIX: previously this fell through to b[n] and raised a
            # bare IndexError instead of a useful comparison failure.
            raise AssertionError("Line %s of a has no counterpart in b: "
                                 "\n>>> %s\n" % (n, line))
        if not line == b_lines[n]:
            raise AssertionError("Lines %s of a and b differ: "
                                 "\n>>> %s\n<<< %s\n" %
                                 (n, line, b_lines[n]))
def test_str():
non_blank_line_by_line_compare(str(doc),
"""numpy.multivariate_normal(mean, cov, shape=None)
Draw values from a multivariate normal distribution with specified
mean and covariance.
The multivariate normal or Gaussian distribution is a generalisation
of the one-dimensional normal distribution to higher dimensions.
Parameters
----------
mean : (N,) ndarray
Mean of the N-dimensional distribution.
.. math::
(1+2+3)/3
cov : (N,N) ndarray
Covariance matrix of the distribution.
shape : tuple of ints
Given a shape of, for example, (m,n,k), m*n*k samples are
generated, and packed in an m-by-n-by-k arrangement. Because
each sample is N-dimensional, the output shape is (m,n,k,N).
Returns
-------
out : ndarray
The drawn samples, arranged according to `shape`. If the
shape given is (m,n,...), then the shape of `out` is is
(m,n,...,N).
In other words, each entry ``out[i,j,...,:]`` is an N-dimensional
value drawn from the distribution.
Warnings
--------
Certain warnings apply.
See Also
--------
`some`_, `other`_, `funcs`_
`otherfunc`_
relationship
Notes
-----
Instead of specifying the full covariance matrix, popular
approximations include:
- Spherical covariance (`cov` is a multiple of the identity matrix)
- Diagonal covariance (`cov` has non-negative elements only on the diagonal)
This geometrical property can be seen in two dimensions by plotting
generated data-points:
>>> mean = [0,0]
>>> cov = [[1,0],[0,100]] # diagonal covariance, points lie on x or y-axis
>>> x,y = multivariate_normal(mean,cov,5000).T
>>> plt.plot(x,y,'x'); plt.axis('equal'); plt.show()
Note that the covariance matrix must be symmetric and non-negative
definite.
References
----------
.. [1] A. Papoulis, "Probability, Random Variables, and Stochastic
Processes," 3rd ed., McGraw-Hill Companies, 1991
.. [2] R.O. Duda, P.E. Hart, and D.G. Stork, "Pattern Classification,"
2nd ed., Wiley, 2001.
Examples
--------
>>> mean = (1,2)
>>> cov = [[1,0],[1,0]]
>>> x = multivariate_normal(mean,cov,(3,3))
>>> print(x.shape)
(3, 3, 2)
The following is probably true, given that 0.6 is roughly twice the
standard deviation:
>>> print(list( (x[0,0,:] - mean) < 0.6 ))
[True, True]
.. index:: random
:refguide: random;distributions, random;gauss""")
def test_sphinx_str():
sphinx_doc = SphinxDocString(doc_txt)
non_blank_line_by_line_compare(str(sphinx_doc),
"""
.. index:: random
single: random;distributions, random;gauss
Draw values from a multivariate normal distribution with specified
mean and covariance.
The multivariate normal or Gaussian distribution is a generalisation
of the one-dimensional normal distribution to higher dimensions.
:Parameters:
**mean** : (N,) ndarray
Mean of the N-dimensional distribution.
.. math::
(1+2+3)/3
**cov** : (N,N) ndarray
Covariance matrix of the distribution.
**shape** : tuple of ints
Given a shape of, for example, (m,n,k), m*n*k samples are
generated, and packed in an m-by-n-by-k arrangement. Because
each sample is N-dimensional, the output shape is (m,n,k,N).
:Returns:
**out** : ndarray
The drawn samples, arranged according to `shape`. If the
shape given is (m,n,...), then the shape of `out` is is
(m,n,...,N).
In other words, each entry ``out[i,j,...,:]`` is an N-dimensional
value drawn from the distribution.
.. warning::
Certain warnings apply.
.. seealso::
:obj:`some`, :obj:`other`, :obj:`funcs`
:obj:`otherfunc`
relationship
.. rubric:: Notes
Instead of specifying the full covariance matrix, popular
approximations include:
- Spherical covariance (`cov` is a multiple of the identity matrix)
- Diagonal covariance (`cov` has non-negative elements only on the diagonal)
This geometrical property can be seen in two dimensions by plotting
generated data-points:
>>> mean = [0,0]
>>> cov = [[1,0],[0,100]] # diagonal covariance, points lie on x or y-axis
>>> x,y = multivariate_normal(mean,cov,5000).T
>>> plt.plot(x,y,'x'); plt.axis('equal'); plt.show()
Note that the covariance matrix must be symmetric and non-negative
definite.
.. rubric:: References
.. [1] A. Papoulis, "Probability, Random Variables, and Stochastic
Processes," 3rd ed., McGraw-Hill Companies, 1991
.. [2] R.O. Duda, P.E. Hart, and D.G. Stork, "Pattern Classification,"
2nd ed., Wiley, 2001.
.. only:: latex
[1]_, [2]_
.. rubric:: Examples
>>> mean = (1,2)
>>> cov = [[1,0],[1,0]]
>>> x = multivariate_normal(mean,cov,(3,3))
>>> print(x.shape)
(3, 3, 2)
The following is probably true, given that 0.6 is roughly twice the
standard deviation:
>>> print(list( (x[0,0,:] - mean) < 0.6 ))
[True, True]
""")
doc2 = NumpyDocString("""
Returns array of indices of the maximum values of along the given axis.
Parameters
----------
a : {array_like}
Array to look in.
axis : {None, integer}
If None, the index is into the flattened array, otherwise along
the specified axis""")
def test_parameters_without_extended_description():
assert_equal(len(doc2['Parameters']), 2)
doc3 = NumpyDocString("""
my_signature(*params, **kwds)
Return this and that.
""")
def test_escape_stars():
signature = str(doc3).split('\n')[0]
assert_equal(signature, 'my_signature(\*params, \*\*kwds)')
doc4 = NumpyDocString(
"""a.conj()
Return an array with all complex-valued elements conjugated.""")
def test_empty_extended_summary():
assert_equal(doc4['Extended Summary'], [])
doc5 = NumpyDocString(
"""
a.something()
Raises
------
LinAlgException
If array is singular.
""")
def test_raises():
assert_equal(len(doc5['Raises']), 1)
name, _, desc = doc5['Raises'][0]
assert_equal(name, 'LinAlgException')
assert_equal(desc, ['If array is singular.'])
def test_see_also():
doc6 = NumpyDocString(
"""
z(x,theta)
See Also
--------
func_a, func_b, func_c
func_d : some equivalent func
foo.func_e : some other func over
multiple lines
func_f, func_g, :meth:`func_h`, func_j,
func_k
:obj:`baz.obj_q`
:class:`class_j`: fubar
foobar
""")
assert len(doc6['See Also']) == 12
for func, desc, role in doc6['See Also']:
if func in ('func_a', 'func_b', 'func_c', 'func_f',
'func_g', 'func_h', 'func_j', 'func_k', 'baz.obj_q'):
assert(not desc)
else:
assert(desc)
if func == 'func_h':
assert role == 'meth'
elif func == 'baz.obj_q':
assert role == 'obj'
elif func == 'class_j':
assert role == 'class'
else:
assert role is None
if func == 'func_d':
assert desc == ['some equivalent func']
elif func == 'foo.func_e':
assert desc == ['some other func over', 'multiple lines']
elif func == 'class_j':
assert desc == ['fubar', 'foobar']
def test_see_also_print():
class Dummy(object):
"""
See Also
--------
func_a, func_b
func_c : some relationship
goes here
func_d
"""
pass
obj = Dummy()
s = str(FunctionDoc(obj, role='func'))
assert(':func:`func_a`, :func:`func_b`' in s)
assert(' some relationship' in s)
assert(':func:`func_d`' in s)
doc7 = NumpyDocString("""
Doc starts on second line.
""")
def test_empty_first_line():
assert doc7['Summary'][0].startswith('Doc starts')
def test_no_summary():
str(SphinxDocString("""
Parameters
----------"""))
def test_unicode():
doc = SphinxDocString("""
öäöäöäöäöåååå
öäöäöäööäååå
Parameters
----------
ååå : äää
ööö
Returns
-------
ååå : ööö
äää
""")
assert doc['Summary'][0] == u'öäöäöäöäöåååå'.encode('utf-8')
def test_plot_examples():
cfg = dict(use_plots=True)
doc = SphinxDocString("""
Examples
--------
>>> import matplotlib.pyplot as plt
>>> plt.plot([1,2,3],[4,5,6])
>>> plt.show()
""", config=cfg)
assert 'plot::' in str(doc), str(doc)
doc = SphinxDocString("""
Examples
--------
.. plot::
import matplotlib.pyplot as plt
plt.plot([1,2,3],[4,5,6])
plt.show()
""", config=cfg)
assert str(doc).count('plot::') == 1, str(doc)
def test_class_members():
class Dummy(object):
"""
Dummy class.
"""
def spam(self, a, b):
"""Spam\n\nSpam spam."""
pass
def ham(self, c, d):
"""Cheese\n\nNo cheese."""
pass
for cls in (ClassDoc, SphinxClassDoc):
doc = cls(Dummy, config=dict(show_class_members=False))
assert 'Methods' not in str(doc), (cls, str(doc))
assert 'spam' not in str(doc), (cls, str(doc))
assert 'ham' not in str(doc), (cls, str(doc))
doc = cls(Dummy, config=dict(show_class_members=True))
assert 'Methods' in str(doc), (cls, str(doc))
assert 'spam' in str(doc), (cls, str(doc))
assert 'ham' in str(doc), (cls, str(doc))
if cls is SphinxClassDoc:
assert '.. autosummary::' in str(doc), str(doc)
| bsd-3-clause |
nfoti/StarCluster | utils/scimage_11_10.py | 20 | 15705 | #!/usr/bin/env python
"""
This script is meant to be run inside of a ubuntu cloud image available at
uec-images.ubuntu.com::
$ EC2_UBUNTU_IMG_URL=http://uec-images.ubuntu.com/oneiric/current
$ wget $EC2_UBUNTU_IMG_URL/oneiric-server-cloudimg-amd64.tar.gz
or::
$ wget $EC2_UBUNTU_IMG_URL/oneiric-server-cloudimg-i386.tar.gz
After downloading a Ubuntu cloud image the next step is to extract the image::
$ tar xvzf oneiric-server-cloudimg-amd64.tar.gz
Then resize it to 10GB::
$ e2fsck -f oneiric-server-cloudimg-amd64.img
$ resize2fs oneiric-server-cloudimg-amd64.img 10G
Next you need to mount the image::
$ mkdir /tmp/img-mount
$ mount oneiric-server-cloudimg-amd64.img /tmp/img-mount
$ mount -t proc none /tmp/img-mount/proc
$ mount -o bind /dev /tmp/img-mount/dev
Copy /etc/resolv.conf and /etc/mtab to the image::
$ cp /etc/resolv.conf /tmp/img-mount/etc/resolv.conf
$ grep -v rootfs /etc/mtab > /tmp/img-mount/etc/mtab
Next copy this script inside the image::
$ cp /path/to/scimage.py /tmp/img-mount/root/scimage.py
Finally chroot inside the image and run this script:
$ chroot /tmp/img-mount /bin/bash
$ cd $HOME
$ python scimage.py
"""
import os
import sys
import glob
import shutil
import fileinput
import subprocess
import multiprocessing
SRC_DIR = "/usr/local/src"
APT_SOURCES_FILE = "/etc/apt/sources.list"
BUILD_UTILS_PKGS = "build-essential devscripts debconf debconf-utils "
BUILD_UTILS_PKGS += "python-setuptools python-pip python-nose"
CLOUD_CFG_FILE = '/etc/cloud/cloud.cfg'
GRID_SCHEDULER_GIT = 'git://github.com/jtriley/gridscheduler.git'
CLOUDERA_ARCHIVE_KEY = 'http://archive.cloudera.com/debian/archive.key'
CLOUDERA_APT = 'http://archive.cloudera.com/debian maverick-cdh3 contrib'
CONDOR_APT = 'http://www.cs.wisc.edu/condor/debian/development lenny contrib'
NUMPY_SCIPY_SITE_CFG = """\
[DEFAULT]
library_dirs = /usr/lib
include_dirs = /usr/include:/usr/include/suitesparse
[blas_opt]
libraries = ptf77blas, ptcblas, atlas
[lapack_opt]
libraries = lapack, ptf77blas, ptcblas, atlas
[amd]
amd_libs = amd
[umfpack]
umfpack_libs = umfpack
[fftw]
libraries = fftw3
"""
STARCLUSTER_MOTD = """\
#!/bin/sh
cat<<"EOF"
_ _ _
__/\_____| |_ __ _ _ __ ___| |_ _ ___| |_ ___ _ __
\ / __| __/ _` | '__/ __| | | | / __| __/ _ \ '__|
/_ _\__ \ || (_| | | | (__| | |_| \__ \ || __/ |
\/ |___/\__\__,_|_| \___|_|\__,_|___/\__\___|_|
StarCluster Ubuntu 11.10 AMI
Software Tools for Academics and Researchers (STAR)
Homepage: http://web.mit.edu/starcluster
Documentation: http://web.mit.edu/starcluster/docs/latest
Code: https://github.com/jtriley/StarCluster
Mailing list: starcluster@mit.edu
This AMI Contains:
* Custom-Compiled Atlas, Numpy, Scipy, etc
* Open Grid Scheduler (OGS) queuing system
* Condor workload management system
* OpenMPI compiled with Open Grid Scheduler support
* IPython 0.12 with parallel support
* and more! (use 'dpkg -l' to show all installed packages)
Open Grid Scheduler/Condor cheat sheet:
* qstat/condor_q - show status of batch jobs
* qhost/condor_status- show status of hosts, queues, and jobs
* qsub/condor_submit - submit batch jobs (e.g. qsub -cwd ./jobscript.sh)
* qdel/condor_rm - delete batch jobs (e.g. qdel 7)
* qconf - configure Open Grid Scheduler system
Current System Stats:
EOF
landscape-sysinfo | grep -iv 'graph this data'
"""
CLOUD_INIT_CFG = """\
user: ubuntu
disable_root: 0
preserve_hostname: False
# datasource_list: [ "NoCloud", "OVF", "Ec2" ]
cloud_init_modules:
- bootcmd
- resizefs
- set_hostname
- update_hostname
- update_etc_hosts
- rsyslog
- ssh
cloud_config_modules:
- mounts
- ssh-import-id
- locale
- set-passwords
- grub-dpkg
- timezone
- puppet
- chef
- mcollective
- disable-ec2-metadata
- runcmd
cloud_final_modules:
- rightscale_userdata
- scripts-per-once
- scripts-per-boot
- scripts-per-instance
- scripts-user
- keys-to-console
- final-message
apt_sources:
- source: deb $MIRROR $RELEASE multiverse
- source: deb %(CLOUDERA_APT)s
- source: deb-src %(CLOUDERA_APT)s
- source: deb %(CONDOR_APT)s
""" % dict(CLOUDERA_APT=CLOUDERA_APT, CONDOR_APT=CONDOR_APT)
def run_command(cmd, ignore_failure=False, failure_callback=None,
get_output=False):
"""Run *cmd* through the shell and return its exit status.

If get_output is True, stdout/stderr are captured, echoed to the
console, and a (status, output) tuple is returned instead.  On a
non-zero exit status an Exception is raised unless ignore_failure is
True or failure_callback (called with the status) returns True, in
which case the error message is only written to stderr.
"""
kwargs = {}
if get_output:
kwargs.update(dict(stdout=subprocess.PIPE, stderr=subprocess.PIPE))
p = subprocess.Popen(cmd, shell=True, **kwargs)
output = []
if get_output:
line = None
# Stream stdout line by line until EOF, echoing as we go
while line != '':
line = p.stdout.readline()
if line != '':
output.append(line)
print line,
# stderr is only read after stdout is exhausted, so the original
# interleaving of the two streams is not preserved in `output`
for line in p.stderr.readlines():
if line != '':
output.append(line)
print line,
retval = p.wait()
if retval != 0:
errmsg = "command '%s' failed with status %d" % (cmd, retval)
if failure_callback:
# The callback decides whether this particular failure is ignorable
ignore_failure = failure_callback(retval)
if not ignore_failure:
raise Exception(errmsg)
else:
sys.stderr.write(errmsg + '\n')
if get_output:
return retval, ''.join(output)
return retval
def apt_command(cmd):
    """Run an apt-get subcommand non-interactively, keeping new conffiles
    on package upgrades and assuming "yes" for every prompt."""
    full_cmd = ("DEBIAN_FRONTEND='noninteractive' "
                "apt-get -o Dpkg::Options::='--force-confnew' "
                "-y --force-yes " + cmd)
    run_command(full_cmd)
def apt_install(pkgs):
    """Install the given space-separated package list via apt-get."""
    apt_command('install ' + pkgs)
def chdir(directory):
    """Change the working directory to the one directory matching *directory*.

    *directory* may be a glob pattern; exactly one existing directory must
    match it.  Raises Exception when zero or more than one directory
    matches (previously a zero-match pattern died with a bare IndexError).
    """
    matches = [o for o in glob.glob(directory) if os.path.isdir(o)]
    if not matches:
        raise Exception("no dir matches: %s" % directory)
    if len(matches) > 1:
        raise Exception("more than one dir matches: %s" % directory)
    os.chdir(matches[0])
def _fix_atlas_rules(rules_file='debian/rules'):
# Strip every line containing 'ATLAS=None' from the debian rules file.
# fileinput with inplace=1 redirects stdout to the file being read, so
# the trailing-comma print below rewrites each kept line unchanged.
for line in fileinput.input(rules_file, inplace=1):
if 'ATLAS=None' not in line:
print line,
def configure_apt_sources():
"""Mirror every active 'deb' apt source as a 'deb-src' line and add the
Cloudera and Condor repositories together with their signing keys."""
srcfile = open(APT_SOURCES_FILE)
contents = srcfile.readlines()
srcfile.close()
srclines = []
for line in contents:
# Skip blank lines and comments when generating deb-src mirrors
if not line.strip() or line.startswith('#'):
continue
parts = line.split()
if parts[0] == 'deb':
parts[0] = 'deb-src'
srclines.append(' '.join(parts).strip())
# Rewrite the file: original contents, then the deb-src mirrors,
# then the extra third-party repositories
srcfile = open(APT_SOURCES_FILE, 'w')
srcfile.write(''.join(contents))
srcfile.write('\n'.join(srclines) + '\n')
srcfile.write('deb %s\n' % CLOUDERA_APT)
srcfile.write('deb-src %s\n' % CLOUDERA_APT)
srcfile.write('deb %s\n' % CONDOR_APT)
srcfile.close()
# NOTE(review): 0F932C9C is presumably the Condor repo signing key — confirm
run_command('gpg --keyserver keyserver.ubuntu.com --recv-keys 0F932C9C')
run_command('curl -s %s | sudo apt-key add -' % CLOUDERA_ARCHIVE_KEY)
apt_install('debian-archive-keyring')
def upgrade_packages():
    """Refresh the apt package index, then upgrade all installed packages."""
    for subcmd in ('update', 'upgrade'):
        apt_command(subcmd)
def install_build_utils():
"""Install the compiler toolchain and Python packaging utilities
needed to build the packages below (fixes a copy-pasted docstring)."""
apt_install(BUILD_UTILS_PKGS)
def install_gridscheduler():
"""Build Open Grid Scheduler from git (or unpack a pre-built tarball if
present in SRC_DIR) and install it into /opt/sge6-fresh."""
chdir(SRC_DIR)
apt_command('build-dep gridengine')
# Prefer a pre-built tarball when available to skip the long source build
if os.path.isfile('gridscheduler-scbuild.tar.gz'):
run_command('tar xvzf gridscheduler-scbuild.tar.gz')
run_command('mv gridscheduler /opt/sge6-fresh')
return
apt_install('git')
run_command('git clone %s' % GRID_SCHEDULER_GIT)
# Derive JAVA_HOME from the resolved location of the java binary
sts, out = run_command('readlink -f `which java`', get_output=True)
java_home = out.strip().split('/jre')[0]
chdir(os.path.join(SRC_DIR, 'gridscheduler', 'source'))
run_command('git checkout -t -b develop origin/develop')
env = 'JAVA_HOME=%s' % java_home
run_command('%s ./aimk -only-depend' % env)
run_command('%s scripts/zerodepend' % env)
run_command('%s ./aimk depend' % env)
run_command('%s ./aimk -no-secure -no-gui-inst' % env)
sge_root = '/opt/sge6-fresh'
os.mkdir(sge_root)
env += ' SGE_ROOT=%s' % sge_root
run_command('%s scripts/distinst -all -local -noexit -y -- man' % env)
def install_condor():
    """Install HTCondor and create its log/run directories."""
    chdir(SRC_DIR)
    # NOTE(review): removing /var/lock before the package install — presumably
    # it is a symlink that breaks condor's postinst; confirm on the base image.
    run_command("rm /var/lock")
    apt_install('condor')
    run_command('ln -s /etc/condor/condor_config /etc/condor_config.local')
    run_command('mkdir /var/lib/condor/log')
    run_command('mkdir /var/lib/condor/run')
    run_command('chown -R condor:condor /var/lib/condor/log')
    run_command('chown -R condor:condor /var/lib/condor/run')


def install_torque():
    """Install the TORQUE resource manager (server, mom, and client)."""
    chdir(SRC_DIR)
    apt_install('torque-server torque-mom torque-client')


def install_pydrmaa():
    """Install the Python DRMAA bindings used to talk to the schedulers."""
    chdir(SRC_DIR)
    run_command('pip install drmaa')
def install_atlas():
    """Build and install custom ATLAS Debian packages.

    Reuses any ``*atlas*.deb`` already present in SRC_DIR; otherwise builds
    them from the apt source package with the 'custom' rules target.
    """
    chdir(SRC_DIR)
    apt_command('build-dep atlas')
    if glob.glob("*atlas*.deb"):
        run_command('dpkg -i *atlas*.deb')
        return
    apt_command('source atlas')
    chdir('atlas-*')
    run_command('fakeroot debian/rules custom')
    # dpkg-buildpackage-style builds leave the .debs in the parent directory.
    run_command('dpkg -i ../*atlas*.deb')
def install_numpy():
    """Build and install python-numpy Debian packages linked against ATLAS.

    Reuses any prebuilt ``*numpy*.deb`` in SRC_DIR; otherwise rebuilds the
    apt source package with the custom ``site.cfg``.
    """
    chdir(SRC_DIR)
    apt_command('build-dep python-numpy')
    if glob.glob('*numpy*.deb'):
        run_command('dpkg -i *numpy*.deb')
        return
    apt_command('source python-numpy')
    chdir('python-numpy*')
    # Point the numpy build at the custom BLAS/LAPACK configuration.
    sitecfg = open('site.cfg', 'w')
    sitecfg.write(NUMPY_SCIPY_SITE_CFG)
    sitecfg.close()
    _fix_atlas_rules()

    def _deb_failure_callback(retval):
        # Treat the build as successful as long as the .debs were produced
        # (dpkg-buildpackage can exit non-zero on non-fatal late steps).
        if not glob.glob('../*numpy*.deb'):
            return False
        return True
    run_command('dpkg-buildpackage -rfakeroot -b',
                failure_callback=_deb_failure_callback)
    run_command('dpkg -i ../*numpy*.deb')
def install_scipy():
    """Build and install python-scipy Debian packages linked against ATLAS.

    Reuses any prebuilt ``*scipy*.deb`` in SRC_DIR; otherwise rebuilds the
    apt source package with the custom ``site.cfg``.
    """
    chdir(SRC_DIR)
    apt_command('build-dep python-scipy')
    if glob.glob('*scipy*.deb'):
        run_command('dpkg -i *scipy*.deb')
        return
    apt_command('source python-scipy')
    chdir('python-scipy*')
    # Point the scipy build at the custom BLAS/LAPACK configuration.
    sitecfg = open('site.cfg', 'w')
    sitecfg.write(NUMPY_SCIPY_SITE_CFG)
    sitecfg.close()
    _fix_atlas_rules()

    def _deb_failure_callback(retval):
        # BUG FIX: this previously globbed '../*numpy*.deb' (copy-paste from
        # install_numpy), so a failed scipy build was reported as a success
        # whenever numpy .debs happened to be in the parent directory.
        if not glob.glob('../*scipy*.deb'):
            return False
        return True
    run_command('dpkg-buildpackage -rfakeroot -b',
                failure_callback=_deb_failure_callback)
    run_command('dpkg -i ../*scipy*.deb')
def install_openmpi():
    """Rebuild Open MPI with SGE (Grid Engine) support and install it.

    Patches ``debian/rules`` to add ``--with-sge`` to configure, builds the
    packages, installs them, and verifies gridengine support via ompi_info.
    """
    chdir(SRC_DIR)
    apt_command('build-dep libopenmpi-dev')
    apt_install('blcr-util')
    if glob.glob('*openmpi*.deb'):
        run_command('dpkg -i *openmpi*.deb')
        return
    apt_command('source libopenmpi-dev')
    chdir('openmpi*')
    # In-place edit: echo each rules line back, appending the --with-sge
    # configure flag right after the --enable-heterogeneous line.
    for line in fileinput.input('debian/rules', inplace=1):
        print line,
        if '--enable-heterogeneous' in line:
            print ' --with-sge \\'

    def _deb_failure_callback(retval):
        # Success as long as the .debs exist in the parent directory.
        if not glob.glob('../*openmpi*.deb'):
            return False
        return True
    run_command('dpkg-buildpackage -rfakeroot -b',
                failure_callback=_deb_failure_callback)
    run_command('dpkg -i ../*openmpi*.deb')
    # Sanity check: the built MPI must report the gridengine component.
    sts, out = run_command('ompi_info | grep -i grid', get_output=True)
    if 'gridengine' not in out:
        raise Exception("failed to build openmpi with Grid Engine support")
def install_hadoop():
    """Install the Hadoop 0.20 daemon packages plus the dumbo helper."""
    chdir(SRC_DIR)
    hadoop_pkgs = ['namenode', 'datanode', 'tasktracker', 'jobtracker',
                   'secondarynamenode']
    pkgs = ['hadoop-0.20'] + ['hadoop-0.20-%s' % pkg for pkg in hadoop_pkgs]
    apt_install(' '.join(pkgs))
    run_command('easy_install dumbo')


def install_ipython():
    """Install IPython with its notebook dependencies and MathJax assets."""
    chdir(SRC_DIR)
    apt_install('libzmq-dev')
    run_command('pip install pyzmq==2.1.9')
    run_command('pip install ipython tornado pygments')
    # Fetch MathJax through IPython's own installer helper.
    mjax_install = 'from IPython.external.mathjax import install_mathjax'
    mjax_install += '; install_mathjax()'
    run_command("python -c '%s'" % mjax_install)


def configure_motd():
    """Replace the stock update-motd scripts with the StarCluster banner."""
    for f in glob.glob('/etc/update-motd.d/*'):
        os.unlink(f)
    motd = open('/etc/update-motd.d/00-starcluster', 'w')
    motd.write(STARCLUSTER_MOTD)
    motd.close()
    # update-motd scripts must be executable to run.
    os.chmod(motd.name, 0755)


def configure_cloud_init():
    """Write the custom cloud-init configuration."""
    cloudcfg = open('/etc/cloud/cloud.cfg', 'w')
    cloudcfg.write(CLOUD_INIT_CFG)
    cloudcfg.close()
def configure_bash():
    """Enable bash completion in /etc/bash.bashrc and add root aliases.

    Uncomments the 'bash_completion' line and the line immediately after it
    (the stock file comments the feature out across two lines).
    """
    completion_line_found = False
    for line in fileinput.input('/etc/bash.bashrc', inplace=1):
        if 'bash_completion' in line and line.startswith('#'):
            print line.replace('#', ''),
            completion_line_found = True
        elif completion_line_found:
            # Also uncomment the continuation line that follows the match.
            print line.replace('#', ''),
            completion_line_found = False
        else:
            print line,
    aliasfile = open('/root/.bash_aliases', 'w')
    aliasfile.write("alias ..='cd ..'\n")
    aliasfile.close()
def setup_environ():
    """Set build/packaging environment variables for the whole run.

    Parallelizes make (cpus + 1 jobs) and silences debconf prompts.
    """
    num_cpus = multiprocessing.cpu_count()
    os.environ['MAKEFLAGS'] = '-j%d' % (num_cpus + 1)
    os.environ['DEBIAN_FRONTEND'] = "noninteractive"


def install_nfs():
    """Install the NFS kernel server and expose it under the legacy name."""
    chdir(SRC_DIR)
    run_command('initctl reload-configuration')
    apt_install('nfs-kernel-server')
    run_command('ln -s /etc/init.d/nfs-kernel-server /etc/init.d/nfs')
def install_default_packages():
    """Preseed mysql-server's debconf answers, then install the base toolset."""
    # stop mysql from interactively asking for a password during install
    preseedf = '/tmp/mysql-preseed.txt'
    mysqlpreseed = open(preseedf, 'w')
    # Blank root password answers, marked 'seen' so debconf never prompts.
    preseeds = """\
mysql-server mysql-server/root_password select
mysql-server mysql-server/root_password seen true
mysql-server mysql-server/root_password_again select
mysql-server mysql-server/root_password_again seen true
"""
    mysqlpreseed.write(preseeds)
    mysqlpreseed.close()
    run_command('debconf-set-selections < %s' % mysqlpreseed.name)
    run_command('rm %s' % mysqlpreseed.name)
    # Base developer/admin package set for the image.
    pkgs = "python-dev git vim mercurial subversion cvs encfs "
    pkgs += "openmpi-bin libopenmpi-dev python-django "
    pkgs += "keychain screen tmux zsh ksh csh tcsh python-mpi4py "
    pkgs += "python-virtualenv python-imaging python-boto python-matplotlib "
    pkgs += "unzip rar unace build-essential gfortran ec2-api-tools "
    pkgs += "ec2-ami-tools mysql-server mysql-client apache2 "
    pkgs += "libapache2-mod-wsgi sysv-rc-conf pssh emacs cython irssi "
    pkgs += "python-distutils-extra htop vim-scripts python-ctypes python-pudb"
    apt_install(pkgs)
def configure_init():
    """Remove the rc.d links so these services do not start at boot.

    The ``\*`` sequences pass literal ``*`` wildcards through to `find`
    (they are not Python escapes), so each service's links are matched by
    substring in every /etc/rc* directory.
    """
    for script in ['nfs-kernel-server', 'hadoop', 'condor', 'apache', 'mysql']:
        run_command('find /etc/rc* -iname \*%s\* -delete' % script)
def cleanup():
    """Remove host-specific state and caches so the image can be re-bundled.

    Clears resolv.conf/mtab, root's home (keeping shell config files),
    unpacked sources, the apt package cache, and the byobu profile hook.
    """
    run_command('rm /etc/resolv.conf')
    run_command('rm /etc/mtab')
    run_command('rm -rf /root/*')
    # Keep root's shell configuration; drop every other dotfile/dotdir.
    exclude = ['/root/.bashrc', '/root/.profile', '/root/.bash_aliases']
    for dot in glob.glob("/root/.*"):
        if dot not in exclude:
            run_command('rm -rf %s' % dot)
    for path in glob.glob('/usr/local/src/*'):
        if os.path.isdir(path):
            shutil.rmtree(path)
    run_command('rm -f /var/cache/apt/archives/*.deb')
    run_command('rm -f /var/cache/apt/archives/partial/*')
    # BUG FIX: glob the files *inside* /etc/profile.d -- the old pattern
    # ('/etc/profile.d') only ever matched the directory itself, so the
    # byobu profile script was never removed.
    for f in glob.glob('/etc/profile.d/*'):
        if 'byobu' in f:
            run_command('rm %s' % f)
def main():
    """Run the full image build: configure, build, install, then clean up.

    Must run as root; the order matters (e.g. ATLAS before numpy/scipy).
    """
    if os.getuid() != 0:
        sys.stderr.write('you must be root to run this script\n')
        return
    setup_environ()
    configure_motd()
    configure_cloud_init()
    configure_bash()
    configure_apt_sources()
    upgrade_packages()
    install_build_utils()
    install_gridscheduler()
    install_condor()
    #install_torque()
    install_pydrmaa()
    install_atlas()
    install_numpy()
    install_scipy()
    install_ipython()
    install_openmpi()
    install_hadoop()
    install_nfs()
    install_default_packages()
    configure_init()
    cleanup()


if __name__ == '__main__':
    main()
| lgpl-3.0 |
zkraime/osf.io | scripts/analytics/links.py | 55 | 1227 | # -*- coding: utf-8 -*-
import os
import matplotlib.pyplot as plt
from framework.mongo import database
from website import settings
from .utils import plot_dates, mkdirp
link_collection = database['privatelink']
FIG_PATH = os.path.join(settings.ANALYTICS_PATH, 'figs', 'features')
mkdirp(FIG_PATH)
def analyze_view_only_links():
    """Plot the creation dates of all view-only links and save the figure.

    Reads ``date_created`` from every record in the privatelink collection;
    does nothing if the collection is empty.
    """
    dates = [
        record['date_created']
        for record in link_collection.find({}, {'date_created': True})
    ]
    if not dates:
        return
    # plot_dates draws onto the current pyplot figure; the returned figure
    # object was previously bound to an unused local, so drop it.
    plot_dates(dates)
    plt.title('view-only links ({} total)'.format(len(dates)))
    plt.savefig(os.path.join(FIG_PATH, 'view-only-links.png'))
    plt.close()
def analyze_view_only_links_anonymous():
    """Plot the creation dates of anonymous view-only links and save the figure.

    Same as :func:`analyze_view_only_links` but restricted to records with
    ``anonymous == True``.
    """
    dates = [
        record['date_created']
        for record in link_collection.find(
            {'anonymous': True},
            {'date_created': True},
        )
    ]
    if not dates:
        return
    # plot_dates draws onto the current pyplot figure; the returned figure
    # object was previously bound to an unused local, so drop it.
    plot_dates(dates)
    plt.title('anonymous view-only links ({} total)'.format(len(dates)))
    plt.savefig(os.path.join(FIG_PATH, 'view-only-links-anonymous.png'))
    plt.close()
def main():
    """Generate both view-only-link figures."""
    analyze_view_only_links()
    analyze_view_only_links_anonymous()


if __name__ == '__main__':
    main()
| apache-2.0 |
justacec/bokeh | bokeh/charts/builders/timeseries_builder.py | 6 | 3925 | """This is the Bokeh charts interface. It gives you a high level API to build
complex plot is a simple way.
This is the TimeSeries chart, which provides a convenient interface for
generating different charts using series-like data by transforming the data
to a consistent format and producing renderers.
"""
# -----------------------------------------------------------------------------
# Copyright (c) 2012 - 2014, Continuum Analytics, Inc. All rights reserved.
#
# Powered by the Bokeh Development Team.
#
# The full license is in the file LICENSE.txt, distributed with this software.
# -----------------------------------------------------------------------------
# -----------------------------------------------------------------------------
# Imports
# -----------------------------------------------------------------------------
from __future__ import absolute_import
from ..builder import create_and_build
from .line_builder import LineBuilder, PointSeriesBuilder
from .step_builder import StepBuilder
# -----------------------------------------------------------------------------
# Classes and functions
# -----------------------------------------------------------------------------
# Maps the string names accepted by TimeSeries' ``builder_type`` argument to
# the Builder classes implementing each glyph style.
BUILDER_TYPES = {
    'line': LineBuilder,
    'step': StepBuilder,
    'point': PointSeriesBuilder
}
def TimeSeries(data=None, x=None, y=None, builder_type=LineBuilder, **kws):
    """Create a timeseries chart, dispatching to the requested glyph builder.

    Acts as a switchboard for series-like data: the same inputs can be
    rendered as lines, steps, or points depending on ``builder_type``.

    Args:
        data (list(list), numpy.ndarray, pandas.DataFrame, list(pd.Series)):
            a 2d data source with one column of data per series.
        x (str or list(str), optional): variable(s) to use for the x axis.
        y (str or list(str), optional): variable(s) to use for the y axis.
        builder_type (str or `Builder`, optional): the builder that produces
            the renderers. Supported string options are 'line', 'step',
            or 'point'; a Builder class may also be passed directly.

    In addition to the parameters specific to this chart,
    :ref:`userguide_charts_defaults` are also accepted as keyword parameters.

    Returns:
        a new :class:`Chart <bokeh.charts.Chart>`
    """
    # String names resolve through BUILDER_TYPES; any other value (e.g. a
    # Builder class) is passed through unchanged.
    resolved_builder = BUILDER_TYPES.get(builder_type, builder_type)
    kws.update(x=x, y=y)
    return create_and_build(resolved_builder, data, **kws)
| bsd-3-clause |
stevengt/Degrees-of-Separation | degreesOfSeparation.py | 1 | 3467 |
"""These methods use SQLite to search an IMDb snapshot to
determine the degrees of separation among movies."""
import sqlite3 as sql
import sys
import Queue
import matplotlib.pyplot as plot
# BFS bookkeeping shared between graphDegrees and findDegrees.
visitedActors = set()    # actor ids already expanded
visitedMovies = set()    # movie ids already discovered
indexByActors = dict()   # actor id -> set of movie ids the actor appeared in
indexByMovies = dict()   # movie id -> set of actor ids in the movie
def graphDegrees(degreesOfSeparation, movieTitle
                ,movieYear,locationOfDatabaseFile):
    """Finds what percent of all movies are within n degrees of separation
    from a given movie and graphs the distribution.

    Loads the actor/movie/role tables from the SQLite snapshot into the
    module-level index dicts, seeds the BFS queue with the requested movie,
    then bar-plots the cumulative fraction of movies reached per degree.
    """
    global visitedActors
    global visitedMovies
    global indexByActors
    global indexByMovies
    try:
        con = sql.connect(locationOfDatabaseFile)
        cur = con.cursor()
        movieQueue = Queue.Queue(maxsize=0)
        # Pre-create an empty movie set for every actor id.
        cur.execute('''SELECT a1.aid FROM actor a1''')
        actor = cur.fetchone()
        while actor != None:
            actorID = actor[0]
            indexByActors[actorID] = set()
            actor = cur.fetchone()
        # Pre-create an empty actor set for every movie id.
        cur.execute('''SELECT m1.mid FROM movie m1''')
        movie = cur.fetchone()
        while movie != None:
            movieID = movie[0]
            indexByMovies[movieID] = set()
            movie = cur.fetchone()
        # Fill both indexes from the role (actor-in-movie) table.
        cur.execute('''SELECT DISTINCT r1.aid, r1.mid FROM role r1
        ORDER BY r1.mid''')
        current = cur.fetchone()
        while current != None:
            actorID = current[0]
            movieID = current[1]
            indexByActors[actorID].add(movieID)
            indexByMovies[movieID].add(actorID)
            current = cur.fetchone()
        # Seed the BFS with the movie matching title and year.
        # NOTE(review): fetchone() is None if no such movie exists -- this
        # would raise TypeError; confirm inputs are validated by callers.
        cur.execute('''SELECT m1.mid FROM movie m1 WHERE
        m1.title = ? AND m1.year = ?''',(movieTitle,movieYear))
        movieQueue.put(cur.fetchone()[0])
        distribution = findDegrees(movieQueue,degreesOfSeparation)
        # Bar chart of the cumulative fraction reached at each degree.
        xValues = range(degreesOfSeparation+1)
        graph = plot.subplots()[1]
        graph.bar(xValues,distribution)
        plot.title("Degrees of Separation from " + movieTitle)
        plot.xlabel("Degrees of Separation")
        plot.ylabel("Percent of All Movies")
        for i,label in enumerate(distribution):
            label = int(round(label*100))
            label = "%d%%" % label
            graph.annotate(label,(xValues[i],distribution[i]))
        plot.show()
    #------------------------------------------------------------------------
    except sql.Error,e:
        print "Error %s:" % e.args[0]
        sys.exit(1)
    finally:
        if con:
            con.close()
def findDegrees(movieQueue, maxDegrees):
    """Breadth-first search over the movie/actor graph.

    Expands the queued movies one degree at a time, returning, for each
    degree 0..maxDegrees, the cumulative fraction of all movies reached.
    Mutates the module-level visited sets.
    """
    global visitedActors
    global visitedMovies
    global indexByActors
    global indexByMovies
    fractionsPerDegree = []
    totalMovies = len(indexByMovies)
    for _ in range(maxDegrees + 1):
        # Only expand the movies discovered in the previous round.
        frontierSize = movieQueue.qsize()
        for _ in range(frontierSize):
            currentMovie = movieQueue.get()
            for castMember in indexByMovies[currentMovie]:
                if castMember in visitedActors:
                    continue
                visitedActors.add(castMember)
                # Every movie this actor appeared in joins the next frontier.
                for coMovie in indexByActors[castMember]:
                    if coMovie not in visitedMovies:
                        visitedMovies.add(coMovie)
                        movieQueue.put(coMovie)
        fractionsPerDegree.append(float(len(visitedMovies)) / totalMovies)
    return fractionsPerDegree
| mit |
larsmans/scikit-learn | benchmarks/bench_plot_incremental_pca.py | 374 | 6430 | """
========================
IncrementalPCA benchmark
========================
Benchmarks for IncrementalPCA
"""
import numpy as np
import gc
from time import time
from collections import defaultdict
import matplotlib.pyplot as plt
from sklearn.datasets import fetch_lfw_people
from sklearn.decomposition import IncrementalPCA, RandomizedPCA, PCA
def plot_results(X, y, label):
    """Add one labelled line (circle markers) to the current pyplot axes."""
    plt.plot(X, y, label=label, marker='o')
def benchmark(estimator, data):
    """Fit ``estimator`` on ``data`` and measure cost and reconstruction error.

    Returns a dict with 'time' (seconds spent in fit) and 'error' (mean
    absolute difference between ``data`` and its transform/inverse-transform
    round trip).
    """
    gc.collect()
    print("Benching %s" % estimator)
    start = time()
    estimator.fit(data)
    elapsed = time() - start
    transformed = estimator.transform(data)
    reconstructed = estimator.inverse_transform(transformed)
    mean_abs_err = np.mean(np.abs(data - reconstructed))
    return {'time': elapsed, 'error': mean_abs_err}
def plot_feature_times(all_times, batch_size, all_components, data):
    """Plot runtime vs. n_components for PCA, IncrementalPCA, RandomizedPCA."""
    plt.figure()
    plot_results(all_components, all_times['pca'], label="PCA")
    plot_results(all_components, all_times['ipca'],
                 label="IncrementalPCA, bsize=%i" % batch_size)
    plot_results(all_components, all_times['rpca'], label="RandomizedPCA")
    plt.legend(loc="upper left")
    plt.suptitle("Algorithm runtime vs. n_components\n \
                 LFW, size %i x %i" % data.shape)
    plt.xlabel("Number of components (out of max %i)" % data.shape[1])
    plt.ylabel("Time (seconds)")


def plot_feature_errors(all_errors, batch_size, all_components, data):
    """Plot reconstruction error vs. n_components for the three PCA variants."""
    plt.figure()
    plot_results(all_components, all_errors['pca'], label="PCA")
    plot_results(all_components, all_errors['ipca'],
                 label="IncrementalPCA, bsize=%i" % batch_size)
    plot_results(all_components, all_errors['rpca'], label="RandomizedPCA")
    plt.legend(loc="lower left")
    plt.suptitle("Algorithm error vs. n_components\n"
                 "LFW, size %i x %i" % data.shape)
    plt.xlabel("Number of components (out of max %i)" % data.shape[1])
    plt.ylabel("Mean absolute error")
def plot_batch_times(all_times, n_features, all_batch_sizes, data):
    """Plot runtime vs. IncrementalPCA batch size, with PCA/RandomizedPCA
    as flat baselines."""
    plt.figure()
    plot_results(all_batch_sizes, all_times['pca'], label="PCA")
    plot_results(all_batch_sizes, all_times['rpca'], label="RandomizedPCA")
    plot_results(all_batch_sizes, all_times['ipca'], label="IncrementalPCA")
    plt.legend(loc="lower left")
    plt.suptitle("Algorithm runtime vs. batch_size for n_components %i\n \
                 LFW, size %i x %i" % (
                 n_features, data.shape[0], data.shape[1]))
    plt.xlabel("Batch size")
    plt.ylabel("Time (seconds)")


def plot_batch_errors(all_errors, n_features, all_batch_sizes, data):
    """Plot reconstruction error vs. IncrementalPCA batch size.

    RandomizedPCA is deliberately omitted here (its error is far larger
    and would flatten the other curves -- see the caller's note).
    """
    plt.figure()
    plot_results(all_batch_sizes, all_errors['pca'], label="PCA")
    plot_results(all_batch_sizes, all_errors['ipca'], label="IncrementalPCA")
    plt.legend(loc="lower left")
    plt.suptitle("Algorithm error vs. batch_size for n_components %i\n \
                 LFW, size %i x %i" % (
                 n_features, data.shape[0], data.shape[1]))
    plt.xlabel("Batch size")
    plt.ylabel("Mean absolute error")
def fixed_batch_size_comparison(data):
    """Benchmark the three PCA variants over n_components at a fixed
    IncrementalPCA batch size, then plot runtimes and errors."""
    # Five component counts from ~10% of the features up to all of them.
    all_features = [i.astype(int) for i in np.linspace(data.shape[1] // 10,
                                                       data.shape[1], num=5)]
    batch_size = 1000
    # Compare runtimes and error for fixed batch size
    all_times = defaultdict(list)
    all_errors = defaultdict(list)
    for n_components in all_features:
        pca = PCA(n_components=n_components)
        rpca = RandomizedPCA(n_components=n_components, random_state=1999)
        ipca = IncrementalPCA(n_components=n_components, batch_size=batch_size)
        results_dict = {k: benchmark(est, data) for k, est in [('pca', pca),
                                                               ('ipca', ipca),
                                                               ('rpca', rpca)]}
        for k in sorted(results_dict.keys()):
            all_times[k].append(results_dict[k]['time'])
            all_errors[k].append(results_dict[k]['error'])
    plot_feature_times(all_times, batch_size, all_features, data)
    plot_feature_errors(all_errors, batch_size, all_features, data)
def variable_batch_size_comparison(data):
    """Benchmark IncrementalPCA over batch sizes, for several component
    counts, against flat PCA/RandomizedPCA baselines."""
    batch_sizes = [i.astype(int) for i in np.linspace(data.shape[0] // 10,
                                                      data.shape[0], num=10)]
    for n_components in [i.astype(int) for i in
                         np.linspace(data.shape[1] // 10,
                                     data.shape[1], num=4)]:
        all_times = defaultdict(list)
        all_errors = defaultdict(list)
        pca = PCA(n_components=n_components)
        rpca = RandomizedPCA(n_components=n_components, random_state=1999)
        results_dict = {k: benchmark(est, data) for k, est in [('pca', pca),
                                                               ('rpca', rpca)]}
        # Create flat baselines to compare the variation over batch size
        all_times['pca'].extend([results_dict['pca']['time']] *
                                len(batch_sizes))
        all_errors['pca'].extend([results_dict['pca']['error']] *
                                 len(batch_sizes))
        all_times['rpca'].extend([results_dict['rpca']['time']] *
                                 len(batch_sizes))
        all_errors['rpca'].extend([results_dict['rpca']['error']] *
                                  len(batch_sizes))
        for batch_size in batch_sizes:
            ipca = IncrementalPCA(n_components=n_components,
                                  batch_size=batch_size)
            results_dict = {k: benchmark(est, data) for k, est in [('ipca',
                                                                   ipca)]}
            all_times['ipca'].append(results_dict['ipca']['time'])
            all_errors['ipca'].append(results_dict['ipca']['error'])
        plot_batch_times(all_times, n_components, batch_sizes, data)
        # RandomizedPCA error is always worse (approx 100x) than other PCA
        # tests
        plot_batch_errors(all_errors, n_components, batch_sizes, data)
# Load the LFW faces dataset (downscaled), standardize it, and run both
# benchmark suites.
faces = fetch_lfw_people(resize=.2, min_faces_per_person=5)
# limit dataset to 5000 people (don't care who they are!)
X = faces.data[:5000]
n_samples, h, w = faces.images.shape
n_features = X.shape[1]
# Center and scale each feature before benchmarking.
X -= X.mean(axis=0)
X /= X.std(axis=0)
fixed_batch_size_comparison(X)
variable_batch_size_comparison(X)
plt.show()
| bsd-3-clause |
mbayon/TFG-MachineLearning | venv/lib/python3.6/site-packages/pandas/io/date_converters.py | 10 | 1827 | """This module is designed for community supported date conversion functions"""
from pandas.compat import range, map
import numpy as np
import pandas._libs.lib as lib
def parse_date_time(date_col, time_col):
    """Combine separate date and time columns into a single datetime column.

    Columns are cast to object dtype as required by the C parsing helpers.
    """
    date_col = _maybe_cast(date_col)
    time_col = _maybe_cast(time_col)
    return lib.try_parse_date_and_time(date_col, time_col)


def parse_date_fields(year_col, month_col, day_col):
    """Combine year/month/day columns into a single date column."""
    year_col = _maybe_cast(year_col)
    month_col = _maybe_cast(month_col)
    day_col = _maybe_cast(day_col)
    return lib.try_parse_year_month_day(year_col, month_col, day_col)


def parse_all_fields(year_col, month_col, day_col, hour_col, minute_col,
                     second_col):
    """Combine year/month/day/hour/minute/second columns into datetimes."""
    year_col = _maybe_cast(year_col)
    month_col = _maybe_cast(month_col)
    day_col = _maybe_cast(day_col)
    hour_col = _maybe_cast(hour_col)
    minute_col = _maybe_cast(minute_col)
    second_col = _maybe_cast(second_col)
    return lib.try_parse_datetime_components(year_col, month_col, day_col,
                                             hour_col, minute_col, second_col)
def generic_parser(parse_func, *cols):
N = _check_columns(cols)
results = np.empty(N, dtype=object)
for i in range(N):
args = [c[i] for c in cols]
results[i] = parse_func(*args)
return results
def _maybe_cast(arr):
if not arr.dtype.type == np.object_:
arr = np.array(arr, dtype=object)
return arr
def _check_columns(cols):
if not len(cols):
raise AssertionError("There must be at least 1 column")
head, tail = cols[0], cols[1:]
N = len(head)
for i, n in enumerate(map(len, tail)):
if n != N:
raise AssertionError('All columns must have the same length: {0}; '
'column {1} has length {2}'.format(N, i, n))
return N
| mit |
jbloomlab/phydms | tests/test_omegabysite.py | 1 | 4286 | """Tests ``--omegabysite`` option to ``phydms`` on simulate data.
Written by Jesse Bloom.
"""
import os
import unittest
import subprocess
import random
import pandas
import phydmslib.file_io
import phydmslib.models
import phydmslib.simulate
from phydmslib.constants import N_NT
import pyvolve
import numpy
class test_OmegaBySiteExpCM(unittest.TestCase):
    """Tests ``--omegabysite`` to ``phydms`` for `ExpCM`."""

    def setUp(self):
        """Set up models."""
        # Paths to the small NP test fixtures shipped alongside this test.
        self.tree = os.path.abspath(
            os.path.join(os.path.dirname(__file__),
                         "./NP_data/NP_tree_short.newick"))
        self.alignment = os.path.abspath(
            os.path.join(os.path.dirname(__file__),
                         "./NP_data/NP_alignment_short.fasta"))
        self.prefs = os.path.abspath(
            os.path.join(os.path.dirname(__file__),
                         "./NP_data/NP_prefs_short.csv"))
        # Number of codon sites in the alignment (nucleotides / 3).
        self.nsites = (
            len(phydmslib.file_io.ReadCodonAlignment(self.alignment,
                                                     True)[0][1]) // 3)
        self.initializeModel()
        self.outdir = os.path.abspath(
            os.path.join(os.path.dirname(__file__),
                         "./omegabysite_test_results/"))
        if not os.path.isdir(self.outdir):
            os.mkdir(self.outdir)

    def initializeModel(self):
        """Init models."""
        prefs = phydmslib.file_io.readPrefs(self.prefs, minpref=0.005)
        prefs = [prefs[r] for r in sorted(prefs.keys())]
        # Using beta < 1 partially flattens prefs in simulation
        # Use mu < 1 to get branch lengths about right
        self.model = phydmslib.models.ExpCM(prefs, beta=0.5, mu=0.5)
        self.modelname = "ExpCM"
        self.modelarg = "ExpCM_{0}".format(self.prefs)

    def test_OnSimulatedData(self):
        """Test on Simulated Data."""
        # Pick 5 sites to simulate under elevated diversifying pressure.
        random.seed(1)
        divpressuresites = random.sample(range(self.nsites), 5)
        partitions = phydmslib.simulate.pyvolvePartitions(
            self.model, (200.0, divpressuresites))
        evolver = pyvolve.Evolver(
            partitions=partitions, tree=pyvolve.read_tree(file=self.tree))
        simulateprefix = os.path.join(self.outdir, self.modelname)
        simulatedalignment = simulateprefix + "_simulatedalignment.fasta"
        info = simulateprefix + "_temp_info.txt"
        rates = simulateprefix + "_temp_ratefile.txt"
        evolver(seqfile=simulatedalignment, infofile=info, ratefile=rates)
        # Run phydms on the simulated alignment with per-site omega fitting.
        subprocess.check_call(["phydms", simulatedalignment, self.tree,
                               self.modelarg, simulateprefix, "--omegabysite",
                               "--brlen", "scale"])
        omegabysitefile = simulateprefix + "_omegabysite.txt"
        omegas = pandas.read_csv(omegabysitefile, sep="\t", comment="#")
        # All sites simulated under pressure should be inferred with high
        # omega and significant P-values.
        divpressureomegas = omegas[omegas["site"].isin(divpressuresites)]
        self.assertTrue(len(divpressureomegas) == len(divpressuresites))
        self.assertTrue(
            (divpressureomegas["omega"].values > 2).all(),
            "Not all divpressure sites have omega > 2:\n{0}"
            .format(divpressureomegas))
        self.assertTrue(
            (divpressureomegas["P"].values < 0.08).all(),
            "Not all divpressure sites have P < 0.08:\n{0}"
            .format(divpressureomegas))
        # At most one false positive among the non-pressure sites.
        nspurious = len(
            omegas[
                (omegas["omega"] > 2)
                & (omegas["P"] < 0.05)
                & (~omegas["site"].isin(divpressuresites))])
        self.assertTrue(nspurious <= 1, "{0} spurious sites".format(nspurious))
        # Clean up a scratch file pyvolve may leave in the working directory.
        for f in ["custom_matrix_frequencies.txt"]:
            if os.path.isfile(f):
                os.remove(f)
class test_OmegaBySiteYNGKP(test_OmegaBySiteExpCM):
    """Tests ``--omegabysite`` to ``phydms`` for `YNGKP_M0`.

    Inherits the full test from the ExpCM case; only the model differs.
    """

    def initializeModel(self):
        """Init model."""
        # Uniform nucleotide frequencies at each of the 3 codon positions.
        e_pw = numpy.full((3, N_NT), 1.0 / N_NT, dtype="float")
        # mu > 1 leads to longer branches in simulation
        self.model = phydmslib.models.YNGKP_M0(e_pw, self.nsites, mu=4.0)
        self.modelname = "YNGKP"
        self.modelarg = "YNGKP_M0"


if __name__ == "__main__":
    runner = unittest.TextTestRunner()
    unittest.main(testRunner=runner)
| gpl-3.0 |
rubikloud/scikit-learn | examples/applications/plot_model_complexity_influence.py | 323 | 6372 | """
==========================
Model Complexity Influence
==========================
Demonstrate how model complexity influences both prediction accuracy and
computational performance.
The dataset is the Boston Housing dataset (resp. 20 Newsgroups) for
regression (resp. classification).
For each class of models we make the model complexity vary through the choice
of relevant model parameters and measure the influence on both computational
performance (latency) and predictive power (MSE or Hamming Loss).
"""
print(__doc__)
# Author: Eustache Diemert <eustache@diemert.fr>
# License: BSD 3 clause
import time
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.axes_grid1.parasite_axes import host_subplot
from mpl_toolkits.axisartist.axislines import Axes
from scipy.sparse.csr import csr_matrix
from sklearn import datasets
from sklearn.utils import shuffle
from sklearn.metrics import mean_squared_error
from sklearn.svm.classes import NuSVR
from sklearn.ensemble.gradient_boosting import GradientBoostingRegressor
from sklearn.linear_model.stochastic_gradient import SGDClassifier
from sklearn.metrics import hamming_loss
###############################################################################
# Routines
# initialize random generator
np.random.seed(0)


def generate_data(case, sparse=False):
    """Generate regression/classification data.

    'regression' loads Boston Housing; 'classification' loads the vectorized
    20 Newsgroups. Returns a dict with an 80/20 shuffled train/test split;
    ``sparse=True`` converts the feature matrices to CSR.
    """
    bunch = None
    if case == 'regression':
        bunch = datasets.load_boston()
    elif case == 'classification':
        bunch = datasets.fetch_20newsgroups_vectorized(subset='all')
    X, y = shuffle(bunch.data, bunch.target)
    offset = int(X.shape[0] * 0.8)
    X_train, y_train = X[:offset], y[:offset]
    X_test, y_test = X[offset:], y[offset:]
    if sparse:
        X_train = csr_matrix(X_train)
        X_test = csr_matrix(X_test)
    else:
        X_train = np.array(X_train)
        X_test = np.array(X_test)
    y_test = np.array(y_test)
    y_train = np.array(y_train)
    data = {'X_train': X_train, 'X_test': X_test, 'y_train': y_train,
            'y_test': y_test}
    return data
def benchmark_influence(conf):
    """
    Benchmark influence of :changing_param: on both MSE and latency.

    For each value in conf['changing_param_values'], fits a fresh estimator,
    records its complexity (via conf['complexity_computer']), its average
    prediction latency over conf['n_samples'] repeats, and its prediction
    score on the test split.
    """
    prediction_times = []
    prediction_powers = []
    complexities = []
    for param_value in conf['changing_param_values']:
        conf['tuned_params'][conf['changing_param']] = param_value
        estimator = conf['estimator'](**conf['tuned_params'])
        print("Benchmarking %s" % estimator)
        estimator.fit(conf['data']['X_train'], conf['data']['y_train'])
        conf['postfit_hook'](estimator)
        complexity = conf['complexity_computer'](estimator)
        complexities.append(complexity)
        # Average predict() latency over repeated calls.
        start_time = time.time()
        for _ in range(conf['n_samples']):
            y_pred = estimator.predict(conf['data']['X_test'])
        elapsed_time = (time.time() - start_time) / float(conf['n_samples'])
        prediction_times.append(elapsed_time)
        pred_score = conf['prediction_performance_computer'](
            conf['data']['y_test'], y_pred)
        prediction_powers.append(pred_score)
        print("Complexity: %d | %s: %.4f | Pred. Time: %fs\n" % (
            complexity, conf['prediction_performance_label'], pred_score,
            elapsed_time))
    return prediction_powers, prediction_times, complexities
def plot_influence(conf, mse_values, prediction_times, complexities):
    """
    Plot influence of model complexity on both accuracy and latency.

    Uses a twin-axis plot: prediction error on the left axis (blue) and
    latency on the right axis (red), against model complexity.
    """
    plt.figure(figsize=(12, 6))
    host = host_subplot(111, axes_class=Axes)
    plt.subplots_adjust(right=0.75)
    par1 = host.twinx()
    host.set_xlabel('Model Complexity (%s)' % conf['complexity_label'])
    y1_label = conf['prediction_performance_label']
    y2_label = "Time (s)"
    host.set_ylabel(y1_label)
    par1.set_ylabel(y2_label)
    p1, = host.plot(complexities, mse_values, 'b-', label="prediction error")
    p2, = par1.plot(complexities, prediction_times, 'r-',
                    label="latency")
    host.legend(loc='upper right')
    # Color each axis label to match its curve.
    host.axis["left"].label.set_color(p1.get_color())
    par1.axis["right"].label.set_color(p2.get_color())
    plt.title('Influence of Model Complexity - %s' % conf['estimator'].__name__)
    plt.show()
def _count_nonzero_coefficients(estimator):
a = estimator.coef_.toarray()
return np.count_nonzero(a)
###############################################################################
# main code
regression_data = generate_data('regression')
classification_data = generate_data('classification', sparse=True)
# One benchmark configuration per model family: which hyperparameter to vary,
# how to measure complexity, and how to score predictions.
configurations = [
    {'estimator': SGDClassifier,
     'tuned_params': {'penalty': 'elasticnet', 'alpha': 0.001, 'loss':
                      'modified_huber', 'fit_intercept': True},
     'changing_param': 'l1_ratio',
     'changing_param_values': [0.25, 0.5, 0.75, 0.9],
     'complexity_label': 'non_zero coefficients',
     'complexity_computer': _count_nonzero_coefficients,
     'prediction_performance_computer': hamming_loss,
     'prediction_performance_label': 'Hamming Loss (Misclassification Ratio)',
     'postfit_hook': lambda x: x.sparsify(),
     'data': classification_data,
     'n_samples': 30},
    {'estimator': NuSVR,
     'tuned_params': {'C': 1e3, 'gamma': 2 ** -15},
     'changing_param': 'nu',
     'changing_param_values': [0.1, 0.25, 0.5, 0.75, 0.9],
     'complexity_label': 'n_support_vectors',
     'complexity_computer': lambda x: len(x.support_vectors_),
     'data': regression_data,
     'postfit_hook': lambda x: x,
     'prediction_performance_computer': mean_squared_error,
     'prediction_performance_label': 'MSE',
     'n_samples': 30},
    {'estimator': GradientBoostingRegressor,
     'tuned_params': {'loss': 'ls'},
     'changing_param': 'n_estimators',
     'changing_param_values': [10, 50, 100, 200, 500],
     'complexity_label': 'n_trees',
     'complexity_computer': lambda x: x.n_estimators,
     'data': regression_data,
     'postfit_hook': lambda x: x,
     'prediction_performance_computer': mean_squared_error,
     'prediction_performance_label': 'MSE',
     'n_samples': 30},
]
# Run each configuration and plot its complexity/accuracy/latency trade-off.
for conf in configurations:
    prediction_performances, prediction_times, complexities = \
        benchmark_influence(conf)
    plot_influence(conf, prediction_performances, prediction_times,
                   complexities)
| bsd-3-clause |
liyu1990/sklearn | sklearn/gaussian_process/tests/test_gpc.py | 28 | 6061 | """Testing for Gaussian process classification """
# Author: Jan Hendrik Metzen <jhm@informatik.uni-bremen.de>
# Licence: BSD 3 clause
import numpy as np
from scipy.optimize import approx_fprime
from sklearn.gaussian_process import GaussianProcessClassifier
from sklearn.gaussian_process.kernels import RBF, ConstantKernel as C
from sklearn.utils.testing import (assert_true, assert_greater, assert_equal,
assert_almost_equal, assert_array_equal)
def f(x):
    """Latent function: its sign defines the binary labels and its
    magnitude bands define the multi-class labels below."""
    return np.sin(x)


# Shared fixtures: 30 points on [0, 10], binary labels from sign(f),
# and three-class labels from thresholding f at +/-0.35.
X = np.atleast_2d(np.linspace(0, 10, 30)).T
X2 = np.atleast_2d([2., 4., 5.5, 6.5, 7.5]).T
y = np.array(f(X).ravel() > 0, dtype=int)
fX = f(X).ravel()
y_mc = np.empty(y.shape, dtype=int)  # multi-class
y_mc[fX < -0.35] = 0
y_mc[(fX >= -0.35) & (fX < 0.35)] = 1
y_mc[fX > 0.35] = 2
# Kernels under test; fixed_kernel has no free hyperparameters to optimize.
fixed_kernel = RBF(length_scale=1.0, length_scale_bounds="fixed")
kernels = [RBF(length_scale=0.1), fixed_kernel,
           RBF(length_scale=1.0, length_scale_bounds=(1e-3, 1e3)),
           C(1.0, (1e-2, 1e2))
           * RBF(length_scale=1.0, length_scale_bounds=(1e-3, 1e3))]
def test_predict_consistent():
""" Check binary predict decision has also predicted probability above 0.5.
"""
for kernel in kernels:
gpc = GaussianProcessClassifier(kernel=kernel).fit(X, y)
assert_array_equal(gpc.predict(X),
gpc.predict_proba(X)[:, 1] >= 0.5)
def test_lml_improving():
""" Test that hyperparameter-tuning improves log-marginal likelihood. """
for kernel in kernels:
if kernel == fixed_kernel: continue
gpc = GaussianProcessClassifier(kernel=kernel).fit(X, y)
assert_greater(gpc.log_marginal_likelihood(gpc.kernel_.theta),
gpc.log_marginal_likelihood(kernel.theta))
def test_lml_precomputed():
    """ Test that lml of optimized kernel is stored correctly. """
    for k in kernels:
        clf = GaussianProcessClassifier(kernel=k).fit(X, y)
        # Calling with the optimized theta must match the cached value that
        # is returned when no argument is given.
        lml_explicit = clf.log_marginal_likelihood(clf.kernel_.theta)
        lml_cached = clf.log_marginal_likelihood()
        assert_almost_equal(lml_explicit, lml_cached, 7)
def test_converged_to_local_maximum():
    """ Test that we are in local maximum after hyperparameter-optimization."""
    for k in kernels:
        if k == fixed_kernel:
            continue
        clf = GaussianProcessClassifier(kernel=k).fit(X, y)
        _, gradient = clf.log_marginal_likelihood(clf.kernel_.theta, True)
        # At a local maximum each gradient component is (numerically) zero,
        # unless the corresponding hyperparameter sits on a bound.
        theta = clf.kernel_.theta
        lower = clf.kernel_.bounds[:, 0]
        upper = clf.kernel_.bounds[:, 1]
        at_bound = (theta == lower) | (theta == upper)
        assert_true(np.all((np.abs(gradient) < 1e-4) | at_bound))
def test_lml_gradient():
    """ Compare analytic and numeric gradient of log marginal likelihood. """
    for k in kernels:
        clf = GaussianProcessClassifier(kernel=k).fit(X, y)
        lml, analytic_gradient = clf.log_marginal_likelihood(k.theta, True)

        # Finite-difference approximation of the gradient at k.theta.
        def lml_value(theta):
            return clf.log_marginal_likelihood(theta, False)

        numeric_gradient = approx_fprime(k.theta, lml_value, 1e-10)
        assert_almost_equal(analytic_gradient, numeric_gradient, 3)
def test_random_starts():
    """
    Test that an increasing number of random-starts of GP fitting only
    increases the log marginal likelihood of the chosen theta.
    """
    n_samples, n_features = 25, 3
    np.random.seed(0)
    rng = np.random.RandomState(0)
    X_train = rng.randn(n_samples, n_features) * 2 - 1
    y_train = (np.sin(X_train).sum(axis=1)
               + np.sin(3 * X_train).sum(axis=1)) > 0
    kernel = C(1.0, (1e-2, 1e2)) \
        * RBF(length_scale=[1e-3] * n_features,
              length_scale_bounds=[(1e-4, 1e+2)] * n_features)
    best_lml = -np.inf
    for n_restarts in range(9):
        gp = GaussianProcessClassifier(
            kernel=kernel, n_restarts_optimizer=n_restarts,
            random_state=0).fit(X_train, y_train)
        current_lml = gp.log_marginal_likelihood(gp.kernel_.theta)
        # Allow a tiny numerical tolerance in the monotonicity check.
        assert_greater(current_lml, best_lml - np.finfo(np.float32).eps)
        best_lml = current_lml
def test_custom_optimizer():
    """ Test that GPC can use externally defined optimizers. """
    # Dummy optimizer: evaluate 1000 random hyperparameter vectors inside the
    # (clipped) bounds and keep the best one found.
    def optimizer(obj_func, initial_theta, bounds):
        rng = np.random.RandomState(0)
        best_theta = initial_theta
        best_value = obj_func(initial_theta, eval_gradient=False)
        for _ in range(1000):
            candidate = np.atleast_1d(
                rng.uniform(np.maximum(-2, bounds[:, 0]),
                            np.minimum(1, bounds[:, 1])))
            value = obj_func(candidate, eval_gradient=False)
            if value < best_value:
                best_theta, best_value = candidate, value
        return best_theta, best_value

    for k in kernels:
        if k == fixed_kernel:
            continue
        clf = GaussianProcessClassifier(kernel=k, optimizer=optimizer)
        clf.fit(X, y_mc)
        # Checks that optimizer improved marginal likelihood
        assert_greater(clf.log_marginal_likelihood(clf.kernel_.theta),
                       clf.log_marginal_likelihood(k.theta))
def test_multi_class():
    """ Test GPC for multi-class classification problems. """
    for k in kernels:
        clf = GaussianProcessClassifier(kernel=k)
        clf.fit(X, y_mc)
        probabilities = clf.predict_proba(X2)
        # Class probabilities must sum to one for every sample.
        assert_almost_equal(probabilities.sum(1), 1)
        # The predicted class is the one with the highest probability.
        assert_array_equal(np.argmax(probabilities, 1), clf.predict(X2))
def test_multi_class_n_jobs():
    """ Test that multi-class GPC produces identical results with n_jobs>1. """
    for k in kernels:
        serial = GaussianProcessClassifier(kernel=k)
        serial.fit(X, y_mc)
        parallel = GaussianProcessClassifier(kernel=k, n_jobs=2)
        parallel.fit(X, y_mc)
        # Parallel one-vs-rest estimation must not change the probabilities.
        assert_almost_equal(serial.predict_proba(X2),
                            parallel.predict_proba(X2))
| bsd-3-clause |
lbdreyer/cartopy | docs/source/conf.py | 1 | 11935 | # (C) British Crown Copyright 2011 - 2013, Met Office
#
# This file is part of cartopy.
#
# cartopy is free software: you can redistribute it and/or modify it under
# the terms of the GNU Lesser General Public License as published by the
# Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# cartopy is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with cartopy. If not, see <http://www.gnu.org/licenses/>.
# -*- coding: utf-8 -*-
#
# cartopy documentation build configuration file, created by
# sphinx-quickstart on Thu Aug 16 09:41:05 2012.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = [
    'cartopy.sphinxext.summarise_package',
    'cartopy.sphinxext.gallery',
    'sphinx.ext.autodoc',
    'sphinx.ext.doctest',
    'sphinx.ext.intersphinx',
    'sphinx.ext.coverage',
    'sphinx.ext.viewcode',
    'sphinx.ext.extlinks',
    'matplotlib.sphinxext.plot_directive'
]
import matplotlib
# Use the non-interactive Agg backend so the plot directive can render the
# example figures on headless documentation build machines.
matplotlib.use('Agg')
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'cartopy'
copyright = u'2011 - 2014 British Crown Copyright' # the template will need updating if this is changed
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
import cartopy
version = cartopy.__version__
# The full version, including alpha/beta/rc tags.
release = cartopy.__version__
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = []
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages.  See the documentation for
# a list of builtin themes.
html_theme = 'sphinxdoc'
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents.  If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar.  Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs.  This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
html_show_sphinx = False
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it.  The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'cartopydoc'
# Extra relation links rendered by the custom HTML template.
html_context = {'rellinks': [('genindex', 'General Index', 'I', 'index'),
                             ('cartopy_outline', 'Module outline', 'O', 'outline')]}
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {
    # The paper size ('letterpaper' or 'a4paper').
    #'papersize': 'letterpaper',
    # The font size ('10pt', '11pt' or '12pt').
    #'pointsize': '10pt',
    # Additional stuff for the LaTeX preamble.
    #'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
    ('index', 'cartopy.tex', u'Cartopy Introduction',
     u'Philip Elson, Richard Hattersley', 'manual', False),
    ('introductory_examples/index', 'cartopy_examples.tex', u'Cartopy examples',
     u'Philip Elson, Richard Hattersley', 'manual', True)
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
    ('index', 'cartopy', u'cartopy Documentation',
     [u'Philip Elson, Richard Hattersley'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output ------------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
#  dir menu entry, description, category)
texinfo_documents = [
    ('index', 'cartopy', u'cartopy Documentation',
     u'Philip Elson, Richard Hattersley', 'cartopy', 'One line description of project.',
     'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# -- Options for Epub output ---------------------------------------------------
# Bibliographic Dublin Core info.
epub_title = u'cartopy'
epub_author = u'Philip Elson, Richard Hattersley'
epub_publisher = u'Philip Elson, Richard Hattersley'
epub_copyright = u'2012, Philip Elson, Richard Hattersley'
# The language of the text. It defaults to the language option
# or en if the language is not set.
#epub_language = ''
# The scheme of the identifier. Typical schemes are ISBN or URL.
#epub_scheme = ''
# The unique identifier of the text. This can be a ISBN number
# or the project homepage.
#epub_identifier = ''
# A unique identification for the text.
#epub_uid = ''
# A tuple containing the cover image and cover page html template filenames.
#epub_cover = ()
# HTML files that should be inserted before the pages created by sphinx.
# The format is a list of tuples containing the path and title.
#epub_pre_files = []
# HTML files shat should be inserted after the pages created by sphinx.
# The format is a list of tuples containing the path and title.
#epub_post_files = []
# A list of files that should not be packed into the epub file.
#epub_exclude_files = []
# The depth of the table of contents in toc.ncx.
#epub_tocdepth = 3
# Allow duplicate toc entries.
#epub_tocdup = True
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {'python': ('http://docs.python.org/2', None),
                       'matplotlib': ('http://matplotlib.org', None)}
############ extlinks extension ############
extlinks = {'issues': ('https://github.com/SciTools/cartopy/issues?milestone=&state=open&labels=%s',
                       'issues labeled with '),
            'issue': ('https://github.com/SciTools/cartopy/issues/%s', 'Issue #'),
            'pull': ('https://github.com/SciTools/cartopy/pull/%s', 'PR #'),
            }
############ package summary extension ###########
summarise_package_names = ['cartopy']
summarise_package_exclude_directories = [['tests', 'examples', 'sphinxext']]
summarise_package_fnames = ['cartopy_outline.rst']
############ gallery/examples extension ###########
#gallery_allowed_tags = None
#gallery_tag_order = None
gallery_name = 'gallery.rst'
examples_package_name = 'cartopy.examples'
############ plot directive ##############
plot_html_show_formats = False
#plot_rcparams = {'figure.autolayout': True}
# Tighten the subplot margins of figures generated by the plot directive.
plot_rcparams = {'figure.subplot.bottom': 0.04,
                 'figure.subplot.top': 0.96,
                 'figure.subplot.left': 0.04,
                 'figure.subplot.right': 0.96}
plot_formats = (('thumb.png', 20),
                'png',
                'pdf'
                )
############ autodoc config ##############
autoclass_content = 'both'
| lgpl-3.0 |
Juanlu001/aquagpusph | examples/2D/spheric_testcase9_tld/cMake/plot_m.py | 1 | 6867 | #******************************************************************************
# *
# * ** * * * * *
# * * * * * * * * * *
# ***** * * * * ***** ** *** * * ** *** *** *
# * * * * * * * * * * * * * * * * * * * *
# * * * * * * * * * * * * * * * * * * * *
# * * ** * ** * * *** *** *** ** *** * * *
# * * * *
# ** * * *
# *
#******************************************************************************
# *
# This file is part of AQUAgpusph, a free CFD program based on SPH. *
# Copyright (C) 2012 Jose Luis Cercos Pita <jl.cercos@upm.es> *
# *
# AQUAgpusph is free software: you can redistribute it and/or modify *
# it under the terms of the GNU General Public License as published by *
# the Free Software Foundation, either version 3 of the License, or *
# (at your option) any later version. *
# *
# AQUAgpusph is distributed in the hope that it will be useful, *
# but WITHOUT ANY WARRANTY; without even the implied warranty of *
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
# GNU General Public License for more details. *
# *
# You should have received a copy of the GNU General Public License *
# along with AQUAgpusph. If not, see <http://www.gnu.org/licenses/>. *
# *
#******************************************************************************
import sys
import os
from os import path
try:
from PyQt4 import QtGui
except:
try:
from PySide import QtGui
except:
raise ImportError("PyQt4 or PySide is required to use this tool")
try:
from matplotlib.figure import Figure
from matplotlib.backends.backend_qt4agg import FigureCanvasQTAgg as FigureCanvas
except:
raise ImportError("matplotlib is required to use this tool")
class FigureController(FigureCanvas):
    """Matplotlib figure widget controller.

    Embeds a matplotlib figure in a Qt canvas and periodically re-reads
    'Motion.dat' (written by the running simulation) to refresh the roll
    angle and fluid moment curves.
    """
    def __init__(self):
        """Constructor"""
        # Create the figure in the canvas
        self.fig = Figure()
        self.ax = self.fig.add_subplot(111)
        FigureCanvas.__init__(self, self.fig)
        # generates first "empty" plot
        t = [0.0]
        exp_a = [0.0]
        a = [0.0]
        m = [0.0]
        self.exp_line, = self.ax.plot(t,
                                      exp_a,
                                      label=r'$\theta_{Exp}$',
                                      color="black",
                                      linestyle="--",
                                      linewidth=1.0)
        self.line, = self.ax.plot(t,
                                  a,
                                  label=r'$\theta_{SPH}$',
                                  color="black",
                                  linestyle="-",
                                  linewidth=1.0)
        # Set some options
        self.ax.grid()
        self.ax.legend(loc='upper left')
        self.ax.set_xlim(0, 0.1)
        self.ax.set_ylim(-0.1, 0.1)
        self.ax.set_autoscale_on(False)
        self.ax.set_xlabel(r"$t \, [\mathrm{s}]$", fontsize=21)
        self.ax.set_ylabel(r"$\theta \, [\mathrm{deg}]$", fontsize=21)
        # Create a second moment plot (right-hand axis sharing the x axis)
        self.ax2 = self.ax.twinx()
        self.mline, = self.ax2.plot(t,
                                    m,
                                    label=r'$M_{SPH}$',
                                    color="blue",
                                    linewidth=1.0)
        # Set some options
        self.ax2.set_xlim(0, 0.1)
        self.ax2.set_ylim(-0.1, 0.1)
        self.ax2.set_autoscale_on(False)
        self.ax2.set_ylabel(r"$M_{fluid} \, [\mathrm{N} \cdot \mathrm{m}]$",
                            fontsize=21,
                            color="blue")
        for tl in self.ax2.get_yticklabels():
            tl.set_color("blue")
        # force the figure redraw
        self.fig.canvas.draw()
        # call the update method (to speed-up visualization)
        self.timerEvent(None)
        # start timer, trigger event every 1000 millisecs (=1sec)
        self.timer = self.startTimer(1000)

    def readFile(self, filepath):
        """Read and extract data from a tab separated data file.

        :param filepath: File to read (absolute, or relative to the folder
            containing this script).
        :return: List of data columns (the transposed rows), as lists of
            floats. Rows that fail to parse are silently skipped.
        """
        abspath = filepath
        if not path.isabs(filepath):
            abspath = path.join(path.dirname(path.abspath(__file__)), filepath)
        # Read the file by lines. A context manager guarantees the handle is
        # closed even if reading fails (the original leaked it in that case).
        with open(abspath, "r") as f:
            lines = f.readlines()
        data = []
        for l in lines[:-1]:  # Skip the last line, which may be unready
            l = l.strip()
            fields = l.split('\t')
            try:
                # Use a list comprehension instead of map(): under Python 3
                # map() is lazy, so the float conversion error would escape
                # this try/except and a map object (not a list) was stored.
                data.append([float(x) for x in fields])
            except ValueError:
                # Non-numeric row (e.g. a header); ignore it.
                continue
        # Transpose the data (list of rows -> list of columns)
        return [list(col) for col in zip(*data)]

    def timerEvent(self, evt):
        """Custom timerEvent code, called at timer event receive.

        Re-reads 'Motion.dat', updates the three curves and rescales both
        axes to fit the data before redrawing.
        """
        # Read and plot the new data. Columns: 0 = time, 2 = experimental
        # angle, 3 = simulated angle, 5 = fluid moment (as produced by the
        # simulation output; see the data file writer).
        data = self.readFile('Motion.dat')
        t = data[0]
        exp_a = data[2]
        a = data[3]
        m = data[5]
        self.exp_line.set_data(t, exp_a)
        self.line.set_data(t, a)
        self.mline.set_data(t, m)
        # Symmetric y-limits around zero, 10% margin beyond the largest
        # absolute value seen so far.
        self.ax.set_xlim(0, t[-1])
        ymax_exp = max(abs(max(exp_a)), abs(min(exp_a)))
        ymax_sph = max(abs(max(a)), abs(min(a)))
        ymax = max((ymax_exp, ymax_sph))
        self.ax.set_ylim(-1.1 * ymax, 1.1 * ymax)
        self.ax2.set_xlim(0, t[-1])
        ymax = max(abs(max(m)), abs(min(m)))
        self.ax2.set_ylim(-1.1 * ymax, 1.1 * ymax)
        for tl in self.ax2.get_yticklabels():
            tl.set_color("blue")
        # Redraw
        self.fig.canvas.draw()
if __name__ == '__main__':
    # Launch the Qt application, show the live plot widget and run the event
    # loop until the window is closed.
    app = QtGui.QApplication(sys.argv)
    widget = FigureController()
    widget.setWindowTitle("Roll angle")
    widget.show()
    sys.exit(app.exec_())
| gpl-3.0 |
astocko/statsmodels | statsmodels/tsa/statespace/sarimax.py | 6 | 80033 | """
SARIMAX Model
Author: Chad Fulton
License: Simplified-BSD
"""
from __future__ import division, absolute_import, print_function
from warnings import warn
import numpy as np
from .mlemodel import MLEModel, MLEResults
from .tools import (
companion_matrix, diff, is_invertible, constrain_stationary_univariate,
unconstrain_stationary_univariate
)
from scipy.linalg import solve_discrete_lyapunov
from statsmodels.tools.data import _is_using_pandas
from statsmodels.tsa.tsatools import lagmat
from statsmodels.tools.decorators import cache_readonly
class SARIMAX(MLEModel):
r"""
Seasonal AutoRegressive Integrated Moving Average with eXogenous regressors
model
Parameters
----------
endog : array_like
The observed time-series process :math:`y`
exog : array_like, optional
Array of exogenous regressors, shaped nobs x k.
order : iterable or iterable of iterables, optional
The (p,d,q) order of the model for the number of AR parameters,
differences, and MA parameters. `d` must be an integer
indicating the integration order of the process, while
`p` and `q` may either be an integers indicating the AR and MA
orders (so that all lags up to those orders are included) or else
iterables giving specific AR and / or MA lags to include. Default is
an AR(1) model: (1,0,0).
seasonal_order : iterable, optional
The (P,D,Q,s) order of the seasonal component of the model for the
AR parameters, differences, MA parameters, and periodicity.
`d` must be an integer indicating the integration order of the process,
while `p` and `q` may either be an integers indicating the AR and MA
orders (so that all lags up to those orders are included) or else
iterables giving specific AR and / or MA lags to include. `s` is an
integer giving the periodicity (number of periods in season), often it
is 4 for quarterly data or 12 for monthly data. Default is no seasonal
effect.
trend : str{'n','c','t','ct'} or iterable, optional
Parameter controlling the deterministic trend polynomial :math:`A(t)`.
Can be specified as a string where 'c' indicates a constant (i.e. a
degree zero component of the trend polynomial), 't' indicates a
linear trend with time, and 'ct' is both. Can also be specified as an
iterable defining the polynomial as in `numpy.poly1d`, where
`[1,1,0,1]` would denote :math:`a + bt + ct^3`. Default is to not
include a trend component.
measurement_error : boolean, optional
Whether or not to assume the endogenous observations `endog` were
measured with error. Default is False.
time_varying_regression : boolean, optional
Used when an explanatory variables, `exog`, are provided provided
to select whether or not coefficients on the exogenous regressors are
allowed to vary over time. Default is False.
mle_regression : boolean, optional
Whether or not to use estimate the regression coefficients for the
exogenous variables as part of maximum likelihood estimation or through
the Kalman filter (i.e. recursive least squares). If
`time_varying_regression` is True, this must be set to False. Default
is True.
simple_differencing : boolean, optional
Whether or not to use partially conditional maximum likelihood
estimation. If True, differencing is performed prior to estimation,
which discards the first :math:`s D + d` initial rows but reuslts in a
smaller state-space formulation. If False, the full SARIMAX model is
put in state-space form so that all datapoints can be used in
estimation. Default is False.
enforce_stationarity : boolean, optional
Whether or not to transform the AR parameters to enforce stationarity
in the autoregressive component of the model. Default is True.
enforce_invertibility : boolean, optional
Whether or not to transform the MA parameters to enforce invertibility
in the moving average component of the model. Default is True.
hamilton_representation : boolean, optional
Whether or not to use the Hamilton representation of an ARMA process
(if True) or the Harvey representation (if False). Default is False.
**kwargs
Keyword arguments may be used to provide default values for state space
matrices or for Kalman filtering options. See `Representation`, and
`KalmanFilter` for more details.
Attributes
----------
measurement_error : boolean
Whether or not to assume the endogenous
observations `endog` were measured with error.
state_error : boolean
Whether or not the transition equation has an error component.
mle_regression : boolean
Whether or not the regression coefficients for
the exogenous variables were estimated via maximum
likelihood estimation.
state_regression : boolean
Whether or not the regression coefficients for
the exogenous variables are included as elements
of the state space and estimated via the Kalman
filter.
time_varying_regression : boolean
Whether or not coefficients on the exogenous
regressors are allowed to vary over time.
simple_differencing : boolean
Whether or not to use partially conditional maximum likelihood
estimation.
enforce_stationarity : boolean
Whether or not to transform the AR parameters
to enforce stationarity in the autoregressive
component of the model.
enforce_invertibility : boolean
Whether or not to transform the MA parameters
to enforce invertibility in the moving average
component of the model.
hamilton_representation : boolean
Whether or not to use the Hamilton representation of an ARMA process.
trend : str{'n','c','t','ct'} or iterable
Parameter controlling the deterministic
trend polynomial :math:`A(t)`. See the class
parameter documentation for more information.
polynomial_ar : array
Array containing autoregressive lag polynomial
coefficients, ordered from lowest degree to highest.
Initialized with ones, unless a coefficient is
constrained to be zero (in which case it is zero).
polynomial_ma : array
Array containing moving average lag polynomial
coefficients, ordered from lowest degree to highest.
Initialized with ones, unless a coefficient is
constrained to be zero (in which case it is zero).
polynomial_seasonal_ar : array
Array containing seasonal moving average lag
polynomial coefficients, ordered from lowest degree
to highest. Initialized with ones, unless a
coefficient is constrained to be zero (in which
case it is zero).
polynomial_seasonal_ma : array
Array containing seasonal moving average lag
polynomial coefficients, ordered from lowest degree
to highest. Initialized with ones, unless a
coefficient is constrained to be zero (in which
case it is zero).
polynomial_trend : array
Array containing trend polynomial coefficients,
ordered from lowest degree to highest. Initialized
with ones, unless a coefficient is constrained to be
zero (in which case it is zero).
k_ar : int
Highest autoregressive order in the model, zero-indexed.
k_ar_params : int
Number of autoregressive parameters to be estimated.
k_diff : int
Order of intergration.
k_ma : int
Highest moving average order in the model, zero-indexed.
k_ma_params : int
Number of moving average parameters to be estimated.
k_seasons : int
Number of periods in a season.
k_seasonal_ar : int
Highest seasonal autoregressive order in the model, zero-indexed.
k_seasonal_ar_params : int
Number of seasonal autoregressive parameters to be estimated.
k_seasonal_diff : int
Order of seasonal intergration.
k_seasonal_ma : int
Highest seasonal moving average order in the model, zero-indexed.
k_seasonal_ma_params : int
Number of seasonal moving average parameters to be estimated.
k_trend : int
Order of the trend polynomial plus one (i.e. the constant polynomial
would have `k_trend=1`).
k_exog : int
Number of exogenous regressors.
Notes
-----
The SARIMA model is specified :math:`(p, d, q) \times (P, D, Q)_s`.
.. math::
\phi_p (L) \tilde \phi_P (L^s) \Delta^d \Delta_s^D y_t = A(t) +
\theta_q (L) \tilde \theta_Q (L^s) \zeta_t
In terms of a univariate structural model, this can be represented as
.. math::
y_t & = u_t + \eta_t \\
\phi_p (L) \tilde \phi_P (L^s) \Delta^d \Delta_s^D u_t & = A(t) +
\theta_q (L) \tilde \theta_Q (L^s) \zeta_t
where :math:`\eta_t` is only applicable in the case of measurement error
(although it is also used in the case of a pure regression model, i.e. if
p=q=0).
In terms of this model, regression with SARIMA errors can be represented
easily as
.. math::
y_t & = \beta_t x_t + u_t \\
\phi_p (L) \tilde \phi_P (L^s) \Delta^d \Delta_s^D u_t & = A(t) +
\theta_q (L) \tilde \theta_Q (L^s) \zeta_t
this model is the one used when exogenous regressors are provided.
Note that the reduced form lag polynomials will be written as:
.. math::
\Phi (L) \equiv \phi_p (L) \tilde \phi_P (L^s) \\
\Theta (L) \equiv \theta_q (L) \tilde \theta_Q (L^s)
If `mle_regression` is True, regression coefficients are treated as
additional parameters to be estimated via maximum likelihood. Otherwise
they are included as part of the state with a diffuse initialization.
In this case, however, with approximate diffuse initialization, results
can be sensitive to the initial variance.
This class allows two different underlying representations of ARMA models
as state space models: that of Hamilton and that of Harvey. Both are
equivalent in the sense that they are analytical representations of the
ARMA model, but the state vectors of each have different meanings. For
this reason, maximum likelihood does not result in identical parameter
estimates and even the same set of parameters will result in different
loglikelihoods.
The Harvey representation is convenient because it allows integrating
differencing into the state vector to allow using all observations for
estimation.
In this implementation of differenced models, the Hamilton representation
is not able to accomodate differencing in the state vector, so
`simple_differencing` (which performs differencing prior to estimation so
that the first d + sD observations are lost) must be used.
Many other packages use the Hamilton representation, so that tests against
Stata and R require using it along with simple differencing (as Stata
does).
Detailed information about state space models can be found in [1]_. Some
specific references are:
- Chapter 3.4 describes ARMA and ARIMA models in state space form (using
the Harvey representation), and gives references for basic seasonal
models and models with a multiplicative form (for example the airline
model). It also shows a state space model for a full ARIMA process (this
is what is done here if `simple_differencing=False`).
- Chapter 3.6 describes estimating regression effects via the Kalman filter
(this is performed if `mle_regression` is False), regression with
time-varying coefficients, and regression with ARMA errors (recall from
above that if regression effects are present, the model estimated by this
class is regression with SARIMA errors).
- Chapter 8.4 describes the application of an ARMA model to an example
dataset. A replication of this section is available in an example
IPython notebook in the documentation.
References
----------
.. [1] Durbin, James, and Siem Jan Koopman. 2012.
Time Series Analysis by State Space Methods: Second Edition.
Oxford University Press.
"""
def __init__(self, endog, exog=None, order=(1, 0, 0),
seasonal_order=(0, 0, 0, 0), trend=None,
measurement_error=False, time_varying_regression=False,
mle_regression=True, simple_differencing=False,
enforce_stationarity=True, enforce_invertibility=True,
hamilton_representation=False, **kwargs):
# Model parameters
self.k_seasons = seasonal_order[3]
self.measurement_error = measurement_error
self.time_varying_regression = time_varying_regression
self.mle_regression = mle_regression
self.simple_differencing = simple_differencing
self.enforce_stationarity = enforce_stationarity
self.enforce_invertibility = enforce_invertibility
self.hamilton_representation = hamilton_representation
# Save given orders
self.order = order
self.seasonal_order = seasonal_order
# Enforce non-MLE coefficients if time varying coefficients is
# specified
if self.time_varying_regression and self.mle_regression:
raise ValueError('Models with time-varying regression coefficients'
' must integrate the coefficients as part of the'
' state vector, so that `mle_regression` must'
' be set to False.')
# Lag polynomials
# Assume that they are given from lowest degree to highest, that all
# degrees except for the constant are included, and that they are
# boolean vectors (0 for not included, 1 for included).
if isinstance(order[0], int):
self.polynomial_ar = np.r_[1., np.ones(order[0])]
else:
self.polynomial_ar = np.r_[1., order[0]]
if isinstance(order[2], int):
self.polynomial_ma = np.r_[1., np.ones(order[2])]
else:
self.polynomial_ma = np.r_[1., order[2]]
# Assume that they are given from lowest degree to highest, that the
# degrees correspond to (1*s, 2*s, ..., P*s), and that they are
# boolean vectors (0 for not included, 1 for included).
if isinstance(seasonal_order[0], int):
self.polynomial_seasonal_ar = np.r_[
1., # constant
([0] * (self.k_seasons - 1) + [1]) * seasonal_order[0]
]
else:
self.polynomial_seasonal_ar = np.r_[
1., [0] * self.k_seasons * len(seasonal_order[0])
]
for i in range(len(seasonal_order[0])):
self.polynomial_seasonal_ar[(i + 1) * self.k_seasons] = (
seasonal_order[0][i]
)
if isinstance(seasonal_order[2], int):
self.polynomial_seasonal_ma = np.r_[
1., # constant
([0] * (self.k_seasons - 1) + [1]) * seasonal_order[2]
]
else:
self.polynomial_seasonal_ma = np.r_[
1., [0] * self.k_seasons * len(seasonal_order[2])
]
for i in range(len(seasonal_order[2])):
self.polynomial_seasonal_ma[(i + 1) * self.k_seasons] = (
seasonal_order[2][i]
)
# Deterministic trend polynomial
self.trend = trend
if trend is None or trend == 'n':
self.polynomial_trend = np.ones((0))
elif trend == 'c':
self.polynomial_trend = np.r_[1]
elif trend == 't':
self.polynomial_trend = np.r_[0, 1]
elif trend == 'ct':
self.polynomial_trend = np.r_[1, 1]
else:
self.polynomial_trend = (np.array(trend) > 0).astype(int)
# Model orders
# Note: k_ar, k_ma, k_seasonal_ar, k_seasonal_ma do not include the
# constant term, so they may be zero.
# Note: for a typical ARMA(p,q) model, p = k_ar_params = k_ar - 1 and
# q = k_ma_params = k_ma - 1, although this may not be true for models
# with arbitrary log polynomials.
self.k_ar = int(self.polynomial_ar.shape[0] - 1)
self.k_ar_params = int(np.sum(self.polynomial_ar) - 1)
self.k_diff = int(order[1])
self.k_ma = int(self.polynomial_ma.shape[0] - 1)
self.k_ma_params = int(np.sum(self.polynomial_ma) - 1)
self.k_seasonal_ar = int(self.polynomial_seasonal_ar.shape[0] - 1)
self.k_seasonal_ar_params = (
int(np.sum(self.polynomial_seasonal_ar) - 1)
)
self.k_seasonal_diff = int(seasonal_order[1])
self.k_seasonal_ma = int(self.polynomial_seasonal_ma.shape[0] - 1)
self.k_seasonal_ma_params = (
int(np.sum(self.polynomial_seasonal_ma) - 1)
)
# Make internal copies of the differencing orders because if we use
# simple differencing, then we will need to internally use zeros after
# the simple differencing has been performed
self._k_diff = self.k_diff
self._k_seasonal_diff = self.k_seasonal_diff
# We can only use the Hamilton representation if differencing is not
# performed as a part of the state space
if (self.hamilton_representation and not (self.simple_differencing or
self._k_diff == self._k_seasonal_diff == 0)):
raise ValueError('The Hamilton representation is only available'
' for models in which there is no differencing'
' integrated into the state vector. Set'
' `simple_differencing` to True or set'
' `hamilton_representation` to False')
# Note: k_trend is not the degree of the trend polynomial, because e.g.
# k_trend = 1 corresponds to the degree zero polynomial (with only a
# constant term).
self.k_trend = int(np.sum(self.polynomial_trend))
# Model order
# (this is used internally in a number of locations)
self._k_order = max(self.k_ar + self.k_seasonal_ar,
self.k_ma + self.k_seasonal_ma + 1)
if self._k_order == 1 and self.k_ar + self.k_seasonal_ar == 0:
self._k_order = 0
# Exogenous data
self.k_exog = 0
if exog is not None:
exog_is_using_pandas = _is_using_pandas(exog, None)
if not exog_is_using_pandas:
exog = np.asarray(exog)
# Make sure we have 2-dimensional array
if exog.ndim == 1:
if not exog_is_using_pandas:
exog = exog[:, None]
else:
exog = exog.to_frame()
self.k_exog = exog.shape[1]
# Redefine mle_regression to be true only if it was previously set to
# true and there are exogenous regressors
self.mle_regression = (
self.mle_regression and exog is not None and self.k_exog > 0
)
# State regression is regression with coefficients estiamted within
# the state vector
self.state_regression = (
not self.mle_regression and exog is not None and self.k_exog > 0
)
# If all we have is a regression (so k_ar = k_ma = 0), then put the
# error term as measurement error
if self.state_regression and self._k_order == 0:
self.measurement_error = True
# Number of states
k_states = self._k_order
if not self.simple_differencing:
k_states += self.k_seasons * self._k_seasonal_diff + self._k_diff
if self.state_regression:
k_states += self.k_exog
# Number of diffuse states
k_diffuse_states = k_states
if self.enforce_stationarity:
k_diffuse_states -= self._k_order
# Number of positive definite elements of the state covariance matrix
k_posdef = int(self._k_order > 0)
# Only have an error component to the states if k_posdef > 0
self.state_error = k_posdef > 0
if self.state_regression and self.time_varying_regression:
k_posdef += self.k_exog
# Diffuse initialization can be more sensistive to the variance value
# in the case of state regression, so set a higher than usual default
# variance
if self.state_regression:
kwargs.setdefault('initial_variance', 1e10)
# Number of parameters
self.k_params = (
self.k_ar_params + self.k_ma_params +
self.k_seasonal_ar_params + self.k_seasonal_ar_params +
self.k_trend +
self.measurement_error + 1
)
if self.mle_regression:
self.k_params += self.k_exog
# We need to have an array or pandas at this point
self.orig_endog = endog
self.orig_exog = exog
if not _is_using_pandas(endog, None):
endog = np.asanyarray(endog)
# Perform simple differencing if requested
if (simple_differencing and
(self._k_diff > 0 or self._k_seasonal_diff > 0)):
# Save the originals
self.orig_endog = endog
self.orig_exog = exog
# Perform simple differencing
endog = diff(endog.copy(), self._k_diff, self._k_seasonal_diff,
self.k_seasons)
if exog is not None:
exog = diff(exog.copy(), self._k_diff, self._k_seasonal_diff,
self.k_seasons)
self._k_diff = 0
self._k_seasonal_diff = 0
# Internally used in several locations
self._k_states_diff = (
self._k_diff + self.k_seasons * self._k_seasonal_diff
)
# Set some model variables now so they will be available for the
# initialize() method, below
self.nobs = len(endog)
self.k_states = k_states
self.k_posdef = k_posdef
# By default, do not calculate likelihood while it is controlled by
# diffuse initial conditions.
kwargs.setdefault('loglikelihood_burn', k_diffuse_states)
# Set the default results class to be SARIMAXResults
kwargs.setdefault('results_class', SARIMAXResults)
self._init_kwargs = kwargs # store to recreate model
# Initialize the statespace
super(SARIMAX, self).__init__(
endog, exog=exog, k_states=k_states, k_posdef=k_posdef, **kwargs
)
# Initialize the fixed components of the statespace model
self.design = self.initial_design
self.state_intercept = self.initial_state_intercept
self.transition = self.initial_transition
self.selection = self.initial_selection
# If we are estimating a simple ARMA model, then we can use a faster
# initialization method.
if k_diffuse_states == 0:
self.initialize_stationary()
# update _init_keys attached by super
self._init_keys += ['order', 'seasonal_order', 'trend',
'measurement_error', 'time_varying_regression',
'mle_regression', 'simple_differencing',
'enforce_stationarity', 'enforce_invertibility',
'hamilton_representation'] + list(kwargs.keys())
# TODO: I think the kwargs or not attached, need to recover from ???
def _get_init_kwds(self):
# this is a temporary fixup because exposure has been transformed
# see #1609
kwds = super(SARIMAX, self)._get_init_kwds()
kwds['endog'] = self.orig_endog
kwds['exog'] = self.orig_exog
if self.initialization == 'approximate_diffuse':
import warnings
warnings.warn('not all init keys or initialization is available yet')
return kwds
    def initialize(self):
        """
        Initialize the SARIMAX model.

        Notes
        -----
        These initialization steps must occur following the parent class
        __init__ function calls.
        """
        # Internal flag for whether the default mixed approximate diffuse /
        # stationary initialization has been overridden with a user-supplied
        # initialization
        self._manual_initialization = False
        # Cache the indexes of included polynomial orders (for update below)
        # (but we do not want the index of the constant term, so exclude the
        # first index)
        self._polynomial_ar_idx = np.nonzero(self.polynomial_ar)[0][1:]
        self._polynomial_ma_idx = np.nonzero(self.polynomial_ma)[0][1:]
        self._polynomial_seasonal_ar_idx = np.nonzero(
            self.polynomial_seasonal_ar
        )[0][1:]
        self._polynomial_seasonal_ma_idx = np.nonzero(
            self.polynomial_seasonal_ma
        )[0][1:]
        # Save the indices corresponding to the reduced form lag polynomial
        # parameters in the transition and selection matrices so that they
        # don't have to be recalculated for each update()
        # The AR coefficients live in a column of the transition matrix
        # (Harvey representation) or a row (Hamilton representation), just
        # past the differencing states.
        start_row = self._k_states_diff
        end_row = start_row + self.k_ar + self.k_seasonal_ar
        col = self._k_states_diff
        if not self.hamilton_representation:
            self.transition_ar_params_idx = (
                np.s_['transition', start_row:end_row, col]
            )
        else:
            self.transition_ar_params_idx = (
                np.s_['transition', col, start_row:end_row]
            )
        # The MA coefficients live in the selection matrix (Harvey) or the
        # design matrix (Hamilton).
        start_row += 1
        end_row = start_row + self.k_ma + self.k_seasonal_ma
        col = 0
        if not self.hamilton_representation:
            self.selection_ma_params_idx = (
                np.s_['selection', start_row:end_row, col]
            )
        else:
            self.design_ma_params_idx = (
                np.s_['design', col, start_row:end_row]
            )
        # Cache the arrays for calculating the intercept from the trend
        # components
        # Column i of _trend_data holds t**k for the i-th included trend
        # power (k == 0 gives a column of ones).
        time_trend = np.arange(1, self.nobs + 1)
        self._trend_data = np.zeros((self.nobs, self.k_trend))
        i = 0
        for k in self.polynomial_trend.nonzero()[0]:
            if k == 0:
                self._trend_data[:, i] = np.ones(self.nobs,)
            else:
                self._trend_data[:, i] = time_trend**k
            i += 1
        # Cache indices for exog variances in the state covariance matrix
        # (the trailing diagonal entries of state_cov belong to the
        # time-varying regression coefficients)
        if self.state_regression and self.time_varying_regression:
            idx = np.diag_indices(self.k_posdef)
            self._exog_variance_idx = ('state_cov', idx[0][-self.k_exog:],
                                       idx[1][-self.k_exog:])
    def initialize_known(self, initial_state, initial_state_cov):
        # Record that the user supplied an explicit initialization so that
        # initialize_state() will not overwrite it later.
        self._manual_initialization = True
        super(SARIMAX, self).initialize_known(initial_state, initial_state_cov)
    # Inherit the parent class's docstring verbatim
    initialize_known.__doc__ = MLEModel.initialize_known.__doc__
    def initialize_approximate_diffuse(self, variance=None):
        # Record that the user supplied an explicit initialization so that
        # initialize_state() will not overwrite it later.
        self._manual_initialization = True
        super(SARIMAX, self).initialize_approximate_diffuse(variance)
    # Inherit the parent class's docstring verbatim
    initialize_approximate_diffuse.__doc__ = (
        MLEModel.initialize_approximate_diffuse.__doc__
    )
    def initialize_stationary(self):
        # Record that the user supplied an explicit initialization so that
        # initialize_state() will not overwrite it later.
        self._manual_initialization = True
        super(SARIMAX, self).initialize_stationary()
    # Inherit the parent class's docstring verbatim
    initialize_stationary.__doc__ = MLEModel.initialize_stationary.__doc__
    def initialize_state(self, variance=None):
        """
        Initialize state and state covariance arrays in preparation for the
        Kalman filter.

        Parameters
        ----------
        variance : float, optional
            The variance for approximating diffuse initial conditions. Default
            can be found in the Representation class documentation.

        Notes
        -----
        Initializes the ARMA component of the state space to the typical
        stationary values and the other components as approximate diffuse.

        Can be overridden be calling one of the other initialization methods
        before fitting the model.
        """
        # Check if a manual initialization has already been specified
        if self._manual_initialization:
            return

        # If we're not enforcing stationarity, then we can't initialize a
        # stationary component
        if not self.enforce_stationarity:
            self.initialize_approximate_diffuse(variance)
            return

        # Otherwise, create the initial state and state covariance matrix
        # as from a combination of diffuse and stationary components
        # Create initialized non-stationary components
        if variance is None:
            variance = self.initial_variance
        # Start from all-zero state and a large (approximate diffuse)
        # covariance everywhere; the stationary block is overwritten below.
        initial_state = np.zeros(self.k_states, dtype=self.transition.dtype)
        initial_state_cov = (
            np.eye(self.k_states, dtype=self.transition.dtype) * variance
        )
        # Get the offsets (from the bottom or bottom right of the vector /
        # matrix) for the stationary component.
        if self.state_regression:
            start = -(self.k_exog + self._k_order)
            end = -self.k_exog if self.k_exog > 0 else None
        else:
            start = -self._k_order
            end = None
        # Add in the initialized stationary components
        if self._k_order > 0:
            # Map the state disturbance covariance through the selection
            # matrix, then solve the discrete Lyapunov equation for the
            # unconditional (stationary) covariance of the ARMA block.
            selection_stationary = self.selection[start:end, :, 0]
            selected_state_cov_stationary = np.dot(
                np.dot(selection_stationary, self.state_cov[:, :, 0]),
                selection_stationary.T
            )
            initial_state_cov_stationary = solve_discrete_lyapunov(
                self.transition[start:end, start:end, 0],
                selected_state_cov_stationary
            )
            initial_state_cov[start:end, start:end] = (
                initial_state_cov_stationary
            )
        super(SARIMAX, self).initialize_known(initial_state, initial_state_cov)
    @property
    def initial_design(self):
        """Initial design matrix (Z)"""
        # Basic design matrix: pick out each ordinary differencing state, the
        # last element of each seasonal differencing block, and (if there is
        # a state error) the first element of the ARMA block
        design = np.r_[
            [1] * self._k_diff,
            ([0] * (self.k_seasons - 1) + [1]) * self._k_seasonal_diff,
            [1] * self.state_error, [0] * (self._k_order - 1)
        ]
        # If we have exogenous regressors included as part of the state vector
        # then the exogenous data is incorporated as a time-varying component
        # of the design matrix
        if self.state_regression:
            if self._k_order > 0:
                # Tile the static design across time and append the exog
                # columns, producing a (1, k_states, nobs) array
                design = np.c_[
                    np.reshape(
                        np.repeat(design, self.nobs),
                        (design.shape[0], self.nobs)
                    ).T,
                    self.exog
                ].T[None, :, :]
            else:
                # Pure regression model: design is just the exog data
                design = self.exog.T[None, :, :]
        return design
@property
def initial_state_intercept(self):
"""Initial state intercept vector"""
# TODO make this self.k_trend > 1 and adjust the update to take
# into account that if the trend is a constant, it is not time-varying
if self.k_trend > 0:
state_intercept = np.zeros((self.k_states, self.nobs))
else:
state_intercept = np.zeros((self.k_states,))
return state_intercept
    @property
    def initial_transition(self):
        """Initial transition matrix (T)"""
        transition = np.zeros((self.k_states, self.k_states))

        # Exogenous regressors component
        if self.state_regression:
            start = -self.k_exog
            # T_\beta: regression coefficients follow a random-walk-style
            # identity transition
            transition[start:, start:] = np.eye(self.k_exog)

            # Autoregressive component
            start = -(self.k_exog + self._k_order)
            end = -self.k_exog if self.k_exog > 0 else None
        else:
            # Autoregressive component
            start = -self._k_order
            end = None
        # T_c: companion form for the ARMA block (Hamilton representation
        # uses the transpose)
        transition[start:end, start:end] = companion_matrix(self._k_order)
        if self.hamilton_representation:
            transition[start:end, start:end] = np.transpose(
                companion_matrix(self._k_order)
            )

        # Seasonal differencing component
        # T^*
        if self._k_seasonal_diff > 0:
            seasonal_companion = companion_matrix(self.k_seasons).T
            seasonal_companion[0, -1] = 1
            for d in range(self._k_seasonal_diff):
                start = self._k_diff + d * self.k_seasons
                end = self._k_diff + (d + 1) * self.k_seasons
                # T_c^*
                transition[start:end, start:end] = seasonal_companion
                # i
                # NOTE(review): the loop variable `i` is unused, so the same
                # cell is written on every iteration — confirm this matches
                # the intended multi-order seasonal differencing link.
                for i in range(d + 1, self._k_seasonal_diff):
                    transition[start, end + self.k_seasons - 1] = 1
                # \iota: link into the first ARMA state
                transition[start, self._k_states_diff] = 1

        # Differencing component
        if self._k_diff > 0:
            idx = np.triu_indices(self._k_diff)
            # T^**: upper-triangular ones accumulate the differences
            transition[idx] = 1
            # [0 1]
            if self.k_seasons > 0:
                start = self._k_diff
                end = self._k_states_diff
                transition[:self._k_diff, start:end] = (
                    ([0] * (self.k_seasons - 1) + [1]) * self._k_seasonal_diff
                )
            # [1 0]
            column = self._k_states_diff
            transition[:self._k_diff, column] = 1

        return transition
    @property
    def initial_selection(self):
        """Initial selection matrix (R)"""
        if not (self.state_regression and self.time_varying_regression):
            if self.k_posdef > 0:
                # A single disturbance loads on the first ARMA state; the
                # differencing states and any state-vector regression
                # coefficients receive no disturbance
                selection = np.r_[
                    [0] * (self._k_states_diff),
                    [1] * (self._k_order > 0), [0] * (self._k_order - 1),
                    [0] * ((1 - self.mle_regression) * self.k_exog)
                ][:, None]
            else:
                # No state error at all
                selection = np.zeros((self.k_states, 0))
        else:
            selection = np.zeros((self.k_states, self.k_posdef))
            # Typical state variance
            if self._k_order > 0:
                selection[0, 0] = 1
            # Time-varying regression coefficient variances
            # (the last k_exog states each get their own disturbance)
            for i in range(self.k_exog, 0, -1):
                selection[-i, -i] = 1
        return selection
    @staticmethod
    def _conditional_sum_squares(endog, k_ar, polynomial_ar, k_ma,
                                 polynomial_ma, k_trend=0, trend_data=None):
        """
        Conditional sum of squares starting-parameter estimates.

        Returns a tuple (params_trend, params_ar, params_ma, params_variance)
        where each element is an empty list when the corresponding component
        is absent from the model.
        """
        # Order of the preliminary AR model used to generate residuals for
        # the MA estimation step
        k = 2 * k_ma
        # Number of leading observations lost to lagging overall
        r = max(k + k_ma, k_ar)

        # Counts of included (non-zero) lag coefficients, excluding the
        # constant term of each polynomial
        k_params_ar = 0 if k_ar == 0 else len(polynomial_ar.nonzero()[0]) - 1
        k_params_ma = 0 if k_ma == 0 else len(polynomial_ma.nonzero()[0]) - 1

        residuals = None
        if k_ar + k_ma + k_trend > 0:
            # If we have MA terms, get residuals from an AR(k) model to use
            # as data for conditional sum of squares estimates of the MA
            # parameters
            if k_ma > 0:
                Y = endog[k:]
                X = lagmat(endog, k, trim='both')
                params_ar = np.linalg.pinv(X).dot(Y)
                residuals = Y - np.dot(X, params_ar)

            # Run an ARMA(p,q) model using the just computed residuals as data
            Y = endog[r:]

            X = np.empty((Y.shape[0], 0))
            if k_trend > 0:
                if trend_data is None:
                    raise ValueError('Trend data must be provided if'
                                     ' `k_trend` > 0.')
                X = np.c_[X, trend_data[:(-r if r > 0 else None), :]]
            if k_ar > 0:
                # Only the included AR lags (per the lag polynomial)
                cols = polynomial_ar.nonzero()[0][1:] - 1
                X = np.c_[X, lagmat(endog, k_ar)[r:, cols]]
            if k_ma > 0:
                # Only the included MA lags, using the AR(k) residuals
                cols = polynomial_ma.nonzero()[0][1:] - 1
                X = np.c_[X, lagmat(residuals, k_ma)[r-k:, cols]]

            # Get the array of [ar_params, ma_params]
            params = np.linalg.pinv(X).dot(Y)
            residuals = Y - np.dot(X, params)

        # Default output
        params_trend = []
        params_ar = []
        params_ma = []
        params_variance = []

        # Get the params: slice the stacked OLS estimate into its components
        offset = 0
        if k_trend > 0:
            params_trend = params[offset:k_trend + offset]
            offset += k_trend
        if k_ar > 0:
            params_ar = params[offset:k_params_ar + offset]
            offset += k_params_ar
        if k_ma > 0:
            params_ma = params[offset:k_params_ma + offset]
            offset += k_params_ma
        if residuals is not None:
            # Innovation variance from the mean squared residual
            params_variance = (residuals[k_params_ma:]**2).mean()

        return (params_trend, params_ar, params_ma,
                params_variance)
@property
def start_params(self):
"""
Starting parameters for maximum likelihood estimation
"""
# Perform differencing if necessary (i.e. if simple differencing is
# false so that the state-space model will use the entire dataset)
trend_data = self._trend_data
if not self.simple_differencing and (
self._k_diff > 0 or self._k_seasonal_diff > 0):
endog = diff(self.endog[0, :], self._k_diff,
self._k_seasonal_diff, self.k_seasons)
if self.exog is not None:
exog = diff(self.exog, self._k_diff,
self._k_seasonal_diff, self.k_seasons)
else:
exog = None
trend_data = trend_data[:endog.shape[0], :]
else:
endog = self.endog.copy()[0, :]
exog = self.exog.copy() if self.exog is not None else None
# Although the Kalman filter can deal with missing values in endog,
# conditional sum of squares cannot
if np.any(np.isnan(endog)):
endog = endog[~np.isnan(endog)]
if exog is not None:
exog = exog[~np.isnan(endog)]
if trend_data is not None:
trend_data = trend_data[~np.isnan(endog)]
# Regression effects via OLS
params_exog = []
if self.k_exog > 0:
params_exog = np.linalg.pinv(exog).dot(endog)
endog -= np.dot(exog, params_exog)
if self.state_regression:
params_exog = []
# Non-seasonal ARMA component and trend
(params_trend, params_ar, params_ma,
params_variance) = self._conditional_sum_squares(
endog, self.k_ar, self.polynomial_ar, self.k_ma,
self.polynomial_ma, self.k_trend, trend_data
)
# If we have estimated non-stationary start parameters but enforce
# stationarity is on, raise an error
invalid_ar = (
self.k_ar > 0 and
self.enforce_stationarity and
not is_invertible(np.r_[1, -params_ar])
)
if invalid_ar:
raise ValueError('Non-stationary starting autoregressive'
' parameters found with `enforce_stationarity`'
' set to True.')
# If we have estimated non-invertible start parameters but enforce
# invertibility is on, raise an error
invalid_ma = (
self.k_ma > 0 and
self.enforce_invertibility and
not is_invertible(np.r_[1, params_ma])
)
if invalid_ma:
raise ValueError('non-invertible starting MA parameters found'
' with `enforce_invertibility` set to True.')
# Seasonal Parameters
_, params_seasonal_ar, params_seasonal_ma, params_seasonal_variance = (
self._conditional_sum_squares(
endog, self.k_seasonal_ar, self.polynomial_seasonal_ar,
self.k_seasonal_ma, self.polynomial_seasonal_ma
)
)
# If we have estimated non-stationary start parameters but enforce
# stationarity is on, raise an error
invalid_seasonal_ar = (
self.k_seasonal_ar > 0 and
self.enforce_stationarity and
not is_invertible(np.r_[1, -params_seasonal_ar])
)
if invalid_seasonal_ar:
raise ValueError('Non-stationary starting autoregressive'
' parameters found with `enforce_stationarity`'
' set to True.')
# If we have estimated non-invertible start parameters but enforce
# invertibility is on, raise an error
invalid_seasonal_ma = (
self.k_seasonal_ma > 0 and
self.enforce_invertibility and
not is_invertible(np.r_[1, params_seasonal_ma])
)
if invalid_seasonal_ma:
raise ValueError('non-invertible starting seasonal moving average'
' parameters found with `enforce_invertibility`'
' set to True.')
# Variances
params_exog_variance = []
if self.state_regression and self.time_varying_regression:
# TODO how to set the initial variance parameters?
params_exog_variance = [1] * self.k_exog
if self.state_error and params_variance == []:
if not params_seasonal_variance == []:
params_variance = params_seasonal_variance
elif self.k_exog > 0:
params_variance = np.dot(endog, endog)
else:
params_variance = 1
params_measurement_variance = 1 if self.measurement_error else []
# Combine all parameters
return np.r_[
params_trend,
params_exog,
params_ar,
params_ma,
params_seasonal_ar,
params_seasonal_ma,
params_exog_variance,
params_measurement_variance,
params_variance
]
@property
def endog_names(self, latex=False):
"""Names of endogenous variables"""
diff = ''
if self.k_diff > 0:
if self.k_diff == 1:
diff = '\Delta' if latex else 'D'
else:
diff = ('\Delta^%d' if latex else 'D%d') % self.k_diff
seasonal_diff = ''
if self.k_seasonal_diff > 0:
if self.k_seasonal_diff == 1:
seasonal_diff = (('\Delta_%d' if latex else 'DS%d') %
(self.k_seasons))
else:
seasonal_diff = (('\Delta_%d^%d' if latex else 'D%dS%d') %
(self.k_seasonal_diff, self.k_seasons))
endog_diff = self.simple_differencing
if endog_diff and self.k_diff > 0 and self.k_seasonal_diff > 0:
return (('%s%s %s' if latex else '%s.%s.%s') %
(diff, seasonal_diff, self.data.ynames))
elif endog_diff and self.k_diff > 0:
return (('%s %s' if latex else '%s.%s') %
(diff, self.data.ynames))
elif endog_diff and self.k_seasonal_diff > 0:
return (('%s %s' if latex else '%s.%s') %
(seasonal_diff, self.data.ynames))
else:
return self.data.ynames
params_complete = [
'trend', 'exog', 'ar', 'ma', 'seasonal_ar', 'seasonal_ma',
'exog_variance', 'measurement_variance', 'variance'
]
@property
def param_terms(self):
"""
List of parameters actually included in the model, in sorted order.
TODO Make this an OrderedDict with slice or indices as the values.
"""
model_orders = self.model_orders
# Get basic list from model orders
params = [
order for order in self.params_complete
if model_orders[order] > 0
]
# k_exog may be positive without associated parameters if it is in the
# state vector
if 'exog' in params and not self.mle_regression:
params.remove('exog')
return params
@property
def param_names(self):
"""
List of human readable parameter names (for parameters actually
included in the model).
"""
params_sort_order = self.param_terms
model_names = self.model_names
return [
name for param in params_sort_order for name in model_names[param]
]
@property
def model_orders(self):
"""
The orders of each of the polynomials in the model.
"""
return {
'trend': self.k_trend,
'exog': self.k_exog,
'ar': self.k_ar,
'ma': self.k_ma,
'seasonal_ar': self.k_seasonal_ar,
'seasonal_ma': self.k_seasonal_ma,
'reduced_ar': self.k_ar + self.k_seasonal_ar,
'reduced_ma': self.k_ma + self.k_seasonal_ma,
'exog_variance': self.k_exog if (
self.state_regression and self.time_varying_regression) else 0,
'measurement_variance': int(self.measurement_error),
'variance': int(self.state_error),
}
    @property
    def model_names(self):
        """
        The plain text names of all possible model parameters.
        """
        # Delegates to the shared name builder with latex formatting disabled
        return self._get_model_names(latex=False)
    @property
    def model_latex_names(self):
        """
        The latex names of all possible model parameters.
        """
        # Delegates to the shared name builder with latex formatting enabled
        return self._get_model_names(latex=True)
def _get_model_names(self, latex=False):
names = {
'trend': None,
'exog': None,
'ar': None,
'ma': None,
'seasonal_ar': None,
'seasonal_ma': None,
'reduced_ar': None,
'reduced_ma': None,
'exog_variance': None,
'measurement_variance': None,
'variance': None,
}
# Trend
if self.k_trend > 0:
trend_template = 't_%d' if latex else 'trend.%d'
names['trend'] = []
for i in self.polynomial_trend.nonzero()[0]:
if i == 0:
names['trend'].append('intercept')
elif i == 1:
names['trend'].append('drift')
else:
names['trend'].append(trend_template % i)
# Exogenous coefficients
if self.k_exog > 0:
names['exog'] = self.exog_names
# Autoregressive
if self.k_ar > 0:
ar_template = '$\\phi_%d$' if latex else 'ar.L%d'
names['ar'] = []
for i in self.polynomial_ar.nonzero()[0][1:]:
names['ar'].append(ar_template % i)
# Moving Average
if self.k_ma > 0:
ma_template = '$\\theta_%d$' if latex else 'ma.L%d'
names['ma'] = []
for i in self.polynomial_ma.nonzero()[0][1:]:
names['ma'].append(ma_template % i)
# Seasonal Autoregressive
if self.k_seasonal_ar > 0:
seasonal_ar_template = (
'$\\tilde \\phi_%d$' if latex else 'ar.S.L%d'
)
names['seasonal_ar'] = []
for i in self.polynomial_seasonal_ar.nonzero()[0][1:]:
names['seasonal_ar'].append(seasonal_ar_template % i)
# Seasonal Moving Average
if self.k_seasonal_ma > 0:
seasonal_ma_template = (
'$\\tilde \\theta_%d$' if latex else 'ma.S.L%d'
)
names['seasonal_ma'] = []
for i in self.polynomial_seasonal_ma.nonzero()[0][1:]:
names['seasonal_ma'].append(seasonal_ma_template % i)
# Reduced Form Autoregressive
if self.k_ar > 0 or self.k_seasonal_ar > 0:
reduced_polynomial_ar = reduced_polynomial_ar = -np.polymul(
self.polynomial_ar, self.polynomial_seasonal_ar
)
ar_template = '$\\Phi_%d$' if latex else 'ar.R.L%d'
names['reduced_ar'] = []
for i in reduced_polynomial_ar.nonzero()[0][1:]:
names['reduced_ar'].append(ar_template % i)
# Reduced Form Moving Average
if self.k_ma > 0 or self.k_seasonal_ma > 0:
reduced_polynomial_ma = np.polymul(
self.polynomial_ma, self.polynomial_seasonal_ma
)
ma_template = '$\\Theta_%d$' if latex else 'ma.R.L%d'
names['reduced_ma'] = []
for i in reduced_polynomial_ma.nonzero()[0][1:]:
names['reduced_ma'].append(ma_template % i)
# Exogenous variances
if self.state_regression and self.time_varying_regression:
exog_var_template = '$\\sigma_\\text{%s}^2$' if latex else 'var.%s'
names['exog_variance'] = [
exog_var_template % exog_name for exog_name in self.exog_names
]
# Measurement error variance
if self.measurement_error:
meas_var_tpl = (
'$\\sigma_\\eta^2$' if latex else 'var.measurement_error'
)
names['measurement_variance'] = [meas_var_tpl]
# State variance
if self.state_error:
var_tpl = '$\\sigma_\\zeta^2$' if latex else 'sigma2'
names['variance'] = [var_tpl]
return names
    def transform_params(self, unconstrained):
        """
        Transform unconstrained parameters used by the optimizer to constrained
        parameters used in likelihood evaluation.

        Used primarily to enforce stationarity of the autoregressive lag
        polynomial, invertibility of the moving average lag polynomial, and
        positive variance parameters.

        Parameters
        ----------
        unconstrained : array_like
            Unconstrained parameters used by the optimizer.

        Returns
        -------
        constrained : array_like
            Constrained parameters used in likelihood evaluation.

        Notes
        -----
        If the lag polynomial has non-consecutive powers (so that the
        coefficient is zero on some element of the polynomial), then the
        constraint function is not onto the entire space of invertible
        polynomials, although it only excludes a very small portion very close
        to the invertibility boundary.
        """
        constrained = np.zeros(unconstrained.shape, unconstrained.dtype)

        # [start, end) is a sliding window over the parameter vector; each
        # section below advances it by the number of parameters it consumes.
        start = end = 0
        # Retain the trend parameters
        if self.k_trend > 0:
            end += self.k_trend
            constrained[start:end] = unconstrained[start:end]
            start += self.k_trend
        # Retain any MLE regression coefficients
        if self.mle_regression:
            end += self.k_exog
            constrained[start:end] = unconstrained[start:end]
            start += self.k_exog
        # Transform the AR parameters (phi) to be stationary
        if self.k_ar_params > 0:
            end += self.k_ar_params
            if self.enforce_stationarity:
                constrained[start:end] = (
                    constrain_stationary_univariate(unconstrained[start:end])
                )
            else:
                constrained[start:end] = unconstrained[start:end]
            start += self.k_ar_params
        # Transform the MA parameters (theta) to be invertible
        if self.k_ma_params > 0:
            end += self.k_ma_params
            if self.enforce_invertibility:
                constrained[start:end] = (
                    constrain_stationary_univariate(unconstrained[start:end])
                )
            else:
                constrained[start:end] = unconstrained[start:end]
            start += self.k_ma_params
        # Transform the seasonal AR parameters (\tilde phi) to be stationary
        # NOTE(review): this guard tests k_seasonal_ar while the other
        # sections test the *_params counts — harmless when both are positive
        # together, but confirm the asymmetry is intentional.
        if self.k_seasonal_ar > 0:
            end += self.k_seasonal_ar_params
            if self.enforce_stationarity:
                constrained[start:end] = (
                    constrain_stationary_univariate(unconstrained[start:end])
                )
            else:
                constrained[start:end] = unconstrained[start:end]
            start += self.k_seasonal_ar_params
        # Transform the seasonal MA parameters (\tilde theta) to be invertible
        if self.k_seasonal_ma_params > 0:
            end += self.k_seasonal_ma_params
            if self.enforce_invertibility:
                constrained[start:end] = (
                    constrain_stationary_univariate(unconstrained[start:end])
                )
            else:
                constrained[start:end] = unconstrained[start:end]
            start += self.k_seasonal_ma_params
        # Transform the standard deviation parameters to be positive
        # (squaring guarantees non-negative variances)
        if self.state_regression and self.time_varying_regression:
            end += self.k_exog
            constrained[start:end] = unconstrained[start:end]**2
            start += self.k_exog
        if self.measurement_error:
            constrained[start] = unconstrained[start]**2
            start += 1
            end += 1
        if self.state_error:
            constrained[start] = unconstrained[start]**2
            # start += 1
            # end += 1

        return constrained
def untransform_params(self, constrained):
    """
    Transform constrained parameters used in likelihood evaluation
    to unconstrained parameters used by the optimizer

    Used primarily to reverse enforcement of stationarity of the
    autoregressive lag polynomial and invertibility of the moving average
    lag polynomial.

    Parameters
    ----------
    constrained : array_like
        Constrained parameters used in likelihood evaluation.

    Returns
    -------
    unconstrained : array_like
        Unconstrained parameters used by the optimizer.

    Notes
    -----
    If the lag polynomial has non-consecutive powers (so that the
    coefficient is zero on some element of the polynomial), then the
    constraint function is not onto the entire space of invertible
    polynomials, although it only excludes a very small portion very close
    to the invertibility boundary.
    """
    unconstrained = np.zeros(constrained.shape, constrained.dtype)

    # The parameter vector is laid out as consecutive segments; the
    # start/end pair walks that layout, mirroring transform_params.
    start = end = 0

    # Retain the trend parameters
    if self.k_trend > 0:
        end += self.k_trend
        unconstrained[start:end] = constrained[start:end]
        start += self.k_trend
    # Retain any MLE regression coefficients
    if self.mle_regression:
        end += self.k_exog
        unconstrained[start:end] = constrained[start:end]
        start += self.k_exog
    # Transform the AR parameters (phi) to be stationary
    if self.k_ar_params > 0:
        end += self.k_ar_params
        if self.enforce_stationarity:
            unconstrained[start:end] = (
                unconstrain_stationary_univariate(constrained[start:end])
            )
        else:
            unconstrained[start:end] = constrained[start:end]
        start += self.k_ar_params
    # Transform the MA parameters (theta) to be invertible
    if self.k_ma_params > 0:
        end += self.k_ma_params
        if self.enforce_invertibility:
            unconstrained[start:end] = (
                unconstrain_stationary_univariate(constrained[start:end])
            )
        else:
            unconstrained[start:end] = constrained[start:end]
        start += self.k_ma_params
    # Transform the seasonal AR parameters (\tilde phi) to be stationary
    # Gate on the free-parameter count (k_seasonal_ar_params), matching the
    # seasonal MA branch below: the slice length added is
    # k_seasonal_ar_params, and gating on k_seasonal_ar could invoke the
    # unconstrain helper on an empty slice when every seasonal AR
    # coefficient is constrained to zero.
    if self.k_seasonal_ar_params > 0:
        end += self.k_seasonal_ar_params
        if self.enforce_stationarity:
            unconstrained[start:end] = (
                unconstrain_stationary_univariate(constrained[start:end])
            )
        else:
            unconstrained[start:end] = constrained[start:end]
        start += self.k_seasonal_ar_params
    # Transform the seasonal MA parameters (\tilde theta) to be invertible
    if self.k_seasonal_ma_params > 0:
        end += self.k_seasonal_ma_params
        if self.enforce_invertibility:
            unconstrained[start:end] = (
                unconstrain_stationary_univariate(constrained[start:end])
            )
        else:
            unconstrained[start:end] = constrained[start:end]
        start += self.k_seasonal_ma_params
    # Untransform the standard deviation: variances are constrained
    # positive via squaring, so the reverse map is the square root.
    if self.state_regression and self.time_varying_regression:
        end += self.k_exog
        unconstrained[start:end] = constrained[start:end]**0.5
        start += self.k_exog
    if self.measurement_error:
        unconstrained[start] = constrained[start]**0.5
        start += 1
        end += 1
    if self.state_error:
        unconstrained[start] = constrained[start]**0.5
    # start += 1
    # end += 1

    return unconstrained
def update(self, params, transformed=True, set_params=True):
    """
    Update the parameters of the model

    Updates the representation matrices to fill in the new parameter
    values.

    Parameters
    ----------
    params : array_like
        Array of new parameters.
    transformed : boolean, optional
        Whether or not `params` is already transformed. If set to False,
        `transform_params` is called. Default is True.
    set_params : boolean
        Whether or not to copy `params` to the model object's params
        attribute. Usually set to True.

    Returns
    -------
    params : array_like
        Array of parameters.
    """
    params = super(SARIMAX, self).update(params, transformed, set_params)

    # Parameter segments default to None; only the pieces relevant to the
    # current model specification are filled in below.
    params_trend = None
    params_exog = None
    params_ar = None
    params_ma = None
    params_seasonal_ar = None
    params_seasonal_ma = None
    params_exog_variance = None
    params_measurement_variance = None
    params_variance = None

    # Extract the parameters
    # The parameter vector is laid out as consecutive segments (trend,
    # MLE exog, AR, MA, seasonal AR, seasonal MA, exog variances,
    # measurement variance, state variance); start/end walk that layout.
    start = end = 0
    end += self.k_trend
    params_trend = params[start:end]
    start += self.k_trend
    if self.mle_regression:
        end += self.k_exog
        params_exog = params[start:end]
        start += self.k_exog
    end += self.k_ar_params
    params_ar = params[start:end]
    start += self.k_ar_params
    end += self.k_ma_params
    params_ma = params[start:end]
    start += self.k_ma_params
    end += self.k_seasonal_ar_params
    params_seasonal_ar = params[start:end]
    start += self.k_seasonal_ar_params
    end += self.k_seasonal_ma_params
    params_seasonal_ma = params[start:end]
    start += self.k_seasonal_ma_params
    if self.state_regression and self.time_varying_regression:
        end += self.k_exog
        params_exog_variance = params[start:end]
        start += self.k_exog
    if self.measurement_error:
        params_measurement_variance = params[start]
        start += 1
        end += 1
    if self.state_error:
        params_variance = params[start]
    # start += 1
    # end += 1

    # Update lag polynomials
    # The AR coefficients are written into the lag polynomial negated
    # (note the minus sign on -params_ar below); the dtype check avoids
    # rebuilding the polynomial array when the dtype already matches.
    if self.k_ar > 0:
        if self.polynomial_ar.dtype == params.dtype:
            self.polynomial_ar[self._polynomial_ar_idx] = -params_ar
        else:
            polynomial_ar = self.polynomial_ar.real.astype(params.dtype)
            polynomial_ar[self._polynomial_ar_idx] = -params_ar
            self.polynomial_ar = polynomial_ar
    if self.k_ma > 0:
        if self.polynomial_ma.dtype == params.dtype:
            self.polynomial_ma[self._polynomial_ma_idx] = params_ma
        else:
            polynomial_ma = self.polynomial_ma.real.astype(params.dtype)
            polynomial_ma[self._polynomial_ma_idx] = params_ma
            self.polynomial_ma = polynomial_ma
    if self.k_seasonal_ar > 0:
        idx = self._polynomial_seasonal_ar_idx
        if self.polynomial_seasonal_ar.dtype == params.dtype:
            self.polynomial_seasonal_ar[idx] = -params_seasonal_ar
        else:
            polynomial_seasonal_ar = (
                self.polynomial_seasonal_ar.real.astype(params.dtype)
            )
            polynomial_seasonal_ar[idx] = -params_seasonal_ar
            self.polynomial_seasonal_ar = polynomial_seasonal_ar
    if self.k_seasonal_ma > 0:
        idx = self._polynomial_seasonal_ma_idx
        if self.polynomial_seasonal_ma.dtype == params.dtype:
            self.polynomial_seasonal_ma[idx] = params_seasonal_ma
        else:
            polynomial_seasonal_ma = (
                self.polynomial_seasonal_ma.real.astype(params.dtype)
            )
            polynomial_seasonal_ma[idx] = params_seasonal_ma
            self.polynomial_seasonal_ma = polynomial_seasonal_ma

    # Get the reduced form lag polynomial terms by multiplying the regular
    # and seasonal lag polynomials
    # Note: that although the numpy np.polymul examples assume that they
    # are ordered from highest degree to lowest, whereas our are from
    # lowest to highest, it does not matter.
    if self.k_seasonal_ar > 0:
        reduced_polynomial_ar = -np.polymul(
            self.polynomial_ar, self.polynomial_seasonal_ar
        )
    else:
        reduced_polynomial_ar = -self.polynomial_ar
    if self.k_seasonal_ma > 0:
        reduced_polynomial_ma = np.polymul(
            self.polynomial_ma, self.polynomial_seasonal_ma
        )
    else:
        reduced_polynomial_ma = self.polynomial_ma

    # Observation intercept
    # Exogenous data with MLE estimation of parameters enters through a
    # time-varying observation intercept (is equivalent to simply
    # subtracting it out of the endogenous variable first)
    if self.mle_regression:
        self['obs_intercept'] = np.dot(self.exog, params_exog)[None, :]

    # State intercept (Harvey) or additional observation intercept
    # (Hamilton)
    # SARIMA trend enters through the a time-varying state intercept,
    # associated with the first row of the stationary component of the
    # state vector (i.e. the first element of the state vector following
    # any differencing elements)
    if self.k_trend > 0:
        data = np.dot(self._trend_data, params_trend).astype(params.dtype)
        if not self.hamilton_representation:
            self['state_intercept', self._k_states_diff, :] = data
        else:
            # The way the trend enters in the Hamilton representation means
            # that the parameter is not an ``intercept'' but instead the
            # mean of the process. The trend values in `data` are meant for
            # an intercept, and so must be transformed to represent the
            # mean instead
            # NOTE(review): this inner check is always True inside this
            # `else` branch and looks redundant -- confirm before removing.
            if self.hamilton_representation:
                data /= np.sum(-reduced_polynomial_ar)

            # If we already set the observation intercept for MLE
            # regression, just add to it
            if self.mle_regression:
                self.obs_intercept += data[None, :]
            # Otherwise set it directly
            else:
                self.obs_intercept = data[None, :]

    # Observation covariance matrix
    if self.measurement_error:
        self['obs_cov', 0, 0] = params_measurement_variance

    # Transition matrix
    if self.k_ar > 0 or self.k_seasonal_ar > 0:
        self[self.transition_ar_params_idx] = reduced_polynomial_ar[1:]
    elif not self.transition.dtype == params.dtype:
        # This is required if the transition matrix is not really in use
        # (e.g. for an MA(q) process) so that it's dtype never changes as
        # the parameters' dtype changes. This changes the dtype manually.
        self.transition = self.transition.real.astype(params.dtype)

    # Selection matrix (Harvey) or Design matrix (Hamilton)
    if self.k_ma > 0 or self.k_seasonal_ma > 0:
        if not self.hamilton_representation:
            self[self.selection_ma_params_idx] = reduced_polynomial_ma[1:]
        else:
            self[self.design_ma_params_idx] = reduced_polynomial_ma[1:]

    # State covariance matrix
    if self.k_posdef > 0:
        self['state_cov', 0, 0] = params_variance
        if self.state_regression and self.time_varying_regression:
            self[self._exog_variance_idx] = params_exog_variance

    # Initialize
    if not self._manual_initialization:
        self.initialize_state()

    return params
class SARIMAXResults(MLEResults):
    """
    Class to hold results from fitting an SARIMAX model.

    Parameters
    ----------
    model : SARIMAX instance
        The fitted model instance

    Attributes
    ----------
    specification : dictionary
        Dictionary including all attributes from the SARIMAX model instance.
    polynomial_ar : array
        Array containing autoregressive lag polynomial coefficients,
        ordered from lowest degree to highest. Initialized with ones, unless
        a coefficient is constrained to be zero (in which case it is zero).
    polynomial_ma : array
        Array containing moving average lag polynomial coefficients,
        ordered from lowest degree to highest. Initialized with ones, unless
        a coefficient is constrained to be zero (in which case it is zero).
    polynomial_seasonal_ar : array
        Array containing seasonal autoregressive lag polynomial coefficients,
        ordered from lowest degree to highest. Initialized with ones, unless
        a coefficient is constrained to be zero (in which case it is zero).
    polynomial_seasonal_ma : array
        Array containing seasonal moving average lag polynomial coefficients,
        ordered from lowest degree to highest. Initialized with ones, unless
        a coefficient is constrained to be zero (in which case it is zero).
    polynomial_trend : array
        Array containing trend polynomial coefficients, ordered from lowest
        degree to highest. Initialized with ones, unless a coefficient is
        constrained to be zero (in which case it is zero).
    model_orders : list of int
        The orders of each of the polynomials in the model.
    param_terms : list of str
        List of parameters actually included in the model, in sorted order.

    See Also
    --------
    statsmodels.tsa.statespace.kalman_filter.FilterResults
    statsmodels.tsa.statespace.mlemodel.MLEResults
    """
    def __init__(self, model):
        """Copy specification, polynomials and parameter slices off `model`."""
        super(SARIMAXResults, self).__init__(model)

        self.df_resid = np.inf  # attribute required for wald tests

        self.specification = {
            # Set additional model parameters
            'k_seasons': self.model.k_seasons,
            'measurement_error': self.model.measurement_error,
            'time_varying_regression': self.model.time_varying_regression,
            # NOTE(review): 'mle_regression' appears twice in this literal
            # (here and under "Trend / Regression" below); the later entry
            # silently wins in a dict literal -- harmless but confusing.
            'mle_regression': self.model.mle_regression,
            'simple_differencing': self.model.simple_differencing,
            'enforce_stationarity': self.model.enforce_stationarity,
            'enforce_invertibility': self.model.enforce_invertibility,
            'hamilton_representation': self.model.hamilton_representation,

            'order': self.model.order,
            'seasonal_order': self.model.seasonal_order,

            # Model order
            'k_diff': self.model.k_diff,
            'k_seasonal_diff': self.model.k_seasonal_diff,
            'k_ar': self.model.k_ar,
            'k_ma': self.model.k_ma,
            'k_seasonal_ar': self.model.k_seasonal_ar,
            'k_seasonal_ma': self.model.k_seasonal_ma,

            # Param Numbers
            'k_ar_params': self.model.k_ar_params,
            'k_ma_params': self.model.k_ma_params,

            # Trend / Regression
            'trend': self.model.trend,
            'k_trend': self.model.k_trend,
            'k_exog': self.model.k_exog,
            'mle_regression': self.model.mle_regression,
            'state_regression': self.model.state_regression,
        }

        # Polynomials
        self.polynomial_trend = self.model.polynomial_trend
        self.polynomial_ar = self.model.polynomial_ar
        self.polynomial_ma = self.model.polynomial_ma
        self.polynomial_seasonal_ar = self.model.polynomial_seasonal_ar
        self.polynomial_seasonal_ma = self.model.polynomial_seasonal_ma
        # Reduced-form polynomials: product of regular and seasonal parts.
        self.polynomial_reduced_ar = np.polymul(
            self.polynomial_ar, self.polynomial_seasonal_ar
        )
        self.polynomial_reduced_ma = np.polymul(
            self.polynomial_ma, self.polynomial_seasonal_ma
        )

        # Distinguish parameters
        # Slice the flat params vector into per-term attributes
        # (_params_ar, _params_ma, ...) using the model's declared orders.
        self.model_orders = self.model.model_orders
        self.param_terms = self.model.param_terms
        start = end = 0
        for name in self.param_terms:
            end += self.model_orders[name]
            setattr(self, '_params_%s' % name, self.params[start:end])
            start += self.model_orders[name]

    @cache_readonly
    def arroots(self):
        """
        (array) Roots of the reduced form autoregressive lag polynomial
        """
        # Inverted so that stationarity corresponds to roots inside the
        # unit circle being excluded (values reported are 1/root).
        return np.roots(self.polynomial_reduced_ar)**-1

    @cache_readonly
    def maroots(self):
        """
        (array) Roots of the reduced form moving average lag polynomial
        """
        return np.roots(self.polynomial_reduced_ma)**-1

    @cache_readonly
    def arfreq(self):
        """
        (array) Frequency of the roots of the reduced form autoregressive
        lag polynomial
        """
        z = self.arroots
        if not z.size:
            return
        return np.arctan2(z.imag, z.real) / (2 * np.pi)

    @cache_readonly
    def mafreq(self):
        """
        (array) Frequency of the roots of the reduced form moving average
        lag polynomial
        """
        z = self.maroots
        if not z.size:
            return
        return np.arctan2(z.imag, z.real) / (2 * np.pi)

    @cache_readonly
    def arparams(self):
        """
        (array) Autoregressive parameters actually estimated in the model.
        Does not include parameters whose values are constrained to be zero.
        """
        return self._params_ar

    @cache_readonly
    def maparams(self):
        """
        (array) Moving average parameters actually estimated in the model.
        Does not include parameters whose values are constrained to be zero.
        """
        return self._params_ma

    def predict(self, start=None, end=None, exog=None, dynamic=False,
                **kwargs):
        """
        In-sample prediction and out-of-sample forecasting

        Parameters
        ----------
        start : int, str, or datetime, optional
            Zero-indexed observation number at which to start forecasting, ie.,
            the first forecast is start. Can also be a date string to
            parse or a datetime type. Default is the the zeroth observation.
        end : int, str, or datetime, optional
            Zero-indexed observation number at which to end forecasting, ie.,
            the first forecast is start. Can also be a date string to
            parse or a datetime type. However, if the dates index does not
            have a fixed frequency, end must be an integer index if you
            want out of sample prediction. Default is the last observation in
            the sample.
        exog : array_like, optional
            If the model includes exogenous regressors, you must provide
            exactly enough out-of-sample values for the exogenous variables if
            end is beyond the last observation in the sample.
        dynamic : boolean, int, str, or datetime, optional
            Integer offset relative to `start` at which to begin dynamic
            prediction. Can also be an absolute date string to parse or a
            datetime type (these are not interpreted as offsets).
            Prior to this observation, true endogenous values will be used for
            prediction; starting with this observation and continuing through
            the end of prediction, forecasted endogenous values will be used
            instead.
        full_results : boolean, optional
            If True, returns a FilterResults instance; if False returns a
            tuple with forecasts, the forecast errors, and the forecast error
            covariance matrices. Default is False.
        **kwargs
            Additional arguments may required for forecasting beyond the end
            of the sample. See `FilterResults.predict` for more details.

        Returns
        -------
        forecast : array
            Array of out of sample forecasts.
        """
        if start is None:
            start = 0

        # Handle end (e.g. date)
        _start = self.model._get_predict_start(start)
        _end, _out_of_sample = self.model._get_predict_end(end)

        # Handle exogenous parameters
        # Out-of-sample forecasting with exog and/or trend requires a
        # throwaway model over the extended sample so the time-varying
        # system matrices cover the forecast horizon.
        if _out_of_sample and (self.model.k_exog + self.model.k_trend > 0):
            # Create a new faux SARIMAX model for the extended dataset
            nobs = self.model.orig_endog.shape[0] + _out_of_sample
            endog = np.zeros((nobs, self.model.k_endog))

            if self.model.k_exog > 0:
                if exog is None:
                    raise ValueError('Out-of-sample forecasting in a model'
                                     ' with a regression component requires'
                                     ' additional exogenous values via the'
                                     ' `exog` argument.')
                exog = np.array(exog)
                required_exog_shape = (_out_of_sample, self.model.k_exog)
                if not exog.shape == required_exog_shape:
                    raise ValueError('Provided exogenous values are not of the'
                                     ' appropriate shape. Required %s, got %s.'
                                     % (str(required_exog_shape),
                                        str(exog.shape)))
                exog = np.c_[self.model.orig_exog.T, exog.T].T

            model = SARIMAX(
                endog,
                exog=exog,
                order=self.model.order,
                seasonal_order=self.model.seasonal_order,
                trend=self.model.trend,
                measurement_error=self.model.measurement_error,
                time_varying_regression=self.model.time_varying_regression,
                mle_regression=self.model.mle_regression,
                simple_differencing=self.model.simple_differencing,
                enforce_stationarity=self.model.enforce_stationarity,
                enforce_invertibility=self.model.enforce_invertibility,
                hamilton_representation=self.model.hamilton_representation
            )
            model.update(self.params)

            # Set the kwargs with the update time-varying state space
            # representation matrices
            for name in self.shapes.keys():
                if name == 'obs':
                    continue
                mat = getattr(model, name)
                if mat.shape[-1] > 1:
                    # 2-d matrices are (k, nobs); 3-d are (k, k, nobs): in
                    # both cases pass only the out-of-sample tail.
                    if len(mat.shape) == 2:
                        kwargs[name] = mat[:, -_out_of_sample:]
                    else:
                        kwargs[name] = mat[:, :, -_out_of_sample:]
        elif self.model.k_exog == 0 and exog is not None:
            warn('Exogenous array provided to predict, but additional data not'
                 ' required. `exog` argument ignored.')

        return super(SARIMAXResults, self).predict(
            start=start, end=end, exog=exog, dynamic=dynamic,
            **kwargs
        )

    def forecast(self, steps=1, exog=None, **kwargs):
        """
        Out-of-sample forecasts

        Parameters
        ----------
        steps : int, optional
            The number of out of sample forecasts from the end of the
            sample. Default is 1.
        exog : array_like, optional
            If the model includes exogenous regressors, you must provide
            exactly enough out-of-sample values for the exogenous variables for
            each step forecasted.
        **kwargs
            Additional arguments may required for forecasting beyond the end
            of the sample. See `FilterResults.predict` for more details.

        Returns
        -------
        forecast : array
            Array of out of sample forecasts.
        """
        return super(SARIMAXResults, self).forecast(
            steps, exog=exog, **kwargs
        )

    def summary(self, alpha=.05, start=None):
        # Create the model name

        # See if we have an ARIMA component
        order = ''
        if self.model.k_ar + self.model.k_diff + self.model.k_ma > 0:
            # When some coefficients are constrained to zero, display the
            # nonzero lags as a tuple rather than a plain order integer.
            if self.model.k_ar == self.model.k_ar_params:
                order_ar = self.model.k_ar
            else:
                order_ar = tuple(self.polynomial_ar.nonzero()[0][1:])
            if self.model.k_ma == self.model.k_ma_params:
                order_ma = self.model.k_ma
            else:
                order_ma = tuple(self.polynomial_ma.nonzero()[0][1:])
            # If there is simple differencing, then that is reflected in the
            # dependent variable name
            k_diff = 0 if self.model.simple_differencing else self.model.k_diff
            order = '(%s, %d, %s)' % (order_ar, k_diff, order_ma)
        # See if we have an SARIMA component
        seasonal_order = ''
        has_seasonal = (
            self.model.k_seasonal_ar +
            self.model.k_seasonal_diff +
            self.model.k_seasonal_ma
        ) > 0
        if has_seasonal:
            # NOTE(review): these checks compare the *non-seasonal* counts
            # (k_ar/k_ar_params and k_ma/k_ma_params) while choosing how to
            # display the *seasonal* orders -- looks like a copy/paste slip;
            # confirm against k_seasonal_ar(_params) before changing.
            if self.model.k_ar == self.model.k_ar_params:
                order_seasonal_ar = (
                    int(self.model.k_seasonal_ar / self.model.k_seasons)
                )
            else:
                order_seasonal_ar = (
                    tuple(self.polynomial_seasonal_ar.nonzero()[0][1:])
                )
            if self.model.k_ma == self.model.k_ma_params:
                order_seasonal_ma = (
                    int(self.model.k_seasonal_ma / self.model.k_seasons)
                )
            else:
                order_seasonal_ma = (
                    tuple(self.polynomial_seasonal_ma.nonzero()[0][1:])
                )
            # If there is simple differencing, then that is reflected in the
            # dependent variable name
            k_seasonal_diff = self.model.k_seasonal_diff
            if self.model.simple_differencing:
                k_seasonal_diff = 0
            seasonal_order = ('(%s, %d, %s, %d)' %
                              (str(order_seasonal_ar), k_seasonal_diff,
                               str(order_seasonal_ma), self.model.k_seasons))
            if not order == '':
                order += 'x'
        model_name = ('%s%s%s' %
                      (self.model.__class__.__name__, order, seasonal_order))
        return super(SARIMAXResults, self).summary(
            alpha=alpha, start=start, model_name=model_name
        )
    summary.__doc__ = MLEResults.summary.__doc__
| bsd-3-clause |
lcnature/brainiak | tests/funcalign/test_srm.py | 4 | 10913 | # Copyright 2016 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from sklearn.exceptions import NotFittedError
import pytest
def test_can_instantiate():
    """End-to-end checks for the probabilistic SRM.

    NOTE: the fitted shared response is compared against the stored
    reference file ``sr_v0_4.npz``, which was generated with this exact
    sequence of seeded ``np.random`` calls (seed 0) -- do not reorder any
    random draws in this test.
    """
    import brainiak.funcalign.srm
    s = brainiak.funcalign.srm.SRM()
    assert s, "Invalid SRM instance!"

    import numpy as np
    np.random.seed(0)

    voxels = 100
    samples = 500
    subjects = 2
    features = 3

    s = brainiak.funcalign.srm.SRM(n_iter=5, features=features)
    assert s, "Invalid SRM instance!"

    # Create a Shared response S with K = 3
    theta = np.linspace(-4 * np.pi, 4 * np.pi, samples)
    z = np.linspace(-2, 2, samples)
    r = z**2 + 1
    x = r * np.sin(theta)
    y = r * np.cos(theta)

    S = np.vstack((x, y, z))

    # Per-subject data: random orthonormal map W of the shared response
    # plus small uniform noise.
    X = []
    W = []
    Q, R = np.linalg.qr(np.random.random((voxels, features)))
    W.append(Q)
    X.append(Q.dot(S) + 0.1*np.random.random((voxels, samples)))

    # Check that transform does NOT run before fitting the model
    with pytest.raises(NotFittedError):
        s.transform(X)
    print("Test: transforming before fitting the model")

    # Check that it does NOT run with 1 subject
    with pytest.raises(ValueError):
        s.fit(X)
    print("Test: running SRM with 1 subject")

    for subject in range(1, subjects):
        Q, R = np.linalg.qr(np.random.random((voxels, features)))
        W.append(Q)
        X.append(Q.dot(S) + 0.1*np.random.random((voxels, samples)))

    # Check that runs with 2 subject
    s.fit(X)

    # Regression check against the stored v0.4 shared response.
    from pathlib import Path
    sr_v0_4 = np.load(Path(__file__).parent / "sr_v0_4.npz")['sr']
    assert(np.allclose(sr_v0_4, s.s_))

    assert len(s.w_) == subjects, (
        "Invalid computation of SRM! (wrong # subjects in W)")
    for subject in range(subjects):
        assert s.w_[subject].shape[0] == voxels, (
            "Invalid computation of SRM! (wrong # voxels in W)")
        assert s.w_[subject].shape[1] == features, (
            "Invalid computation of SRM! (wrong # features in W)")
        ortho = np.linalg.norm(s.w_[subject].T.dot(s.w_[subject])
                               - np.eye(s.w_[subject].shape[1]),
                               'fro')
        assert ortho < 1e-7, "A Wi mapping is not orthonormal in SRM."
        difference = np.linalg.norm(X[subject] - s.w_[subject].dot(s.s_),
                                    'fro')
        datanorm = np.linalg.norm(X[subject], 'fro')
        assert difference/datanorm < 1.0, "Model seems incorrectly computed."
    assert s.s_.shape[0] == features, (
        "Invalid computation of SRM! (wrong # features in S)")
    assert s.s_.shape[1] == samples, (
        "Invalid computation of SRM! (wrong # samples in S)")

    # Check that it does run to compute the shared response after the model
    # computation
    new_s = s.transform(X)

    assert len(new_s) == subjects, (
        "Invalid computation of SRM! (wrong # subjects after transform)")
    for subject in range(subjects):
        assert new_s[subject].shape[0] == features, (
            "Invalid computation of SRM! (wrong # features after transform)")
        assert new_s[subject].shape[1] == samples, (
            "Invalid computation of SRM! (wrong # samples after transform)")

    # Check that it does NOT run with non-matching number of subjects
    with pytest.raises(ValueError):
        s.transform(X[1])
    print("Test: transforming with non-matching number of subjects")

    # Check that it does not run without enough samples (TRs).
    with pytest.raises(ValueError):
        s.set_params(features=(samples+1))
        s.fit(X)
    print("Test: not enough samples")

    # Check that it does not run with different number of samples (TRs)
    S2 = S[:, :-2]
    X.append(Q.dot(S2))
    with pytest.raises(ValueError):
        s.fit(X)
    print("Test: different number of samples per subject")
def test_new_subject():
    """Checks for ``transform_subject`` on both SRM and DetSRM.

    NOTE: uses a fixed seed (0) and a specific sequence of ``np.random``
    calls; reordering the draws changes the fitted models.
    """
    import brainiak.funcalign.srm
    s = brainiak.funcalign.srm.SRM()
    assert s, "Invalid SRM instance!"

    import numpy as np
    np.random.seed(0)

    voxels = 100
    samples = 500
    subjects = 3
    features = 3

    s = brainiak.funcalign.srm.SRM(n_iter=5, features=features)
    assert s, "Invalid SRM instance!"

    # Create a Shared response S with K = 3
    theta = np.linspace(-4 * np.pi, 4 * np.pi, samples)
    z = np.linspace(-2, 2, samples)
    r = z**2 + 1
    x = r * np.sin(theta)
    y = r * np.cos(theta)

    S = np.vstack((x, y, z))

    # Per-subject data: random orthonormal map of the shared response
    # plus small uniform noise.
    X = []
    W = []
    Q, R = np.linalg.qr(np.random.random((voxels, features)))
    W.append(Q)
    X.append(Q.dot(S) + 0.1*np.random.random((voxels, samples)))
    for subject in range(1, subjects):
        Q, R = np.linalg.qr(np.random.random((voxels, features)))
        W.append(Q)
        X.append(Q.dot(S) + 0.1*np.random.random((voxels, samples)))

    # Check that transform does NOT run before fitting the model
    with pytest.raises(NotFittedError):
        s.transform_subject(X)
    print("Test: transforming before fitting the model")

    # Check that runs with 3 subject
    s.fit(X)

    # Check that you get an error when the data is the wrong shape
    with pytest.raises(ValueError):
        s.transform_subject(X[0].T)

    # Check that it does run to compute a new subject
    new_w = s.transform_subject(X[0])
    assert new_w.shape[1] == features, (
        "Invalid computation of SRM! (wrong # features for new subject)")
    assert new_w.shape[0] == voxels, (
        "Invalid computation of SRM! (wrong # voxels for new subject)")

    # Check that these analyses work with the deterministic SRM too
    ds = brainiak.funcalign.srm.DetSRM(n_iter=5, features=features)

    # Check that transform does NOT run before fitting the model
    with pytest.raises(NotFittedError):
        ds.transform_subject(X)
    print("Test: transforming before fitting the model")

    # Check that runs with 3 subject
    ds.fit(X)

    # Check that you get an error when the data is the wrong shape
    with pytest.raises(ValueError):
        ds.transform_subject(X[0].T)

    # Check that it does run to compute a new subject
    new_w = ds.transform_subject(X[0])
    assert new_w.shape[1] == features, (
        "Invalid computation of SRM! (wrong # features for new subject)")
    assert new_w.shape[0] == voxels, (
        "Invalid computation of SRM! (wrong # voxels for new subject)")
def test_det_srm():
    """End-to-end checks for the deterministic SRM (DetSRM).

    NOTE: mirrors ``test_can_instantiate`` but is unseeded; assertions are
    tolerance/shape based rather than compared to a stored reference. The
    sequence of ``np.random`` calls still determines the fitted values.
    """
    import brainiak.funcalign.srm
    model = brainiak.funcalign.srm.DetSRM()
    assert model, "Invalid DetSRM instance!"

    import numpy as np

    voxels = 100
    samples = 500
    subjects = 2
    features = 3

    model = brainiak.funcalign.srm.DetSRM(n_iter=5, features=features)
    assert model, "Invalid DetSRM instance!"

    # Create a Shared response S with K = 3
    theta = np.linspace(-4 * np.pi, 4 * np.pi, samples)
    z = np.linspace(-2, 2, samples)
    r = z**2 + 1
    x = r * np.sin(theta)
    y = r * np.cos(theta)

    S = np.vstack((x, y, z))

    # Per-subject data: random orthonormal map of the shared response
    # plus small uniform noise.
    X = []
    W = []
    Q, R = np.linalg.qr(np.random.random((voxels, features)))
    W.append(Q)
    X.append(Q.dot(S) + 0.1*np.random.random((voxels, samples)))

    # Check that transform does NOT run before fitting the model
    with pytest.raises(NotFittedError):
        model.transform(X)
    print("Test: transforming before fitting the model")

    # Check that it does NOT run with 1 subject
    with pytest.raises(ValueError):
        model.fit(X)
    print("Test: running DetSRM with 1 subject")

    for subject in range(1, subjects):
        Q, R = np.linalg.qr(np.random.random((voxels, features)))
        W.append(Q)
        X.append(Q.dot(S) + 0.1*np.random.random((voxels, samples)))

    # Check that runs with 2 subject
    model.fit(X)

    assert len(model.w_) == subjects, (
        "Invalid computation of DetSRM! (wrong # subjects in W)")
    for subject in range(subjects):
        assert model.w_[subject].shape[0] == voxels, (
            "Invalid computation of DetSRM! (wrong # voxels in W)")
        assert model.w_[subject].shape[1] == features, (
            "Invalid computation of DetSRM! (wrong # features in W)")
        ortho = np.linalg.norm(model.w_[subject].T.dot(model.w_[subject])
                               - np.eye(model.w_[subject].shape[1]),
                               'fro')
        assert ortho < 1e-7, "A Wi mapping is not orthonormal in DetSRM."
        difference = np.linalg.norm(X[subject]
                                    - model.w_[subject].dot(model.s_),
                                    'fro')
        datanorm = np.linalg.norm(X[subject], 'fro')
        assert difference/datanorm < 1.0, "Model seems incorrectly computed."
    assert model.s_.shape[0] == features, (
        "Invalid computation of DetSRM! (wrong # features in S)")
    assert model.s_.shape[1] == samples, (
        "Invalid computation of DetSRM! (wrong # samples in S)")

    # Check that it does run to compute the shared response after the model
    # computation
    new_s = model.transform(X)

    assert len(new_s) == subjects, (
        "Invalid computation of DetSRM! (wrong # subjects after transform)")
    for subject in range(subjects):
        assert new_s[subject].shape[0] == features, (
            "Invalid computation of DetSRM! (wrong # features after "
            "transform)")
        assert new_s[subject].shape[1] == samples, (
            "Invalid computation of DetSRM! (wrong # samples after transform)")

    # Check that it does run to compute a new subject
    new_w = model.transform_subject(X[0])
    assert new_w.shape[1] == features, (
        "Invalid computation of SRM! (wrong # features for new subject)")
    assert new_w.shape[0] == voxels, (
        "Invalid computation of SRM! (wrong # voxels for new subject)")

    # Check that it does NOT run with non-matching number of subjects
    with pytest.raises(ValueError):
        model.transform(X[1])
    print("Test: transforming with non-matching number of subjects")

    # Check that it does not run without enough samples (TRs).
    with pytest.raises(ValueError):
        model.set_params(features=(samples+1))
        model.fit(X)
    print("Test: not enough samples")

    # Check that it does not run with different number of samples (TRs)
    S2 = S[:, :-2]
    X.append(Q.dot(S2))
    with pytest.raises(ValueError):
        model.fit(X)
    print("Test: different number of samples per subject")
| apache-2.0 |
jseabold/statsmodels | statsmodels/tsa/filters/hp_filter.py | 4 | 3240 |
import numpy as np
from scipy import sparse
from scipy.sparse.linalg import spsolve
from statsmodels.tools.validation import array_like, PandasWrapper
def hpfilter(x, lamb=1600):
    """
    Hodrick-Prescott filter.

    Parameters
    ----------
    x : array_like
        The time series to filter, 1-d.
    lamb : float
        The Hodrick-Prescott smoothing parameter. A value of 1600 is
        suggested for quarterly data. Ravn and Uhlig suggest using a value
        of 6.25 (1600/4**4) for annual data and 129600 (1600*3**4) for monthly
        data.

    Returns
    -------
    cycle : ndarray
        The estimated cycle in the data given lamb.
    trend : ndarray
        The estimated trend in the data given lamb.

    See Also
    --------
    statsmodels.tsa.filters.bk_filter.bkfilter
        Baxter-King filter.
    statsmodels.tsa.filters.cf_filter.cffilter
        The Christiano Fitzgerald asymmetric, random walk filter.
    statsmodels.tsa.seasonal.seasonal_decompose
        Decompose a time series using moving averages.
    statsmodels.tsa.seasonal.STL
        Season-Trend decomposition using LOESS.

    Notes
    -----
    The HP filter removes a smooth trend, `T`, from the data `x`. by solving

    min sum((x[t] - T[t])**2 + lamb*((T[t+1] - T[t]) - (T[t] - T[t-1]))**2)
     T   t

    Here we implemented the HP filter as a ridge-regression rule using
    scipy.sparse. In this sense, the solution can be written as

    T = inv(I + lamb*K'K)x

    where I is a nobs x nobs identity matrix, and K is a (nobs-2) x nobs matrix
    such that

    K[i,j] = 1 if i == j or i == j + 2

    K[i,j] = -2 if i == j + 1

    K[i,j] = 0 otherwise

    References
    ----------
    Hodrick, R.J, and E. C. Prescott. 1980. "Postwar U.S. Business Cycles: An
        Empirical Investigation." `Carnegie Mellon University discussion
        paper no. 451`.
    Ravn, M.O and H. Uhlig. 2002. "Notes On Adjusted the Hodrick-Prescott
        Filter for the Frequency of Observations." `The Review of Economics and
        Statistics`, 84(2), 371-80.

    Examples
    --------
    >>> import statsmodels.api as sm
    >>> import pandas as pd
    >>> dta = sm.datasets.macrodata.load_pandas().data
    >>> index = pd.period_range('1959Q1', '2009Q3', freq='Q')
    >>> dta.set_index(index, inplace=True)

    >>> cycle, trend = sm.tsa.filters.hpfilter(dta.realgdp, 1600)
    >>> gdp_decomp = dta[['realgdp']]
    >>> gdp_decomp["cycle"] = cycle
    >>> gdp_decomp["trend"] = trend

    >>> import matplotlib.pyplot as plt
    >>> fig, ax = plt.subplots()
    >>> gdp_decomp[["realgdp", "trend"]]["2000-03-31":].plot(ax=ax,
    ...                                                      fontsize=16)
    >>> plt.show()

    .. plot:: plots/hpf_plot.py
    """
    # Keep a wrapper so pandas inputs come back as labeled pandas outputs.
    wrapper = PandasWrapper(x)
    series = array_like(x, 'x', ndim=1)
    nobs = len(series)

    # Build the (nobs - 2) x nobs second-difference operator K, whose rows
    # are the stencil [1, -2, 1].
    stencil = np.repeat([[1.], [-2.], [1.]], nobs, axis=1)
    second_diff = sparse.dia_matrix(
        (stencil, np.array([0, 1, 2])), shape=(nobs - 2, nobs)
    )
    identity = sparse.eye(nobs, nobs)

    # Ridge-regression form of the filter: T = inv(I + lamb * K'K) x
    trend = spsolve(
        identity + lamb * second_diff.T.dot(second_diff), series,
        use_umfpack=True,
    )

    # The cycle is whatever the smooth trend does not explain.
    cycle = series - trend
    return (wrapper.wrap(cycle, append='cycle'),
            wrapper.wrap(trend, append='trend'))
| bsd-3-clause |
mrcslws/nupic.research | projects/whydense/mnist/analyze_noise.py | 3 | 5135 | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2019, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
import json
import logging
from os.path import basename, dirname
from pathlib import Path
import click
import matplotlib
import matplotlib.pyplot as plt
import pandas as pd
from torchvision import datasets, transforms
from nupic.research.frameworks.pytorch.image_transforms import RandomNoise
from nupic.research.support import load_ray_tune_experiment, parse_config
logging.basicConfig(level=logging.ERROR)
matplotlib.use("Agg")
NOISE_VALUES = [
"0.0",
"0.05",
"0.1",
"0.15",
"0.2",
"0.25",
"0.3",
"0.35",
"0.4",
"0.45",
"0.5",
]
EXPERIMENTS = {
"denseCNN1": {"label": "dense-CNN1", "linestyle": "--", "marker": "o"},
"denseCNN2": {"label": "dense-CNN2", "linestyle": "--", "marker": "x"},
"sparseCNN1": {"label": "sparse-CNN1", "linestyle": "-", "marker": "*"},
"sparseCNN2": {"label": "sparse-CNN2", "linestyle": "-", "marker": "x"},
}
def plot_noise_curve(configs, results, plot_path):
    """Draw one accuracy-vs-noise line per experiment and save the figure.

    Line styling (label, linestyle, marker) is looked up in the module-level
    EXPERIMENTS table; *results* maps each experiment name to its accuracy
    series. The figure is written to *plot_path*.
    """
    figure, axes = plt.subplots()
    figure.suptitle("Accuracy vs noise")
    axes.set_xlabel("Noise")
    axes.set_ylabel("Accuracy (percent)")
    for experiment in configs:
        axes.plot(results[experiment], **EXPERIMENTS[experiment])
    plt.legend()
    plt.grid(axis="y")
    plt.savefig(plot_path)
    plt.close()
def plot_images_with_noise(datadir, noise_values, plot_path):
    """Plot Sample MNIST images with noise.

    Renders a 4 x len(noise_values) grid — one row per sample digit, one
    column per noise level — and saves the figure to *plot_path*.

    :param datadir: directory the MNIST test set is read from (downloaded
        there if missing)
    :param noise_values: noise fractions, one per column, e.g. [0.0, 0.1, ...]
    :param plot_path: file path the figure is written to
    """
    transform = transforms.Compose(
        [transforms.ToTensor(), transforms.Normalize((0.1307,), (0.3081,))]
    )
    dataset = datasets.MNIST(datadir, train=False, download=True, transform=transform)
    num_noise = len(noise_values)
    fig = plt.figure(figsize=(num_noise, 4))
    for y in range(4):
        for x in range(num_noise):
            # Temporarily append the noise transform so that indexing the
            # dataset below applies it, then pop it again — the dataset
            # re-runs the (mutated) transform pipeline on every access.
            # high_value = mean + 2*std of the Normalize transform above.
            transform.transforms.append(
                RandomNoise(noise_values[x], high_value=0.1307 + 2 * 0.3081)
            )
            img, _ = dataset[y]
            transform.transforms.pop()
            ax = fig.add_subplot(4, num_noise, y * num_noise + x + 1)
            ax.set_axis_off()
            ax.imshow(img.numpy().reshape((28, 28)), cmap="gray")
            # only the top row carries the noise-percentage column titles
            if y == 0:
                ax.set_title("{0}%".format(noise_values[x] * 100))
    plt.tight_layout()
    plt.savefig(plot_path)
    plt.close()
@click.command()
@click.option(
    "-c",
    "--config",
    metavar="FILE",
    type=open,
    default="experiments.cfg",
    show_default=True,
    help="your experiments config file",
)
def main(config):
    """Plot accuracy-vs-noise curves and sample noisy MNIST images.

    For every experiment listed in EXPERIMENTS, loads the ray-tune
    checkpoints' noise scores, averages the accuracy over checkpoints, and
    writes two PDFs (noise curve, sample images) next to the config file.

    :param config: open file handle of the experiments config (from click)
    """
    # Use configuration file location as the project location.
    project_dir = Path(dirname(config.name)).expanduser().resolve()
    # MNIST data for the sample-image plot lives under the project dir.
    data_dir = Path(project_dir) / "data"

    # Load and parse experiment configurations
    configs = parse_config(
        config_file=config,
        experiments=list(EXPERIMENTS.keys()),
        globals_param=globals(),
    )

    results = {}
    for exp in configs:
        # Use a distinct name here: the original rebound ``config`` (the CLI
        # file handle) and ``data_dir`` inside this loop, clobbering the
        # project-level values computed above.
        exp_config = configs[exp]

        # Load experiment data
        path = Path(exp_config["path"]).expanduser().resolve()
        experiment_path = path / exp
        experiment_state = load_ray_tune_experiment(
            experiment_path=experiment_path, load_results=True
        )

        # Load noise score and compute the mean_accuracy over all checkpoints
        exp_df = pd.DataFrame()
        for checkpoint in experiment_state["checkpoints"]:
            logdir = experiment_path / basename(checkpoint["logdir"])
            filename = logdir / "noise.json"
            with open(filename, "r") as f:
                df = pd.DataFrame(json.load(f)).transpose()
            # One row per checkpoint; pd.concat replaces the removed
            # DataFrame.append API.
            exp_df = pd.concat(
                [exp_df, df["mean_accuracy"].to_frame().T], ignore_index=True
            )
        results[exp] = exp_df.mean()

    plot_path = project_dir / "accuracy_vs_noise.pdf"
    plot_noise_curve(configs=configs, results=results, plot_path=plot_path)

    # Plot noisy images
    plot_path = project_dir / "mnist_images_with_noise.pdf"
    plot_images_with_noise(
        datadir=data_dir,
        noise_values=[0.0, 0.1, 0.2, 0.3, 0.4, 0.5],
        plot_path=plot_path,
    )
if __name__ == "__main__":
    # click entry point; parses -c/--config from the command line
    main()
| agpl-3.0 |
pgr-me/metis_projects | 05-lights/library/_02_clean.py | 1 | 1176 | import geopandas as gpd
import pandas as pd
import pickle

# load, clean, and normalize country-level lights data
# NOTE(review): the pickle is opened in text mode and .as_matrix() below was
# removed in pandas 1.0 -- this script targets Python 2 / old pandas; confirm
# before running on a modern stack.
with open('data/geo/pickles/zonal_stats_c.pickle') as f:
    gdf = pickle.load(f)
gdf = pd.DataFrame(gdf)
# one row per country, keyed by the World Bank alpha-3 code
gdf = gdf.drop_duplicates(subset='WB_A3')
gdf = gdf.set_index('WB_A3')
gdf.drop(['ADMIN', 'CONTINENT', 'ISO_A3', 'REGION_UN', 'REGION_WB', 'SUBREGION', 'geometry'], axis=1, inplace=True)
# normalize each country's series by its F101992 column (presumably the 1992
# baseline -- it mirrors the wb '1992' normaliser below; verify)
gdf_normalizer = (gdf.F101992).as_matrix()
gdf_normed = gdf.divide(gdf_normalizer, axis=0)

# Load, clean, and normalize wb data
wb = pd.read_csv('data/econ/wb.csv')
# wb = wb[wb['Series Name'] == 'GDP at market prices (constant 2005 US$)']
label = 'GDP, PPP (constant 2011 international $)'
wb = wb[wb['Series Name'] == label]
wb.drop(['Country Name', 'Series Name', 'Series Code', '2014', '2015'], axis=1, inplace=True)
wb.rename(columns={'Country Code': 'WB_A3'}, inplace=True)
wb.dropna(axis=0, inplace=True)
wb = wb.set_index('WB_A3')
# normalize GDP by its 1992 value, mirroring the lights normalisation
wb_normalizer = (wb['1992']).as_matrix()
wb_normed = wb.divide(wb_normalizer, axis=0)

# join lights and wb datasets
df = gdf_normed.join(wb_normed, how='inner')
# pickle joined dataframe
df.to_pickle('data/cleaned_df.pickle') | gpl-3.0 |
Edu-Glez/Bank_sentiment_analysis | env/lib/python3.6/site-packages/pandas/tests/test_panelnd.py | 7 | 3952 | # -*- coding: utf-8 -*-
import nose
from pandas.core import panelnd
from pandas.core.panel import Panel
from pandas.util.testing import assert_panel_equal
import pandas.util.testing as tm
class TestPanelnd(tm.TestCase):
    """Tests for the generic n-dimensional panel factory (deprecated Panel4D/5D)."""

    def setUp(self):
        # no shared fixtures needed
        pass

    def test_4d_construction(self):
        """A 4D panel class can be built with the concrete Panel as slicer."""
        with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):

            # create a 4D
            Panel4D = panelnd.create_nd_panel_factory(
                klass_name='Panel4D',
                orders=['labels', 'items', 'major_axis', 'minor_axis'],
                slices={'items': 'items', 'major_axis': 'major_axis',
                        'minor_axis': 'minor_axis'},
                slicer=Panel,
                aliases={'major': 'major_axis', 'minor': 'minor_axis'},
                stat_axis=2)

            p4d = Panel4D(dict(L1=tm.makePanel(), L2=tm.makePanel()))  # noqa

    def test_4d_construction_alt(self):
        """The slicer may also be given by name ('Panel') instead of the class."""
        with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):

            # create a 4D
            Panel4D = panelnd.create_nd_panel_factory(
                klass_name='Panel4D',
                orders=['labels', 'items', 'major_axis', 'minor_axis'],
                slices={'items': 'items', 'major_axis': 'major_axis',
                        'minor_axis': 'minor_axis'},
                slicer='Panel',
                aliases={'major': 'major_axis', 'minor': 'minor_axis'},
                stat_axis=2)

            p4d = Panel4D(dict(L1=tm.makePanel(), L2=tm.makePanel()))  # noqa

    def test_4d_construction_error(self):
        """An unknown slicer name must raise."""

        # create a 4D
        self.assertRaises(Exception,
                          panelnd.create_nd_panel_factory,
                          klass_name='Panel4D',
                          orders=['labels', 'items', 'major_axis',
                                  'minor_axis'],
                          slices={'items': 'items',
                                  'major_axis': 'major_axis',
                                  'minor_axis': 'minor_axis'},
                          slicer='foo',
                          aliases={'major': 'major_axis',
                                   'minor': 'minor_axis'},
                          stat_axis=2)

    def test_5d_construction(self):
        """A 5D panel can be stacked on a 4D one and sliced back down to 4D."""
        with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):

            # create a 4D
            Panel4D = panelnd.create_nd_panel_factory(
                klass_name='Panel4D',
                orders=['labels1', 'items', 'major_axis', 'minor_axis'],
                slices={'items': 'items', 'major_axis': 'major_axis',
                        'minor_axis': 'minor_axis'},
                slicer=Panel,
                aliases={'major': 'major_axis', 'minor': 'minor_axis'},
                stat_axis=2)

            # deprecation GH13564
            p4d = Panel4D(dict(L1=tm.makePanel(), L2=tm.makePanel()))

            # create a 5D
            Panel5D = panelnd.create_nd_panel_factory(
                klass_name='Panel5D',
                orders=['cool1', 'labels1', 'items', 'major_axis',
                        'minor_axis'],
                slices={'labels1': 'labels1', 'items': 'items',
                        'major_axis': 'major_axis',
                        'minor_axis': 'minor_axis'},
                slicer=Panel4D,
                aliases={'major': 'major_axis', 'minor': 'minor_axis'},
                stat_axis=2)

            # deprecation GH13564
            p5d = Panel5D(dict(C1=p4d))

            # slice back to 4d
            results = p5d.ix['C1', :, :, 0:3, :]
            expected = p4d.ix[:, :, 0:3, :]
            assert_panel_equal(results['L1'], expected['L1'])

            # test a transpose
            # results = p5d.transpose(1,2,3,4,0)
            # expected =
if __name__ == '__main__':
    # Run this module's tests under nose when executed directly.
    nose.runmodule(argv=[__file__, '-vvs', '-x', '--pdb', '--pdb-failure'],
                   exit=False)
| apache-2.0 |
VirusTotal/msticpy | msticpy/sectools/tiproviders/azure_sent_byoti.py | 1 | 4854 | # -------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
# --------------------------------------------------------------------------
"""
Azure Sentinel TI provider class.
Input can be a single IoC observable or a pandas DataFrame containing
multiple observables. Processing may require a an API key and
processing performance may be limited to a specific number of
requests per minute for the account type that you have.
"""
from typing import Any, Dict, Tuple
import pandas as pd
from ..._version import VERSION
from ...common.utility import export
from .ti_provider_base import LookupResult, TISeverity
from .kql_base import KqlTIProvider
__version__ = VERSION
__author__ = "Ian Hellen"
@export
class AzSTI(KqlTIProvider):
    """Azure Sentinel TI provider class."""

    # KQL query per IoC type; each maps to (query name, parameter mapping)
    _IOC_QUERIES: Dict[str, tuple] = {
        "ipv4": ("ThreatIntelligence.list_indicators_by_ip", {"ioc": "observables"}),
        "file_hash": (
            "ThreatIntelligence.list_indicators_by_hash",
            {"ioc": "observables"},
        ),
        "windows_path": (
            "ThreatIntelligence.list_indicators_by_filepath",
            {"ioc": "observables"},
        ),
        "dns": ("ThreatIntelligence.list_indicators_by_domain", {"ioc": "observables"}),
        "url": ("ThreatIntelligence.list_indicators_by_url", {"ioc": "observables"}),
    }

    # aliases
    _IOC_QUERIES["ipv6"] = _IOC_QUERIES["ipv4"]
    _IOC_QUERIES["md5_hash"] = _IOC_QUERIES["file_hash"]
    _IOC_QUERIES["sha1_hash"] = _IOC_QUERIES["file_hash"]
    _IOC_QUERIES["sha256_hash"] = _IOC_QUERIES["file_hash"]
    _IOC_QUERIES["linux_path"] = _IOC_QUERIES["windows_path"]
    _IOC_QUERIES["hostname"] = _IOC_QUERIES["dns"]

    def parse_results(self, response: LookupResult) -> Tuple[bool, TISeverity, Any]:
        """
        Return the details of the response.

        Parameters
        ----------
        response : LookupResult
            The returned data response

        Returns
        -------
        Tuple[bool, TISeverity, Any]
            bool = positive or negative hit
            TISeverity = enumeration of severity
            Object with match details

        """
        if response.raw_result is None:
            return False, TISeverity.information, "No data"
        # default severity; raised to "high" when the action is alert/block
        severity = TISeverity.warning
        # if this is a series (single row) return a dictionary
        if isinstance(response.raw_result, pd.Series):
            extracted_data = response.raw_result[
                ["Action", "ThreatType", "ThreatSeverity", "Active", "ConfidenceScore"]
            ].to_dict()
            if extracted_data["Action"].lower() in ["alert", "block"]:
                severity = TISeverity.high
            # BUG FIX: previously returned the hard-coded TISeverity.warning,
            # discarding the "high" severity computed just above.
            return True, severity, extracted_data
        # if this is a dataframe (multiple rows)
        # concatenate the values for each column/record into a list
        # and return as a dictionary
        if isinstance(response.raw_result, pd.DataFrame):
            d_frame = response.raw_result
            if d_frame["Action"].str.lower().isin(["alert", "block"]).any():
                severity = TISeverity.high
            return (
                True,
                severity,
                {
                    "Action": self._series_to_list(d_frame["Action"]),
                    "ThreatType": self._series_to_list(d_frame["ThreatType"]),
                    "ThreatSeverity": self._series_to_list(d_frame["ThreatSeverity"]),
                    "Active": self._series_to_list(d_frame["Active"]),
                    "Description": self._series_to_list(d_frame["Description"]),
                    "ConfidenceScore": self._series_to_list(d_frame["ConfidenceScore"]),
                },
            )
        return False, TISeverity.information, "No data"

    @staticmethod
    def _get_detail_summary(data_result: pd.DataFrame) -> pd.Series:
        # For the input frame return details in a series with
        # Details in dict
        return data_result.apply(
            lambda x: {
                "Action": x.Action,
                "ThreatType": x.ThreatType,
                "ThreatSeverity": x.ThreatSeverity,
                "Active": x.Active,
                "Description": x.Description,
                "ConfidenceScore": x.ConfidenceScore,
            },
            axis=1,
        )

    @staticmethod
    def _get_severity(data_result: pd.DataFrame) -> pd.Series:
        # For the input frame return severity in a series:
        # high for alert/block actions, warning otherwise
        return data_result.apply(
            lambda x: TISeverity.high.value
            if x.Action.lower() in ["alert", "block"]
            else TISeverity.warning.value,
            axis=1,
        )
| mit |
alexlib/image_registration | examples/benchmarks_shift.py | 4 | 6045 | """
imsize map_coordinates fourier_shift
50 0.016211 0.00944495
84 0.0397182 0.0161059
118 0.077543 0.0443089
153 0.132948 0.058187
187 0.191808 0.0953341
221 0.276543 0.12069
255 0.357552 0.182863
289 0.464547 0.26451
324 0.622776 0.270612
358 0.759015 0.713239
392 0.943339 0.441262
426 1.12885 0.976379
461 1.58367 1.26116
495 1.62482 0.824757
529 1.83506 1.19455
563 3.21001 2.82487
597 2.64892 2.23473
632 2.74313 2.21019
666 3.07002 2.49054
700 3.50138 1.59507
Fourier outperforms map_coordinates slightly. It wraps, though, while
map_coordinates in general does not.
With skimage:
imsize map_coordinates fourier_shift skimage
50 0.0154819 0.00862598 0.0100191
84 0.0373471 0.0164428 0.0299141
118 0.0771091 0.0555351 0.047652
153 0.128651 0.0582621 0.108211
187 0.275812 0.129408 0.17893
221 0.426893 0.177555 0.281367
255 0.571022 0.26866 0.354988
289 0.75541 0.412766 0.415558
324 1.02605 0.402632 0.617405
358 1.14151 0.975867 0.512207
392 1.51085 0.549434 0.904133
426 1.72907 1.28387 0.948763
461 2.03424 1.79091 1.09984
495 2.23595 0.976755 1.49104
529 2.59915 1.95115 1.47774
563 3.34082 3.03312 1.76769
597 3.43117 2.84357 2.67582
632 4.06516 4.19464 2.22102
666 6.22056 3.65876 2.39756
700 5.06125 2.00939 2.73733
Fourier's all over the place, probably because of a strong dependence on
primeness. Comparable to skimage for some cases though.
"""
import itertools
import timeit
import time
import numpy as np
# Timing accumulators, keyed by the shift implementation under test.  The
# griddata variants are commented out because they were far too slow to
# benchmark alongside the others (see notes below).
timings = {'map_coordinates':[],
           'fourier_shift':[],
           'skimage':[],
           #'griddata_nearest':[],
           #'griddata_linear':[],
           #'griddata_cubic':[],
           }

# NOTE(review): the second assignment overrides the first, so only image
# sizes up to 700 are actually benchmarked.
imsizes = np.round(np.linspace(50,1024,20))
imsizes = np.round(np.linspace(50,700,20)) # just playing around with what's reasonable for my laptop

for imsize in imsizes:
    t0 = time.time()

    # Per-size setup source handed to timeit: builds a Gaussian test image
    # and imports the candidate shift implementations.
    # NOTE(review): as written, .replace(" ","") strips *every* space from
    # this snippet (which would mangle statements like "import numpy as np");
    # confirm against the upstream file whether a longer whitespace pattern
    # (indentation-only dedent) was intended.
    setup = """
import numpy as np
#im = np.random.randn({imsize},{imsize})
yy,xx = np.indices([{imsize},{imsize}])
im = np.exp(-((xx-{imsize}/2.)**2+(yy-{imsize}/2.)**2)/(2**2*2.))
yr = np.arange({imsize})
xr = np.arange({imsize})
import image_registration.fft_tools.shift as fsh
import image_registration.fft_tools.zoom as fzm
import scipy.interpolate as si
import scipy.ndimage as snd
points = zip(xx.flat,yy.flat)
imflat = im.ravel()
import skimage.transform as skit
skshift = skit.AffineTransform(translation=[0.5,0.5])
""".replace(" ","").format(imsize=imsize)

    fshift_timer = timeit.Timer("ftest=fsh.shiftnd(im,(0.5,0.5))",
            setup=setup)
    # too slow!
    # interp2d_timer = timeit.Timer("itest=si.interp2d(xr,yr,im)(xr-0.5,yr-0.5)",
    #         setup=setup)
    mapcoord_timer = timeit.Timer("mtest=snd.map_coordinates(im,[yy-0.5,xx-0.5])",
            setup=setup)
    # not exactly right; doesn't do quite the same thing as the others...
    # but wow, I wish I'd figured out how to use this a week ago...
    skimage_timer = timeit.Timer("stest=skit.warp(im,skshift)",setup=setup)
    # all slow
    #grid_timer_nearest = timeit.Timer("gtest=si.griddata(points,imflat,(xx-0.5,yy-0.5), method='nearest')",
    #        setup=setup)
    #grid_timer_linear = timeit.Timer("gtest=si.griddata(points,imflat,(xx-0.5,yy-0.5), method='linear')",
    #        setup=setup)
    #grid_timer_cubic = timeit.Timer("gtest=si.griddata(points,imflat,(xx-0.5,yy-0.5), method='cubic')",
    #        setup=setup)

    # best of 3 repeats of 10 executions each for every method
    print "imsize %i fourier shift " % imsize,
    timings['fourier_shift'].append( np.min(fshift_timer.repeat(3,10)) )
    print "imsize %i map_coordinates shift " % imsize,
    timings['map_coordinates'].append( np.min(mapcoord_timer.repeat(3,10)) )
    print "imsize %i skimage shift " % imsize,
    timings['skimage'].append( np.min(skimage_timer.repeat(3,10)) )
    #timings['griddata_nearest'].append( np.min(grid_timer_nearest.repeat(3,10)) )
    #timings['griddata_linear'].append( np.min(grid_timer_linear.repeat(3,10)) )
    #timings['griddata_cubic'].append( np.min(grid_timer_cubic.repeat(3,10)) )
    print "imsize %i done, %f seconds" % (imsize,time.time()-t0)

# ASCII results table: one row per image size, one column per method
print "%10s " % "imsize"," ".join(["%16s" % t for t in timings.keys()])
for ii,sz in enumerate(imsizes):
    print "%10i " % sz," ".join(["%16.6g" % t[ii] for t in timings.values()])

import scipy.optimize as scopt

# Power-law model for runtime vs image size: t(x) = c + a*x**b
def f(x,a,b,c):
    return c+a*x**b

pm,err = scopt.curve_fit(f,imsizes[2:],timings['map_coordinates'][2:])
pf,err = scopt.curve_fit(f,imsizes[2:],timings['fourier_shift'][2:])
ps,err = scopt.curve_fit(f,imsizes[2:],timings['skimage'][2:])

import matplotlib.pyplot as pl

# Log-log scatter of the measurements plus the fitted power laws
pl.clf()
pl.loglog(imsizes,timings['map_coordinates'],'+',label='map_coordinates')
pl.loglog(imsizes,timings['fourier_shift'],'x',label='fourier')
pl.loglog(imsizes,timings['skimage'],'o',label='skimage')
pl.loglog(imsizes,f(imsizes,*pm))
pl.loglog(imsizes,f(imsizes,*pf))
zfrenchee/pandas | pandas/tests/scalar/test_nat.py | 1 | 9659 | import pytest
from datetime import datetime, timedelta
import pytz
import numpy as np
from pandas import (NaT, Index, Timestamp, Timedelta, Period,
DatetimeIndex, PeriodIndex,
TimedeltaIndex, Series, isna)
from pandas.util import testing as tm
from pandas._libs.tslib import iNaT
from pandas.compat import callable
@pytest.mark.parametrize('nat, idx', [(Timestamp('NaT'), DatetimeIndex),
                                      (Timedelta('NaT'), TimedeltaIndex),
                                      (Period('NaT', freq='M'), PeriodIndex)])
def test_nat_fields(nat, idx):
    """Numeric fields of NaT-valued scalars are NaN; boolean fields are False.

    Field names are taken from the matching index class so the scalar and
    index APIs stay in sync.
    """
    for field in idx._field_ops:
        # weekday is a property of DTI, but a method
        # on NaT/Timestamp for compat with datetime
        if field == 'weekday':
            continue
        # both the NaT singleton and the parametrised NaT-valued scalar
        result = getattr(NaT, field)
        assert np.isnan(result)
        result = getattr(nat, field)
        assert np.isnan(result)
    for field in idx._bool_ops:
        result = getattr(NaT, field)
        assert result is False
        result = getattr(nat, field)
        assert result is False
def test_nat_vector_field_access():
    """Vectorised field access on an index/.dt accessor containing NaT must
    match element-wise access on the scalars."""
    idx = DatetimeIndex(['1/1/2000', None, None, '1/4/2000'])
    for field in DatetimeIndex._field_ops:
        # weekday is a property of DTI, but a method
        # on NaT/Timestamp for compat with datetime
        if field == 'weekday':
            continue
        result = getattr(idx, field)
        expected = Index([getattr(x, field) for x in idx])
        tm.assert_index_equal(result, expected)
    # same comparison through the Series .dt accessor
    s = Series(idx)
    for field in DatetimeIndex._field_ops:
        # weekday is a property of DTI, but a method
        # on NaT/Timestamp for compat with datetime
        if field == 'weekday':
            continue
        result = getattr(s.dt, field)
        expected = [getattr(x, field) for x in idx]
        tm.assert_series_equal(result, Series(expected))
    for field in DatetimeIndex._bool_ops:
        result = getattr(s.dt, field)
        expected = [getattr(x, field) for x in idx]
        tm.assert_series_equal(result, Series(expected))
@pytest.mark.parametrize('klass', [Timestamp, Timedelta, Period])
def test_identity(klass):
    """Every missing-value-like constructor argument yields the NaT singleton."""
    # same argument sequence as the original spelled-out asserts
    nat_like_inputs = [None, np.nan, None, iNaT, np.nan,
                       float('nan'), NaT, 'NaT']
    for value in nat_like_inputs:
        assert klass(value) is NaT
    # lowercase 'nat' is also recognised as missing
    assert isna(klass('nat'))
@pytest.mark.parametrize('klass', [Timestamp, Timedelta, Period])
def test_equality(klass):
    """NaT-like constructor arguments produce the iNaT sentinel ``.value``.

    The ``.value`` checks are skipped for Period; only the final ``isna``
    check applies to it.
    """
    # nat
    if klass is not Period:
        # BUG FIX: these comparisons were bare expressions whose results
        # were silently discarded, so they verified nothing.
        assert klass('').value == iNaT
        assert klass('nat').value == iNaT
        assert klass('NAT').value == iNaT
        assert klass(None).value == iNaT
        assert klass(np.nan).value == iNaT
    assert isna(klass('nat'))
@pytest.mark.parametrize('klass', [Timestamp, Timedelta])
def test_round_nat(klass):
    """Rounding NaT at any frequency must return NaT itself (GH14940)."""
    nat = klass('nat')
    frequencies = ["s", "5s", "min", "5min", "h", "5h"]
    for method_name in ("round", "floor", "ceil"):
        for freq in frequencies:
            assert getattr(nat, method_name)(freq) is nat
def test_NaT_methods():
    """Datetime-API methods on NaT either raise, return NaT, or return NaN."""
    # GH 9513
    # GH 17329 for `timestamp`
    raise_methods = ['astimezone', 'combine', 'ctime', 'dst',
                     'fromordinal', 'fromtimestamp', 'isocalendar',
                     'strftime', 'strptime', 'time', 'timestamp',
                     'timetuple', 'timetz', 'toordinal', 'tzname',
                     'utcfromtimestamp', 'utcnow', 'utcoffset',
                     'utctimetuple', 'timestamp']
    nat_methods = ['date', 'now', 'replace', 'to_datetime', 'today',
                   'tz_convert', 'tz_localize']
    nan_methods = ['weekday', 'isoweekday']
    # hasattr guards keep the lists valid across pandas versions
    for method in raise_methods:
        if hasattr(NaT, method):
            with pytest.raises(ValueError):
                getattr(NaT, method)()
    for method in nan_methods:
        if hasattr(NaT, method):
            assert np.isnan(getattr(NaT, method)())
    for method in nat_methods:
        if hasattr(NaT, method):
            # see gh-8254
            exp_warning = None
            if method == 'to_datetime':
                exp_warning = FutureWarning
            with tm.assert_produces_warning(
                    exp_warning, check_stacklevel=False):
                assert getattr(NaT, method)() is NaT
    # GH 12300
    assert NaT.isoformat() == 'NaT'
def test_NaT_docstrings():
    """NaT mirrors most of the Timestamp/Timedelta APIs with pinned docstrings.

    GH#17327
    """
    # GH#17327
    nat_names = dir(NaT)
    # NaT should have *most* of the Timestamp methods, with matching
    # docstrings. The attributes that are not expected to be present in NaT
    # are private methods plus `ts_expected` below.
    ts_names = dir(Timestamp)
    ts_missing = [x for x in ts_names if x not in nat_names and
                  not x.startswith('_')]
    ts_missing.sort()
    ts_expected = ['freqstr', 'normalize',
                   'to_julian_date',
                   'to_period', 'tz']
    assert ts_missing == ts_expected
    ts_overlap = [x for x in nat_names if x in ts_names and
                  not x.startswith('_') and
                  callable(getattr(Timestamp, x))]
    for name in ts_overlap:
        tsdoc = getattr(Timestamp, name).__doc__
        natdoc = getattr(NaT, name).__doc__
        assert tsdoc == natdoc
    # NaT should have *most* of the Timedelta methods, with matching
    # docstrings. The attributes that are not expected to be present in NaT
    # are private methods plus `td_expected` below.
    # For methods that are both Timestamp and Timedelta methods, the
    # Timestamp docstring takes priority.
    td_names = dir(Timedelta)
    td_missing = [x for x in td_names if x not in nat_names and
                  not x.startswith('_')]
    td_missing.sort()
    td_expected = ['components', 'delta', 'is_populated',
                   'to_pytimedelta', 'to_timedelta64', 'view']
    assert td_missing == td_expected
    td_overlap = [x for x in nat_names if x in td_names and
                  x not in ts_names and  # Timestamp __doc__ takes priority
                  not x.startswith('_') and
                  callable(getattr(Timedelta, x))]
    assert td_overlap == ['total_seconds']
    for name in td_overlap:
        tddoc = getattr(Timedelta, name).__doc__
        natdoc = getattr(NaT, name).__doc__
        assert tddoc == natdoc
@pytest.mark.parametrize('klass', [Timestamp, Timedelta])
def test_isoformat(klass):
    """NaT serialises to the literal string 'NaT' via isoformat()."""
    assert klass('NaT').isoformat() == 'NaT'
def test_nat_arithmetic():
    """NaT propagates through arithmetic with numbers, datetimes, timedeltas.

    Covers scalar numeric mul/div, datetime-like add/sub (naive and
    tz-aware), timedelta-like add/sub/div, and int add/sub.
    """
    # GH 6873
    i = 2
    f = 1.5
    # numeric operands: mul/div propagate NaT; dividing *by* NaT raises
    for (left, right) in [(NaT, i), (NaT, f), (NaT, np.nan)]:
        assert left / right is NaT
        assert left * right is NaT
        assert right * left is NaT
        with pytest.raises(TypeError):
            right / left
    # Timestamp / datetime
    t = Timestamp('2014-01-01')
    dt = datetime(2014, 1, 1)
    for (left, right) in [(NaT, NaT), (NaT, t), (NaT, dt)]:
        # NaT __add__ or __sub__ Timestamp-like (or inverse) returns NaT
        assert right + left is NaT
        assert left + right is NaT
        assert left - right is NaT
        assert right - left is NaT
    # timedelta-like
    # offsets are tested in test_offsets.py
    delta = timedelta(3600)
    td = Timedelta('5s')
    for (left, right) in [(NaT, delta), (NaT, td)]:
        # NaT + timedelta-like returns NaT
        assert right + left is NaT
        assert left + right is NaT
        assert right - left is NaT
        assert left - right is NaT
        assert np.isnan(left / right)
        assert np.isnan(right / left)
    # GH 11718
    t_utc = Timestamp('2014-01-01', tz='UTC')
    t_tz = Timestamp('2014-01-01', tz='US/Eastern')
    dt_tz = pytz.timezone('Asia/Tokyo').localize(dt)
    for (left, right) in [(NaT, t_utc), (NaT, t_tz),
                          (NaT, dt_tz)]:
        # NaT __add__ or __sub__ Timestamp-like (or inverse) returns NaT
        assert right + left is NaT
        assert left + right is NaT
        assert left - right is NaT
        assert right - left is NaT
    # int addition / subtraction
    for (left, right) in [(NaT, 2), (NaT, 0), (NaT, -3)]:
        assert right + left is NaT
        assert left + right is NaT
        assert left - right is NaT
        assert right - left is NaT
def test_nat_arithmetic_index():
    """Index-level arithmetic with NaT yields all-NaT indexes of the right type.

    GH 11718: datetime indexes (naive and tz-aware) plus/minus NaT give
    all-NaT DatetimeIndex/TimedeltaIndex; timedelta indexes give DatetimeIndex.
    """
    # GH 11718
    dti = DatetimeIndex(['2011-01-01', '2011-01-02'], name='x')
    exp = DatetimeIndex([NaT, NaT], name='x')
    tm.assert_index_equal(dti + NaT, exp)
    tm.assert_index_equal(NaT + dti, exp)
    dti_tz = DatetimeIndex(['2011-01-01', '2011-01-02'],
                           tz='US/Eastern', name='x')
    exp = DatetimeIndex([NaT, NaT], name='x', tz='US/Eastern')
    tm.assert_index_equal(dti_tz + NaT, exp)
    tm.assert_index_equal(NaT + dti_tz, exp)
    # datetime-like minus NaT (either side) gives a timedelta index
    exp = TimedeltaIndex([NaT, NaT], name='x')
    for (left, right) in [(NaT, dti), (NaT, dti_tz)]:
        tm.assert_index_equal(left - right, exp)
        tm.assert_index_equal(right - left, exp)
    # timedelta
    tdi = TimedeltaIndex(['1 day', '2 day'], name='x')
    exp = DatetimeIndex([NaT, NaT], name='x')
    for (left, right) in [(NaT, tdi)]:
        tm.assert_index_equal(left + right, exp)
        tm.assert_index_equal(right + left, exp)
        tm.assert_index_equal(left - right, exp)
        tm.assert_index_equal(right - left, exp)
def test_nat_pinned_docstrings():
    """NaT pins (borrows) datetime.ctime's docstring (GH17327)."""
    # GH17327
    assert NaT.ctime.__doc__ == datetime.ctime.__doc__
| bsd-3-clause |
hotpxl/nebuchadnezzar | slides_plots.py | 1 | 9381 | #!/usr/bin/env python3.4
import datetime
import math
import matplotlib.pyplot as plt
import matplotlib.dates
import numpy as np
import pandas
import statsmodels.tsa.api
import statsmodels.tsa.stattools
import stats.data
all_plots = []  # every decorated plot function, in registration order


def register_plot(func):
    """Register *func* as a plot and inject its own name as ``func_name``.

    The wrapper forwards all positional/keyword arguments and adds
    ``func_name=func.__name__`` so each plot can derive its output file
    name from the function name.
    """
    def wrapper(*args, **kwargs):
        kwargs['func_name'] = func.__name__
        return func(*args, **kwargs)
    all_plots.append(wrapper)
    return wrapper
@register_plot
def volume_and_click_count(func_name):
    """Plot daily trading volume vs news click count for stock 600000.

    Both series share the date axis but use twin y axes; the figure is
    saved as slides/final/plots/<func_name>.pdf (*func_name* is injected
    by @register_plot).
    """
    d = stats.data.get_merged_old('600000', 'date', 'volume', 'readCount')
    dates = [datetime.datetime.strptime(i, '%Y-%m-%d') for i in d[:, 0]]
    volume = d[:, 1]
    click_count = d[:, 2]
    fig, ax0 = plt.subplots()
    ax1 = ax0.twinx()  # second y axis for the click counts
    lines = []
    ax0.fmt_xdata = matplotlib.dates.DateFormatter('%Y-%m-%d')
    fig.autofmt_xdate()
    lines += ax0.plot(dates, volume, 'b-', label='Volume')
    ax0.set_xlabel('Date')
    ax0.set_ylabel('Volume')
    lines += ax1.plot(dates, click_count, 'r-', label='Click count')
    ax1.set_ylabel('Click count')
    # collect the handles from both axes so one legend covers them all
    labels = [i.get_label() for i in lines]
    ax0.grid()
    ax0.legend(lines, labels, loc=0)
    plt.tight_layout()
    plt.savefig('slides/final/plots/{}.pdf'.format(func_name))
@register_plot
def price_and_click_count(func_name):
    """Plot daily close price vs news click count for stock 600000.

    Same layout as volume_and_click_count but with the close price on the
    left axis; saved as slides/final/plots/<func_name>.pdf.
    """
    d = stats.data.get_merged_old('600000', 'date', 'close', 'readCount')
    dates = [datetime.datetime.strptime(i, '%Y-%m-%d') for i in d[:, 0]]
    price = d[:, 1]
    click_count = d[:, 2]
    fig, ax0 = plt.subplots()
    ax1 = ax0.twinx()  # second y axis for the click counts
    lines = []
    ax0.fmt_xdata = matplotlib.dates.DateFormatter('%Y-%m-%d')
    fig.autofmt_xdate()
    lines += ax0.plot(dates, price, 'b-', label='Close price')
    ax0.set_xlabel('Date')
    ax0.set_ylabel('Close price')
    lines += ax1.plot(dates, click_count, 'r-', label='Click count')
    ax1.set_ylabel('Click count')
    # collect the handles from both axes so one legend covers them all
    labels = [i.get_label() for i in lines]
    ax0.grid()
    ax0.legend(lines, labels, loc=0)
    plt.tight_layout()
    plt.savefig('slides/final/plots/{}.pdf'.format(func_name))
@register_plot
def granger_causality_test_volume_on_sse_50(func_name):
    """Bar-plot Granger-causality p-values (click count vs volume) per SSE-50 stock.

    For each stock the VAR lag order is chosen by HQIC, four test
    statistics' p-values are collected at that lag, and they are drawn as
    grouped bars (one group per stock).
    """
    results = []
    # (statsmodels result key, legend label, bar colour)
    tests = [
        ('ssr_ftest', 'SSR $F$ test', 'r'),
        ('params_ftest', 'Params $F$ test', 'g'),
        ('lrtest', 'LR test', 'b'),
        ('ssr_chi2test', 'SSR $\chi^{2}$ test', 'y'),
    ]
    for index in stats.data.sse_indices():
        d = stats.data.get_merged_old(index, 'date', 'volume', 'readCount')
        volume = d[:, 1].astype(float)
        click_count = d[:, 2].astype(float)
        data = pandas.DataFrame({
            'volume': volume,
            'clickCount': click_count})
        data.index = pandas.DatetimeIndex(d[:, 0].astype(str))
        # the DataFrame/VAR model is only used to pick the lag order
        model = statsmodels.tsa.api.VAR(data)
        lag_order = model.select_order(verbose=False)
        lag = lag_order['hqic']
        # NOTE(review): the causality test runs on the raw merged array
        # d[:, 1:], not on the DataFrame above -- confirm the column order
        # matches the intended (effect, cause) convention.
        res = statsmodels.tsa.api.stattools.\
            grangercausalitytests(d[:, 1:], lag, verbose=False)
        cur = []
        for i in tests:
            # p-value sits at position [1] of each test's result tuple
            cur.append(res[lag][0][i[0]][1])
        results.append(cur)
    fig, ax = plt.subplots()
    ax.set_ylim((0, 1))
    index = np.arange(len(results))
    bar_width = 0.2
    for i in range(len(tests)):
        plt.bar(index + i * bar_width, np.asarray(results)[:, i].flatten(), bar_width, color=tests[i][2], label=tests[i][1])
    plt.xlabel('Stock')
    plt.ylabel('$p$ value')
    plt.legend(loc=0)
    plt.savefig('slides/final/plots/{}.pdf'.format(func_name))
@register_plot
def granger_causality_test_price_on_sse_50(func_name):
    """Bar-plot Granger-causality p-values (click count vs close price) per SSE-50 stock.

    Identical procedure to the volume variant but on the close price series.
    """
    results = []
    # (statsmodels result key, legend label, bar colour)
    tests = [
        ('ssr_ftest', 'SSR $F$ test', 'r'),
        ('params_ftest', 'Params $F$ test', 'g'),
        ('lrtest', 'LR test', 'b'),
        ('ssr_chi2test', 'SSR $\chi^{2}$ test', 'y'),
    ]
    for index in stats.data.sse_indices():
        d = stats.data.get_merged_old(index, 'date', 'close', 'readCount')
        price = d[:, 1].astype(float)
        click_count = d[:, 2].astype(float)
        data = pandas.DataFrame({
            'price': price,
            'clickCount': click_count})
        data.index = pandas.DatetimeIndex(d[:, 0].astype(str))
        # the DataFrame/VAR model is only used to pick the lag order
        model = statsmodels.tsa.api.VAR(data)
        lag_order = model.select_order(verbose=False)
        lag = lag_order['hqic']
        # NOTE(review): as in the volume variant, the test runs on the raw
        # array d[:, 1:], not the DataFrame -- confirm the column order.
        res = statsmodels.tsa.api.stattools.\
            grangercausalitytests(d[:, 1:], lag, verbose=False)
        cur = []
        for i in tests:
            # p-value sits at position [1] of each test's result tuple
            cur.append(res[lag][0][i[0]][1])
        results.append(cur)
    fig, ax = plt.subplots()
    ax.set_ylim((0, 1))
    index = np.arange(len(results))
    bar_width = 0.2
    for i in range(len(tests)):
        plt.bar(index + i * bar_width, np.asarray(results)[:, i].flatten(), bar_width, color=tests[i][2], label=tests[i][1])
    plt.xlabel('Stock')
    plt.ylabel('$p$ value')
    plt.legend(loc=0)
    plt.savefig('slides/final/plots/{}.pdf'.format(func_name))
@register_plot
def granger_causality_test_price_positive_on_sse_50(func_name):
    """Bar-plot Granger-causality p-values for sentiment-weighted click counts vs price.

    Click counts are first scaled by the fraction of positive articles
    (positiveCount / (positiveCount + negativeCount)) before testing.
    """
    results = []
    # (statsmodels result key, legend label, bar colour)
    tests = [
        ('ssr_ftest', 'SSR $F$ test', 'r'),
        ('params_ftest', 'Params $F$ test', 'g'),
        ('lrtest', 'LR test', 'b'),
        ('ssr_chi2test', 'SSR $\chi^{2}$ test', 'y'),
    ]
    for index in stats.data.sse_indices():
        d = stats.data.get_merged_old(index, 'date', 'close', 'readCount')
        ds = stats.data.get_merged(index, 'positiveCount', 'negativeCount')
        price = d[:, 1].astype(float)
        # weight the raw click counts by the positive-sentiment share
        click_count = np.multiply(ds[:, 0].astype(float) / (ds[:, 0] + ds[:, 1]).astype(float), d[:, 2].astype(float))
        data = pandas.DataFrame({
            'price': price,
            'clickCount': click_count})
        data.index = pandas.DatetimeIndex(d[:, 0].astype(str))
        # the DataFrame/VAR model is only used to pick the lag order
        model = statsmodels.tsa.api.VAR(data)
        lag_order = model.select_order(verbose=False)
        lag = lag_order['hqic']
        # NOTE(review): the test runs on the raw d[:, 1:] columns, which do
        # NOT include the sentiment weighting computed above -- confirm
        # whether the weighted series was meant to be tested here.
        res = statsmodels.tsa.api.stattools.\
            grangercausalitytests(d[:, 1:], lag, verbose=False)
        cur = []
        for i in tests:
            # p-value sits at position [1] of each test's result tuple
            cur.append(res[lag][0][i[0]][1])
        results.append(cur)
    fig, ax = plt.subplots()
    ax.set_ylim((0, 1))
    index = np.arange(len(results))
    bar_width = 0.2
    for i in range(len(tests)):
        plt.bar(index + i * bar_width, np.asarray(results)[:, i].flatten(), bar_width, color=tests[i][2], label=tests[i][1])
    plt.xlabel('Stock')
    plt.ylabel('$p$ value')
    plt.legend(loc=0)
    plt.savefig('slides/final/plots/{}.pdf'.format(func_name))
@register_plot
def volume_forecast_regression_line(func_name):
    """Plot one-step-ahead VAR forecasts of volume for stock 600036 vs reality.

    Fits a VAR on (volume, click count) with HQIC lag selection, forecasts
    each day from the preceding ``lag`` observations, prints the normalised
    RMSE, and saves the real-vs-predicted figure.
    """
    d = stats.data.get_merged_old(600036, 'date', 'volume', 'readCount')
    volume = d[:, 1].astype(float)
    click_count = d[:, 2].astype(float)
    dates = [datetime.datetime.strptime(i, '%Y-%m-%d') for i in d[:, 0]]
    data = pandas.DataFrame({
        'volume': volume,
        'clickCount': click_count
    })
    data.index = pandas.DatetimeIndex(d[:, 0].astype(str))
    model = statsmodels.tsa.api.VAR(data)
    lag = model.select_order(verbose=False)['hqic']
    length = data.values.shape[0]
    results = model.fit(ic='hqic')
    # no forecast is possible for the first `lag` days; pad with zeros
    prediction = [0] * (lag)
    for j in range(lag, length):
        # NOTE(review): [0][1] picks the second column of the forecast;
        # whether that is 'volume' depends on pandas' dict-column ordering
        # (alphabetical in old pandas, insertion order in new) -- verify.
        prediction.append(results.forecast(data.values[j - lag: j], 1)[0][1])
    cnt = 0
    for j in range(lag, length):
        diff = prediction[j] - volume[j]
        cnt += diff ** 2
    # RMSE over the forecast range, normalised by the volume range
    print(math.sqrt(cnt / (length - lag)) / (max(volume) - min(volume)))
    fig, ax = plt.subplots()
    ax.fmt_xdata = matplotlib.dates.DateFormatter('%Y-%m-%d')
    fig.autofmt_xdate()
    ax.plot(dates, volume, 'r-', label='Real')
    ax.plot(dates, prediction, 'b-', label='Prediction')
    ax.set_ylabel('Volume')
    ax.set_xlabel('Date')
    ax.grid()
    ax.legend(loc=0)
    plt.tight_layout()
    plt.savefig('slides/final/plots/{}.pdf'.format(func_name))
@register_plot
def price_forecast_regression_line(func_name):
    """Plot one-step-ahead VAR forecasts of close price for stock 600036.

    Uses sentiment-weighted click counts as the second VAR variable,
    prints the normalised RMSE, and (unlike the volume variant) trims the
    first ``lag`` unforecastable days before plotting.
    """
    d = stats.data.get_merged_old(600036, 'date', 'close', 'readCount')
    ds = stats.data.get_merged(600036, 'positiveCount', 'negativeCount')
    price = d[:, 1].astype(float)
    # weight the raw click counts by the positive-sentiment share
    click_count = np.multiply(ds[:, 0].astype(float) / (ds[:, 0] + ds[:, 1]).astype(float), d[:, 2].astype(float))
    dates = [datetime.datetime.strptime(i, '%Y-%m-%d') for i in d[:, 0]]
    data = pandas.DataFrame({
        'price': price,
        'clickCount': click_count
    })
    data.index = pandas.DatetimeIndex(d[:, 0].astype(str))
    model = statsmodels.tsa.api.VAR(data)
    lag = model.select_order(verbose=False)['hqic']
    length = data.values.shape[0]
    results = model.fit(ic='hqic')
    # no forecast is possible for the first `lag` days; pad with zeros
    prediction = [0] * (lag)
    for j in range(lag, length):
        # NOTE(review): [0][1] picks the second forecast column; whether
        # that is 'price' depends on pandas' dict-column ordering -- verify.
        prediction.append(results.forecast(data.values[j - lag: j], 1)[0][1])
    cnt = 0
    for j in range(lag, length):
        diff = prediction[j] - price[j]
        cnt += diff ** 2
    # RMSE over the forecast range, normalised by the price range
    print(math.sqrt(cnt / (length - lag)) / (max(price) - min(price)))
    fig, ax = plt.subplots()
    ax.fmt_xdata = matplotlib.dates.DateFormatter('%Y-%m-%d')
    fig.autofmt_xdate()
    # drop the zero-padded warm-up period from the plot
    dates = dates[lag:]
    prediction = prediction[lag:]
    price = price[lag:]
    ax.plot(dates, price, 'r-', label='Real')
    ax.plot(dates, prediction, 'b-', label='Prediction')
    ax.set_ylabel('Price')
    ax.set_xlabel('Date')
    ax.grid()
    ax.legend(loc=0)
    plt.tight_layout()
    plt.savefig('slides/final/plots/{}.pdf'.format(func_name))
if __name__ == '__main__':
    # Render every plot registered via @register_plot.
    for i in all_plots:
        i()
| mit |
1kastner/analyse_weather_data | interpolation/visualise_semivariogram.py | 1 | 4838 | """
"""
import logging
import datetime
import numpy
import pandas
from matplotlib import pyplot
import dateutil.parser
from pykrige.ok import OrdinaryKriging
import geopy
import geopy.distance
from filter_weather_data import RepositoryParameter, get_repository_parameters
from filter_weather_data.filters import StationRepository
def plot_variogram(X, Y, Z, title=None, legend_entry=None):
    """Fit a spherical variogram model and plot the experimental semivariogram.

    :param X: x coordinates of the observations (km, see convert_to_meter_distance)
    :param Y: y coordinates of the observations (km)
    :param Z: observed values at the (X, Y) points
    :param title: optional window title; when given, a fresh figure is opened
    :param legend_entry: label for this curve in the plot legend
    """
    ok = OrdinaryKriging(X, Y, Z, variogram_model='spherical', verbose=False,
                         enable_plotting=False, nlags=8)
    if title:
        fig = pyplot.figure()
    pyplot.plot(ok.lags, ok.semivariance, 'o-', label=legend_entry)
    # Raw strings keep the LaTeX backslash literal: "\g" is an invalid string
    # escape (SyntaxWarning on Python >= 3.12); the rendered text is unchanged.
    pyplot.ylabel(r"$\gamma(h)$")
    pyplot.xlabel(r"$h$ ($in$ $km$)")
    pyplot.grid(color='.8')  # a very light gray
    if title:
        fig.canvas.set_window_title(title)
    logging.debug("plotting preparation done")
def convert_to_meter_distance(latitudes, longitudes):
    """
    Project geographic coordinates onto a local kilometre grid.

    A simple northing/easting: the smallest latitude and longitude serve as
    the origin, and each point's offsets are measured as geodesic distances
    from that origin.  Edge cases (e.g. crossing the date line) are not
    considered.

    :param latitudes:
    :param longitudes:
    :return: two lists (x offsets, y offsets) in kilometres
    """
    origin_lat = min(latitudes)
    origin_lon = min(longitudes)
    xs = []
    ys = []
    for lat, lon in zip(latitudes, longitudes):
        point = geopy.Point(lat, lon)
        # distance along the parallel (to the origin longitude) ...
        x_km = geopy.distance.distance(geopy.Point(lat, origin_lon), point).km
        # ... and along the meridian (to the origin latitude)
        y_km = geopy.distance.distance(geopy.Point(origin_lat, lon), point).km
        logging.debug("convert {lat}:{lon} to {x_meter}:{y_meter}".format(lat=lat, lon=lon, x_meter=x_km,
                                                                          y_meter=y_km))
        xs.append(x_km)
        ys.append(y_km)
    return xs, ys
def sample_up(df, start_date, end_date, decay):
    """Up-sample *df* onto a one-minute grid between start_date and end_date.

    Missing minutes are forward-filled from the last real reading, but only
    for up to ``decay`` minutes -- older readings are considered stale and
    stay NaN.

    :param df: data frame with a datetime index and a 'temperature' column
    :param start_date: first timestamp of the grid (anything date_range accepts)
    :param end_date: last timestamp of the grid
    :param decay: maximum number of consecutive minutes to carry a value forward
    :return: the up-sampled data frame
    """
    # 'min' is the minute frequency ('T' is a deprecated alias)
    minute_grid = pandas.DataFrame(
        index=pandas.date_range(start_date, end_date, freq="min", name="datetime"))
    df = df.join(minute_grid, how="outer")
    # Explicit assignment instead of chained `inplace=True`: avoids the
    # deprecated Series.fillna(method=...) API and copy/view ambiguity.
    df["temperature"] = df["temperature"].ffill(limit=decay)
    return df
def load_data(station_dicts, date):
    """Collect positions and temperatures of all stations that report a
    value at ``date``, returned as kilometre coordinates plus values.

    Each station's frame is up-sampled first so a reading from the previous
    30 minutes still counts; stations with no usable value are skipped.
    """
    start_date = dateutil.parser.parse(date) - datetime.timedelta(minutes=30)  # first value to load
    end_date = date  # load until this date (station repository adds some margin)
    query_time = date  # check the values at this given time
    lats = []
    lons = []
    Z = []
    for station_dict in station_dicts:
        station_dict["data_frame"] = sample_up(station_dict["data_frame"], start_date, end_date, 30)  # 30 minutes decay
        temperature = station_dict["data_frame"].loc[query_time].temperature
        if numpy.isnan(temperature):
            continue  # no recent reading for this station
        position = station_dict["meta_data"]["position"]
        lats.append(position["lat"])
        lons.append(position["lon"])
        Z.append(temperature)
    X, Y = convert_to_meter_distance(lats, lons)
    logging.debug("conversion to kilometer distances done")
    return X, Y, Z
def get_station_dicts(start_date, end_date):
    """Load all stations of the 'only outdoor and shaded' repository for the
    given date interval."""
    # repository_parameter = RepositoryParameter.START
    parameters = get_repository_parameters(RepositoryParameter.ONLY_OUTDOOR_AND_SHADED)
    repository = StationRepository(*parameters)
    return repository.load_all_stations(start_date, end_date)
# 13:00 on the first day of each month of 2016 -- one sample date per month.
DATES = ['2016-%02d-01T13:00' % month for month in range(1, 13)]
def germanize_iso_date(date):
    """Render an ISO date string in the German 'day.month.year, H:M' format."""
    parsed = dateutil.parser.parse(date)
    return parsed.strftime("%d.%m.%Y, %H:%M")
def demo():
    """Overlay one semivariogram per month of 2016 in a single figure."""
    station_dicts = get_station_dicts("2016-01-01", "2016-12-31")
    for date in list(DATES):
        X, Y, Z = load_data(station_dicts, date)
        plot_variogram(X, Y, Z, legend_entry=germanize_iso_date(date))
    pyplot.legend()
    pyplot.show()
def demo2():
    """
    Shows that convert_to_meter_distance presents good results (edge cases not considered).

    Five reference points (a center and its four compass neighbours) are
    projected and scattered with their labels for visual inspection.
    """
    reference_points = [
        (53.64159, 9.94502, "center"),
        (53.84159, 9.94502, "north"),
        (53.44159, 9.94502, "south"),
        (53.64159, 9.74502, "west"),
        (53.64159, 10.14502, "east")
    ]
    lats = [row[0] for row in reference_points]
    lons = [row[1] for row in reference_points]
    labels = [row[2] for row in reference_points]
    x_km, y_km = convert_to_meter_distance(lats, lons)
    fig, ax = pyplot.subplots()
    ax.scatter(x_km, y_km)
    for x, y, label in zip(x_km, y_km, labels):
        ax.annotate(label, (x, y))
    pyplot.show()
if __name__ == "__main__":
    # DEBUG level so the per-station conversion steps are visible while running.
    logging.basicConfig(level=logging.DEBUG)
    demo()
    # demo2()
| agpl-3.0 |
ilyes14/scikit-learn | sklearn/linear_model/bayes.py | 220 | 15248 | """
Various bayesian regression
"""
from __future__ import print_function
# Authors: V. Michel, F. Pedregosa, A. Gramfort
# License: BSD 3 clause
from math import log
import numpy as np
from scipy import linalg
from .base import LinearModel
from ..base import RegressorMixin
from ..utils.extmath import fast_logdet, pinvh
from ..utils import check_X_y
###############################################################################
# BayesianRidge regression
class BayesianRidge(LinearModel, RegressorMixin):
    """Bayesian ridge regression

    Fit a Bayesian ridge model and optimize the regularization parameters
    lambda (precision of the weights) and alpha (precision of the noise).

    Read more in the :ref:`User Guide <bayesian_regression>`.

    Parameters
    ----------
    n_iter : int, optional
        Maximum number of iterations. Default is 300.

    tol : float, optional
        Stop the algorithm if w has converged. Default is 1.e-3.

    alpha_1 : float, optional
        Hyper-parameter : shape parameter for the Gamma distribution prior
        over the alpha parameter. Default is 1.e-6

    alpha_2 : float, optional
        Hyper-parameter : inverse scale parameter (rate parameter) for the
        Gamma distribution prior over the alpha parameter.
        Default is 1.e-6.

    lambda_1 : float, optional
        Hyper-parameter : shape parameter for the Gamma distribution prior
        over the lambda parameter. Default is 1.e-6.

    lambda_2 : float, optional
        Hyper-parameter : inverse scale parameter (rate parameter) for the
        Gamma distribution prior over the lambda parameter.
        Default is 1.e-6

    compute_score : boolean, optional
        If True, compute the objective function at each step of the model.
        Default is False

    fit_intercept : boolean, optional
        whether to calculate the intercept for this model. If set
        to false, no intercept will be used in calculations
        (e.g. data is expected to be already centered).
        Default is True.

    normalize : boolean, optional, default False
        If True, the regressors X will be normalized before regression.

    copy_X : boolean, optional, default True
        If True, X will be copied; else, it may be overwritten.

    verbose : boolean, optional, default False
        Verbose mode when fitting the model.

    Attributes
    ----------
    coef_ : array, shape = (n_features)
        Coefficients of the regression model (mean of distribution)

    alpha_ : float
        estimated precision of the noise.

    lambda_ : array, shape = (n_features)
        estimated precisions of the weights.

    scores_ : float
        if computed, value of the objective function (to be maximized)

    Examples
    --------
    >>> from sklearn import linear_model
    >>> clf = linear_model.BayesianRidge()
    >>> clf.fit([[0,0], [1, 1], [2, 2]], [0, 1, 2])
    ... # doctest: +NORMALIZE_WHITESPACE
    BayesianRidge(alpha_1=1e-06, alpha_2=1e-06, compute_score=False,
            copy_X=True, fit_intercept=True, lambda_1=1e-06, lambda_2=1e-06,
            n_iter=300, normalize=False, tol=0.001, verbose=False)
    >>> clf.predict([[1, 1]])
    array([ 1.])

    Notes
    -----
    See examples/linear_model/plot_bayesian_ridge.py for an example.
    """

    def __init__(self, n_iter=300, tol=1.e-3, alpha_1=1.e-6, alpha_2=1.e-6,
                 lambda_1=1.e-6, lambda_2=1.e-6, compute_score=False,
                 fit_intercept=True, normalize=False, copy_X=True,
                 verbose=False):
        # Hyper-parameters are stored verbatim; all estimation happens in fit().
        self.n_iter = n_iter
        self.tol = tol
        self.alpha_1 = alpha_1
        self.alpha_2 = alpha_2
        self.lambda_1 = lambda_1
        self.lambda_2 = lambda_2
        self.compute_score = compute_score
        self.fit_intercept = fit_intercept
        self.normalize = normalize
        self.copy_X = copy_X
        self.verbose = verbose

    def fit(self, X, y):
        """Fit the model

        Alternately re-estimates the coefficient posterior mean and the two
        precisions (alpha for the noise, lambda for the weights) until the
        coefficients change by less than ``tol``.

        Parameters
        ----------
        X : numpy array of shape [n_samples,n_features]
            Training data
        y : numpy array of shape [n_samples]
            Target values

        Returns
        -------
        self : returns an instance of self.
        """
        X, y = check_X_y(X, y, dtype=np.float64, y_numeric=True)
        X, y, X_mean, y_mean, X_std = self._center_data(
            X, y, self.fit_intercept, self.normalize, self.copy_X)
        n_samples, n_features = X.shape

        ### Initialization of the values of the parameters
        alpha_ = 1. / np.var(y)  # start the noise precision at 1/Var(y)
        lambda_ = 1.
        verbose = self.verbose
        lambda_1 = self.lambda_1
        lambda_2 = self.lambda_2
        alpha_1 = self.alpha_1
        alpha_2 = self.alpha_2

        self.scores_ = list()
        coef_old_ = None

        # Precompute the SVD of X once: each iteration only rescales the
        # squared singular values, so no refactorization is needed.
        XT_y = np.dot(X.T, y)
        U, S, Vh = linalg.svd(X, full_matrices=False)
        eigen_vals_ = S ** 2  # squared singular values of X

        ### Convergence loop of the bayesian ridge regression
        for iter_ in range(self.n_iter):
            ### Compute mu and sigma
            # sigma_ = lambda_ / alpha_ * np.eye(n_features) + np.dot(X.T, X)
            # coef_ = sigma_^-1 * XT * y
            # The two branches compute the same posterior mean; the choice
            # keeps the inner matrix at the smaller of the two dimensions.
            if n_samples > n_features:
                coef_ = np.dot(Vh.T,
                               Vh / (eigen_vals_ + lambda_ / alpha_)[:, None])
                coef_ = np.dot(coef_, XT_y)
                if self.compute_score:
                    logdet_sigma_ = - np.sum(
                        np.log(lambda_ + alpha_ * eigen_vals_))
            else:
                coef_ = np.dot(X.T, np.dot(
                    U / (eigen_vals_ + lambda_ / alpha_)[None, :], U.T))
                coef_ = np.dot(coef_, y)
                if self.compute_score:
                    logdet_sigma_ = lambda_ * np.ones(n_features)
                    logdet_sigma_[:n_samples] += alpha_ * eigen_vals_
                    logdet_sigma_ = - np.sum(np.log(logdet_sigma_))

            ### Update alpha and lambda
            rmse_ = np.sum((y - np.dot(X, coef_)) ** 2)  # residual sum of squares
            # gamma_: sum of eigenvalue ratios (effective number of
            # well-determined parameters in the evidence framework)
            gamma_ = (np.sum((alpha_ * eigen_vals_)
                      / (lambda_ + alpha_ * eigen_vals_)))
            lambda_ = ((gamma_ + 2 * lambda_1)
                       / (np.sum(coef_ ** 2) + 2 * lambda_2))
            alpha_ = ((n_samples - gamma_ + 2 * alpha_1)
                      / (rmse_ + 2 * alpha_2))

            ### Compute the objective function
            if self.compute_score:
                s = lambda_1 * log(lambda_) - lambda_2 * lambda_
                s += alpha_1 * log(alpha_) - alpha_2 * alpha_
                s += 0.5 * (n_features * log(lambda_)
                            + n_samples * log(alpha_)
                            - alpha_ * rmse_
                            - (lambda_ * np.sum(coef_ ** 2))
                            - logdet_sigma_
                            - n_samples * log(2 * np.pi))
                self.scores_.append(s)

            ### Check for convergence (L1 change of the coefficients)
            if iter_ != 0 and np.sum(np.abs(coef_old_ - coef_)) < self.tol:
                if verbose:
                    print("Convergence after ", str(iter_), " iterations")
                break
            coef_old_ = np.copy(coef_)

        self.alpha_ = alpha_
        self.lambda_ = lambda_
        self.coef_ = coef_

        self._set_intercept(X_mean, y_mean, X_std)
        return self
###############################################################################
# ARD (Automatic Relevance Determination) regression
class ARDRegression(LinearModel, RegressorMixin):
    """Bayesian ARD regression.

    Fit the weights of a regression model, using an ARD prior. The weights of
    the regression model are assumed to be in Gaussian distributions.
    Also estimate the parameters lambda (precisions of the distributions of the
    weights) and alpha (precision of the distribution of the noise).
    The estimation is done by an iterative procedures (Evidence Maximization)

    Read more in the :ref:`User Guide <bayesian_regression>`.

    Parameters
    ----------
    n_iter : int, optional
        Maximum number of iterations. Default is 300

    tol : float, optional
        Stop the algorithm if w has converged. Default is 1.e-3.

    alpha_1 : float, optional
        Hyper-parameter : shape parameter for the Gamma distribution prior
        over the alpha parameter. Default is 1.e-6.

    alpha_2 : float, optional
        Hyper-parameter : inverse scale parameter (rate parameter) for the
        Gamma distribution prior over the alpha parameter. Default is 1.e-6.

    lambda_1 : float, optional
        Hyper-parameter : shape parameter for the Gamma distribution prior
        over the lambda parameter. Default is 1.e-6.

    lambda_2 : float, optional
        Hyper-parameter : inverse scale parameter (rate parameter) for the
        Gamma distribution prior over the lambda parameter. Default is 1.e-6.

    compute_score : boolean, optional
        If True, compute the objective function at each step of the model.
        Default is False.

    threshold_lambda : float, optional
        threshold for removing (pruning) weights with high precision from
        the computation. Default is 1.e+4.

    fit_intercept : boolean, optional
        whether to calculate the intercept for this model. If set
        to false, no intercept will be used in calculations
        (e.g. data is expected to be already centered).
        Default is True.

    normalize : boolean, optional, default False
        If True, the regressors X will be normalized before regression.

    copy_X : boolean, optional, default True.
        If True, X will be copied; else, it may be overwritten.

    verbose : boolean, optional, default False
        Verbose mode when fitting the model.

    Attributes
    ----------
    coef_ : array, shape = (n_features)
        Coefficients of the regression model (mean of distribution)

    alpha_ : float
        estimated precision of the noise.

    lambda_ : array, shape = (n_features)
        estimated precisions of the weights.

    sigma_ : array, shape = (n_features, n_features)
        estimated variance-covariance matrix of the weights

    scores_ : float
        if computed, value of the objective function (to be maximized)

    Examples
    --------
    >>> from sklearn import linear_model
    >>> clf = linear_model.ARDRegression()
    >>> clf.fit([[0,0], [1, 1], [2, 2]], [0, 1, 2])
    ... # doctest: +NORMALIZE_WHITESPACE
    ARDRegression(alpha_1=1e-06, alpha_2=1e-06, compute_score=False,
            copy_X=True, fit_intercept=True, lambda_1=1e-06, lambda_2=1e-06,
            n_iter=300, normalize=False, threshold_lambda=10000.0, tol=0.001,
            verbose=False)
    >>> clf.predict([[1, 1]])
    array([ 1.])

    Notes
    --------
    See examples/linear_model/plot_ard.py for an example.
    """

    def __init__(self, n_iter=300, tol=1.e-3, alpha_1=1.e-6, alpha_2=1.e-6,
                 lambda_1=1.e-6, lambda_2=1.e-6, compute_score=False,
                 threshold_lambda=1.e+4, fit_intercept=True, normalize=False,
                 copy_X=True, verbose=False):
        # Hyper-parameters are stored verbatim; all estimation happens in fit().
        self.n_iter = n_iter
        self.tol = tol
        self.fit_intercept = fit_intercept
        self.normalize = normalize
        self.alpha_1 = alpha_1
        self.alpha_2 = alpha_2
        self.lambda_1 = lambda_1
        self.lambda_2 = lambda_2
        self.compute_score = compute_score
        self.threshold_lambda = threshold_lambda
        self.copy_X = copy_X
        self.verbose = verbose

    def fit(self, X, y):
        """Fit the ARDRegression model according to the given training data
        and parameters.

        Iterative procedure to maximize the evidence.  Unlike BayesianRidge,
        each feature has its own precision lambda_[i]; features whose
        precision exceeds ``threshold_lambda`` are pruned (coefficient set
        to 0 and excluded from subsequent linear algebra).

        Parameters
        ----------
        X : array-like, shape = [n_samples, n_features]
            Training vector, where n_samples in the number of samples and
            n_features is the number of features.

        y : array, shape = [n_samples]
            Target values (integers)

        Returns
        -------
        self : returns an instance of self.
        """
        X, y = check_X_y(X, y, dtype=np.float64, y_numeric=True)

        n_samples, n_features = X.shape
        coef_ = np.zeros(n_features)
        X, y, X_mean, y_mean, X_std = self._center_data(
            X, y, self.fit_intercept, self.normalize, self.copy_X)

        ### Launch the convergence loop
        keep_lambda = np.ones(n_features, dtype=bool)  # mask of non-pruned features

        lambda_1 = self.lambda_1
        lambda_2 = self.lambda_2
        alpha_1 = self.alpha_1
        alpha_2 = self.alpha_2
        verbose = self.verbose

        ### Initialization of the values of the parameters
        alpha_ = 1. / np.var(y)  # start the noise precision at 1/Var(y)
        lambda_ = np.ones(n_features)

        self.scores_ = list()
        coef_old_ = None

        ### Iterative procedure of ARDRegression
        for iter_ in range(self.n_iter):
            ### Compute mu and sigma (using Woodbury matrix identity)
            # The n_samples x n_samples inverse is expanded back into the
            # (kept-)feature space in three steps, then the diagonal prior
            # term 1/lambda is added in place.
            sigma_ = pinvh(np.eye(n_samples) / alpha_ +
                           np.dot(X[:, keep_lambda] *
                           np.reshape(1. / lambda_[keep_lambda], [1, -1]),
                           X[:, keep_lambda].T))
            sigma_ = np.dot(sigma_, X[:, keep_lambda]
                            * np.reshape(1. / lambda_[keep_lambda], [1, -1]))
            sigma_ = - np.dot(np.reshape(1. / lambda_[keep_lambda], [-1, 1])
                              * X[:, keep_lambda].T, sigma_)
            sigma_.flat[::(sigma_.shape[1] + 1)] += 1. / lambda_[keep_lambda]
            coef_[keep_lambda] = alpha_ * np.dot(
                sigma_, np.dot(X[:, keep_lambda].T, y))

            ### Update alpha and lambda
            rmse_ = np.sum((y - np.dot(X, coef_)) ** 2)  # residual sum of squares
            # gamma_[i]: how well-determined weight i is (1 = fully data-driven)
            gamma_ = 1. - lambda_[keep_lambda] * np.diag(sigma_)
            lambda_[keep_lambda] = ((gamma_ + 2. * lambda_1)
                                    / ((coef_[keep_lambda]) ** 2
                                       + 2. * lambda_2))
            alpha_ = ((n_samples - gamma_.sum() + 2. * alpha_1)
                      / (rmse_ + 2. * alpha_2))

            ### Prune the weights with a precision over a threshold
            keep_lambda = lambda_ < self.threshold_lambda
            coef_[~keep_lambda] = 0

            ### Compute the objective function
            if self.compute_score:
                s = (lambda_1 * np.log(lambda_) - lambda_2 * lambda_).sum()
                s += alpha_1 * log(alpha_) - alpha_2 * alpha_
                s += 0.5 * (fast_logdet(sigma_) + n_samples * log(alpha_)
                            + np.sum(np.log(lambda_)))
                s -= 0.5 * (alpha_ * rmse_ + (lambda_ * coef_ ** 2).sum())
                self.scores_.append(s)

            ### Check for convergence (L1 change of the coefficients)
            if iter_ > 0 and np.sum(np.abs(coef_old_ - coef_)) < self.tol:
                if verbose:
                    print("Converged after %s iterations" % iter_)
                break
            coef_old_ = np.copy(coef_)

        self.coef_ = coef_
        self.alpha_ = alpha_
        self.sigma_ = sigma_
        self.lambda_ = lambda_
        self._set_intercept(X_mean, y_mean, X_std)
        return self
| bsd-3-clause |
danieldmm/minerva | models/models_util.py | 1 | 1139 | import matplotlib.pyplot as plt
def plot_model_performance(train_loss, train_acc, train_val_loss, train_val_acc):
    """Visualise training history in xkcd style: one figure for the loss and
    one for the accuracy, each overlaying the training and validation curves
    per epoch."""
    training_colour = '#72C29B'
    validation_colour = '#FFA577'

    def draw(axis, training_values, validation_values, y_label, legend_position):
        # Identical call sequence for both panels.
        axis.plot(range(1, len(training_values) + 1), training_values,
                  training_colour, linewidth=5, label='training')
        axis.plot(range(1, len(validation_values) + 1), validation_values,
                  validation_colour, linewidth=5, label='validation')
        axis.set_xlabel('# epoch')
        axis.set_ylabel(y_label)
        axis.tick_params('y')
        axis.legend(loc=legend_position, shadow=False)

    with plt.xkcd():
        # plot model loss
        fig, loss_axis = plt.subplots()
        draw(loss_axis, train_loss, train_val_loss, 'loss', 'upper right')
        # plot model accuracy
        fig, accuracy_axis = plt.subplots()
        draw(accuracy_axis, train_acc, train_val_acc, 'accuracy', 'lower right')
| gpl-3.0 |
jjaner/essentia-musicbricks | src/examples/python/experimental/beatogram.py | 10 | 26647 | #!/usr/bin/env python
# Copyright (C) 2006-2013 Music Technology Group - Universitat Pompeu Fabra
#
# This file is part of Essentia
#
# Essentia is free software: you can redistribute it and/or modify it under
# the terms of the GNU Affero General Public License as published by the Free
# Software Foundation (FSF), either version 3 of the License, or (at your
# option) any later version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the Affero GNU General Public License
# version 3 along with this program. If not, see http://www.gnu.org/licenses/
import os, sys
from os.path import join
import essentia
from essentia.streaming import *
import essentia.standard as std
from pylab import median, mean, argmax
import matplotlib
#matplotlib.use('Agg') # in order to not grab focus on screen while batch processing
import matplotlib.pyplot as pyplot
import numpy as np
from numpy import shape, zeros, fabs
# for key input
import termios, sys, os, subprocess
TERMIOS = termios
import copy
# for alsa
if sys.platform =='linux2':
import wave, alsaaudio
import time
import thread
# Band edge frequencies (Hz) for the band-energy front-ends.
# Bark-scale critical band edges:
barkBands = [0.0, 50.0, 100.0, 150.0, 200.0, 300.0, 400.0, 510.0, 630.0, 770.0,
             920.0, 1080.0, 1270.0, 1480.0, 1720.0, 2000.0, 2320.0, 2700.0,
             3150.0, 3700.0, 4400.0, 5300.0, 6400.0, 7700.0,
             9500.0, 12000.0, 15500.0, 20500.0, 27000.0]
# Coarser six-band split and an extended variant:
scheirerBands = [ 0.0, 200.0, 400.0, 800.0, 1600.0, 3200.0, 22000.0]
scheirerBands_extended = [ 0.0, 50.0, 100.0, 150.0, 200.0, 400.0, 800.0, 1600.0, 3200.0, 5000.0, 10000.0]
# Equalizer-style band edges (two variants):
EqBands = [20.0, 150.0, 400.0, 3200.0, 7000.0, 22000.0]
EqBands2 =[0.0, 75.0, 150.0, 400.0, 3200.0, 7000.0]

# Stereo-to-mono strategy passed to the audio loaders.
DOWNMIX ='mix'
# defines for novelty curve:
FRAMESIZE = 1024
HOPSIZE = FRAMESIZE/2  # 50% overlap
WEIGHT='flat' #'supplied' #'flat'
SAMPLERATE=44100.0
WINDOW='hann' #'blackmanharris92'
BEATWINDOW=16 # number of beats where to compute statistics
# tempogram defines:
FRAMERATE = float(SAMPLERATE)/HOPSIZE  # novelty-curve frames per second
TEMPO_FRAMESIZE = 4;
TEMPO_OVERLAP=2;
# Analysis time span in seconds passed to the loaders:
STARTTIME = 0
ENDTIME = 2000
def computeOnsets(filename, pool):
    # Detect onsets on the (downmixed) audio and store their times in the
    # pool as 'ticks' for the rest of the pipeline; also record the number
    # of samples ('size') and duration in seconds ('length').
    loader = EasyLoader(filename=filename,
                        sampleRate=pool['samplerate'],
                        startTime=STARTTIME, endTime=ENDTIME,
                        downmix=pool['downmix'])
    onset = OnsetRate()
    loader.audio >> onset.signal
    onset.onsetTimes >> (pool, 'ticks')
    onset.onsetRate >> None  # rate output is not needed downstream
    essentia.run(loader)
    pool.set('size', loader.audio.totalProduced())
    pool.set('length', pool['size']/pool['samplerate'])
def computeSegmentation(filename, pool):
    # Segment the audio with the SBic (Bayesian information criterion)
    # segmenter on MFCC features; boundary times (seconds) are added to
    # the pool under 'segments'.
    sampleRate = 44100
    frameSize = 2048
    hopSize = frameSize/2

    audio = EqloudLoader(filename = filename,
                         downmix=pool['downmix'],
                         sampleRate=sampleRate)
    fc = FrameCutter(frameSize=frameSize, hopSize=hopSize, silentFrames='keep')
    w = Windowing(type='blackmanharris62')
    spec = Spectrum()
    mfcc = MFCC(highFrequencyBound=8000)
    tmpPool = essentia.Pool()

    audio.audio >> fc.signal
    fc.frame >> w.frame >> spec.frame
    spec.spectrum >> mfcc.spectrum
    mfcc.bands >> (tmpPool, 'mfcc_bands')
    mfcc.mfcc>> (tmpPool, 'mfcc_coeff')

    essentia.run(audio)

    # compute transpose of features array; deep-copy explicitly because
    # transposing the pool's array in place does not interact well with
    # essentia's memory layout
    features = copy.deepcopy(tmpPool['mfcc_coeff'].transpose())
    segments = std.SBic(cpw=1.5, size1=1000, inc1=300, size2=600, inc2=50)(features)
    for segment in segments:
        # convert frame index to seconds
        pool.add('segments', segment*hopSize/sampleRate)
    #print pool['segments']
def computeNoveltyCurve(filename, pool):
    # Build a per-frame "novelty" signal that peaks at onsets: frequency-band
    # energies weighted by their average contribution, boosted by the positive
    # derivative of the HFC, low-pass filtered, then median-subtracted.
    # Result is stored in the pool under 'novelty_curve'.
    loader = EasyLoader(filename=filename,
                        sampleRate=pool['samplerate'],
                        startTime=STARTTIME, endTime=ENDTIME,
                        downmix=pool['downmix'])
    fc = FrameCutter(frameSize=int(pool['framesize']),
                     silentFrames ='noise',
                     hopSize=int(pool['hopsize']),
                     startFromZero=False)
    window = Windowing(type=pool['window'],
                       zeroPhase=False)
    #freqBands = FrequencyBands(frequencyBands=EqBands, sampleRate=pool['samplerate'])
    freqBands = FrequencyBands(sampleRate=pool['samplerate'])
    spec = Spectrum()
    hfc = HFC()

    loader.audio >> fc.signal
    fc.frame >> window.frame >> spec.frame
    spec.spectrum >> freqBands.spectrum
    spec.spectrum >> hfc.spectrum
    freqBands.bands >> (pool, 'frequency_bands')
    hfc.hfc >> (pool, 'hfc')
    essentia.run(loader)

    pool.set('size', loader.audio.totalProduced())
    pool.set('length', pool['size']/pool['samplerate'])

    # compute a weighting curve that is according to frequency bands:
    # average energy of each band over all frames, normalized
    frequencyBands = pool['frequency_bands']
    nFrames = len(frequencyBands)
    weightCurve= np.sum(frequencyBands, axis=0)
    weightCurve = [val/float(nFrames) for val in weightCurve]
    weightCurve = essentia.normalize(weightCurve)
    #pyplot.plot(weightCurve)
    #pyplot.show()

    noveltyCurve = std.NoveltyCurve(frameRate=pool['framerate'],
                                    weightCurveType=pool['weight'],
                                    weightCurve=weightCurve)(frequencyBands)
    #for x in noveltyCurve: pool.add('novelty_curve', x)
    #return

    # derivative of hfc seems to help in finding more precise beats...
    # only rising HFC (positive derivative) is added
    hfc = essentia.normalize(pool['hfc'])
    dhfc = essentia.derivative(hfc)
    for i, val in enumerate(dhfc):
        if val< 0: continue
        noveltyCurve[i] += val

    # low pass filter novelty curve:
    env = std.Envelope(attackTime=2./pool['framerate'],
                       releaseTime=2./pool['framerate'])(noveltyCurve)

    # apply median filter: subtract the local median so only local peaks
    # survive; negative values are clamped to zero
    windowSize = 8 #samples
    size = len(env)
    filtered = zeros(size)
    for i in range(size):
        start = i-windowSize
        if start < 0: start = 0
        end = start + windowSize
        if end > size:
            end = size
            start = size-windowSize
        filtered[i] = env[i] - np.median(env[start:end])
        if filtered[i] < 0: filtered[i] = 0

    #pyplot.subplot(311)
    #pyplot.plot(noveltyCurve)
    #pyplot.subplot(312)
    #pyplot.plot(env, 'r')
    #pyplot.subplot(313)
    #pyplot.plot(filtered, 'g')
    #pyplot.show()

    #for x in noveltyCurve: pool.add('novelty_curve', x)
    for x in filtered: pool.add('novelty_curve', x)
def computeBeats(filename, pool):
    # Extract beat ticks from the audio's novelty curve.  A BpmHistogram run
    # proposes a tempo plus a sinusoid locked to it; beats are taken as the
    # strongest novelty sample inside each positive lobe of that sinusoid.
    # If the resulting tick sequence disagrees with the proposed BPM the
    # whole process is re-run on the sinusoid itself, with a gradually
    # relaxed tolerance.
    computeNoveltyCurve(filename, pool)
    recompute = True
    novelty = pool['novelty_curve']
    count = 0
    bpmTolerance = 5  # max allowed |bestBpm - estimated bpm|, relaxed on retries
    while recompute:
        gen = VectorInput(novelty)
        bpmHist = BpmHistogram(frameRate=pool['framerate'],
                               frameSize=pool['tempo_framesize'],
                               overlap=int(pool['tempo_overlap']),
                               maxPeaks=50,
                               windowType='hann',
                               minBpm=40.0,
                               maxBpm=1000.0,
                               normalize=False,
                               constantTempo=False,
                               tempoChange=5,
                               weightByMagnitude=True)

        gen.data >> bpmHist.novelty
        bpmHist.bpm >> (pool, 'peaksBpm')
        bpmHist.bpmMagnitude >> (pool, 'peaksMagnitude')
        bpmHist.harmonicBpm >> (pool, 'harmonicBpm')
        # NOTE(review): 'harmonicBpm' is connected twice (also removed twice
        # below) -- looks like a copy/paste slip; confirm intent.
        bpmHist.harmonicBpm >> (pool, 'harmonicBpm')
        bpmHist.confidence >> (pool, 'confidence')
        bpmHist.ticks >> (pool, 'ticks')
        bpmHist.ticksMagnitude >> (pool, 'ticksMagnitude')
        bpmHist.sinusoid >> (pool, 'sinusoid')
        essentia.run(gen)

        ## get rid of beats of beats > audio.length
        #ticks = []
        #ticksAmp = []
        #for t, amp in zip(pool['ticks'], pool['ticksMagnitude']):
        #    if t < 0 or t > pool['length']: continue
        #    ticks.append(float(t))
        #    ticksAmp.append(float(amp))

        #step = pool['step']
        #ticks = essentia.postProcessTicks(ticks, ticksAmp, 60./pool['harmonicBpm'][0]);

        sine = pool['sinusoid']
        #pyplot.plot(novelty, 'k')
        #pyplot.plot(sine, 'r')
        #for i in range(len(novelty)-1):
        #    diff = novelty[i+1]-novelty[i]
        #    if diff > 0: novelty[i] = diff
        #    else: novelty[i] = 0
        #pyplot.plot(novelty, 'r')

        # Keep, for every positive lobe of the sinusoid (values > 0.1),
        # only the strongest sinusoid-weighted novelty sample: one beat
        # candidate per expected beat period.
        prodPulse = zeros(len(novelty))
        i = 0
        while i < len(novelty):
            if sine[i] <= 0.1:
                i += 1
                continue
            window = []
            while sine[i] != 0 and i < len(novelty):
                window.append(novelty[i]*sine[i])
                i+=1
            peakPos = argmax(window)
            peakPos = i - len(window) + peakPos
            prodPulse[peakPos] = novelty[peakPos]

        #pyplot.plot(prodPulse, 'g')
        #pyplot.show()

        # Convert candidate frame indices to tick times in seconds.
        ticks = []
        ticksAmp = []
        frameRate = pool['framerate']
        bpms = pool['harmonicBpm']
        print 'estimated bpm:', bpms
        tatum = 60./bpms[0]  # expected inter-beat interval (seconds)
        # NOTE(review): diffTick/prevTick/prevAmp are only used by the
        # commented-out tick filtering below.
        diffTick = 2*tatum
        prevTick = -1
        prevAmp = -1
        for i, x in enumerate(prodPulse):
            if x != 0:
                newTick = float(i)/frameRate
                if newTick < 0 or newTick > pool['length']: continue
                ticks.append(newTick)
                ticksAmp.append(x)
            # if x != 0:
            #    newTick = float(i)/frameRate
            #    if prevTick < 0:
            #        ticks.append(newTick)
            #        ticksAmp.append(x)
            #        prevTick = newTick
            #        prevAmp = x
            #    else:
            #        diff = newTick-prevTick
            #        ratio = max( round(tatum/diff), round(diff/tatum))
            #        if (diff >= 0.9*tatum*ratio) and (diff <= 1.1*tatum*ratio):
            #            ticks.append(newTick)
            #            ticksAmp.append(x)
            #            prevTick = newTick
            #            prevAmp = x
            #        else: #(newTick-prevTick) < 0.75*tatum:
            #            newTick = (newTick*x+prevTick*prevAmp)/(x+prevAmp)
            #            ticks[-1] = newTick
            #            ticksAmp[-1] = (x+prevAmp)/2.
            #            prevTick = newTick
            #            prevAmp = (x+prevAmp)/2.

        _, _, bestBpm= getMostStableTickLength(ticks)
        #pool.set('bestTicksStart', bestTicks[0])
        #pool.set('bestTicksEnd', bestTicks[0] + bestTicks[1])
        #ticks = essentia.postProcessTicks(ticks, ticksAmp, 60./pool['harmonicBpm'][0]);
        #ticks = essentia.postProcessTicks(ticks)

        # Accept when the most stable observed BPM matches the estimate;
        # otherwise retry on the sinusoid with the pool entries cleared.
        if fabs(bestBpm - bpms[0]) < bpmTolerance: recompute = False
        else:
            count+=1
            if count >= 5:
                bpmTolerance += 1
                count = 0
            print "recomputing!!!!"
            novelty = copy.deepcopy(pool['sinusoid'])
            pool.remove('sinusoid')
            pool.remove('novelty_curve')
            pool.remove('peaksBpm')
            pool.remove('peaksMagnitude')
            pool.remove('harmonicBpm')
            pool.remove('harmonicBpm')
            pool.remove('confidence')
            pool.remove('ticks')
            pool.remove('ticksMagnitude')

    #pyplot.plot(prodPulse, 'g')
    #pyplot.show()
    print 'estimated bpm:', bpms
    print 'bpms:', pool['peaksBpm']
    #ticks = postProcessTicks(filename, pool)
    #print 'bpm mags:', pool['peaksMagnitude']
    bpmRatios = []
    #for i, bpm1 in enumerate(bpms):
    #    bpmRatios.append([float(bpm1)/float(bpm2) for bpm2 in bpms[i:]])
    #print 'bpmRatios:', bpmRatios
    #print 'original nticks:', len(ticks)
    #print 'step:', step
    # NOTE(review): 'step' is never assigned in this function (only the
    # commented-out "step = pool['step']" above) -- this line raises
    # NameError if reached; confirm the intended source of 'step'.
    if step>1:
        ticks = essentia.array(map(lambda i: ticks[i],
                               filter(lambda i: i%step == 0,range(len(ticks)))))
    #print 'nticks:', len(ticks)
    pool.remove('ticks')
    pool.set('ticks', ticks)
def longestChain(dticks, startpos, period, tolerance):
pos = startpos
ubound = period*(1+tolerance)
lbound = period*(1-tolerance)
while (pos < len(dticks)) and\
(lbound < dticks[pos] and dticks[pos] < ubound):
pos += 1
return pos - startpos
def getMostStableTickLength(ticks):
    # Estimate the dominant inter-tick period from a histogram of tick
    # differences and find the longest run of ticks conforming to it
    # (within +/-10%).  Returns (start index of the run, run length,
    # BPM implied by the dominant period).
    nticks = len(ticks)
    dticks = zeros(nticks-1)
    for i in range(nticks-1):
        dticks[i] = (ticks[i+1] - ticks[i])
    # NOTE(review): 'bins' is a float expression here -- newer numpy
    # requires an integer bin count; confirm the targeted numpy version.
    hist, distx = np.histogram(dticks, bins=50*(1+(max(dticks)-min(dticks))))
    bestPeriod = distx[argmax(hist)] # there may be more than one candidate!!
    bestBpm = 60./bestPeriod
    print 'best period', bestPeriod
    print 'best bpm:', bestBpm
    #print 'hist:', hist, distx
    maxLength = 0
    idx = 0
    for startpos in range(nticks-1):
        l = longestChain(dticks, startpos, bestPeriod, 0.1)
        if l > maxLength :
            maxLength = l;
            idx = startpos;
    print 'max stable length:', idx, maxLength
    return idx, maxLength, bestBpm
def postProcessTicks(audioFilename, pool):
    '''Computes delta energy in order to find the correct position of the ticks'''
    # Currently only filters out ticks lying outside the audio duration;
    # the energy-based refinement below is commented out.
    # get rid of beats of beats > audio.length
    ticks = []
    ticksAmp = []
    for t, amp in zip(pool['ticks'], pool['ticksMagnitude']):
        if t < 0 or t > pool['length']: continue
        ticks.append(float(t))
        ticksAmp.append(float(amp))

    # NOTE(review): 'step' is read but unused while the refinement is disabled.
    step = pool['step']
    #ticks = essentia.postProcessTicks(ticks, ticksAmp, 60./pool['harmonicBpm'][0]);

    #beatWindowDuration = 0.01 # seconds
    #beatDuration = 0.005 # seconds
    #rmsFrameSize = 64
    #rmsHopSize = rmsFrameSize/2
    #audio = std.MonoLoader(filename=audioFilename,
    #                       sampleRate=pool['samplerate'],
    #                       downmix=pool['downmix'])()
    #for i, tick in enumerate(ticks):
    #    startTime = tick - beatWindowDuration/2.0
    #    if startTime < 0: startTime = 0
    #    endTime = startTime + beatWindowDuration + beatDuration + 0.0001
    #    slice = std.Trimmer(sampleRate=pool['samplerate'],
    #                        startTime=startTime,
    #                        endTime=endTime)(audio)
    #    frames = std.FrameGenerator(slice, frameSize=rmsFrameSize, hopSize=rmsHopSize)
    #    maxDeltaRms=0
    #    RMS = std.RMS()
    #    prevRms = 0
    #    pos = 0
    #    tickPos = pos
    #    for frame in frames:
    #        rms = RMS(frame)
    #        diff = rms - prevRms
    #        if diff > maxDeltaRms:
    #            tickPos = pos
    #            maxDeltaRms = diff
    #        pos+=1
    #        prevRms = rms
    #    ticks[i]= tick + tickPos*float(rmsHopSize)/pool['samplerate']
    return ticks
def writeBeatFile(filename, pool) :
    # Render an audio file with audible markers at each detected tick and
    # return its path ('<input>_beat.wav' next to the input file).
    beatFilename = os.path.splitext(filename)[0] + '_beat.wav' #'out_beat.wav' #
    audio = EasyLoader(filename=filename, downmix='mix', startTime=STARTTIME, endTime=ENDTIME)
    writer = MonoWriter(filename=beatFilename)
    onsetsMarker = AudioOnsetsMarker(onsets=pool['ticks'])
    audio.audio >> onsetsMarker.signal >> writer.audio
    essentia.run(audio)
    return beatFilename
def computeBeatsLoudness(filename, pool):
    # For every detected tick, measure the beat's loudness and its per-band
    # loudness ratio (Bark bands); results go to the pool under 'loudness'
    # and 'loudnessBandRatio'.
    loader = MonoLoader(filename=filename,
                        sampleRate=pool['samplerate'],
                        downmix=pool['downmix'])
    ticks = pool['ticks']#[pool['bestTicksStart']:pool['bestTicksStart']+32]
    beatsLoud = BeatsLoudness(sampleRate = pool['samplerate'],
                              frequencyBands = barkBands, #EqBands, #scheirerBands, #barkBands,
                              beats=ticks)
    loader.audio >> beatsLoud.signal
    beatsLoud.loudness >> (pool, 'loudness')
    beatsLoud.loudnessBandRatio >> (pool, 'loudnessBandRatio')
    essentia.run(loader)
def computeSpectrum(signal):
    # Debug helper: plot the autocorrelation of `signal` and print the lag
    # (>= 2, skipping the trivial zero/one lags) of its highest peak.
    #gen = VectorInput(signal)
    #fc = FrameCutter(startFromZero=False, frameSize=48, hopSize=1)
    #w = Windowing(zeroPhase=False)
    #spec = Spectrum()
    #p = essentia.Pool()
    #gen.data >> fc.signal
    #fc.frame >> w.frame >> spec.frame
    #spec.spectrum >> (p,'spectrum')
    #essentia.run(gen)
    #pyplot.imshow(p['spectrum'], cmap=pyplot.cm.hot, aspect='auto', origin='lower')

    corr = std.AutoCorrelation()(signal)
    pyplot.plot(corr)
    pyplot.show()
    print argmax(corr[2:])+2
def isPowerTwo(n):
    """Return True when ``n`` has at most one bit set.

    Classic bit trick: clearing the lowest set bit of a power of two yields
    zero.  Note that 0 also passes the test and is reported as True,
    matching the original behaviour (callers guard with n >= 2).
    """
    return n & (n - 1) == 0
def isEvenHarmonic(a,b):
    """Return True when one value is a power-of-two multiple of the other.

    Both inputs must be at least 2; the larger must divide evenly by the
    smaller and the quotient must be a power of two (1, 2, 4, ...).

    Fix: the original used ``a/b``, which is only an integer division under
    Python 2; under true division it produces a float that breaks the
    bitwise power-of-two test.  ``//`` is integral in both, and identical
    here because divisibility is checked first.
    """
    if a < 2 or b < 2:
        return False
    if a < b:
        a, b = b, a  # normalize so a >= b (replaces the recursive swap)
    if a % b != 0:
        return False
    ratio = a // b
    # power-of-two check (same bit trick as isPowerTwo)
    return ratio & (ratio - 1) == 0
def getHarmonics(array):
    """For each index, average the values found at its even-harmonic indices.

    Index j contributes to index i when isEvenHarmonic(i, j) holds; counts
    start at 1 so indices with no harmonics yield 0 without division errors.
    """
    size = len(array)
    totals = [0] * size
    counts = [1] * size
    for target in range(size):
        for source, value in enumerate(array):
            if isEvenHarmonic(target, source):
                totals[target] += value
                counts[target] += 1
    return [totals[i] / float(counts[i]) for i in range(size)]
def plot(pool, title, outputfile='out.svg', subplot=111):
    """Render a 5-panel tempo/beat-loudness analysis figure to `outputfile`.

    Panels (top to bottom): per-band autocorrelation image, summed
    correlation bars, harmonics histogram, weighted correlation bars and
    the final weighted histogram. Depends on module-level names
    (`median`, `mean`, `argmax`, `copy`, `std`, `essentia`, `pyplot`,
    `BEATWINDOW`) imported/defined elsewhere in this file.
    NOTE(review): the `title` and `subplot` parameters are never used —
    presumably leftovers; confirm before removing.
    """
    #computeSpectrum(pool['loudness'])
    ticks = pool['ticks']
    #barSize = min([ticks[i+1] - ticks[i] for i in range(len(ticks[:-1]))])/2.
    barSize = 0.8
    offset = barSize/2.
    loudness = pool['loudness']
    loudnessBand = pool['loudnessBandRatio'] # ticks x bands
    # Per-tick statistics across all frequency bands.
    medianRatiosPerTick = []
    meanRatiosPerTick = []
    for tick, energy in enumerate(loudnessBand):
        medianRatiosPerTick.append(median(energy))
        meanRatiosPerTick.append(mean(energy))
    loudnessBand = copy.deepcopy(loudnessBand.transpose()) # bands x ticks
    #xcorr = std.CrossCorrelation(minLag=0, maxLag=16)
    #acorr = std.AutoCorrelation()
    #bandCorr = []
    #for iBand, band in enumerate(loudnessBand):
    #    bandCorr.append(acorr(essentia.array(band)))
    nBands = len(loudnessBand)
    nticks = len(loudness)
    # Sliding-window (BEATWINDOW ticks) statistics per band; the window is
    # shifted back when it would run past the final tick.
    maxRatiosPerBand = []
    medianRatiosPerBand = []
    meanRatiosPerBand = []
    for idxBand, band in enumerate(loudnessBand):
        maxRatiosPerBand.append([0]*nticks)
        medianRatiosPerBand.append([0]*nticks)
        meanRatiosPerBand.append([0]*nticks)
        for idxTick in range(nticks):
            start = idxTick
            end = start+BEATWINDOW
            if (end>nticks):
                howmuch = end-nticks
                end = nticks-1
                start = end-howmuch
                if start < 0: start = 0
            medianRatiosPerBand[idxBand][idxTick] = median(band[start:end])
            maxRatiosPerBand[idxBand][idxTick] = max(band[start:end])
            meanRatiosPerBand[idxBand][idxTick] = mean(band[start:end])
    # Threshold: zero out ratios below both the band-local and tick-local
    # statistics, otherwise scale by the tick's overall loudness.
    for iBand, band in enumerate(loudnessBand):
        for tick, ratio in enumerate(band):
            #if ratio < medianRatiosPerBand[iBand][tick] and\
            #   ratio <= medianRatiosPerTick[tick]: loudnessBand[iBand][tick]=0
            bandThreshold = max(medianRatiosPerBand[iBand][tick],
                                meanRatiosPerBand[iBand][tick])
            tickThreshold = max(medianRatiosPerTick[tick],
                                meanRatiosPerTick[tick])
            if ratio < bandThreshold and ratio <= tickThreshold:
                loudnessBand[iBand][tick]=0
            else:
                loudnessBand[iBand][tick] *= loudness[tick]
                #if loudnessBand[iBand][tick] > 1 : loudnessBand[iBand][tick] = 1
    # Autocorrelate each thresholded band; lags 0/1 are skipped when
    # picking the dominant lag (+2 restores the index).
    acorr = std.AutoCorrelation()
    bandCorr = []
    maxCorr = []
    for iBand, band in enumerate(loudnessBand):
        bandCorr.append(acorr(essentia.array(band)))
        maxCorr.append(argmax(bandCorr[-1][2:])+2)
    # use as much window space as possible:
    pyplot.subplots_adjust(left=0.05, right=0.95, bottom=0.05, top=0.95)
    pyplot.subplot(511)
    pyplot.imshow(bandCorr, cmap=pyplot.cm.hot, aspect='auto', origin='lower', interpolation='nearest')
    print 'max correlation', maxCorr
    # Sum the correlations across bands per lag.
    sumCorr = []
    for tick in range(nticks):
        total = 0
        for band in bandCorr:
            total += band[tick]
        sumCorr.append(total)
    sumCorr[0] = 0
    sumCorr[1] = 0
    pyplot.subplot(512)
    maxAlpha = max(sumCorr)
    for i,val in enumerate(sumCorr):
        alpha = max(0,min(val/maxAlpha, 1))
        pyplot.bar(i, 1 , barSize, align='edge',
                   bottom=0,alpha=alpha,
                   color='r', edgecolor='w', linewidth=.3)
    print 'max sum correlation', argmax(sumCorr[2:])+2
    hist = getHarmonics(sumCorr)
    maxHist = argmax(hist)
    print 'max histogram', maxHist
    #for idx,val in enumerate(hist):
    #    if val < maxHist: hist[idx] = 0
    pyplot.subplot(513)
    for i,val in enumerate(hist):
        pyplot.bar(i, val , barSize, align='edge',
                   bottom=0, color='r', edgecolor='w', linewidth=.3)
    peakDetect = std.PeakDetection(maxPeaks=5,
                                   orderBy='amplitude',
                                   minPosition=0,
                                   maxPosition=len(sumCorr)-1,
                                   range=len(sumCorr)-1)
    peaks = peakDetect(sumCorr)[0]
    # +1e-15 guards against floating-point values just below .5 rounding down.
    peaks = [round(x+1e-15) for x in peaks]
    print 'Peaks:',peaks
    pyplot.subplot(514)
    maxAlpha = max(sumCorr)
    for i,val in enumerate(sumCorr):
        alpha = max(0,min(val/maxAlpha, 1))
        pyplot.bar(i, val, barSize, align='edge',
                   bottom=0,alpha=alpha,
                   color='r', edgecolor='w', linewidth=.3)
    # multiply both histogram and sum corr to have a weighted histogram:
    wHist = essentia.array(hist)*sumCorr*acorr(loudness)
    maxHist = argmax(wHist)
    print 'max weighted histogram', maxHist
    pyplot.subplot(515)
    maxAlpha = max(wHist)
    for i,val in enumerate(wHist):
        alpha = max(0,min(val/maxAlpha, 1))
        pyplot.bar(i, val, barSize, align='edge',
                   bottom=0,alpha=alpha,
                   color='r', edgecolor='w', linewidth=.3)
    pyplot.savefig(outputfile, dpi=300)
    #pyplot.show()
    return
def ossplay(filename): # play audio thru oss
    """Play a WAV file synchronously through the OSS device /dev/dsp.

    NOTE(review): the ImportError fallback references module-level names
    `byteorder` and `ossaudiodev` which are not imported in this view —
    confirm they are imported at the top of the file, otherwise that
    branch raises NameError.
    """
    from wave import open as waveOpen
    from ossaudiodev import open as ossOpen
    s = waveOpen(filename,'rb')
    # nc=channels, sw=sample width, fr=frame rate, nf=frame count.
    (nc,sw,fr,nf,comptype, compname) = s.getparams( )
    dsp = ossOpen('/dev/dsp','w')
    try:
        from ossaudiodev import AFMT_S16_NE
    except ImportError:
        # Older ossaudiodev without AFMT_S16_NE: pick the 16-bit format
        # matching the host byte order.
        if byteorder == "little":
            AFMT_S16_NE = ossaudiodev.AFMT_S16_LE
        else:
            AFMT_S16_NE = ossaudiodev.AFMT_S16_BE
    dsp.setparameters(AFMT_S16_NE, nc, fr)
    data = s.readframes(nf)
    s.close()
    dsp.write(data)
    dsp.close()
def getkey(audioFilename, device, f, card, lock):
    """Worker-thread loop: repeatedly play `audioFilename` via ALSA.

    Acquires `lock` around each playback so only one thread writes to the
    audio device at a time. The commented-out termios code was an earlier
    keyboard-driven controller; with it disabled, `b` is never set False,
    so this loops until the process/thread is killed (intentional for the
    thread usage in __main__).
    """
    c = None
    b = True
    while b:
        #fd = sys.stdin.fileno()
        #old = termios.tcgetattr(fd)
        #new = termios.tcgetattr(fd)
        #new[3] = new[3] & ~TERMIOS.ICANON & ~TERMIOS.ECHO
        #new[6][TERMIOS.VMIN] = 1
        #new[6][TERMIOS.VTIME] = 0
        #termios.tcsetattr(fd, TERMIOS.TCSANOW, new)
        #c = None
        lock.acquire()
        #try:
        #    c = os.read(fd, 1)
        #finally:
        #    termios.tcsetattr(fd, TERMIOS.TCSAFLUSH, old)
        #if c == '\n': ## break on a Return/Enter keypress
        #    b = False
        #    return
        #if c==' ': playAudio(audioFilename)
        #else: print 'got', c
        #ossplay(audioFilename)
        alsaplay(audioFilename, device, f, card)
        lock.release()
        time.sleep(0.1)
def alsaplay(filename, device, f, card):
    """Stream the already-open wave file `f` to ALSA PCM `device`, then rewind.

    The PCM format is chosen from the wave file's sample width; 8-bit WAV
    data is unsigned, wider widths are assumed signed little-endian.
    NOTE(review): the `filename` and `card` parameters are unused here —
    kept for call-site compatibility with getkey().
    """
    device.setchannels(f.getnchannels())
    device.setrate(f.getframerate())
    # 8bit is unsigned in wav files
    if f.getsampwidth() == 1:
        device.setformat(alsaaudio.PCM_FORMAT_U8)
    # Otherwise we assume signed data, little endian
    elif f.getsampwidth() == 2:
        device.setformat(alsaaudio.PCM_FORMAT_S16_LE)
    elif f.getsampwidth() == 3:
        device.setformat(alsaaudio.PCM_FORMAT_S24_LE)
    elif f.getsampwidth() == 4:
        device.setformat(alsaaudio.PCM_FORMAT_S32_LE)
    else:
        raise ValueError('Unsupported format')
    device.setperiodsize(320)
    data = f.readframes(320)
    while data:
        device.write(data)
        data = f.readframes(320)
    # Rewind so the next call replays from the start (getkey loops forever).
    f.setpos(0)
if __name__ == '__main__':
    # Entry point: process one audio file, or a .txt file listing many.
    # NOTE(review): `len(sys.argv) < 1` can never be true (argv always
    # contains the script name) — the usage check presumably meant `< 2`.
    if len(sys.argv) < 1:
        usage()
        sys.exit(1)
    step = 1
    if len(sys.argv) > 2:
        step = int(sys.argv[-1])
    inputfilename = sys.argv[1]
    ext = os.path.splitext(inputfilename)[1]
    if ext == '.txt': # input file contains a list of audio files
        files = open(inputfilename).read().split('\n')[:-1]
    else: files = [inputfilename]
    for audiofile in files:
        print "*"*70
        print "Processing ", audiofile
        print "*"*70
        # Ground-truth BPM lives in a sibling .bpm file when available.
        # NOTE(review): bare except silently swallows all errors here.
        try:
            print 'realBpm', open(audiofile.replace('wav', 'bpm')).read()
        except:
            print 'realBpm not found'
        # Analysis parameters come from module-level constants defined
        # elsewhere in this file.
        pool = essentia.Pool()
        pool.set('downmix', DOWNMIX)
        pool.set('framesize', FRAMESIZE)
        pool.set('hopsize', HOPSIZE)
        pool.set('weight', WEIGHT)
        pool.set('samplerate', SAMPLERATE)
        pool.set('window', WINDOW)
        pool.set('framerate', FRAMERATE)
        pool.set('tempo_framesize', TEMPO_FRAMESIZE)
        pool.set('tempo_overlap', TEMPO_OVERLAP)
        pool.set('step', step)
        #computeSegmentation(audiofile, pool)
        #segments = pool['segments']
        computeBeats(audiofile, pool)
        beatFilename = writeBeatFile(audiofile, pool)
        computeBeatsLoudness(audiofile, pool)
        imgfilename = os.path.splitext(audiofile)[0]+'.png'
        #imgfilename = imgfilename.split(os.sep)[-1]
        #print 'plotting', imgfilename
        # On platforms without ALSA just plot; elsewhere also play the
        # beat click-track in a background thread while plotting.
        if sys.platform == 'darwin' or sys.platform == 'win32':
            plot(pool,'beats loudness ' + str(audiofile), imgfilename);
        else:
            card = 'default'
            f = wave.open(beatFilename, 'rb')
            # print '%d channels, sampling rate: %d \n' % (f.getnchannels(),
            #                                              f.getframerate())
            device = alsaaudio.PCM(card=card)
            lock = thread.allocate_lock()
            thread.start_new_thread(getkey, (beatFilename, device, f, card, lock))
            plot(pool,'beats loudness ' + audiofile, imgfilename);
            f.close()
            thread.exit()
        #print 'deleting beatfile:', beatFilename
        #subprocess.call(['rm', beatFilename])
| agpl-3.0 |
benoitsteiner/tensorflow | tensorflow/examples/learn/boston.py | 33 | 1981 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Example of DNNRegressor for Housing dataset."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from sklearn import datasets
from sklearn import metrics
from sklearn import model_selection
from sklearn import preprocessing
import tensorflow as tf
def main(unused_argv):
  """Train a small DNN regressor on the Boston housing data and print MSE."""
  # Fetch the features and regression targets.
  housing = datasets.load_boston()
  features, targets = housing.data, housing.target

  # Hold out 20% of the samples for evaluation.
  train_x, test_x, train_y, test_y = model_selection.train_test_split(
      features, targets, test_size=0.2, random_state=42)

  # Standardize the training features to zero mean / unit variance.
  scaler = preprocessing.StandardScaler()
  train_x = scaler.fit_transform(train_x)

  # Fully connected network with two hidden layers of 10 units each.
  feature_columns = tf.contrib.learn.infer_real_valued_columns_from_input(
      train_x)
  regressor = tf.contrib.learn.DNNRegressor(
      feature_columns=feature_columns, hidden_units=[10, 10])

  regressor.fit(train_x, train_y, steps=5000, batch_size=1)

  # Apply the training-set scaling to the held-out features before scoring.
  scaled_test_x = scaler.transform(test_x)
  predictions = list(regressor.predict(scaled_test_x, as_iterable=True))
  score = metrics.mean_squared_error(predictions, test_y)
  print('MSE: {0:f}'.format(score))


if __name__ == '__main__':
  tf.app.run()
| apache-2.0 |
peterk87/sistr_cmd | sistr/misc/add_ref_genomes.py | 1 | 20097 | #!/usr/bin/env python
import argparse
from collections import defaultdict
import logging, shutil
import os
from subprocess import Popen
import re
from datetime import datetime
import sys
import pandas as pd
import numpy as np
from sistr.misc.reduce_to_centroid_alleles import run_allele_reduction
from sistr.sistr_cmd import genome_name_from_fasta_path
from sistr.src.blast_wrapper import BlastRunner
from sistr.src.logger import init_console_logger
from sistr.src.parsers import parse_fasta
from sistr.src.serovar_prediction import SerovarPredictor, overall_serovar_call
from sistr.src.cgmlst import CGMLST_PROFILES_PATH, run_cgmlst, allele_name, CGMLST_FULL_FASTA_PATH
from sistr.src.serovar_prediction.constants import GENOMES_TO_SEROVAR_PATH, GENOMES_TO_SPP_PATH, SEROVAR_TABLE_PATH
from sistr.src.mash import MASH_SKETCH_FILE
def init_parser():
    """Build and return the argparse.ArgumentParser for this tool.

    NOTE(review): prog='predict_serovar' looks copied from another script
    — this tool adds reference genomes; confirm the intended prog name.
    """
    prog_desc = '''Add reference genomes to sistr_cmd
Supply genome FASTA files and a table with genome name to serovar (and subspecies). If genome not present in table or table not supplied then the serovar and subspecies predictions will be used instead.
sistr_cmd ref genome info files will be written to an output directory
'''
    parser = argparse.ArgumentParser(prog='predict_serovar',
                                     formatter_class=argparse.RawDescriptionHelpFormatter,
                                     description=prog_desc)
    parser.add_argument('fastas',
                        metavar='F',
                        nargs='+',
                        help='Input genome FASTA file(s). Genome names in filenames before file extension (e.g. for "g1.fasta" genome name is "g1")')
    parser.add_argument('-o',
                        '--outdir',
                        required=True,
                        help='Output destination')
    parser.add_argument('-s',
                        '--serovar-table',
                        help='Table with serovar (and subspecies). CSV expected if extension is .csv; otherwise tab delimited expected. Columns=[genome,serovar, subspecies(optional)]')
    parser.add_argument('--force',
                        action='store_true',
                        help='Force overwrite of output directory if it exists!')
    parser.add_argument('-T',
                        '--tmp-dir',
                        default='/tmp',
                        help='Base temporary working directory for intermediate analysis files.')
    parser.add_argument('-t', '--threads',
                        type=int,
                        default=1,
                        help='Number of parallel threads to run sistr_cmd analysis.')
    parser.add_argument('-v',
                        '--verbose',
                        action='count',
                        default=2,
                        help='Logging verbosity level (-v to show warnings; -vvv to show debug info)')
    return parser
def sketch_fasta(fasta_path, outdir):
    """Create a Mash sketch from an input fasta file

    Args:
        fasta_path (str): input fasta file path. Genome name in fasta filename
        outdir (str): output directory path to write Mash sketch file to

    Returns:
        str: output Mash sketch file path

    Raises:
        AssertionError: if the expected sketch file was not produced
    """
    genome_name = genome_name_from_fasta_path(fasta_path)
    outpath = os.path.join(outdir, genome_name)
    args = ['mash', 'sketch', '-o', outpath, fasta_path]
    logging.info('Running Mash sketch with command: %s', ' '.join(args))
    p = Popen(args)
    # Fix: the exit status of `mash` was previously ignored, so a failed
    # sketch only surfaced later as a bare AssertionError with no context.
    # Surface the non-zero status explicitly to aid diagnosis; the assert
    # below still enforces the original contract.
    exit_code = p.wait()
    if exit_code != 0:
        logging.error('mash sketch exited with non-zero status %s for genome %s',
                      exit_code,
                      genome_name)
    sketch_path = outpath + '.msh'
    assert os.path.exists(sketch_path), 'Mash sketch for genome {} was not created at {}'.format(
        genome_name,
        sketch_path)
    return sketch_path
def merge_sketches(outdir, sketch_paths):
    """Merge new Mash sketches with current Mash sketches

    Args:
        outdir (str): output directory to write merged Mash sketch file
        sketch_paths (list of str): Mash sketch file paths for input fasta files

    Returns:
        str: output path for Mash sketch file with new and old sketches
    """
    merge_sketch_path = os.path.join(outdir, 'sistr.msh')
    # `mash paste <out> <sketch...>` — new sketches first, then the
    # bundled reference sketch collection.
    cmd = ['mash', 'paste', merge_sketch_path] + list(sketch_paths) + [MASH_SKETCH_FILE]
    logging.info('Running Mash paste with command: %s', ' '.join(cmd))
    proc = Popen(cmd)
    proc.wait()
    assert os.path.exists(merge_sketch_path), 'Merged sketch was not created at {}'.format(merge_sketch_path)
    return merge_sketch_path
def create_subdirs(outdir, *args):
    """Create (if needed) and return the nested directory outdir/args...

    Returns the directory path on success or when it already exists;
    returns None (after logging) when creation fails for any other reason.
    """
    target = os.path.join(outdir, *args)
    try:
        os.makedirs(target)
    except Exception as exc:
        # An already-existing tree is fine; anything else is logged and
        # reported as failure by returning None.
        if os.path.exists(target):
            return target
        logging.error(exc)
        return None
    return target
def merge_cgmlst_prediction(serovar_prediction, cgmlst_prediction):
    """Copy cgMLST result fields onto the serovar prediction object.

    Args:
        serovar_prediction: object to receive cgMLST attributes (mutated in place)
        cgmlst_prediction (dict): cgMLST result with keys 'distance',
            'genome_match', 'serovar', 'matching_alleles', 'subspecies'

    Returns:
        The same `serovar_prediction` object, for chaining.
    """
    # Destination attribute -> source dict key.
    field_map = (
        ('cgmlst_distance', 'distance'),
        ('cgmlst_genome_match', 'genome_match'),
        ('serovar_cgmlst', 'serovar'),
        ('cgmlst_matching_alleles', 'matching_alleles'),
        ('cgmlst_subspecies', 'subspecies'),
    )
    for attr, key in field_map:
        setattr(serovar_prediction, attr, cgmlst_prediction[key])
    return serovar_prediction
def run_sistr(input_fasta, tmp_dir):
    """Run the full SISTR analysis (cgMLST + antigen-gene BLAST) on one genome.

    Args:
        input_fasta (str): path to the genome FASTA file
        tmp_dir (str): base directory for the temporary BLAST workspace

    Returns:
        tuple: (serovar prediction object, per-marker cgMLST results dict)
    """
    blast_runner = None
    try:
        assert os.path.exists(input_fasta), "Input fasta file '%s' must exist!" % input_fasta
        fasta_filename = os.path.basename(input_fasta)
        genome_name = genome_name_from_fasta_path(input_fasta)
        dtnow = datetime.now()
        # Timestamped per-genome workspace avoids collisions between runs.
        genome_tmp_dir = os.path.join(tmp_dir, dtnow.strftime("%Y%m%d%H%M%S") + '-' + 'SISTR' + '-' + genome_name)
        blast_runner = BlastRunner(input_fasta, genome_tmp_dir)
        logging.info('Initializing temporary analysis directory "%s" and preparing for BLAST searching.',
                     genome_tmp_dir)
        blast_runner.prep_blast()
        logging.info('Temporary FASTA file copied to %s', blast_runner.tmp_fasta_path)
        cgmlst_prediction, cgmlst_results = run_cgmlst(blast_runner)
        spp = cgmlst_prediction['subspecies']
        serovar_predictor = SerovarPredictor(blast_runner, spp)
        serovar_predictor.predict_serovar_from_antigen_blast()
        prediction = serovar_predictor.get_serovar_prediction()
        merge_cgmlst_prediction(prediction, cgmlst_prediction)
        overall_serovar_call(prediction, serovar_predictor)
        logging.info('%s | Antigen gene BLAST serovar prediction: "%s" serogroup=%s:H1=%s:H2=%s',
                     fasta_filename,
                     prediction.serovar_antigen,
                     prediction.serogroup,
                     prediction.h1,
                     prediction.h2)
        logging.info('%s | Subspecies prediction: %s',
                     fasta_filename,
                     spp)
        logging.info('%s | Overall serovar prediction: %s',
                     fasta_filename,
                     prediction.serovar)
    finally:
        # Bug fix: if anything raised before BlastRunner was constructed
        # (e.g. the input-existence assert above), blast_runner is still
        # None and the old cleanup code raised AttributeError here, masking
        # the original exception. Only clean up when a runner exists.
        if blast_runner is not None:
            logging.info('Deleting temporary working directory at %s', blast_runner.tmp_work_dir)
            blast_runner.cleanup()
    return prediction, cgmlst_results
def cgmlst_profiles_df(fastas, cgmlst_results):
    """Build a genome-by-marker DataFrame of cgMLST allele numbers.

    Args:
        fastas (list of str): genome FASTA paths (genome name taken from filename)
        cgmlst_results (list of dict): per-genome {marker: {'name': allele, ...}}

    Returns:
        pandas.DataFrame: rows=genomes, columns=markers; None where no allele.
    """
    profiles = {}
    for fasta_path, result in zip(fastas, cgmlst_results):
        genome = genome_name_from_fasta_path(fasta_path)
        profiles[genome] = {
            marker: (int(entry['name']) if entry['name'] is not None else None)
            for marker, entry in result.items()
        }
    return pd.DataFrame(profiles).transpose()
def write_cgmlst_fasta(outdir, cgmlst_results):
    """Write a cgMLST FASTA combining the bundled alleles with any novel
    alleles observed in `cgmlst_results`.

    Returns:
        str: path of the written `cgmlst-full.fasta` file.
    """
    marker_allele_seqs = defaultdict(set)
    allowed_nts = set('ATGCatgc')
    # Load the existing allele set, dropping gap characters and skipping
    # any allele containing non-ACGT characters.
    for h, s in parse_fasta(CGMLST_FULL_FASTA_PATH):
        marker, allele = h.split('|')
        s = s.replace('-', '')
        forbidden_char = set(s) - allowed_nts
        if len(forbidden_char) > 0:
            logging.warning('Forbidden nucleotide characters %s in allele "%s". Skipping this allele!',
                            forbidden_char,
                            h)
            continue
        marker_allele_seqs[marker].add(s)
    # init default dict with int where values start as int 0
    new_allele_count = defaultdict(int)
    # Add alleles from the new genomes that are not already known.
    for x in cgmlst_results:
        for marker, res in x.items():
            seq = res['seq']
            if seq is not None:
                if seq not in marker_allele_seqs[marker]:
                    new_allele_count[marker] += 1
                    # Gap characters should have been stripped upstream;
                    # log loudly if one slips through.
                    if '-' in seq:
                        logging.error('marker %s | result %s', marker, res)
                    marker_allele_seqs[marker].add(seq)
    for marker in sorted(new_allele_count.keys()):
        logging.info('Added %s new alleles for marker %s', new_allele_count[marker], marker)
    new_cgmlst_fasta_path = os.path.join(outdir, 'cgmlst-full.fasta')
    # Headers are "marker|allele_name" where allele_name is derived from
    # the sequence by allele_name().
    with open(new_cgmlst_fasta_path, 'w') as fout:
        for marker in sorted(marker_allele_seqs.keys()):
            seqs = marker_allele_seqs[marker]
            for seq in seqs:
                fout.write('>{}|{}\n{}\n'.format(marker, allele_name(seq), seq))
    logging.info('cgMLST FASTA written to "%s" with %s novel alleles',
                 new_cgmlst_fasta_path,
                 sum([v for k, v in new_allele_count.items()]))
    return new_cgmlst_fasta_path
def write_cgmlst_profiles_csv(outdir, cgmlst_results, genome_names):
    """Append the new genomes' cgMLST allele profiles to the bundled
    profiles table and write the combined table as CSV.

    NOTE(review): near-duplicate of write_cgmlst_profiles_hdf5 below —
    only the I/O format differs; candidates for sharing a helper.
    """
    df_profiles_old = pd.read_csv(CGMLST_PROFILES_PATH, index_col=0)
    # Restrict new profiles to the marker set of the existing table.
    markers = df_profiles_old.columns
    genome_marker_allele_results = defaultdict(dict)
    for genome, cgmlst_result in zip(genome_names, cgmlst_results):
        for marker in markers:
            allele = None
            if marker in cgmlst_result:
                r = cgmlst_result[marker]
                if 'name' in r:
                    allele = int(r['name']) if r['name'] is not None else None
                else:
                    allele = None
            genome_marker_allele_results[genome][marker] = allele
    df_profiles_new = pd.DataFrame(genome_marker_allele_results).transpose()
    df_all_profiles = pd.concat([df_profiles_new, df_profiles_old])
    profiles_output_path = os.path.join(outdir, 'cgmlst-profiles.csv')
    # float_format drops the ".0" that NaN-bearing integer columns acquire.
    df_all_profiles.to_csv(profiles_output_path, float_format='%.0f')
    assert os.path.exists(profiles_output_path), 'cgMLST profiles CSV file was not written to "{}"'.format(
        profiles_output_path)
    logging.info('cgMLST profiles (dim=%s) CSV written to "%s"',
                 df_all_profiles.shape,
                 profiles_output_path)
def read_genomes_to_x(path):
    """Parse a two-column, tab-delimited file into a {genome: value} dict.

    Each line must contain exactly one tab separating genome name and
    value; surrounding whitespace (including the newline) is stripped.
    """
    with open(path) as fh:
        pairs = (line.strip().split('\t') for line in fh)
        return {genome: value for genome, value in pairs}
def write_genomes_to_x_table(path, genome_to_x):
    """Write a {genome: value} dict to `path` as two tab-delimited columns."""
    lines = ['{}\t{}\n'.format(genome, value)
             for genome, value in genome_to_x.items()]
    with open(path, 'w') as fout:
        fout.writelines(lines)
def write_serovar_and_spp_tables(outdir, df_serovar, predictions, genome_names):
    """Merge user-supplied and predicted serovar/subspecies designations
    with the bundled genome tables and write the combined tables to
    `outdir` (genomes-to-serovar.txt / genomes-to-subspecies.txt).

    User-supplied values (df_serovar) take precedence over predictions;
    mismatches with the predictions are logged as warnings.
    """
    genome_serovar = read_genomes_to_x(GENOMES_TO_SEROVAR_PATH)
    genome_spp = read_genomes_to_x(GENOMES_TO_SPP_PATH)
    # prediction serovars and subspecies
    pred_genome_serovar = {}
    pred_genome_spp = {}
    for genome, prediction in zip(genome_names, predictions):
        pred_dict = prediction.__dict__
        pred_genome_serovar[genome] = pred_dict['serovar']
        if 'cgmlst_subspecies' in pred_dict:
            pred_genome_spp[genome] = pred_dict['cgmlst_subspecies']
        else:
            pred_genome_spp[genome] = None
    if df_serovar is not None:
        for i, row in df_serovar.iterrows():
            genome = row['genome']
            serovar = row['serovar']
            # NOTE(review): this is a substring test on the predicted
            # serovar string, not equality — presumably intentional for
            # compound predictions; confirm.
            if not serovar in pred_genome_serovar[genome]:
                logging.warning('Genome "%s" user specified serovar "%s" not in serovar prediction "%s"',
                                genome,
                                serovar,
                                pred_genome_serovar[genome])
            if 'subspecies' in df_serovar:
                spp = row['subspecies']
                if spp != pred_genome_spp[genome]:
                    logging.warning('Genome "%s" provided subspecies of "%s" does not match prediction of "%s"',
                                    genome,
                                    spp,
                                    pred_genome_spp[genome])
            else:
                spp = pred_genome_spp[genome]
                logging.warning('Genome "%s" subspecies info not provided. Using subspecies prediction of "%s"',
                                genome,
                                spp)
            genome_serovar[genome] = serovar
            genome_spp[genome] = spp
    else:
        logging.warning(
            'User did not specify serovar/subspecies table! Using SISTR serovar and subspecies predictions for all genomes.')
        for genome in genome_names:
            genome_serovar[genome] = pred_genome_serovar[genome]
            genome_spp[genome] = pred_genome_spp[genome]
    genomes_to_serovar_path = os.path.join(outdir, 'genomes-to-serovar.txt')
    genomes_to_spp_path = os.path.join(outdir, 'genomes-to-subspecies.txt')
    write_genomes_to_x_table(genomes_to_serovar_path, genome_serovar)
    assert os.path.exists(genomes_to_serovar_path), '{} file could not be written!'.format(
        genomes_to_serovar_path)
    logging.info('Wrote genomes to serovars table at %s', genomes_to_serovar_path)
    write_genomes_to_x_table(genomes_to_spp_path, genome_spp)
    assert os.path.exists(genomes_to_spp_path), '{} file could not be written!'.format(
        genomes_to_spp_path)
    logging.info('Wrote genomes to subspecies table at %s', genomes_to_spp_path)
def create_merge_mash_sketches(input_fastas, data_outdir, sketch_outdir):
    """Sketch each input FASTA into `sketch_outdir`, then merge the new
    sketches with the bundled reference sketches into `data_outdir`."""
    new_sketches = []
    for fasta_path in input_fastas:
        new_sketches.append(sketch_fasta(fasta_path, sketch_outdir))
    merge_sketches(data_outdir, new_sketches)
def write_cgmlst_profiles_hdf5(outdir, cgmlst_results, genome_names):
    """Append the new genomes' cgMLST allele profiles to the bundled
    profiles table and write the combined table as HDF5 (key='cgmlst').

    NOTE(review): near-duplicate of write_cgmlst_profiles_csv above —
    only the I/O format differs; candidates for sharing a helper.
    """
    df_profiles_old = pd.read_hdf(CGMLST_PROFILES_PATH, key='cgmlst')
    # Restrict new profiles to the marker set of the existing table.
    markers = df_profiles_old.columns
    genome_marker_allele_results = defaultdict(dict)
    for genome, cgmlst_result in zip(genome_names, cgmlst_results):
        for marker in markers:
            allele = None
            if marker in cgmlst_result:
                r = cgmlst_result[marker]
                if 'name' in r:
                    allele = int(r['name']) if r['name'] is not None else None
                else:
                    allele = None
            genome_marker_allele_results[genome][marker] = allele
    df_profiles_new = pd.DataFrame(genome_marker_allele_results).transpose()
    df_all_profiles = pd.concat([df_profiles_new, df_profiles_old])
    profiles_output_path = os.path.join(outdir, 'cgmlst-profiles.hdf')
    df_all_profiles.to_hdf(profiles_output_path, float_format='%.0f',key='cgmlst')
    assert os.path.exists(profiles_output_path), 'cgMLST profiles HDF5 file was not written to "{}"'.format(
        profiles_output_path)
    logging.info('cgMLST profiles (dim=%s) HDF5 written to "%s"',
                 df_all_profiles.shape,
                 profiles_output_path)
def main():
    """CLI entry point: run SISTR on each genome and regenerate the
    sistr_cmd reference data files (cgMLST FASTA/profiles, serovar and
    subspecies tables, merged Mash sketches) in the output directory."""
    parser = init_parser()
    args = parser.parse_args()
    init_console_logger(args.verbose)
    logging.debug(args)
    input_fastas = args.fastas
    outdir = args.outdir
    tmp_dir = args.tmp_dir
    serovar_table_path = args.serovar_table
    threads = args.threads
    force = args.force
    assert len(input_fastas) > 0, 'No FASTA files specified!'
    for input_fasta in input_fastas:
        assert os.path.exists(input_fasta), 'Genome FASTA file does not exist at "{}"'.format(input_fasta)
    genome_names = [genome_name_from_fasta_path(x) for x in input_fastas]
    logging.info('You have specified %s genomes to add to current sistr_cmd data files! %s',
                 len(genome_names),
                 genome_names)
    # --force wipes and recreates an existing output directory.
    if os.path.exists(outdir):
        if not force:
            raise Exception('Output directory already exists at {}!'.format(outdir))
        else:
            shutil.rmtree(outdir)
            logging.warning('Using existing output directory at %s', outdir)
    # NOTE(review): bare except hides makedirs failures; the assert below
    # is the actual guard.
    try:
        os.makedirs(outdir)
    except:
        pass
    assert os.path.exists(outdir), 'Output directory could not be created!'
    if serovar_table_path:
        assert os.path.exists(serovar_table_path), 'Provided serovar table path does not exist! {}'.format(
            serovar_table_path)
        logging.info('Parsing serovar table from "%s"', serovar_table_path)
        # CSV vs tab-delimited chosen purely by the .csv extension.
        if re.match(r'.*.csv$', serovar_table_path):
            logging.info('Trying to read serovar table "%s" as CSV', serovar_table_path)
            df_serovar = pd.read_csv(serovar_table_path)
        else:
            # NOTE(review): pd.read_table is deprecated in modern pandas;
            # pd.read_csv(..., sep='\t') is the replacement.
            logging.info('Trying to read serovar table "%s" as tab-delimited', serovar_table_path)
            df_serovar = pd.read_table(serovar_table_path)
        # Every column in the user table must be one of the expected ones.
        expected_columns = ['genome', 'serovar','subspecies']
        assert np.all(
            df_serovar.columns.isin(expected_columns)), 'User serovar table did not contain expected columns {}'.format(
            expected_columns)
        if 'subspecies' not in df_serovar.columns:
            logging.warning(
                'User serovar table did not contain "subspecies" column so the sistr_cmd subspecies prediction will be used!')
        # Every input genome must appear in the user table.
        genome_names_series = pd.Series(genome_names)
        genomes_in_serovar_table = genome_names_series.isin(df_serovar.genome)
        if not np.all(genomes_in_serovar_table):
            missing_genomes = '-->,->'.join([x for x in genome_names_series[~genomes_in_serovar_table]])
            logging.error('The following genomes were not found in the serovar table: %s', missing_genomes)
            raise Exception('Not all user provided genome FASTA files in the provided serovar table!')
        df_wklm = pd.read_csv(SEROVAR_TABLE_PATH)
        logging.info('Checking for non-standard serovar designations')
        # NOTE(review): Series.iteritems and DataFrame.ix are removed in
        # modern pandas (use .items() and .loc); unchanged here since this
        # file targets the legacy pandas pinned by the project.
        serovars_not_in_wklm = df_serovar.serovar[~df_serovar.serovar.isin(df_wklm.Serovar)]
        for row_idx, serovar in serovars_not_in_wklm.iteritems():
            logging.warning('Non-standard serovar %s at row %s for genome %s!', serovar, row_idx,
                            df_serovar.ix[row_idx]['genome'])
    else:
        logging.warning('No genome to serovar table specified! Using SISTR serovar predictions')
        df_serovar = None
    # Run SISTR either serially or via a multiprocessing pool.
    if threads == 1:
        logging.info('Serial single threaded run mode on %s genomes', len(input_fastas))
        outputs = [run_sistr(input_fasta, tmp_dir) for input_fasta in input_fastas]
    else:
        from multiprocessing import Pool
        logging.info('Initializing thread pool with %s threads', threads)
        pool = Pool(processes=threads)
        logging.info('Running SISTR analysis asynchronously on %s genomes', len(input_fastas))
        res = [pool.apply_async(run_sistr, (input_fasta, tmp_dir)) for input_fasta in input_fastas]
        logging.info('Getting SISTR analysis results')
        outputs = [x.get() for x in res]
    # collect results from sistr analysis
    prediction_outputs = [x for x, y in outputs]
    cgmlst_results = [y for x, y in outputs]
    # create some output dirs
    data_outdir = create_subdirs(outdir, 'data')
    cgmlst_outdir = create_subdirs(outdir, 'data', 'cgmlst')
    sketch_outdir = create_subdirs(outdir, 'mash-sketches')
    # write files with new and old data
    cgmlst_fasta = write_cgmlst_fasta(cgmlst_outdir, cgmlst_results)
    write_cgmlst_profiles_hdf5(cgmlst_outdir, cgmlst_results, genome_names)
    write_serovar_and_spp_tables(data_outdir, df_serovar, prediction_outputs, genome_names)
    create_merge_mash_sketches(input_fastas, data_outdir, sketch_outdir)
    # Reduce the full allele set to representative centroid alleles.
    centroid_alleles_path = os.path.join(cgmlst_outdir, 'cgmlst-centroid.fasta')
    run_allele_reduction(cgmlst_fasta, centroid_alleles_path, threads=threads)
    logging.info('Done!')


if __name__ == '__main__':
    main()
| apache-2.0 |
bendemott/Python-Shapely-Examples | shapelyAreaSearch.py | 2 | 2419 | '''
@author Ben DeMott
@file shapely_radius_plot.py
In this example we will perform an area/radius search.
We will create a bunch of points in a 2d coordinate system and then we will
create a circle or a perimeter. The we will do a search for any points that
are within the circles perimeter! :) :) :)
'''
import random, os, sys, time, numpy
from shapely import *
from shapely.geometry import Point
# Script body: scatter random points on a 10000x10000 grid, build a random
# circle with Shapely, collect the points the circle contains, then render
# the result to a PNG with matplotlib.
filename = 'radius_search.png'
numberOfPoints = 1000
gridWidth = 10000
gridHeight = 10000
shapePoints = []
# Random circle: center anywhere on the grid, radius 500-1500 units.
circleRadius = random.randint(500, 1500)
circleX = random.randint(0, gridWidth)
circleY = random.randint(0, gridHeight)
sCircle = Point(circleX, circleY)
# buffer() turns the point into a polygon approximating the circle
# (16 segments per quarter circle).
sCircle = sCircle.buffer(circleRadius, 16)
pointList = []
for i in range(numberOfPoints):
    x = random.randint(0, gridWidth)
    y = random.randint(0, gridHeight)
    pointList.append((x, y))
    iPoint = Point(x, y)
    iPoint.idx = i # set our custom attribute. (if this doesnt work I have other ways)
    shapePoints.append(iPoint)
# Linear scan: record indices of points inside the circle and time it.
matchingPoints = []
searchBench = time.time()
for idx, point in enumerate(shapePoints):
    if sCircle.contains(point):
        matchingPoints.append(idx)
searchBench = time.time() - searchBench
print "There were %d points within the circle [%d, %d] - r[%d]\n" % (len(matchingPoints), circleX, circleY, circleRadius)
print "Calculation Took %s seconds for %s points" % (searchBench, numberOfPoints)
print "Saving Graph to %s" % (filename)
#_-------------------------------------------------------------------------------
# DRAW A REPRESENTATION OF THE LOGIC ABOVE:
import matplotlib
matplotlib.use('Agg') # Do NOT attempt to open X11 instance
from pylab import *
from matplotlib.patches import Circle
import matplotlib.pyplot as pyplot
matplotlib.rcParams['lines.linewidth'] = 2
pyplot.axis([0, gridWidth, 0, gridHeight])
pyplot.grid(True)
# Setting the axis labels.
pyplot.xlabel('X Space')
pyplot.ylabel('Y Space')
#Give the plot a title
pyplot.title('Radius Search Plot Using Shapely (%d Points)' % (numberOfPoints))
# Draw the collision circle/boundary
cir = Circle((circleX, circleY), radius=circleRadius, fc='b')
cir.set_alpha(0.4)
pyplot.gca().add_patch(cir)
# Matches are drawn as opaque red dots, non-matches as faint green dots.
for idx, point in enumerate(pointList):
    style = 'go'
    iAlpha = 0.4
    if(idx in matchingPoints):
        style = 'ro'
        iAlpha = 1
    pyplot.plot(point[0], point[1], style, linewidth=1, markersize=3, alpha=iAlpha)
pyplot.savefig(os.getcwd()+'/'+str(filename))
| mit |
jungla/ICOM-fluidity-toolbox | functions.py/detect_peaks.py | 2 | 6546 | from __future__ import division, print_function
import numpy as np
__author__ = "Marcos Duarte, https://github.com/demotu/BMC"
__version__ = "1.0.4"
__license__ = "MIT"
def detect_peaks(x, mph=None, mpd=1, threshold=0, edge='rising',
                 kpsh=False, valley=False, show=False, ax=None):
    """Detect peaks in data based on their amplitude and other features.

    Parameters
    ----------
    x : 1D array_like
        data.
    mph : {None, number}, optional (default = None)
        detect peaks that are greater than minimum peak height.
    mpd : positive integer, optional (default = 1)
        detect peaks that are at least separated by minimum peak distance (in
        number of data).
    threshold : positive number, optional (default = 0)
        detect peaks (valleys) that are greater (smaller) than `threshold`
        in relation to their immediate neighbors.
    edge : {None, 'rising', 'falling', 'both'}, optional (default = 'rising')
        for a flat peak, keep only the rising edge ('rising'), only the
        falling edge ('falling'), both edges ('both'), or don't detect a
        flat peak (None).
    kpsh : bool, optional (default = False)
        keep peaks with same height even if they are closer than `mpd`.
    valley : bool, optional (default = False)
        if True (1), detect valleys (local minima) instead of peaks.
    show : bool, optional (default = False)
        if True (1), plot data in matplotlib figure.
    ax : a matplotlib.axes.Axes instance, optional (default = None).

    Returns
    -------
    ind : 1D array_like
        indeces of the peaks in `x`.

    Notes
    -----
    The detection of valleys instead of peaks is performed internally by simply
    negating the data: `ind_valleys = detect_peaks(-x)`

    The function can handle NaN's

    See this IPython Notebook [1]_.

    References
    ----------
    .. [1] http://nbviewer.ipython.org/github/demotu/BMC/blob/master/notebooks/DetectPeaks.ipynb

    Examples
    --------
    >>> from detect_peaks import detect_peaks
    >>> x = np.random.randn(100)
    >>> x[60:81] = np.nan
    >>> # detect all peaks and plot data
    >>> ind = detect_peaks(x, show=True)
    >>> print(ind)

    >>> x = np.sin(2*np.pi*5*np.linspace(0, 1, 200)) + np.random.randn(200)/5
    >>> # set minimum peak height = 0 and minimum peak distance = 20
    >>> detect_peaks(x, mph=0, mpd=20, show=True)

    >>> x = [0, 1, 0, 2, 0, 3, 0, 2, 0, 1, 0]
    >>> # set minimum peak distance = 2
    >>> detect_peaks(x, mpd=2, show=True)

    >>> x = np.sin(2*np.pi*5*np.linspace(0, 1, 200)) + np.random.randn(200)/5
    >>> # detection of valleys instead of peaks
    >>> detect_peaks(x, mph=0, mpd=20, valley=True, show=True)

    >>> x = [0, 1, 1, 0, 1, 1, 0]
    >>> # detect both edges
    >>> detect_peaks(x, edge='both', show=True)

    >>> x = [-2, 1, -2, 2, 1, 1, 3, 0]
    >>> # set threshold = 2
    >>> detect_peaks(x, threshold = 2, show=True)
    """
    x = np.atleast_1d(x).astype('float64')
    if x.size < 3:
        return np.array([], dtype=int)
    # Valley detection is peak detection on the negated signal.
    if valley:
        x = -x
    # find indices of all peaks via the first difference
    dx = x[1:] - x[:-1]
    # handle NaN's: replace with +inf so they never form peak candidates
    indnan = np.where(np.isnan(x))[0]
    if indnan.size:
        x[indnan] = np.inf
        dx[np.where(np.isnan(dx))[0]] = np.inf
    # ine: strict peaks; ire/ife: rising/falling edges of flat peaks
    ine, ire, ife = np.array([[], [], []], dtype=int)
    if not edge:
        ine = np.where((np.hstack((dx, 0)) < 0) & (np.hstack((0, dx)) > 0))[0]
    else:
        if edge.lower() in ['rising', 'both']:
            ire = np.where((np.hstack((dx, 0)) <= 0) & (np.hstack((0, dx)) > 0))[0]
        if edge.lower() in ['falling', 'both']:
            ife = np.where((np.hstack((dx, 0)) < 0) & (np.hstack((0, dx)) >= 0))[0]
    ind = np.unique(np.hstack((ine, ire, ife)))
    # handle NaN's
    if ind.size and indnan.size:
        # NaN's and values close to NaN's cannot be peaks
        ind = ind[np.in1d(ind, np.unique(np.hstack((indnan, indnan-1, indnan+1))), invert=True)]
    # first and last values of x cannot be peaks
    if ind.size and ind[0] == 0:
        ind = ind[1:]
    if ind.size and ind[-1] == x.size-1:
        ind = ind[:-1]
    # remove peaks < minimum peak height
    if ind.size and mph is not None:
        ind = ind[x[ind] >= mph]
    # remove peaks whose smaller neighbor difference is below `threshold`
    if ind.size and threshold > 0:
        dx = np.min(np.vstack([x[ind]-x[ind-1], x[ind]-x[ind+1]]), axis=0)
        ind = np.delete(ind, np.where(dx < threshold)[0])
    # detect small peaks closer than minimum peak distance: process peaks
    # tallest-first and delete any peak within mpd of an already-kept one
    if ind.size and mpd > 1:
        ind = ind[np.argsort(x[ind])][::-1]  # sort ind by peak height
        idel = np.zeros(ind.size, dtype=bool)
        for i in range(ind.size):
            if not idel[i]:
                # keep peaks with the same height if kpsh is True
                idel = idel | (ind >= ind[i] - mpd) & (ind <= ind[i] + mpd) \
                    & (x[ind[i]] > x[ind] if kpsh else True)
                idel[i] = 0  # Keep current peak
        # remove the small peaks and sort back the indices by their occurrence
        ind = np.sort(ind[~idel])
    if show:
        # Restore the original signal (NaNs and sign) before plotting.
        if indnan.size:
            x[indnan] = np.nan
        if valley:
            x = -x
        _plot(x, mph, mpd, threshold, edge, valley, ax, ind)
    return ind
def _plot(x, mph, mpd, threshold, edge, valley, ax, ind):
    """Render the signal and mark the detected peaks/valleys.

    Companion plotting helper for ``detect_peaks``; see its docstring
    for the meaning of the parameters.
    """
    try:
        import matplotlib.pyplot as plt
    except ImportError:
        # Plotting is optional; degrade gracefully without matplotlib.
        print('matplotlib is not available.')
        return

    if ax is None:
        _, ax = plt.subplots(1, 1, figsize=(8, 4))

    ax.plot(x, 'b', lw=1)
    if ind.size:
        label = 'valley' if valley else 'peak'
        if ind.size > 1:
            label = label + 's'
        ax.plot(ind, x[ind], '+', mfc=None, mec='r', mew=2, ms=8,
                label='%d %s' % (ind.size, label))
        ax.legend(loc='best', framealpha=.5, numpoints=1)

    # Pad the axes a little beyond the data range.
    ax.set_xlim(-.02*x.size, x.size*1.02-1)
    finite = x[np.isfinite(x)]
    ymin, ymax = finite.min(), finite.max()
    yrange = ymax - ymin if ymax > ymin else 1
    ax.set_ylim(ymin - 0.1*yrange, ymax + 0.1*yrange)

    ax.set_xlabel('Data #', fontsize=14)
    ax.set_ylabel('Amplitude', fontsize=14)
    mode = 'Valley detection' if valley else 'Peak detection'
    ax.set_title("%s (mph=%s, mpd=%d, threshold=%s, edge='%s')"
                 % (mode, str(mph), mpd, str(threshold), edge))
    # plt.grid()
    plt.show()
| gpl-2.0 |
NunoEdgarGub1/scikit-learn | sklearn/cluster/spectral.py | 233 | 18153 | # -*- coding: utf-8 -*-
"""Algorithms for spectral clustering"""
# Author: Gael Varoquaux gael.varoquaux@normalesup.org
# Brian Cheung
# Wei LI <kuantkid@gmail.com>
# License: BSD 3 clause
import warnings
import numpy as np
from ..base import BaseEstimator, ClusterMixin
from ..utils import check_random_state, as_float_array
from ..utils.validation import check_array
from ..utils.extmath import norm
from ..metrics.pairwise import pairwise_kernels
from ..neighbors import kneighbors_graph
from ..manifold import spectral_embedding
from .k_means_ import k_means
def discretize(vectors, copy=True, max_svd_restarts=30, n_iter_max=20,
               random_state=None):
    """Search for a partition matrix (clustering) which is closest to the
    eigenvector embedding.

    Parameters
    ----------
    vectors : array-like, shape: (n_samples, n_clusters)
        The embedding space of the samples.

    copy : boolean, optional, default: True
        Whether to copy vectors, or perform in-place normalization.

    max_svd_restarts : int, optional, default: 30
        Maximum number of attempts to restart SVD if convergence fails

    n_iter_max : int, optional, default: 20
        Maximum number of iterations to attempt in rotation and partition
        matrix search if machine precision convergence is not reached

    random_state: int seed, RandomState instance, or None (default)
        A pseudo random number generator used for the initialization of the
        of the rotation matrix

    Returns
    -------
    labels : array of integers, shape: n_samples
        The labels of the clusters.

    References
    ----------

    - Multiclass spectral clustering, 2003
      Stella X. Yu, Jianbo Shi
      http://www1.icsi.berkeley.edu/~stellayu/publication/doc/2003kwayICCV.pdf

    Notes
    -----

    The eigenvector embedding is used to iteratively search for the
    closest discrete partition.  First, the eigenvector embedding is
    normalized to the space of partition matrices. An optimal discrete
    partition matrix closest to this normalized embedding multiplied by
    an initial rotation is calculated.  Fixing this discrete partition
    matrix, an optimal rotation matrix is calculated.  These two
    calculations are performed until convergence.  The discrete partition
    matrix is returned as the clustering solution.  Used in spectral
    clustering, this method tends to be faster and more robust to random
    initialization than k-means.

    """

    from scipy.sparse import csc_matrix
    from scipy.linalg import LinAlgError

    random_state = check_random_state(random_state)

    vectors = as_float_array(vectors, copy=copy)

    eps = np.finfo(float).eps
    n_samples, n_components = vectors.shape

    # Normalize the eigenvectors to an equal length of a vector of ones.
    # Reorient the eigenvectors to point in the negative direction with respect
    # to the first element.  This may have to do with constraining the
    # eigenvectors to lie in a specific quadrant to make the discretization
    # search easier.
    norm_ones = np.sqrt(n_samples)
    for i in range(vectors.shape[1]):
        vectors[:, i] = (vectors[:, i] / norm(vectors[:, i])) \
            * norm_ones
        if vectors[0, i] != 0:
            vectors[:, i] = -1 * vectors[:, i] * np.sign(vectors[0, i])

    # Normalize the rows of the eigenvectors.  Samples should lie on the unit
    # hypersphere centered at the origin.  This transforms the samples in the
    # embedding space to the space of partition matrices.
    vectors = vectors / np.sqrt((vectors ** 2).sum(axis=1))[:, np.newaxis]

    svd_restarts = 0
    has_converged = False

    # If there is an exception we try to randomize and rerun SVD again
    # do this max_svd_restarts times.
    while (svd_restarts < max_svd_restarts) and not has_converged:

        # Initialize first column of rotation matrix with a row of the
        # eigenvectors
        rotation = np.zeros((n_components, n_components))
        rotation[:, 0] = vectors[random_state.randint(n_samples), :].T

        # To initialize the rest of the rotation matrix, find the rows
        # of the eigenvectors that are as orthogonal to each other as
        # possible
        c = np.zeros(n_samples)
        for j in range(1, n_components):
            # Accumulate c to ensure row is as orthogonal as possible to
            # previous picks as well as current one
            c += np.abs(np.dot(vectors, rotation[:, j - 1]))
            rotation[:, j] = vectors[c.argmin(), :].T

        last_objective_value = 0.0
        n_iter = 0

        while not has_converged:
            n_iter += 1

            t_discrete = np.dot(vectors, rotation)

            labels = t_discrete.argmax(axis=1)
            vectors_discrete = csc_matrix(
                (np.ones(len(labels)), (np.arange(0, n_samples), labels)),
                shape=(n_samples, n_components))

            t_svd = vectors_discrete.T * vectors

            try:
                U, S, Vh = np.linalg.svd(t_svd)
            except LinAlgError:
                # Only a *failed* decomposition consumes a restart; the
                # outer loop then re-randomizes the rotation and tries
                # again.  (Previously the counter was incremented on every
                # successful SVD, which wrongly exhausted the restart
                # budget during normal iteration.)
                svd_restarts += 1
                print("SVD did not converge, randomizing and trying again")
                break

            ncut_value = 2.0 * (n_samples - S.sum())
            if ((abs(ncut_value - last_objective_value) < eps) or
                    (n_iter > n_iter_max)):
                has_converged = True
            else:
                # otherwise calculate rotation and continue
                last_objective_value = ncut_value
                rotation = np.dot(Vh.T, U.T)

    if not has_converged:
        raise LinAlgError('SVD did not converge')
    return labels
def spectral_clustering(affinity, n_clusters=8, n_components=None,
                        eigen_solver=None, random_state=None, n_init=10,
                        eigen_tol=0.0, assign_labels='kmeans'):
    """Apply clustering to a projection to the normalized laplacian.

    In practice Spectral Clustering is very useful when the structure of
    the individual clusters is highly non-convex or more generally when
    a measure of the center and spread of the cluster is not a suitable
    description of the complete cluster. For instance when clusters are
    nested circles on the 2D plan.

    If affinity is the adjacency matrix of a graph, this method can be
    used to find normalized graph cuts.

    Read more in the :ref:`User Guide <spectral_clustering>`.

    Parameters
    -----------
    affinity : array-like or sparse matrix, shape: (n_samples, n_samples)
        The affinity matrix describing the relationship of the samples to
        embed. **Must be symmetric**.

        Possible examples:

        - adjacency matrix of a graph,
        - heat kernel of the pairwise distance matrix of the samples,
        - symmetric k-nearest neighbours connectivity matrix of the samples.

    n_clusters : integer, optional
        Number of clusters to extract.

    n_components : integer, optional, default is n_clusters
        Number of eigen vectors to use for the spectral embedding

    eigen_solver : {None, 'arpack', 'lobpcg', or 'amg'}
        The eigenvalue decomposition strategy to use. AMG requires pyamg
        to be installed. It can be faster on very large, sparse problems,
        but may also lead to instabilities

    random_state : int seed, RandomState instance, or None (default)
        A pseudo random number generator used for the initialization
        of the lobpcg eigen vectors decomposition when eigen_solver == 'amg'
        and by the K-Means initialization.

    n_init : int, optional, default: 10
        Number of time the k-means algorithm will be run with different
        centroid seeds. The final results will be the best output of
        n_init consecutive runs in terms of inertia.

    eigen_tol : float, optional, default: 0.0
        Stopping criterion for eigendecomposition of the Laplacian matrix
        when using arpack eigen_solver.

    assign_labels : {'kmeans', 'discretize'}, default: 'kmeans'
        The strategy to use to assign labels in the embedding
        space.  There are two ways to assign labels after the laplacian
        embedding.  k-means can be applied and is a popular choice. But it can
        also be sensitive to initialization. Discretization is another
        approach which is less sensitive to random initialization. See
        the 'Multiclass spectral clustering' paper referenced below for
        more details on the discretization approach.

    Returns
    -------
    labels : array of integers, shape: n_samples
        The labels of the clusters.

    References
    ----------

    - Normalized cuts and image segmentation, 2000
      Jianbo Shi, Jitendra Malik
      http://citeseer.ist.psu.edu/viewdoc/summary?doi=10.1.1.160.2324

    - A Tutorial on Spectral Clustering, 2007
      Ulrike von Luxburg
      http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.165.9323

    - Multiclass spectral clustering, 2003
      Stella X. Yu, Jianbo Shi
      http://www1.icsi.berkeley.edu/~stellayu/publication/doc/2003kwayICCV.pdf

    Notes
    ------
    The graph should contain only one connect component, elsewhere
    the results make little sense.

    This algorithm solves the normalized cut for k=2: it is a
    normalized spectral clustering.
    """
    if assign_labels not in ('kmeans', 'discretize'):
        raise ValueError("The 'assign_labels' parameter should be "
                         "'kmeans' or 'discretize', but '%s' was given"
                         % assign_labels)

    random_state = check_random_state(random_state)
    if n_components is None:
        n_components = n_clusters

    # Embed the samples using the first eigenvectors of the normalized
    # graph Laplacian (the trivial first component is kept on purpose).
    maps = spectral_embedding(affinity, n_components=n_components,
                              eigen_solver=eigen_solver,
                              random_state=random_state,
                              eigen_tol=eigen_tol, drop_first=False)

    # Assign discrete cluster labels in the embedding space.
    if assign_labels == 'discretize':
        labels = discretize(maps, random_state=random_state)
    else:
        _, labels, _ = k_means(maps, n_clusters, random_state=random_state,
                               n_init=n_init)

    return labels
class SpectralClustering(BaseEstimator, ClusterMixin):
    """Apply clustering to a projection to the normalized laplacian.

    In practice Spectral Clustering is very useful when the structure of
    the individual clusters is highly non-convex or more generally when
    a measure of the center and spread of the cluster is not a suitable
    description of the complete cluster. For instance when clusters are
    nested circles on the 2D plan.

    If affinity is the adjacency matrix of a graph, this method can be
    used to find normalized graph cuts.

    When calling ``fit``, an affinity matrix is constructed using either
    kernel function such the Gaussian (aka RBF) kernel of the euclidean
    distanced ``d(X, X)``::

            np.exp(-gamma * d(X,X) ** 2)

    or a k-nearest neighbors connectivity matrix.

    Alternatively, using ``precomputed``, a user-provided affinity
    matrix can be used.

    Read more in the :ref:`User Guide <spectral_clustering>`.

    Parameters
    -----------
    n_clusters : integer, optional
        The dimension of the projection subspace.

    affinity : string, array-like or callable, default 'rbf'
        If a string, this may be one of 'nearest_neighbors', 'precomputed',
        'rbf' or one of the kernels supported by
        `sklearn.metrics.pairwise_kernels`.

        Only kernels that produce similarity scores (non-negative values that
        increase with similarity) should be used. This property is not checked
        by the clustering algorithm.

    gamma : float
        Scaling factor of RBF, polynomial, exponential chi^2 and
        sigmoid affinity kernel. Ignored for
        ``affinity='nearest_neighbors'``.

    degree : float, default=3
        Degree of the polynomial kernel. Ignored by other kernels.

    coef0 : float, default=1
        Zero coefficient for polynomial and sigmoid kernels.
        Ignored by other kernels.

    n_neighbors : integer
        Number of neighbors to use when constructing the affinity matrix using
        the nearest neighbors method. Ignored for ``affinity='rbf'``.

    eigen_solver : {None, 'arpack', 'lobpcg', or 'amg'}
        The eigenvalue decomposition strategy to use. AMG requires pyamg
        to be installed. It can be faster on very large, sparse problems,
        but may also lead to instabilities

    random_state : int seed, RandomState instance, or None (default)
        A pseudo random number generator used for the initialization
        of the lobpcg eigen vectors decomposition when eigen_solver == 'amg'
        and by the K-Means initialization.

    n_init : int, optional, default: 10
        Number of time the k-means algorithm will be run with different
        centroid seeds. The final results will be the best output of
        n_init consecutive runs in terms of inertia.

    eigen_tol : float, optional, default: 0.0
        Stopping criterion for eigendecomposition of the Laplacian matrix
        when using arpack eigen_solver.

    assign_labels : {'kmeans', 'discretize'}, default: 'kmeans'
        The strategy to use to assign labels in the embedding
        space. There are two ways to assign labels after the laplacian
        embedding.  k-means can be applied and is a popular choice. But it can
        also be sensitive to initialization. Discretization is another approach
        which is less sensitive to random initialization.

    kernel_params : dictionary of string to any, optional
        Parameters (keyword arguments) and values for kernel passed as
        callable object. Ignored by other kernels.

    Attributes
    ----------
    affinity_matrix_ : array-like, shape (n_samples, n_samples)
        Affinity matrix used for clustering. Available only if after calling
        ``fit``.

    labels_ :
        Labels of each point

    Notes
    -----
    If you have an affinity matrix, such as a distance matrix,
    for which 0 means identical elements, and high values means
    very dissimilar elements, it can be transformed in a
    similarity matrix that is well suited for the algorithm by
    applying the Gaussian (RBF, heat) kernel::

        np.exp(- X ** 2 / (2. * delta ** 2))

    Another alternative is to take a symmetric version of the k
    nearest neighbors connectivity matrix of the points.

    If the pyamg package is installed, it is used: this greatly
    speeds up computation.

    References
    ----------

    - Normalized cuts and image segmentation, 2000
      Jianbo Shi, Jitendra Malik
      http://citeseer.ist.psu.edu/viewdoc/summary?doi=10.1.1.160.2324

    - A Tutorial on Spectral Clustering, 2007
      Ulrike von Luxburg
      http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.165.9323

    - Multiclass spectral clustering, 2003
      Stella X. Yu, Jianbo Shi
      http://www1.icsi.berkeley.edu/~stellayu/publication/doc/2003kwayICCV.pdf
    """

    def __init__(self, n_clusters=8, eigen_solver=None, random_state=None,
                 n_init=10, gamma=1., affinity='rbf', n_neighbors=10,
                 eigen_tol=0.0, assign_labels='kmeans', degree=3, coef0=1,
                 kernel_params=None):
        self.n_clusters = n_clusters
        self.eigen_solver = eigen_solver
        self.random_state = random_state
        self.n_init = n_init
        self.gamma = gamma
        self.affinity = affinity
        self.n_neighbors = n_neighbors
        self.eigen_tol = eigen_tol
        self.assign_labels = assign_labels
        self.degree = degree
        self.coef0 = coef0
        self.kernel_params = kernel_params

    def fit(self, X, y=None):
        """Creates an affinity matrix for X using the selected affinity,
        then applies spectral clustering to this affinity matrix.

        Parameters
        ----------
        X : array-like or sparse matrix, shape (n_samples, n_features)
            OR, if affinity==`precomputed`, a precomputed affinity
            matrix of shape (n_samples, n_samples)
        """
        X = check_array(X, accept_sparse=['csr', 'csc', 'coo'],
                        dtype=np.float64)
        if X.shape[0] == X.shape[1] and self.affinity != "precomputed":
            # A square X is likely a precomputed affinity matrix passed by
            # users of the old API.  (Fixed: the concatenated message used
            # to read "``fit``now" because of a missing space between the
            # adjacent string literals.)
            warnings.warn("The spectral clustering API has changed. ``fit`` "
                          "now constructs an affinity matrix from data. To use"
                          " a custom affinity matrix, "
                          "set ``affinity=precomputed``.")

        if self.affinity == 'nearest_neighbors':
            # Symmetrize the (directed) kNN connectivity graph.
            connectivity = kneighbors_graph(X, n_neighbors=self.n_neighbors,
                                            include_self=True)
            self.affinity_matrix_ = 0.5 * (connectivity + connectivity.T)
        elif self.affinity == 'precomputed':
            self.affinity_matrix_ = X
        else:
            params = self.kernel_params
            if params is None:
                params = {}
            if not callable(self.affinity):
                # Named kernels take their parameters from the estimator;
                # irrelevant ones are dropped by filter_params=True.
                params['gamma'] = self.gamma
                params['degree'] = self.degree
                params['coef0'] = self.coef0
            self.affinity_matrix_ = pairwise_kernels(X, metric=self.affinity,
                                                     filter_params=True,
                                                     **params)

        random_state = check_random_state(self.random_state)
        self.labels_ = spectral_clustering(self.affinity_matrix_,
                                           n_clusters=self.n_clusters,
                                           eigen_solver=self.eigen_solver,
                                           random_state=random_state,
                                           n_init=self.n_init,
                                           eigen_tol=self.eigen_tol,
                                           assign_labels=self.assign_labels)
        return self

    @property
    def _pairwise(self):
        # Tells the common estimator checks that X is a kernel matrix when
        # the affinity is precomputed.
        return self.affinity == "precomputed"
| bsd-3-clause |
rohanp/scikit-learn | sklearn/gaussian_process/tests/test_gpr.py | 28 | 11870 | """Testing for Gaussian process regression """
# Author: Jan Hendrik Metzen <jhm@informatik.uni-bremen.de>
# Licence: BSD 3 clause
import numpy as np
from scipy.optimize import approx_fprime
from sklearn.gaussian_process import GaussianProcessRegressor
from sklearn.gaussian_process.kernels \
import RBF, ConstantKernel as C, WhiteKernel
from sklearn.utils.testing \
import (assert_true, assert_greater, assert_array_less,
assert_almost_equal, assert_equal)
def f(x):
    """Target function for the GP regression tests: f(x) = x * sin(x)."""
    return np.sin(x) * x
# Training inputs (column vector) and targets shared by all tests below.
X = np.atleast_2d([1., 3., 5., 6., 7., 8.]).T
# Held-out query points used for the predictive checks.
X2 = np.atleast_2d([2., 4., 5.5, 6.5, 7.5]).T
y = f(X).ravel()

# RBF kernel whose hyperparameters are excluded from optimization.
fixed_kernel = RBF(length_scale=1.0, length_scale_bounds="fixed")
# A spread of kernels: plain/fixed/bounded RBF plus amplitude-scaled and
# constant-augmented combinations.
kernels = [RBF(length_scale=1.0), fixed_kernel,
           RBF(length_scale=1.0, length_scale_bounds=(1e-3, 1e3)),
           C(1.0, (1e-2, 1e2))
           * RBF(length_scale=1.0, length_scale_bounds=(1e-3, 1e3)),
           C(1.0, (1e-2, 1e2))
           * RBF(length_scale=1.0, length_scale_bounds=(1e-3, 1e3))
           + C(1e-5, (1e-5, 1e2)),
           C(0.1, (1e-2, 1e2))
           * RBF(length_scale=1.0, length_scale_bounds=(1e-3, 1e3))
           + C(1e-5, (1e-5, 1e2))]
def test_gpr_interpolation():
    """A noise-free GP must interpolate: the predictions at the training
    inputs equal the targets and the predictive covariance vanishes."""
    for kernel in kernels:
        model = GaussianProcessRegressor(kernel=kernel)
        model.fit(X, y)
        y_pred, y_cov = model.predict(X, return_cov=True)
        assert_true(np.allclose(y_pred, y))
        assert_true(np.allclose(np.diag(y_cov), 0.))
def test_lml_improving():
    """Hyperparameter optimization must improve the log-marginal
    likelihood relative to the initial kernel parameters."""
    for kernel in kernels:
        if kernel == fixed_kernel:
            continue
        gpr = GaussianProcessRegressor(kernel=kernel).fit(X, y)
        lml_optimized = gpr.log_marginal_likelihood(gpr.kernel_.theta)
        lml_initial = gpr.log_marginal_likelihood(kernel.theta)
        assert_greater(lml_optimized, lml_initial)
def test_lml_precomputed():
    """log_marginal_likelihood() without arguments must return the value
    stored during fitting for the optimized hyperparameters."""
    for kernel in kernels:
        gpr = GaussianProcessRegressor(kernel=kernel).fit(X, y)
        recomputed = gpr.log_marginal_likelihood(gpr.kernel_.theta)
        stored = gpr.log_marginal_likelihood()
        assert_equal(recomputed, stored)
def test_converged_to_local_maximum():
    """After optimization the LML gradient must (almost) vanish, except
    for hyperparameters sitting exactly on one of their bounds."""
    for kernel in kernels:
        if kernel == fixed_kernel:
            continue
        gpr = GaussianProcessRegressor(kernel=kernel).fit(X, y)
        lml, gradient = \
            gpr.log_marginal_likelihood(gpr.kernel_.theta, True)
        theta = gpr.kernel_.theta
        at_bound = ((theta == gpr.kernel_.bounds[:, 0])
                    | (theta == gpr.kernel_.bounds[:, 1]))
        assert_true(np.all((np.abs(gradient) < 1e-4) | at_bound))
def test_solution_inside_bounds():
    """Optimized hyperparameters must remain within their bounds
    (up to a tiny numerical tolerance)."""
    for kernel in kernels:
        if kernel == fixed_kernel:
            continue
        gpr = GaussianProcessRegressor(kernel=kernel).fit(X, y)
        theta = gpr.kernel_.theta
        bounds = gpr.kernel_.bounds
        # Replace +inf upper bounds by the largest representable float so
        # the comparison below stays well defined.
        finite_max = np.finfo(theta.dtype).max
        bounds[~np.isfinite(bounds[:, 1]), 1] = finite_max
        tol = 1e-10
        assert_array_less(bounds[:, 0], theta + tol)
        assert_array_less(theta, bounds[:, 1] + tol)
def test_lml_gradient():
    """The analytic LML gradient must agree with a finite-difference
    approximation."""
    for kernel in kernels:
        gpr = GaussianProcessRegressor(kernel=kernel).fit(X, y)
        lml, analytic_grad = gpr.log_marginal_likelihood(kernel.theta, True)

        def lml_value(theta):
            return gpr.log_marginal_likelihood(theta, False)

        numeric_grad = approx_fprime(kernel.theta, lml_value, 1e-10)
        assert_almost_equal(analytic_grad, numeric_grad, 3)
def test_prior():
    """Before fitting, the GP prior has zero mean and a constant
    per-point variance determined by the kernel amplitude."""
    for kernel in kernels:
        gpr = GaussianProcessRegressor(kernel=kernel)
        y_mean, y_cov = gpr.predict(X, return_cov=True)
        assert_almost_equal(y_mean, 0, 5)
        variances = np.diag(y_cov)
        if len(gpr.kernel.theta) > 1:
            # XXX: quite hacky, works only for current kernels
            assert_almost_equal(variances, np.exp(kernel.theta[0]), 5)
        else:
            assert_almost_equal(variances, 1, 5)
def test_sample_statistics():
    """Empirical mean/variance of drawn samples must match the
    predictive mean and covariance diagonal."""
    for kernel in kernels:
        gpr = GaussianProcessRegressor(kernel=kernel).fit(X, y)
        y_mean, y_cov = gpr.predict(X2, return_cov=True)
        samples = gpr.sample_y(X2, 1000000)
        # More digits accuracy would require many more samples
        assert_almost_equal(np.mean(samples, 1), y_mean, 2)
        scale = np.diag(y_cov).max()
        assert_almost_equal(np.diag(y_cov) / scale,
                            np.var(samples, 1) / scale, 1)
def test_no_optimizer():
    """With optimizer=None the kernel hyperparameters stay untouched."""
    gpr = GaussianProcessRegressor(kernel=RBF(1.0), optimizer=None)
    gpr.fit(X, y)
    assert_equal(np.exp(gpr.kernel_.theta), 1.0)
def test_predict_cov_vs_std():
    """The predicted standard deviation must equal the square root of
    the predictive covariance diagonal."""
    for kernel in kernels:
        gpr = GaussianProcessRegressor(kernel=kernel).fit(X, y)
        _, y_cov = gpr.predict(X2, return_cov=True)
        _, y_std = gpr.predict(X2, return_std=True)
        assert_almost_equal(np.sqrt(np.diag(y_cov)), y_std)
def test_anisotropic_kernel():
    """GPR should recover meaningful anisotropic length-scales."""
    # The target varies ten times more slowly along the second feature
    # than along the first, so the learned length-scales should differ
    # by at least a factor of 5.
    rng = np.random.RandomState(0)
    data = rng.uniform(-1, 1, (50, 2))
    target = data[:, 0] + 0.1 * data[:, 1]
    gpr = GaussianProcessRegressor(kernel=RBF([1.0, 1.0])).fit(data, target)
    length_scales = np.exp(gpr.kernel_.theta)
    assert_greater(length_scales[1], length_scales[0] * 5)
def test_random_starts():
    """
    Test that an increasing number of random-starts of GP fitting only
    increases the log marginal likelihood of the chosen theta.
    """
    n_samples, n_features = 25, 3
    # Fixed seeds so the data set (and hence the likelihood landscape)
    # is reproducible across runs.
    np.random.seed(0)
    rng = np.random.RandomState(0)
    X = rng.randn(n_samples, n_features) * 2 - 1
    y = np.sin(X).sum(axis=1) + np.sin(3 * X).sum(axis=1) \
        + rng.normal(scale=0.1, size=n_samples)
    # Amplitude * anisotropic RBF + additive white noise.
    kernel = C(1.0, (1e-2, 1e2)) \
        * RBF(length_scale=[1.0] * n_features,
              length_scale_bounds=[(1e-4, 1e+2)] * n_features) \
        + WhiteKernel(noise_level=1e-5, noise_level_bounds=(1e-5, 1e1))
    last_lml = -np.inf
    for n_restarts_optimizer in range(9):
        gp = GaussianProcessRegressor(
            kernel=kernel, n_restarts_optimizer=n_restarts_optimizer,
            random_state=0,).fit(X, y)
        lml = gp.log_marginal_likelihood(gp.kernel_.theta)
        # More restarts may only improve the selected likelihood (up to
        # float32 rounding slack).
        assert_greater(lml, last_lml - np.finfo(np.float32).eps)
        last_lml = lml
def test_y_normalization():
    """ Test normalization of the target values in GP

    Fitting non-normalizing GP on normalized y and fitting normalizing GP
    on unnormalized y should yield identical results
    """
    y_mean = y.mean(0)
    y_norm = y - y_mean
    for kernel in kernels:
        # Fit non-normalizing GP on normalized y
        gpr = GaussianProcessRegressor(kernel=kernel)
        gpr.fit(X, y_norm)
        # Fit normalizing GP on unnormalized y
        gpr_norm = GaussianProcessRegressor(kernel=kernel, normalize_y=True)
        gpr_norm.fit(X, y)
        # Compare predicted mean, std-devs and covariances
        y_pred, y_pred_std = gpr.predict(X2, return_std=True)
        # Undo the manual centering so both models predict in the same units.
        y_pred = y_mean + y_pred
        y_pred_norm, y_pred_std_norm = gpr_norm.predict(X2, return_std=True)
        assert_almost_equal(y_pred, y_pred_norm)
        assert_almost_equal(y_pred_std, y_pred_std_norm)
        _, y_cov = gpr.predict(X2, return_cov=True)
        _, y_cov_norm = gpr_norm.predict(X2, return_cov=True)
        assert_almost_equal(y_cov, y_cov_norm)
def test_y_multioutput():
    """ Test that GPR can deal with multi-dimensional target values"""
    # Second output column is an exact scaled copy of the first.
    y_2d = np.vstack((y, y*2)).T
    # Test for fixed kernel that first dimension of 2d GP equals the output
    # of 1d GP and that second dimension is twice as large
    kernel = RBF(length_scale=1.0)
    gpr = GaussianProcessRegressor(kernel=kernel, optimizer=None,
                                   normalize_y=False)
    gpr.fit(X, y)
    gpr_2d = GaussianProcessRegressor(kernel=kernel, optimizer=None,
                                      normalize_y=False)
    gpr_2d.fit(X, y_2d)
    y_pred_1d, y_std_1d = gpr.predict(X2, return_std=True)
    y_pred_2d, y_std_2d = gpr_2d.predict(X2, return_std=True)
    _, y_cov_1d = gpr.predict(X2, return_cov=True)
    _, y_cov_2d = gpr_2d.predict(X2, return_cov=True)
    assert_almost_equal(y_pred_1d, y_pred_2d[:, 0])
    assert_almost_equal(y_pred_1d, y_pred_2d[:, 1] / 2)
    # Standard deviation and covariance do not depend on output
    assert_almost_equal(y_std_1d, y_std_2d)
    assert_almost_equal(y_cov_1d, y_cov_2d)
    # Samples drawn from both models must agree on the shared output.
    y_sample_1d = gpr.sample_y(X2, n_samples=10)
    y_sample_2d = gpr_2d.sample_y(X2, n_samples=10)
    assert_almost_equal(y_sample_1d, y_sample_2d[:, 0])
    # Test hyperparameter optimization
    for kernel in kernels:
        gpr = GaussianProcessRegressor(kernel=kernel, normalize_y=True)
        gpr.fit(X, y)
        gpr_2d = GaussianProcessRegressor(kernel=kernel, normalize_y=True)
        gpr_2d.fit(X, np.vstack((y, y)).T)
        assert_almost_equal(gpr.kernel_.theta, gpr_2d.kernel_.theta, 4)
def test_custom_optimizer():
    """ Test that GPR can use externally defined optimizers. """
    # Define a dummy optimizer that simply tests 1000 random hyperparameters
    def optimizer(obj_func, initial_theta, bounds):
        # Seeded random search within (clipped) bounds; keeps the best
        # objective value seen.
        rng = np.random.RandomState(0)
        theta_opt, func_min = \
            initial_theta, obj_func(initial_theta, eval_gradient=False)
        for _ in range(1000):
            theta = np.atleast_1d(rng.uniform(np.maximum(-2, bounds[:, 0]),
                                              np.minimum(1, bounds[:, 1])))
            f = obj_func(theta, eval_gradient=False)
            if f < func_min:
                theta_opt, func_min = theta, f
        return theta_opt, func_min
    for kernel in kernels:
        if kernel == fixed_kernel: continue
        gpr = GaussianProcessRegressor(kernel=kernel, optimizer=optimizer)
        gpr.fit(X, y)
        # Checks that optimizer improved marginal likelihood
        assert_greater(gpr.log_marginal_likelihood(gpr.kernel_.theta),
                       gpr.log_marginal_likelihood(gpr.kernel.theta))
def test_duplicate_input():
    """ Test GPR can handle two different output-values for the same input. """
    for kernel in kernels:
        # alpha > 0 regularizes the kernel matrix so the conflicting
        # targets do not make it singular.
        gpr_equal_inputs = \
            GaussianProcessRegressor(kernel=kernel, alpha=1e-2)
        gpr_similar_inputs = \
            GaussianProcessRegressor(kernel=kernel, alpha=1e-2)
        # Exact duplicate of the first sample with a conflicting target.
        X_ = np.vstack((X, X[0]))
        y_ = np.hstack((y, y[0] + 1))
        gpr_equal_inputs.fit(X_, y_)
        # Near-duplicate (1e-15 apart) of the first sample.
        X_ = np.vstack((X, X[0] + 1e-15))
        y_ = np.hstack((y, y[0] + 1))
        gpr_similar_inputs.fit(X_, y_)
        # Both posteriors should agree on a dense evaluation grid.
        X_test = np.linspace(0, 10, 100)[:, None]
        y_pred_equal, y_std_equal = \
            gpr_equal_inputs.predict(X_test, return_std=True)
        y_pred_similar, y_std_similar = \
            gpr_similar_inputs.predict(X_test, return_std=True)
        assert_almost_equal(y_pred_equal, y_pred_similar)
        assert_almost_equal(y_std_equal, y_std_similar)
| bsd-3-clause |
zingale/pyro2 | analysis/sedov_compare.py | 2 | 4306 | #!/usr/bin/env python3
from __future__ import print_function
import numpy as np
import matplotlib as mpl
import matplotlib.pyplot as plt
from util import io
import argparse
# use LaTeX text rendering with Computer Modern math fonts
mpl.rcParams["text.usetex"] = True
mpl.rcParams['mathtext.fontset'] = 'cm'
mpl.rcParams['mathtext.rm'] = 'serif'

# font sizes
mpl.rcParams['font.size'] = 12
mpl.rcParams['legend.fontsize'] = 'large'
mpl.rcParams['figure.titlesize'] = 'medium'
usage = """
compare the output for a Sedov problem with the exact solution contained
in cylindrical-sedov.out. To do this, we need to bin the Sedov data
into radial bins."""

# command-line interface: output image name plus the plotfile to analyze
parser = argparse.ArgumentParser(description=usage)
parser.add_argument("-o", type=str, default="sedov_compare.png",
                    metavar="plot.png", help="output file name")
parser.add_argument("plotfile", type=str, nargs=1,
                    help="the plotfile you wish to plot")

args = parser.parse_args()
# read the data and convert to the primitive variables (and velocity
# magnitude)
sim = io.read(args.plotfile[0])
myd = sim.cc_data
myg = myd.grid

# conserved variables on the cell-centered grid
dens = myd.get_var("density")
xmom = myd.get_var("x-momentum")
ymom = myd.get_var("y-momentum")
ener = myd.get_var("energy")

# primitive variables on the valid (non-ghost) cells
rho = dens.v()
u = np.sqrt(xmom.v()**2 + ymom.v()**2)/rho
# specific internal energy from the total energy
e = (ener.v() - 0.5*rho*u*u)/rho

gamma = myd.get_aux("gamma")
# gamma-law equation of state
p = rho*e*(gamma - 1.0)

# get the exact solution
exact = np.loadtxt("cylindrical-sedov.out")

# columns: 1 = radius, 2 = density, 4 = pressure, 5 = velocity
# (presumably -- confirm against the header of cylindrical-sedov.out)
x_exact = exact[:, 1]
rho_exact = exact[:, 2]
u_exact = exact[:, 5]
p_exact = exact[:, 4]
# e_exact = exact[:, 4]
# radially bin
# see http://code.google.com/p/agpy/source/browse/trunk/agpy/radialprofile.py?r=317
# for inspiration

# first define the bins
rmin = 0
rmax = np.sqrt(myg.xmax**2 + myg.ymax**2)

# np.int was removed in NumPy 1.24 -- use the builtin int instead
nbins = int(np.sqrt(myg.nx**2 + myg.ny**2))

# bins holds the edges, so there is one more value than actual bin
# bin_centers holds the center value of the bin
bins = np.linspace(rmin, rmax, nbins+1)
bin_centers = 0.5*(bins[1:] + bins[:-1])

# radius of each zone from the domain center
xcenter = 0.5*(myg.xmin + myg.xmax)
ycenter = 0.5*(myg.ymin + myg.ymax)

r = np.sqrt((myg.x2d[myg.ilo:myg.ihi+1, myg.jlo:myg.jhi+1] - xcenter)**2 +
            (myg.y2d[myg.ilo:myg.ihi+1, myg.jlo:myg.jhi+1] - ycenter)**2)
# bin the radii -- digitize returns an array with the same shape as
# the input array but with elements of the array specifying which bin
# that location belongs to.  The value of whichbin will be 1 if we are
# located in the bin defined by bins[0] to bins[1].  This means that
# there will be no 0s
whichbin = np.digitize(r.flat, bins)

# bincount counts the number of occurrences of each non-negative
# integer value in whichbin.  Each entry in ncount gives the number
# of occurrences of it in whichbin.  The length of ncount is
# set by the maximum value in whichbin
ncount = np.bincount(whichbin)

# now bin the associated data
rho_bin = np.zeros(len(ncount)-1, dtype=np.float64)
u_bin = np.zeros(len(ncount)-1, dtype=np.float64)
p_bin = np.zeros(len(ncount)-1, dtype=np.float64)

# remember that there are no whichbin == 0, since that corresponds
# to the left edge.  So we want whichbin == 1 to correspond to the
# first value of bin_centers (bin_centers[0]).  Start the loop at
# n = 1: the previous range(len(ncount)) loop also ran n = 0, which
# divided an empty sum by a zero count (0/0 -> NaN + RuntimeWarning)
# and scribbled the result into index -1 before it was overwritten.
for n in range(1, len(ncount)):
    rho_bin[n-1] = np.sum(rho.flat[whichbin == n])/np.sum(ncount[n])
    u_bin[n-1] = np.sum(u.flat[whichbin == n])/np.sum(ncount[n])
    p_bin[n-1] = np.sum(p.flat[whichbin == n])/np.sum(ncount[n])

bin_centers = bin_centers[0:len(ncount)-1]
# plot: three stacked panels comparing binned simulation data (markers)
# against the exact solution (lines)
fig, axes = plt.subplots(nrows=3, ncols=1, num=1)
plt.rc("font", size=10)

# density profile
ax = axes.flat[0]
ax.plot(x_exact, rho_exact, color="C0", zorder=-100, label="exact")
ax.scatter(bin_centers, rho_bin, marker="x", s=7, color="C1", label="simulation")
ax.set_ylabel(r"$\rho$")
ax.set_xlim(0, 0.6)
ax.legend(frameon=False, loc="best", fontsize="small")

# velocity-magnitude profile
ax = axes.flat[1]
ax.plot(x_exact, u_exact, color="C0", zorder=-100)
ax.scatter(bin_centers, u_bin, marker="x", s=7, color="C1")
ax.set_ylabel(r"$u$")
ax.set_xlim(0, 0.6)

# pressure profile
ax = axes.flat[2]
ax.plot(x_exact, p_exact, color="C0", zorder=-100)
ax.scatter(bin_centers, p_bin, marker="x", s=7, color="C1")
ax.set_ylabel(r"$p$")
ax.set_xlim(0, 0.6)
ax.set_xlabel(r"r")

plt.subplots_adjust(hspace=0.25)
fig.set_size_inches(4.5, 8.0)
plt.savefig(args.o, bbox_inches="tight")
| bsd-3-clause |
talbarda/kaggle_predict_house_prices | Build Model.py | 1 | 2629 | import matplotlib.pyplot as plt
import matplotlib.animation as animation
import numpy as np
import pandas as pd
import sklearn.linear_model as lm
from sklearn.model_selection import learning_curve
from sklearn.metrics import accuracy_score
from sklearn.metrics import make_scorer
from sklearn.model_selection import GridSearchCV
def get_model(estimator, parameters, X_train, y_train, scoring):
    """Grid-search `parameters` on `estimator` (using `scoring`) and
    return the refitted best estimator."""
    search = GridSearchCV(estimator, param_grid=parameters, scoring=scoring)
    search.fit(X_train, y_train)
    return search.best_estimator_
def plot_learning_curve(estimator, title, X, y, ylim=None, cv=None,
                        n_jobs=1, train_sizes=np.linspace(.1, 1.0, 5), scoring='accuracy'):
    """Plot training and cross-validation scores of `estimator` as a
    function of the number of training examples.

    Parameters: `title` labels the figure, `ylim` optionally clamps the
    y-axis, `cv`/`n_jobs`/`train_sizes`/`scoring` are forwarded to
    sklearn's ``learning_curve``.  Returns the ``matplotlib.pyplot``
    module so the caller can ``show()`` or save the figure.
    """
    plt.figure(figsize=(10,6))
    plt.title(title)
    if ylim is not None:
        plt.ylim(*ylim)
    plt.xlabel("Training examples")
    plt.ylabel(scoring)
    train_sizes, train_scores, test_scores = learning_curve(estimator, X, y, cv=cv, scoring=scoring,
                                                            n_jobs=n_jobs, train_sizes=train_sizes)
    # mean and spread of the scores across the CV folds
    train_scores_mean = np.mean(train_scores, axis=1)
    train_scores_std = np.std(train_scores, axis=1)
    test_scores_mean = np.mean(test_scores, axis=1)
    test_scores_std = np.std(test_scores, axis=1)
    plt.grid()
    # shade +/- one standard deviation around each mean curve
    plt.fill_between(train_sizes, train_scores_mean - train_scores_std,
                     train_scores_mean + train_scores_std, alpha=0.1,
                     color="r")
    plt.fill_between(train_sizes, test_scores_mean - test_scores_std,
                     test_scores_mean + test_scores_std, alpha=0.1, color="g")
    plt.plot(train_sizes, train_scores_mean, 'o-', color="r",
             label="Training score")
    plt.plot(train_sizes, test_scores_mean, 'o-', color="g",
             label="Cross-validation score")
    plt.legend(loc="best")
    return plt
# Load the Kaggle house-price data and integer-encode every column.
train = pd.read_csv('input/train.csv')
test = pd.read_csv('input/test.csv')
for c in train:
    train[c] = pd.Categorical(train[c].values).codes

# Use a small set of features that correlate strongly with the sale price.
X = train[['OverallQual', 'GarageArea', 'GarageCars', 'TotalBsmtSF', 'TotRmsAbvGrd', 'FullBath', 'GrLivArea']]
y = train.SalePrice

from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=0)

# NOTE(review): kept for compatibility, but classification accuracy is not a
# meaningful metric for a continuous regression target like SalePrice.
scoring = make_scorer(accuracy_score, greater_is_better=True)

from sklearn.linear_model import RidgeCV

# Fit the model instance on the training split.  The original code called
# ``RidgeCV.fit`` on the *class* (TypeError: missing self) and then used the
# never-fitted ``clf_ridge`` for prediction.
clf_ridge = RidgeCV()
clf_ridge.fit(X_train, y_train)

# SalePrice is continuous, so report R^2 on the held-out split instead of
# classification accuracy (which raises for continuous predictions).
print(clf_ridge.score(X_test, y_test))
print(clf_ridge)

# Learning curve with a regression scorer; the plot was previously
# mislabeled 'GaussianNB'.
plt = plot_learning_curve(clf_ridge, 'RidgeCV', X, y, cv=4, scoring='r2')
plt.show()
466152112/scikit-learn | sklearn/mixture/tests/test_gmm.py | 200 | 17427 | import unittest
import copy
import sys
from nose.tools import assert_true
import numpy as np
from numpy.testing import (assert_array_equal, assert_array_almost_equal,
assert_raises)
from scipy import stats
from sklearn import mixture
from sklearn.datasets.samples_generator import make_spd_matrix
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_raise_message
from sklearn.metrics.cluster import adjusted_rand_score
from sklearn.externals.six.moves import cStringIO as StringIO
rng = np.random.RandomState(0)
def test_sample_gaussian():
# Test sample generation from mixture.sample_gaussian where covariance
# is diagonal, spherical and full
n_features, n_samples = 2, 300
axis = 1
mu = rng.randint(10) * rng.rand(n_features)
cv = (rng.rand(n_features) + 1.0) ** 2
samples = mixture.sample_gaussian(
mu, cv, covariance_type='diag', n_samples=n_samples)
assert_true(np.allclose(samples.mean(axis), mu, atol=1.3))
assert_true(np.allclose(samples.var(axis), cv, atol=1.5))
# the same for spherical covariances
cv = (rng.rand() + 1.0) ** 2
samples = mixture.sample_gaussian(
mu, cv, covariance_type='spherical', n_samples=n_samples)
assert_true(np.allclose(samples.mean(axis), mu, atol=1.5))
assert_true(np.allclose(
samples.var(axis), np.repeat(cv, n_features), atol=1.5))
# and for full covariances
A = rng.randn(n_features, n_features)
cv = np.dot(A.T, A) + np.eye(n_features)
samples = mixture.sample_gaussian(
mu, cv, covariance_type='full', n_samples=n_samples)
assert_true(np.allclose(samples.mean(axis), mu, atol=1.3))
assert_true(np.allclose(np.cov(samples), cv, atol=2.5))
# Numerical stability check: in SciPy 0.12.0 at least, eigh may return
# tiny negative values in its second return value.
from sklearn.mixture import sample_gaussian
x = sample_gaussian([0, 0], [[4, 3], [1, .1]],
covariance_type='full', random_state=42)
print(x)
assert_true(np.isfinite(x).all())
def _naive_lmvnpdf_diag(X, mu, cv):
# slow and naive implementation of lmvnpdf
ref = np.empty((len(X), len(mu)))
stds = np.sqrt(cv)
for i, (m, std) in enumerate(zip(mu, stds)):
ref[:, i] = np.log(stats.norm.pdf(X, m, std)).sum(axis=1)
return ref
def test_lmvnpdf_diag():
# test a slow and naive implementation of lmvnpdf and
# compare it to the vectorized version (mixture.lmvnpdf) to test
# for correctness
n_features, n_components, n_samples = 2, 3, 10
mu = rng.randint(10) * rng.rand(n_components, n_features)
cv = (rng.rand(n_components, n_features) + 1.0) ** 2
X = rng.randint(10) * rng.rand(n_samples, n_features)
ref = _naive_lmvnpdf_diag(X, mu, cv)
lpr = mixture.log_multivariate_normal_density(X, mu, cv, 'diag')
assert_array_almost_equal(lpr, ref)
def test_lmvnpdf_spherical():
n_features, n_components, n_samples = 2, 3, 10
mu = rng.randint(10) * rng.rand(n_components, n_features)
spherecv = rng.rand(n_components, 1) ** 2 + 1
X = rng.randint(10) * rng.rand(n_samples, n_features)
cv = np.tile(spherecv, (n_features, 1))
reference = _naive_lmvnpdf_diag(X, mu, cv)
lpr = mixture.log_multivariate_normal_density(X, mu, spherecv,
'spherical')
assert_array_almost_equal(lpr, reference)
def test_lmvnpdf_full():
n_features, n_components, n_samples = 2, 3, 10
mu = rng.randint(10) * rng.rand(n_components, n_features)
cv = (rng.rand(n_components, n_features) + 1.0) ** 2
X = rng.randint(10) * rng.rand(n_samples, n_features)
fullcv = np.array([np.diag(x) for x in cv])
reference = _naive_lmvnpdf_diag(X, mu, cv)
lpr = mixture.log_multivariate_normal_density(X, mu, fullcv, 'full')
assert_array_almost_equal(lpr, reference)
def test_lvmpdf_full_cv_non_positive_definite():
n_features, n_samples = 2, 10
rng = np.random.RandomState(0)
X = rng.randint(10) * rng.rand(n_samples, n_features)
mu = np.mean(X, 0)
cv = np.array([[[-1, 0], [0, 1]]])
expected_message = "'covars' must be symmetric, positive-definite"
assert_raise_message(ValueError, expected_message,
mixture.log_multivariate_normal_density,
X, mu, cv, 'full')
def test_GMM_attributes():
n_components, n_features = 10, 4
covariance_type = 'diag'
g = mixture.GMM(n_components, covariance_type, random_state=rng)
weights = rng.rand(n_components)
weights = weights / weights.sum()
means = rng.randint(-20, 20, (n_components, n_features))
assert_true(g.n_components == n_components)
assert_true(g.covariance_type == covariance_type)
g.weights_ = weights
assert_array_almost_equal(g.weights_, weights)
g.means_ = means
assert_array_almost_equal(g.means_, means)
covars = (0.1 + 2 * rng.rand(n_components, n_features)) ** 2
g.covars_ = covars
assert_array_almost_equal(g.covars_, covars)
assert_raises(ValueError, g._set_covars, [])
assert_raises(ValueError, g._set_covars,
np.zeros((n_components - 2, n_features)))
assert_raises(ValueError, mixture.GMM, n_components=20,
covariance_type='badcovariance_type')
class GMMTester():
do_test_eval = True
def _setUp(self):
self.n_components = 10
self.n_features = 4
self.weights = rng.rand(self.n_components)
self.weights = self.weights / self.weights.sum()
self.means = rng.randint(-20, 20, (self.n_components, self.n_features))
self.threshold = -0.5
self.I = np.eye(self.n_features)
self.covars = {
'spherical': (0.1 + 2 * rng.rand(self.n_components,
self.n_features)) ** 2,
'tied': (make_spd_matrix(self.n_features, random_state=0)
+ 5 * self.I),
'diag': (0.1 + 2 * rng.rand(self.n_components,
self.n_features)) ** 2,
'full': np.array([make_spd_matrix(self.n_features, random_state=0)
+ 5 * self.I for x in range(self.n_components)])}
def test_eval(self):
if not self.do_test_eval:
return # DPGMM does not support setting the means and
# covariances before fitting There is no way of fixing this
# due to the variational parameters being more expressive than
# covariance matrices
g = self.model(n_components=self.n_components,
covariance_type=self.covariance_type, random_state=rng)
# Make sure the means are far apart so responsibilities.argmax()
# picks the actual component used to generate the observations.
g.means_ = 20 * self.means
g.covars_ = self.covars[self.covariance_type]
g.weights_ = self.weights
gaussidx = np.repeat(np.arange(self.n_components), 5)
n_samples = len(gaussidx)
X = rng.randn(n_samples, self.n_features) + g.means_[gaussidx]
ll, responsibilities = g.score_samples(X)
self.assertEqual(len(ll), n_samples)
self.assertEqual(responsibilities.shape,
(n_samples, self.n_components))
assert_array_almost_equal(responsibilities.sum(axis=1),
np.ones(n_samples))
assert_array_equal(responsibilities.argmax(axis=1), gaussidx)
def test_sample(self, n=100):
g = self.model(n_components=self.n_components,
covariance_type=self.covariance_type, random_state=rng)
# Make sure the means are far apart so responsibilities.argmax()
# picks the actual component used to generate the observations.
g.means_ = 20 * self.means
g.covars_ = np.maximum(self.covars[self.covariance_type], 0.1)
g.weights_ = self.weights
samples = g.sample(n)
self.assertEqual(samples.shape, (n, self.n_features))
def test_train(self, params='wmc'):
g = mixture.GMM(n_components=self.n_components,
covariance_type=self.covariance_type)
g.weights_ = self.weights
g.means_ = self.means
g.covars_ = 20 * self.covars[self.covariance_type]
# Create a training set by sampling from the predefined distribution.
X = g.sample(n_samples=100)
g = self.model(n_components=self.n_components,
covariance_type=self.covariance_type,
random_state=rng, min_covar=1e-1,
n_iter=1, init_params=params)
g.fit(X)
# Do one training iteration at a time so we can keep track of
# the log likelihood to make sure that it increases after each
# iteration.
trainll = []
for _ in range(5):
g.params = params
g.init_params = ''
g.fit(X)
trainll.append(self.score(g, X))
g.n_iter = 10
g.init_params = ''
g.params = params
g.fit(X) # finish fitting
# Note that the log likelihood will sometimes decrease by a
# very small amount after it has more or less converged due to
# the addition of min_covar to the covariance (to prevent
# underflow). This is why the threshold is set to -0.5
# instead of 0.
delta_min = np.diff(trainll).min()
self.assertTrue(
delta_min > self.threshold,
"The min nll increase is %f which is lower than the admissible"
" threshold of %f, for model %s. The likelihoods are %s."
% (delta_min, self.threshold, self.covariance_type, trainll))
def test_train_degenerate(self, params='wmc'):
# Train on degenerate data with 0 in some dimensions
# Create a training set by sampling from the predefined distribution.
X = rng.randn(100, self.n_features)
X.T[1:] = 0
g = self.model(n_components=2, covariance_type=self.covariance_type,
random_state=rng, min_covar=1e-3, n_iter=5,
init_params=params)
g.fit(X)
trainll = g.score(X)
self.assertTrue(np.sum(np.abs(trainll / 100 / X.shape[1])) < 5)
def test_train_1d(self, params='wmc'):
# Train on 1-D data
# Create a training set by sampling from the predefined distribution.
X = rng.randn(100, 1)
# X.T[1:] = 0
g = self.model(n_components=2, covariance_type=self.covariance_type,
random_state=rng, min_covar=1e-7, n_iter=5,
init_params=params)
g.fit(X)
trainll = g.score(X)
if isinstance(g, mixture.DPGMM):
self.assertTrue(np.sum(np.abs(trainll / 100)) < 5)
else:
self.assertTrue(np.sum(np.abs(trainll / 100)) < 2)
def score(self, g, X):
return g.score(X).sum()
class TestGMMWithSphericalCovars(unittest.TestCase, GMMTester):
covariance_type = 'spherical'
model = mixture.GMM
setUp = GMMTester._setUp
class TestGMMWithDiagonalCovars(unittest.TestCase, GMMTester):
covariance_type = 'diag'
model = mixture.GMM
setUp = GMMTester._setUp
class TestGMMWithTiedCovars(unittest.TestCase, GMMTester):
covariance_type = 'tied'
model = mixture.GMM
setUp = GMMTester._setUp
class TestGMMWithFullCovars(unittest.TestCase, GMMTester):
covariance_type = 'full'
model = mixture.GMM
setUp = GMMTester._setUp
def test_multiple_init():
# Test that multiple inits does not much worse than a single one
X = rng.randn(30, 5)
X[:10] += 2
g = mixture.GMM(n_components=2, covariance_type='spherical',
random_state=rng, min_covar=1e-7, n_iter=5)
train1 = g.fit(X).score(X).sum()
g.n_init = 5
train2 = g.fit(X).score(X).sum()
assert_true(train2 >= train1 - 1.e-2)
def test_n_parameters():
# Test that the right number of parameters is estimated
n_samples, n_dim, n_components = 7, 5, 2
X = rng.randn(n_samples, n_dim)
n_params = {'spherical': 13, 'diag': 21, 'tied': 26, 'full': 41}
for cv_type in ['full', 'tied', 'diag', 'spherical']:
g = mixture.GMM(n_components=n_components, covariance_type=cv_type,
random_state=rng, min_covar=1e-7, n_iter=1)
g.fit(X)
assert_true(g._n_parameters() == n_params[cv_type])
def test_1d_1component():
# Test all of the covariance_types return the same BIC score for
# 1-dimensional, 1 component fits.
n_samples, n_dim, n_components = 100, 1, 1
X = rng.randn(n_samples, n_dim)
g_full = mixture.GMM(n_components=n_components, covariance_type='full',
random_state=rng, min_covar=1e-7, n_iter=1)
g_full.fit(X)
g_full_bic = g_full.bic(X)
for cv_type in ['tied', 'diag', 'spherical']:
g = mixture.GMM(n_components=n_components, covariance_type=cv_type,
random_state=rng, min_covar=1e-7, n_iter=1)
g.fit(X)
assert_array_almost_equal(g.bic(X), g_full_bic)
def assert_fit_predict_correct(model, X):
model2 = copy.deepcopy(model)
predictions_1 = model.fit(X).predict(X)
predictions_2 = model2.fit_predict(X)
assert adjusted_rand_score(predictions_1, predictions_2) == 1.0
def test_fit_predict():
"""
test that gmm.fit_predict is equivalent to gmm.fit + gmm.predict
"""
lrng = np.random.RandomState(101)
n_samples, n_dim, n_comps = 100, 2, 2
mu = np.array([[8, 8]])
component_0 = lrng.randn(n_samples, n_dim)
component_1 = lrng.randn(n_samples, n_dim) + mu
X = np.vstack((component_0, component_1))
for m_constructor in (mixture.GMM, mixture.VBGMM, mixture.DPGMM):
model = m_constructor(n_components=n_comps, covariance_type='full',
min_covar=1e-7, n_iter=5,
random_state=np.random.RandomState(0))
assert_fit_predict_correct(model, X)
model = mixture.GMM(n_components=n_comps, n_iter=0)
z = model.fit_predict(X)
assert np.all(z == 0), "Quick Initialization Failed!"
def test_aic():
# Test the aic and bic criteria
n_samples, n_dim, n_components = 50, 3, 2
X = rng.randn(n_samples, n_dim)
SGH = 0.5 * (X.var() + np.log(2 * np.pi)) # standard gaussian entropy
for cv_type in ['full', 'tied', 'diag', 'spherical']:
g = mixture.GMM(n_components=n_components, covariance_type=cv_type,
random_state=rng, min_covar=1e-7)
g.fit(X)
aic = 2 * n_samples * SGH * n_dim + 2 * g._n_parameters()
bic = (2 * n_samples * SGH * n_dim +
np.log(n_samples) * g._n_parameters())
bound = n_dim * 3. / np.sqrt(n_samples)
assert_true(np.abs(g.aic(X) - aic) / n_samples < bound)
assert_true(np.abs(g.bic(X) - bic) / n_samples < bound)
def check_positive_definite_covars(covariance_type):
r"""Test that covariance matrices do not become non positive definite
Due to the accumulation of round-off errors, the computation of the
covariance matrices during the learning phase could lead to non-positive
definite covariance matrices. Namely the use of the formula:
.. math:: C = (\sum_i w_i x_i x_i^T) - \mu \mu^T
instead of:
.. math:: C = \sum_i w_i (x_i - \mu)(x_i - \mu)^T
while mathematically equivalent, was observed a ``LinAlgError`` exception,
when computing a ``GMM`` with full covariance matrices and fixed mean.
This function ensures that some later optimization will not introduce the
problem again.
"""
rng = np.random.RandomState(1)
# we build a dataset with 2 2d component. The components are unbalanced
# (respective weights 0.9 and 0.1)
X = rng.randn(100, 2)
X[-10:] += (3, 3) # Shift the 10 last points
gmm = mixture.GMM(2, params="wc", covariance_type=covariance_type,
min_covar=1e-3)
# This is a non-regression test for issue #2640. The following call used
# to trigger:
# numpy.linalg.linalg.LinAlgError: 2-th leading minor not positive definite
gmm.fit(X)
if covariance_type == "diag" or covariance_type == "spherical":
assert_greater(gmm.covars_.min(), 0)
else:
if covariance_type == "tied":
covs = [gmm.covars_]
else:
covs = gmm.covars_
for c in covs:
assert_greater(np.linalg.det(c), 0)
def test_positive_definite_covars():
# Check positive definiteness for all covariance types
for covariance_type in ["full", "tied", "diag", "spherical"]:
yield check_positive_definite_covars, covariance_type
def test_verbose_first_level():
# Create sample data
X = rng.randn(30, 5)
X[:10] += 2
g = mixture.GMM(n_components=2, n_init=2, verbose=1)
old_stdout = sys.stdout
sys.stdout = StringIO()
try:
g.fit(X)
finally:
sys.stdout = old_stdout
def test_verbose_second_level():
# Create sample data
X = rng.randn(30, 5)
X[:10] += 2
g = mixture.GMM(n_components=2, n_init=2, verbose=2)
old_stdout = sys.stdout
sys.stdout = StringIO()
try:
g.fit(X)
finally:
sys.stdout = old_stdout
| bsd-3-clause |
Obus/scikit-learn | examples/linear_model/plot_ols.py | 220 | 1940 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=========================================================
Linear Regression Example
=========================================================
This example uses the only the first feature of the `diabetes` dataset, in
order to illustrate a two-dimensional plot of this regression technique. The
straight line can be seen in the plot, showing how linear regression attempts
to draw a straight line that will best minimize the residual sum of squares
between the observed responses in the dataset, and the responses predicted by
the linear approximation.
The coefficients, the residual sum of squares and the variance score are also
calculated.
"""
print(__doc__)
# Code source: Jaques Grobler
# License: BSD 3 clause
import matplotlib.pyplot as plt
import numpy as np
from sklearn import datasets, linear_model
# Load the diabetes dataset
diabetes = datasets.load_diabetes()
# Use only one feature
diabetes_X = diabetes.data[:, np.newaxis, 2]
# Split the data into training/testing sets
diabetes_X_train = diabetes_X[:-20]
diabetes_X_test = diabetes_X[-20:]
# Split the targets into training/testing sets
diabetes_y_train = diabetes.target[:-20]
diabetes_y_test = diabetes.target[-20:]
# Create linear regression object
regr = linear_model.LinearRegression()
# Train the model using the training sets
regr.fit(diabetes_X_train, diabetes_y_train)
# The coefficients
print('Coefficients: \n', regr.coef_)
# The mean square error
print("Residual sum of squares: %.2f"
% np.mean((regr.predict(diabetes_X_test) - diabetes_y_test) ** 2))
# Explained variance score: 1 is perfect prediction
print('Variance score: %.2f' % regr.score(diabetes_X_test, diabetes_y_test))
# Plot outputs
plt.scatter(diabetes_X_test, diabetes_y_test, color='black')
plt.plot(diabetes_X_test, regr.predict(diabetes_X_test), color='blue',
linewidth=3)
plt.xticks(())
plt.yticks(())
plt.show()
| bsd-3-clause |
hitszxp/scikit-learn | examples/tree/plot_tree_regression_multioutput.py | 43 | 1791 | """
===================================================================
Multi-output Decision Tree Regression
===================================================================
An example to illustrate multi-output regression with decision tree.
The :ref:`decision trees <tree>`
is used to predict simultaneously the noisy x and y observations of a circle
given a single underlying feature. As a result, it learns local linear
regressions approximating the circle.
We can see that if the maximum depth of the tree (controlled by the
`max_depth` parameter) is set too high, the decision trees learn too fine
details of the training data and learn from the noise, i.e. they overfit.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn.tree import DecisionTreeRegressor
# Create a random dataset
rng = np.random.RandomState(1)
X = np.sort(200 * rng.rand(100, 1) - 100, axis=0)
y = np.array([np.pi * np.sin(X).ravel(), np.pi * np.cos(X).ravel()]).T
y[::5, :] += (0.5 - rng.rand(20, 2))
# Fit regression model
clf_1 = DecisionTreeRegressor(max_depth=2)
clf_2 = DecisionTreeRegressor(max_depth=5)
clf_3 = DecisionTreeRegressor(max_depth=8)
clf_1.fit(X, y)
clf_2.fit(X, y)
clf_3.fit(X, y)
# Predict
X_test = np.arange(-100.0, 100.0, 0.01)[:, np.newaxis]
y_1 = clf_1.predict(X_test)
y_2 = clf_2.predict(X_test)
y_3 = clf_3.predict(X_test)
# Plot the results
plt.figure()
plt.scatter(y[:, 0], y[:, 1], c="k", label="data")
plt.scatter(y_1[:, 0], y_1[:, 1], c="g", label="max_depth=2")
plt.scatter(y_2[:, 0], y_2[:, 1], c="r", label="max_depth=5")
plt.scatter(y_3[:, 0], y_3[:, 1], c="b", label="max_depth=8")
plt.xlim([-6, 6])
plt.ylim([-6, 6])
plt.xlabel("data")
plt.ylabel("target")
plt.title("Multi-output Decision Tree Regression")
plt.legend()
plt.show()
| bsd-3-clause |
dsullivan7/scikit-learn | examples/applications/svm_gui.py | 287 | 11161 | """
==========
Libsvm GUI
==========
A simple graphical frontend for Libsvm mainly intended for didactic
purposes. You can create data points by point and click and visualize
the decision region induced by different kernels and parameter settings.
To create positive examples click the left mouse button; to create
negative examples click the right button.
If all examples are from the same class, it uses a one-class SVM.
"""
from __future__ import division, print_function
print(__doc__)
# Author: Peter Prettenhoer <peter.prettenhofer@gmail.com>
#
# License: BSD 3 clause
import matplotlib
matplotlib.use('TkAgg')
from matplotlib.backends.backend_tkagg import FigureCanvasTkAgg
from matplotlib.backends.backend_tkagg import NavigationToolbar2TkAgg
from matplotlib.figure import Figure
from matplotlib.contour import ContourSet
import Tkinter as Tk
import sys
import numpy as np
from sklearn import svm
from sklearn.datasets import dump_svmlight_file
from sklearn.externals.six.moves import xrange
y_min, y_max = -50, 50
x_min, x_max = -50, 50
class Model(object):
"""The Model which hold the data. It implements the
observable in the observer pattern and notifies the
registered observers on change event.
"""
def __init__(self):
self.observers = []
self.surface = None
self.data = []
self.cls = None
self.surface_type = 0
def changed(self, event):
"""Notify the observers. """
for observer in self.observers:
observer.update(event, self)
def add_observer(self, observer):
"""Register an observer. """
self.observers.append(observer)
def set_surface(self, surface):
self.surface = surface
def dump_svmlight_file(self, file):
data = np.array(self.data)
X = data[:, 0:2]
y = data[:, 2]
dump_svmlight_file(X, y, file)
class Controller(object):
def __init__(self, model):
self.model = model
self.kernel = Tk.IntVar()
self.surface_type = Tk.IntVar()
# Whether or not a model has been fitted
self.fitted = False
def fit(self):
print("fit the model")
train = np.array(self.model.data)
X = train[:, 0:2]
y = train[:, 2]
C = float(self.complexity.get())
gamma = float(self.gamma.get())
coef0 = float(self.coef0.get())
degree = int(self.degree.get())
kernel_map = {0: "linear", 1: "rbf", 2: "poly"}
if len(np.unique(y)) == 1:
clf = svm.OneClassSVM(kernel=kernel_map[self.kernel.get()],
gamma=gamma, coef0=coef0, degree=degree)
clf.fit(X)
else:
clf = svm.SVC(kernel=kernel_map[self.kernel.get()], C=C,
gamma=gamma, coef0=coef0, degree=degree)
clf.fit(X, y)
if hasattr(clf, 'score'):
print("Accuracy:", clf.score(X, y) * 100)
X1, X2, Z = self.decision_surface(clf)
self.model.clf = clf
self.model.set_surface((X1, X2, Z))
self.model.surface_type = self.surface_type.get()
self.fitted = True
self.model.changed("surface")
def decision_surface(self, cls):
delta = 1
x = np.arange(x_min, x_max + delta, delta)
y = np.arange(y_min, y_max + delta, delta)
X1, X2 = np.meshgrid(x, y)
Z = cls.decision_function(np.c_[X1.ravel(), X2.ravel()])
Z = Z.reshape(X1.shape)
return X1, X2, Z
def clear_data(self):
self.model.data = []
self.fitted = False
self.model.changed("clear")
def add_example(self, x, y, label):
self.model.data.append((x, y, label))
self.model.changed("example_added")
# update decision surface if already fitted.
self.refit()
def refit(self):
"""Refit the model if already fitted. """
if self.fitted:
self.fit()
class View(object):
"""Test docstring. """
def __init__(self, root, controller):
f = Figure()
ax = f.add_subplot(111)
ax.set_xticks([])
ax.set_yticks([])
ax.set_xlim((x_min, x_max))
ax.set_ylim((y_min, y_max))
canvas = FigureCanvasTkAgg(f, master=root)
canvas.show()
canvas.get_tk_widget().pack(side=Tk.TOP, fill=Tk.BOTH, expand=1)
canvas._tkcanvas.pack(side=Tk.TOP, fill=Tk.BOTH, expand=1)
canvas.mpl_connect('button_press_event', self.onclick)
toolbar = NavigationToolbar2TkAgg(canvas, root)
toolbar.update()
self.controllbar = ControllBar(root, controller)
self.f = f
self.ax = ax
self.canvas = canvas
self.controller = controller
self.contours = []
self.c_labels = None
self.plot_kernels()
def plot_kernels(self):
self.ax.text(-50, -60, "Linear: $u^T v$")
self.ax.text(-20, -60, "RBF: $\exp (-\gamma \| u-v \|^2)$")
self.ax.text(10, -60, "Poly: $(\gamma \, u^T v + r)^d$")
def onclick(self, event):
if event.xdata and event.ydata:
if event.button == 1:
self.controller.add_example(event.xdata, event.ydata, 1)
elif event.button == 3:
self.controller.add_example(event.xdata, event.ydata, -1)
def update_example(self, model, idx):
x, y, l = model.data[idx]
if l == 1:
color = 'w'
elif l == -1:
color = 'k'
self.ax.plot([x], [y], "%so" % color, scalex=0.0, scaley=0.0)
def update(self, event, model):
if event == "examples_loaded":
for i in xrange(len(model.data)):
self.update_example(model, i)
if event == "example_added":
self.update_example(model, -1)
if event == "clear":
self.ax.clear()
self.ax.set_xticks([])
self.ax.set_yticks([])
self.contours = []
self.c_labels = None
self.plot_kernels()
if event == "surface":
self.remove_surface()
self.plot_support_vectors(model.clf.support_vectors_)
self.plot_decision_surface(model.surface, model.surface_type)
self.canvas.draw()
def remove_surface(self):
"""Remove old decision surface."""
if len(self.contours) > 0:
for contour in self.contours:
if isinstance(contour, ContourSet):
for lineset in contour.collections:
lineset.remove()
else:
contour.remove()
self.contours = []
def plot_support_vectors(self, support_vectors):
"""Plot the support vectors by placing circles over the
corresponding data points and adds the circle collection
to the contours list."""
cs = self.ax.scatter(support_vectors[:, 0], support_vectors[:, 1],
s=80, edgecolors="k", facecolors="none")
self.contours.append(cs)
def plot_decision_surface(self, surface, type):
X1, X2, Z = surface
if type == 0:
levels = [-1.0, 0.0, 1.0]
linestyles = ['dashed', 'solid', 'dashed']
colors = 'k'
self.contours.append(self.ax.contour(X1, X2, Z, levels,
colors=colors,
linestyles=linestyles))
elif type == 1:
self.contours.append(self.ax.contourf(X1, X2, Z, 10,
cmap=matplotlib.cm.bone,
origin='lower', alpha=0.85))
self.contours.append(self.ax.contour(X1, X2, Z, [0.0], colors='k',
linestyles=['solid']))
else:
raise ValueError("surface type unknown")
class ControllBar(object):
def __init__(self, root, controller):
fm = Tk.Frame(root)
kernel_group = Tk.Frame(fm)
Tk.Radiobutton(kernel_group, text="Linear", variable=controller.kernel,
value=0, command=controller.refit).pack(anchor=Tk.W)
Tk.Radiobutton(kernel_group, text="RBF", variable=controller.kernel,
value=1, command=controller.refit).pack(anchor=Tk.W)
Tk.Radiobutton(kernel_group, text="Poly", variable=controller.kernel,
value=2, command=controller.refit).pack(anchor=Tk.W)
kernel_group.pack(side=Tk.LEFT)
valbox = Tk.Frame(fm)
controller.complexity = Tk.StringVar()
controller.complexity.set("1.0")
c = Tk.Frame(valbox)
Tk.Label(c, text="C:", anchor="e", width=7).pack(side=Tk.LEFT)
Tk.Entry(c, width=6, textvariable=controller.complexity).pack(
side=Tk.LEFT)
c.pack()
controller.gamma = Tk.StringVar()
controller.gamma.set("0.01")
g = Tk.Frame(valbox)
Tk.Label(g, text="gamma:", anchor="e", width=7).pack(side=Tk.LEFT)
Tk.Entry(g, width=6, textvariable=controller.gamma).pack(side=Tk.LEFT)
g.pack()
controller.degree = Tk.StringVar()
controller.degree.set("3")
d = Tk.Frame(valbox)
Tk.Label(d, text="degree:", anchor="e", width=7).pack(side=Tk.LEFT)
Tk.Entry(d, width=6, textvariable=controller.degree).pack(side=Tk.LEFT)
d.pack()
controller.coef0 = Tk.StringVar()
controller.coef0.set("0")
r = Tk.Frame(valbox)
Tk.Label(r, text="coef0:", anchor="e", width=7).pack(side=Tk.LEFT)
Tk.Entry(r, width=6, textvariable=controller.coef0).pack(side=Tk.LEFT)
r.pack()
valbox.pack(side=Tk.LEFT)
cmap_group = Tk.Frame(fm)
Tk.Radiobutton(cmap_group, text="Hyperplanes",
variable=controller.surface_type, value=0,
command=controller.refit).pack(anchor=Tk.W)
Tk.Radiobutton(cmap_group, text="Surface",
variable=controller.surface_type, value=1,
command=controller.refit).pack(anchor=Tk.W)
cmap_group.pack(side=Tk.LEFT)
train_button = Tk.Button(fm, text='Fit', width=5,
command=controller.fit)
train_button.pack()
fm.pack(side=Tk.LEFT)
Tk.Button(fm, text='Clear', width=5,
command=controller.clear_data).pack(side=Tk.LEFT)
def get_parser():
from optparse import OptionParser
op = OptionParser()
op.add_option("--output",
action="store", type="str", dest="output",
help="Path where to dump data.")
return op
def main(argv):
op = get_parser()
opts, args = op.parse_args(argv[1:])
root = Tk.Tk()
model = Model()
controller = Controller(model)
root.wm_title("Scikit-learn Libsvm GUI")
view = View(root, controller)
model.add_observer(view)
Tk.mainloop()
if opts.output:
model.dump_svmlight_file(opts.output)
if __name__ == "__main__":
main(sys.argv)
| bsd-3-clause |
rahuldhote/scikit-learn | sklearn/tests/test_metaestimators.py | 226 | 4954 | """Common tests for metaestimators"""
import functools
import numpy as np
from sklearn.base import BaseEstimator
from sklearn.externals.six import iterkeys
from sklearn.datasets import make_classification
from sklearn.utils.testing import assert_true, assert_false, assert_raises
from sklearn.pipeline import Pipeline
from sklearn.grid_search import GridSearchCV, RandomizedSearchCV
from sklearn.feature_selection import RFE, RFECV
from sklearn.ensemble import BaggingClassifier
class DelegatorData(object):
def __init__(self, name, construct, skip_methods=(),
fit_args=make_classification()):
self.name = name
self.construct = construct
self.fit_args = fit_args
self.skip_methods = skip_methods
DELEGATING_METAESTIMATORS = [
DelegatorData('Pipeline', lambda est: Pipeline([('est', est)])),
DelegatorData('GridSearchCV',
lambda est: GridSearchCV(
est, param_grid={'param': [5]}, cv=2),
skip_methods=['score']),
DelegatorData('RandomizedSearchCV',
lambda est: RandomizedSearchCV(
est, param_distributions={'param': [5]}, cv=2, n_iter=1),
skip_methods=['score']),
DelegatorData('RFE', RFE,
skip_methods=['transform', 'inverse_transform', 'score']),
DelegatorData('RFECV', RFECV,
skip_methods=['transform', 'inverse_transform', 'score']),
DelegatorData('BaggingClassifier', BaggingClassifier,
skip_methods=['transform', 'inverse_transform', 'score',
'predict_proba', 'predict_log_proba', 'predict'])
]
def test_metaestimator_delegation():
# Ensures specified metaestimators have methods iff subestimator does
def hides(method):
@property
def wrapper(obj):
if obj.hidden_method == method.__name__:
raise AttributeError('%r is hidden' % obj.hidden_method)
return functools.partial(method, obj)
return wrapper
class SubEstimator(BaseEstimator):
def __init__(self, param=1, hidden_method=None):
self.param = param
self.hidden_method = hidden_method
def fit(self, X, y=None, *args, **kwargs):
self.coef_ = np.arange(X.shape[1])
return True
def _check_fit(self):
if not hasattr(self, 'coef_'):
raise RuntimeError('Estimator is not fit')
@hides
def inverse_transform(self, X, *args, **kwargs):
self._check_fit()
return X
@hides
def transform(self, X, *args, **kwargs):
self._check_fit()
return X
@hides
def predict(self, X, *args, **kwargs):
self._check_fit()
return np.ones(X.shape[0])
@hides
def predict_proba(self, X, *args, **kwargs):
self._check_fit()
return np.ones(X.shape[0])
@hides
def predict_log_proba(self, X, *args, **kwargs):
self._check_fit()
return np.ones(X.shape[0])
@hides
def decision_function(self, X, *args, **kwargs):
self._check_fit()
return np.ones(X.shape[0])
@hides
def score(self, X, *args, **kwargs):
self._check_fit()
return 1.0
methods = [k for k in iterkeys(SubEstimator.__dict__)
if not k.startswith('_') and not k.startswith('fit')]
methods.sort()
for delegator_data in DELEGATING_METAESTIMATORS:
delegate = SubEstimator()
delegator = delegator_data.construct(delegate)
for method in methods:
if method in delegator_data.skip_methods:
continue
assert_true(hasattr(delegate, method))
assert_true(hasattr(delegator, method),
msg="%s does not have method %r when its delegate does"
% (delegator_data.name, method))
# delegation before fit raises an exception
assert_raises(Exception, getattr(delegator, method),
delegator_data.fit_args[0])
delegator.fit(*delegator_data.fit_args)
for method in methods:
if method in delegator_data.skip_methods:
continue
# smoke test delegation
getattr(delegator, method)(delegator_data.fit_args[0])
for method in methods:
if method in delegator_data.skip_methods:
continue
delegate = SubEstimator(hidden_method=method)
delegator = delegator_data.construct(delegate)
assert_false(hasattr(delegate, method))
assert_false(hasattr(delegator, method),
msg="%s has method %r when its delegate does not"
% (delegator_data.name, method))
| bsd-3-clause |
saiwing-yeung/scikit-learn | examples/linear_model/plot_theilsen.py | 100 | 3846 | """
====================
Theil-Sen Regression
====================
Computes a Theil-Sen Regression on a synthetic dataset.
See :ref:`theil_sen_regression` for more information on the regressor.
Compared to the OLS (ordinary least squares) estimator, the Theil-Sen
estimator is robust against outliers. It has a breakdown point of about 29.3%
in case of a simple linear regression which means that it can tolerate
arbitrary corrupted data (outliers) of up to 29.3% in the two-dimensional
case.
The estimation of the model is done by calculating the slopes and intercepts
of a subpopulation of all possible combinations of p subsample points. If an
intercept is fitted, p must be greater than or equal to n_features + 1. The
final slope and intercept is then defined as the spatial median of these
slopes and intercepts.
In certain cases Theil-Sen performs better than :ref:`RANSAC
<ransac_regression>` which is also a robust method. This is illustrated in the
second example below where outliers with respect to the x-axis perturb RANSAC.
Tuning the ``residual_threshold`` parameter of RANSAC remedies this but in
general a priori knowledge about the data and the nature of the outliers is
needed.
Due to the computational complexity of Theil-Sen it is recommended to use it
only for small problems in terms of number of samples and features. For larger
problems the ``max_subpopulation`` parameter restricts the magnitude of all
possible combinations of p subsample points to a randomly chosen subset and
therefore also limits the runtime. Therefore, Theil-Sen is applicable to larger
problems with the drawback of losing some of its mathematical properties since
it then works on a random subset.
"""
# Author: Florian Wilhelm -- <florian.wilhelm@gmail.com>
# License: BSD 3 clause
import time

import numpy as np
import matplotlib.pyplot as plt

from sklearn.linear_model import LinearRegression, TheilSenRegressor
from sklearn.linear_model import RANSACRegressor

print(__doc__)

# Estimators to compare; random_state is fixed so the robust fits are
# reproducible from run to run.
estimators = [('OLS', LinearRegression()),
              ('Theil-Sen', TheilSenRegressor(random_state=42)),
              ('RANSAC', RANSACRegressor(random_state=42)), ]
colors = {'OLS': 'turquoise', 'Theil-Sen': 'gold', 'RANSAC': 'lightgreen'}
lw = 2  # line width shared by all plotted fits

##############################################################################
# Outliers only in the y direction

np.random.seed(0)
n_samples = 200
# Linear model y = 3*x + N(2, 0.1**2)
x = np.random.randn(n_samples)
w = 3.
c = 2.
noise = 0.1 * np.random.randn(n_samples)
y = w * x + c + noise

# 10% outliers: corrupt the last 20 targets only (x stays clean)
y[-20:] += -20 * x[-20:]
X = x[:, np.newaxis]

plt.scatter(x, y, color='indigo', marker='x', s=40)
line_x = np.array([-3, 3])
for name, estimator in estimators:
    t0 = time.time()
    estimator.fit(X, y)
    elapsed_time = time.time() - t0
    # Predict at the two endpoints only: enough to draw the fitted line.
    y_pred = estimator.predict(line_x.reshape(2, 1))
    plt.plot(line_x, y_pred, color=colors[name], linewidth=lw,
             label='%s (fit time: %.2fs)' % (name, elapsed_time))

plt.axis('tight')
plt.legend(loc='upper left')
plt.title("Corrupt y")

##############################################################################
# Outliers in the X direction

np.random.seed(0)
# Linear model y = 3*x + N(2, 0.1**2)
x = np.random.randn(n_samples)
noise = 0.1 * np.random.randn(n_samples)
y = 3 * x + 2 + noise

# 10% outliers: this time corrupt the predictors as well as the targets,
# which is the case that perturbs RANSAC (see module docstring).
x[-20:] = 9.9
y[-20:] += 22
X = x[:, np.newaxis]

plt.figure()
plt.scatter(x, y, color='indigo', marker='x', s=40)
line_x = np.array([-3, 10])
for name, estimator in estimators:
    t0 = time.time()
    estimator.fit(X, y)
    elapsed_time = time.time() - t0
    y_pred = estimator.predict(line_x.reshape(2, 1))
    plt.plot(line_x, y_pred, color=colors[name], linewidth=lw,
             label='%s (fit time: %.2fs)' % (name, elapsed_time))

plt.axis('tight')
plt.legend(loc='upper left')
plt.title("Corrupt x")
plt.show()
| bsd-3-clause |
sgoodm/python-distance-rasters | src/distancerasters/main.py | 1 | 4523 |
from __future__ import absolute_import
import time
import numpy as np
from affine import Affine
from scipy.spatial import cKDTree
from .utils import export_raster, convert_index_to_coords, calc_haversine_distance
def build_distance_array(raster_array, affine=None, output=None, conditional=None):
    """build distance array from raster array

    For every cell of ``raster_array``, computes the distance to the nearest
    cell matching ``conditional`` using a kd-tree over the matching cells'
    indices.  NOTE: this module uses Python 2 syntax (print statements,
    ``xrange``).

    Args
        raster_array (np array):
            array to use for distance calculations
        affine (Affine): [optional]
            affine transformation defining spatial raster data; when given,
            distances are returned in meters instead of raw pixel units
        output (str): [optional, requires affine arg]
            path to export distance array as geotiff raster
        conditional (function): [optional]
            function which applies conditional to raster_array in order to
            define which elements distances are calculate to
            (default function finds distance to elements with a value of 1)

    Returns
        resulting distance array (same shape as ``raster_array``)

    Raises
        Exception: on an invalid ``affine``, ``output`` without ``affine``,
            or a non-callable ``conditional``.
    """
    if affine is not None and not isinstance(affine, Affine):
        raise Exception('If provided, affine must be an instance of Affine class')

    if affine is None and output is not None:
        raise Exception('Affine is required for output')

    if affine is not None:
        # Cell width in CRS units.  The km conversions below assume a
        # geographic (decimal degree) CRS -- TODO confirm with callers.
        pixel_size = affine[0]

    nrows, ncols = raster_array.shape

    # output array for distance raster results
    z = np.empty(raster_array.shape, dtype=float)

    def default_conditional(rarray):
        # default: measure distance to cells whose value is exactly 1
        return (rarray == 1)

    if conditional is None:
        conditional = default_conditional
    elif not callable(conditional):
        raise Exception('Conditional must be function')

    # ----------------------------------------
    t_start = time.time()

    # kd-tree instance built over the (row, col) indices of matching cells
    # https://docs.scipy.org/doc/scipy-0.18.1/reference/generated/scipy.spatial.cKDTree.html
    #
    # Alternatives (slower during testing):
    # from sklearn.neighbors import KDTree, BallTree
    # http://scikit-learn.org/stable/modules/generated/sklearn.neighbors.KDTree.html
    # http://scikit-learn.org/stable/modules/generated/sklearn.neighbors.BallTree.html
    k = cKDTree(
        data=np.array(np.where(conditional(raster_array))).T,
        leafsize=64
    )

    print "Tree build time: {0} seconds".format(time.time() - t_start)

    # ----------------------------------------
    print "Building distance array..."
    for r in xrange(nrows):
        for c in xrange(ncols):
            cur_index = (r, c)

            # nearest matching cell in index space (Euclidean over indices)
            min_dist, min_index = k.query([cur_index])
            min_dist = min_dist[0]
            min_index = k.data[min_index[0]]

            if affine is not None:
                if cur_index[1] == min_index[1]:
                    # columns are same meaning nearest is either vertical or self.
                    # no correction needed, just convert to km
                    # (~111.321 km per degree -- presumably geographic CRS;
                    #  verify against callers)
                    dd_min_dist = min_dist * pixel_size
                    km_min_dist = dd_min_dist * 111.321
                else:
                    # nearest cell is on a different column: use great-circle
                    # distance between the two cell-center coordinates
                    km_min_dist = calc_haversine_distance(
                        convert_index_to_coords(cur_index, affine),
                        convert_index_to_coords(min_index, affine)
                    )
                val = km_min_dist * 1000
            else:
                # no georeferencing: distance stays in pixel units
                val = min_dist

            z[r][c] = val

    print "Total run time: {0} seconds".format(round(time.time() - t_start, 2))

    # ----------------------------------------
    if output is not None:
        export_raster(z, affine, output)

    return z
| bsd-3-clause |
google-research/google-research | using_dl_to_annotate_protein_universe/hmm_baseline/hmmer_utils_test.py | 1 | 16884 | # coding=utf-8
# Copyright 2021 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for module inference.py."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl import flags
from absl.testing import absltest
from absl.testing import parameterized
import hmmer_utils
import numpy as np
import pandas as pd
import tensorflow.compat.v1 as tf
import test_util
import util
FLAGS = flags.FLAGS
# Made by running hmmsearch --tblout pfam_output/PF00131.19.txt
# pfam_hmm/PF00131.19.hmm testseqs.fasta
# Column layout is significant: parse_hmmer_output splits on whitespace.
_HMMER_TBLOUT = """
# --- full sequence ---- --- best 2 domain ---- --- domain number estimation ----
# target name accession query name accession E-value score bias E-value score bias exp reg clu ov env dom rep inc description of target
# ------------------- ---------- -------------------- ---------- --------- ------ ----- --------- ------ ----- --- --- --- --- --- --- --- --- ---------------------
MT4_CANLF/1-62_PF00131.19 - PF00131.19 - 1.3e-15 60.9 58.4 1.4e-15 60.8 58.4 1.0 1 0 0 1 1 1 1 -
E4X7F8_OIKDI/453-561_PF05033.15 - PF00131.19 - 0.6 13.9 5.0 0.6 13.9 5.0 2.3 2 0 0 2 2 2 0 -
#
# Program: hmmsearch
# Version: 3.1b2 (February 2015)
# Pipeline mode: SEARCH
# Query file: pfam_hmm/PF00131.19.hmm
# Target file: testseqs.fasta
# Option settings: hmmsearch --tblout pfam_output/PF00131.19.txt pfam_hmm/PF00131.19.hmm testseqs.fasta
# Date: Sat Oct 20 12:26:56 2018
# [ok]
"""

# Same tblout framing but with zero hit lines; exercises the no-match
# sentinel path of parse_hmmer_output.
_HMMER_TBLOUT_NO_OUTPUT = """
# --- full sequence ---- --- best 1 domain ---- --- domain number estimation ----
# target name accession query name accession E-value score bias E-value score bias exp reg clu ov env dom rep inc description of target
# ------------------- ---------- -------------------- ---------- --------- ------ ----- --------- ------ ----- --- --- --- --- --- --- --- --- ---------------------
#
# Program: hmmsearch
# Version: 3.1b2 (February 2015)
# Pipeline mode: SEARCH
# Query file: pfam_hmm/PF00131.19.hmm
# Target file: testseqs.fasta
# Option settings: hmmsearch --tblout pfam_output/PF00131.19.txt pfam_hmm/PF00131.19.hmm testseqs.fasta
# Date: Sat Oct 20 12:26:56 2018
# [ok]
"""
class HMMerUtilsTest(parameterized.TestCase):
  """Unit tests for the parsing/filtering helpers in hmmer_utils."""

  def testGetFamilyNameFromUnderscores(self):
    """Family accession is the suffix after the last underscore."""
    # Sequence name contains underscores.
    actual = hmmer_utils.get_family_name_from('V2R_HUMAN/54-325_PF00001.20')
    expected = 'PF00001.20'
    self.assertEqual(actual, expected)

  def testGetFamilyNameFromNoUnderscores(self):
    """Works even when the sequence-name part has no underscores."""
    # Sequence name has no underscores.
    actual = hmmer_utils.get_family_name_from('Q12345_alanine')
    expected = 'alanine'
    self.assertEqual(actual, expected)

  def testGetSequenceNameFrom(self):
    """Sequence name is everything before the family accession suffix."""
    actual = hmmer_utils.get_sequence_name_from('V2R_HUMAN/54-325_PF00001.20')
    expected = 'V2R_HUMAN/54-325'
    self.assertEqual(actual, expected)

  def testFormatAsCsvHmmerOutput(self):
    """CSV field order must match HMMER_OUTPUT_CSV_COLUMN_HEADERS."""
    hmmer_output = hmmer_utils.HMMEROutput(
        sequence_name='MT4_CANLF/1-62',
        predicted_label='PF00131.19',
        true_label='PF12345.6',
        score=60.9,
        domain_evalue=1.3e-15,
    )
    # order should match hmmer_utils.HMMER_OUTPUT_CSV_COLUMN_HEADERS.
    # (That is, util.PREDICTION_FILE_COLUMN_NAMES + [DATAFRAME_SCORE_NAME_KEY].)
    expected = 'MT4_CANLF/1-62,PF12345.6,PF00131.19,60.9,1.3e-15'
    actual = hmmer_output.format_as_csv()
    self.assertEqual(actual, expected)

  def testParseHmmOutput(self):
    """hmmsearch tblout rows become HMMEROutput records; empty output yields the sentinel record."""
    actual_hmmsearch = list(
        hmmer_utils.parse_hmmer_output(_HMMER_TBLOUT, 'PF00131.19'))
    print(actual_hmmsearch)
    expected_hmmsearch = [
        hmmer_utils.HMMEROutput(
            sequence_name='MT4_CANLF/1-62',
            predicted_label='PF00131.19',
            true_label='PF00131.19',
            score=60.9,
            domain_evalue=1.3e-15,
        ),
        hmmer_utils.HMMEROutput(
            sequence_name='E4X7F8_OIKDI/453-561',
            predicted_label='PF00131.19',
            true_label='PF05033.15',
            score=13.9,
            domain_evalue=0.6,
        ),
    ]
    self.assertEqual(actual_hmmsearch, expected_hmmsearch)

    # Test that for the output file with no hits, the sentinel value is written.
    actual_no_output = list(
        hmmer_utils.parse_hmmer_output(_HMMER_TBLOUT_NO_OUTPUT, 'PF00131.19'))
    print(actual_no_output)
    expected_no_output = [
        hmmer_utils.HMMEROutput(
            sequence_name='no_sequence/0-0',
            predicted_label='PF00131.19',
            true_label='PF00000.0',
            score=hmmer_utils.NO_SEQUENCE_MATCH_SCORE_SENTINEL,
            domain_evalue=hmmer_utils.NO_SEQUENCE_MATCH_DOMAIN_EVALUE_SENTINEL,
        )
    ]
    self.assertEqual(actual_no_output, expected_no_output)

  def testFilterFastaFileBySequenceName(self):
    """Keeps only the fasta records whose sequence names are requested."""
    fasta_file_contents = ('>A0A0F7V1V9_TOXGV/243-280_PF10417.9\n'
                           'MIREVEKNGGKQVCPANWRRGEKMMHASFEGVKNYLGQ\n'
                           '>Q1QPP6_NITHX/169-202_PF10417.9\n'
                           'ALQATMSGQKLAPANWQPGETLLLPADEKTQKDT\n'
                           '>Q2K9A0_RHIEC/165-202_PF10417.9\n'
                           'SIQLTAKHQVATPANWNQGEDVIITAAVSNDDAIARFG\n')
    input_fasta_file_name = test_util.tmpfile('input_fasta')
    with tf.io.gfile.GFile(input_fasta_file_name, 'w') as input_fasta_file:
      input_fasta_file.write(fasta_file_contents)

    actual = list(
        hmmer_utils.filter_fasta_file_by_sequence_name(
            input_fasta_file_name, ['Q2K9A0_RHIEC/165-202']))
    expected = [('>Q2K9A0_RHIEC/165-202_PF10417.9\n'
                 'SIQLTAKHQVATPANWNQGEDVIITAAVSNDDAIARFG\n')]
    self.assertEqual(actual, expected)

  def testAllSequenceNamesFromFastaFile(self):
    """Extracts sequence names (family suffix stripped) from every record."""
    fasta_file_contents = ('>A0A0F7V1V9_TOXGV/243-280_PF10417.9\n'
                           'MIREVEKNGGKQVCPANWRRGEKMMHASFEGVKNYLGQ\n'
                           '>Q1QPP6_NITHX/169-202_PF10417.9\n'
                           'ALQATMSGQKLAPANWQPGETLLLPADEKTQKDT\n'
                           '>Q2K9A0_RHIEC/165-202_PF10417.9\n'
                           'SIQLTAKHQVATPANWNQGEDVIITAAVSNDDAIARFG\n')
    input_fasta_file_name = test_util.tmpfile('input_fasta')
    with tf.io.gfile.GFile(input_fasta_file_name, 'w') as input_fasta_file:
      input_fasta_file.write(fasta_file_contents)

    actual = hmmer_utils.all_sequence_names_from_fasta_file(
        input_fasta_file_name)
    expected = [
        'A0A0F7V1V9_TOXGV/243-280', 'Q1QPP6_NITHX/169-202',
        'Q2K9A0_RHIEC/165-202'
    ]
    np.testing.assert_array_equal(actual, expected)

  def testSequencesWithNoPrediction(self):
    """Reports sequence names absent from the predictions dataframe."""
    hmmer_predictions = pd.DataFrame(
        [['A4YXG4_BRASO/106-134', 'PF00001.1', 'PF00001.1', 0.5]],
        columns=util.PREDICTION_FILE_COLUMN_NAMES)
    all_sequence_names = ['A_DIFFERENTSEQNAME/1-2', 'A4YXG4_BRASO/106-134']

    actual = hmmer_utils.sequences_with_no_prediction(
        all_sequence_names=all_sequence_names,
        hmmer_predictions=hmmer_predictions)
    expected = {'A_DIFFERENTSEQNAME/1-2'}
    self.assertEqual(actual, expected)

  def testYieldTopElByScoreForEachSequenceName(self):
    """Only the highest-scoring row per sequence name survives."""
    input_df = pd.DataFrame([
        ['SAME_SEQ_NAME', 'PF00264.20', 'PF000264.20', 10.1, 1e-3],
        ['SAME_SEQ_NAME', 'PF00264.20', 'PF00001.3', -65432.0, 100.],
    ],
                            columns=hmmer_utils.HMMER_OUTPUT_CSV_COLUMN_HEADERS)

    actual = pd.concat(
        list(
            hmmer_utils.yield_top_el_by_score_for_each_sequence_name(input_df)))
    expected = pd.DataFrame(
        [['SAME_SEQ_NAME', 'PF00264.20', 'PF000264.20', 10.1, 1e-3]],
        columns=hmmer_utils.HMMER_OUTPUT_CSV_COLUMN_HEADERS)

    self.assertLen(actual, 1)
    self.assertCountEqual(
        actual.to_dict('records'), expected.to_dict('records'))

  # pylint: disable=line-too-long
  # Disable line-too-long because phmmer output strings are long, and we
  # want to paste them verbatim.
  @parameterized.named_parameters(
      dict(
          testcase_name=('multiple seq outputs, no repeats, same families, all '
                         'identifiers have predictions'),
          phmmer_output="""# --- full sequence ---- --- best 1 domain ---- --- domain number estimation ----
# target name accession query name accession E-value score bias E-value score bias exp reg clu ov env dom rep inc description of target
# ------------------- ---------- -------------------- ---------- --------- ------ ----- --------- ------ ----- --- --- --- --- --- --- --- --- ---------------------
OPSB_HUMAN/51-303_PF00001.20 - OPSB_HUMAN/51-303_PF00001.20 - 2.5e-61 193.4 64.0 1.6e-22 66.2 0.9 8.7 8 1 0 8 8 8 8 -
OPS3_DROME/75-338_PF00001.20 - OPS3_DROME/75-338_PF00001.20 - 2.1e-69 219.9 65.6 3.2e-21 62.0 0.3 9.8 10 1 0 10 10 10 10 -
#
# Program: phmmer
# Version: 3.1b2 (February 2015)
# Pipeline mode: SEARCH
# Query file: /storage/hmm_train/PF00001.20.fasta
# Target file: /storage/hmm_train/PF00001.20.fasta
# Option settings: phmmer -o /dev/null --tblout /dev/stdout -E 10 /storage/hmm_train/PF00001.20.fasta /storage/hmm_train/PF00001.20.fasta
# Date: Wed Nov 21 09:26:50 2018
# [ok]
""",
          all_identifiers=[
              'OPSB_HUMAN/51-303_PF00001.20',
              'OPS3_DROME/75-338_PF00001.20',
          ],
          expected=[
              hmmer_utils.HMMEROutput(
                  sequence_name='OPSB_HUMAN/51-303',
                  true_label='PF00001.20',
                  predicted_label='PF00001.20',
                  score=193.4,
                  domain_evalue=2.5e-61,
              ),
              hmmer_utils.HMMEROutput(
                  sequence_name='OPS3_DROME/75-338',
                  true_label='PF00001.20',
                  predicted_label='PF00001.20',
                  score=219.9,
                  domain_evalue=2.1e-69,
              )
          ],
      ),
      dict(
          testcase_name=('one seq output, no repeats, same families, some '
                         'identifiers do not have predictions'),
          phmmer_output="""# --- full sequence ---- --- best 1 domain ---- --- domain number estimation ----
# target name accession query name accession E-value score bias E-value score bias exp reg clu ov env dom rep inc description of target
# ------------------- ---------- -------------------- ---------- --------- ------ ----- --------- ------ ----- --- --- --- --- --- --- --- --- ---------------------
OPSB_HUMAN/51-303_PF00001.20 - OPSB_HUMAN/51-303_PF00001.20 - 2.5e-61 193.4 64.0 1.6e-22 66.2 0.9 8.7 8 1 0 8 8 8 8 -
#
# Program: phmmer
# Version: 3.1b2 (February 2015)
# Pipeline mode: SEARCH
# Query file: /storage/hmm_train/PF00001.20.fasta
# Target file: /storage/hmm_train/PF00001.20.fasta
# Option settings: phmmer -o /dev/null --tblout /dev/stdout -E 10 /storage/hmm_train/PF00001.20.fasta /storage/hmm_train/PF00001.20.fasta
# Date: Wed Nov 21 09:26:50 2018
# [ok]
""",
          expected=[
              hmmer_utils.HMMEROutput(
                  sequence_name='OPSB_HUMAN/51-303',
                  true_label='PF00001.20',
                  predicted_label='PF00001.20',
                  score=193.4,
                  domain_evalue=2.5e-61,
              ),
              hmmer_utils.HMMEROutput(
                  sequence_name='THIS_ISNOTSEEN/1-111',
                  true_label='PF09876.5',
                  predicted_label='PF00000.0',
                  score=hmmer_utils.NO_SEQUENCE_MATCH_SCORE_SENTINEL,
                  domain_evalue=hmmer_utils.NO_SEQUENCE_MATCH_DOMAIN_EVALUE_SENTINEL,
              )
          ],
          all_identifiers=[
              'OPSB_HUMAN/51-303_PF00001.20',
              'THIS_ISNOTSEEN/1-111_PF09876.5',
          ]),
      dict(
          testcase_name=('one seq output, with repeats, all identifiers have '
                         'predictions'),
          phmmer_output="""# --- full sequence ---- --- best 1 domain ---- --- domain number estimation ----
# target name accession query name accession E-value score bias E-value score bias exp reg clu ov env dom rep inc description of target
# ------------------- ---------- -------------------- ---------- --------- ------ ----- --------- ------ ----- --- --- --- --- --- --- --- --- ---------------------
SSR1_HUMAN/75-323_PF00001.20 - CXCR5_HUMAN/68-322_PF00001.20 - 1e-06 14.4 0.9 1e-06 14.4 0.9 2.0 2 0 0 2 2 2 2 -
CX3C1_RAT/49-294_PF00001.20 - CXCR5_HUMAN/68-322_PF00001.20 - 1.3e-06 14.1 8.7 4.5e-06 12.3 1.7 2.6 3 0 0 3 3 3 2 -
#
# Program: phmmer
# Version: 3.1b2 (February 2015)
# Pipeline mode: SEARCH
# Query file: /storage/hmm_train/PF00001.20.fasta
# Target file: /storage/hmm_train/PF00001.20.fasta
# Option settings: phmmer -o /dev/null --tblout /dev/stdout -E 10 /storage/hmm_train/PF00001.20.fasta /storage/hmm_train/PF00001.20.fasta
# Date: Wed Nov 21 09:26:50 2018
# [ok]
""",
          expected=[
              hmmer_utils.HMMEROutput(
                  sequence_name='CXCR5_HUMAN/68-322',
                  true_label='PF00001.20',
                  predicted_label='PF00001.20',
                  score=14.4,
                  domain_evalue=1e-6,
              ),
              hmmer_utils.HMMEROutput(
                  sequence_name='CXCR5_HUMAN/68-322',
                  true_label='PF00001.20',
                  predicted_label='PF00001.20',
                  score=14.1,
                  domain_evalue=1.3e-6,
              )
          ],
          all_identifiers=[
              'CXCR5_HUMAN/68-322_PF00001.20',
          ]),
      dict(
          testcase_name=('no hits for any input sequence'),
          phmmer_output="""
# --- full sequence ---- --- best 1 domain ---- --- domain number estimation ----
# target name accession query name accession E-value score bias E-value score bias exp reg clu ov env dom rep inc description of target
#------------------- ---------- -------------------- ---------- --------- ------ ----- --------- ------ ----- --- --- --- --- --- --- --- --- ---------------------
#
# Program: phmmer
# Version: 3.1b2 (February 2015)
# Pipeline mode: SEARCH
# Query file: -
# Target file: all_trainseqs.fasta
# Option settings: phmmer -o /dev/null --tblout /dev/stdout -E 10.0 - all_trainseqs.fasta
# Date: Mon Oct 29 12:45:57 2018
# [ok]
""",
          all_identifiers=[
              'CXCR5_HUMAN/68-322_PF00001.20',
          ],
          expected=[
              hmmer_utils.HMMEROutput(
                  sequence_name='CXCR5_HUMAN/68-322',
                  true_label='PF00001.20',
                  predicted_label=hmmer_utils.NO_SEQUENCE_MATCH_FAMILY_NAME_SENTINEL,
                  score=hmmer_utils.NO_SEQUENCE_MATCH_SCORE_SENTINEL,
                  domain_evalue=hmmer_utils.NO_SEQUENCE_MATCH_DOMAIN_EVALUE_SENTINEL,
              ),
          ],
      ),
  )
  def testParsePhmmer(self, phmmer_output, all_identifiers, expected):
    """parse_phmmer_output covers hits, repeats, and missing identifiers."""
    actual = hmmer_utils.parse_phmmer_output(phmmer_output, all_identifiers)
    self.assertEqual(actual, expected)

  # pylint: enable=line-too-long
# Run the absl test runner when this module is executed directly.
if __name__ == '__main__':
  absltest.main()
| apache-2.0 |
yask123/scikit-learn | sklearn/utils/testing.py | 71 | 26178 | """Testing utilities."""
# Copyright (c) 2011, 2012
# Authors: Pietro Berkes,
# Andreas Muller
# Mathieu Blondel
# Olivier Grisel
# Arnaud Joly
# Denis Engemann
# License: BSD 3 clause
import os
import inspect
import pkgutil
import warnings
import sys
import re
import platform
import scipy as sp
import scipy.io
from functools import wraps
try:
# Python 2
from urllib2 import urlopen
from urllib2 import HTTPError
except ImportError:
# Python 3+
from urllib.request import urlopen
from urllib.error import HTTPError
import tempfile
import shutil
import os.path as op
import atexit
# WindowsError only exist on Windows
try:
WindowsError
except NameError:
WindowsError = None
import sklearn
from sklearn.base import BaseEstimator
from sklearn.externals import joblib
# Conveniently import all assertions in one place.
from nose.tools import assert_equal
from nose.tools import assert_not_equal
from nose.tools import assert_true
from nose.tools import assert_false
from nose.tools import assert_raises
from nose.tools import raises
from nose import SkipTest
from nose import with_setup
from numpy.testing import assert_almost_equal
from numpy.testing import assert_array_equal
from numpy.testing import assert_array_almost_equal
from numpy.testing import assert_array_less
import numpy as np
from sklearn.base import (ClassifierMixin, RegressorMixin, TransformerMixin,
ClusterMixin)
__all__ = ["assert_equal", "assert_not_equal", "assert_raises",
"assert_raises_regexp", "raises", "with_setup", "assert_true",
"assert_false", "assert_almost_equal", "assert_array_equal",
"assert_array_almost_equal", "assert_array_less",
"assert_less", "assert_less_equal",
"assert_greater", "assert_greater_equal"]
# Compatibility shims: use nose's assertions when available, otherwise
# provide local fallbacks with matching signatures and messages.
try:
    from nose.tools import assert_in, assert_not_in
except ImportError:
    # Nose < 1.0.0

    def assert_in(x, container):
        assert_true(x in container, msg="%r in %r" % (x, container))

    def assert_not_in(x, container):
        assert_false(x in container, msg="%r in %r" % (x, container))

try:
    from nose.tools import assert_raises_regex
except ImportError:
    # for Python 2
    def assert_raises_regex(expected_exception, expected_regexp,
                            callable_obj=None, *args, **kwargs):
        """Helper function to check for message patterns in exceptions"""

        not_raised = False
        try:
            callable_obj(*args, **kwargs)
            not_raised = True
        except expected_exception as e:
            error_message = str(e)
            if not re.compile(expected_regexp).search(error_message):
                raise AssertionError("Error message should match pattern "
                                     "%r. %r does not." %
                                     (expected_regexp, error_message))
        if not_raised:
            raise AssertionError("%s not raised by %s" %
                                 (expected_exception.__name__,
                                  callable_obj.__name__))

# assert_raises_regexp is deprecated in Python 3.4 in favor of
# assert_raises_regex but lets keep the backward compat in scikit-learn with
# the old name for now
assert_raises_regexp = assert_raises_regex
def _assert_less(a, b, msg=None):
message = "%r is not lower than %r" % (a, b)
if msg is not None:
message += ": " + msg
assert a < b, message
def _assert_greater(a, b, msg=None):
message = "%r is not greater than %r" % (a, b)
if msg is not None:
message += ": " + msg
assert a > b, message
def assert_less_equal(a, b, msg=None):
    """Assert that ``a <= b``, appending ``msg`` to the failure message."""
    parts = ["%r is not lower than or equal to %r" % (a, b)]
    if msg is not None:
        parts.append(msg)
    assert a <= b, ": ".join(parts)
def assert_greater_equal(a, b, msg=None):
    """Assert that ``a >= b``, appending ``msg`` to the failure message."""
    parts = ["%r is not greater than or equal to %r" % (a, b)]
    if msg is not None:
        parts.append(msg)
    assert a >= b, ": ".join(parts)
def assert_warns(warning_class, func, *args, **kw):
    """Test that a certain warning occurs.

    Parameters
    ----------
    warning_class : the warning class
        The class to test for, e.g. UserWarning.

    func : callable
        Calable object to trigger warnings.

    *args : the positional arguments to `func`.

    **kw : the keyword arguments to `func`

    Returns
    -------
    result : the return value of `func`
    """
    # very important to avoid uncontrolled state propagation
    clean_warning_registry()
    with warnings.catch_warnings(record=True) as w:
        # Cause all warnings to always be triggered.
        warnings.simplefilter("always")
        # Trigger a warning.
        result = func(*args, **kw)
        if hasattr(np, 'VisibleDeprecationWarning'):
            # Filter out numpy-specific warnings in numpy >= 1.9
            w = [e for e in w
                 if e.category is not np.VisibleDeprecationWarning]

        # Verify some things: at least one warning was raised...
        if not len(w) > 0:
            raise AssertionError("No warning raised when calling %s"
                                 % func.__name__)

        # ...and at least one of them is exactly warning_class (identity
        # check: subclasses of warning_class do NOT count here).
        found = any(warning.category is warning_class for warning in w)
        if not found:
            raise AssertionError("%s did not give warning: %s( is %s)"
                                 % (func.__name__, warning_class, w))
    return result
def assert_warns_message(warning_class, message, func, *args, **kw):
    """Test that a certain warning occurs and with a certain message.

    Parameters
    ----------
    warning_class : the warning class
        The class to test for, e.g. UserWarning.

    message : str | callable
        The entire message or a substring to  test for. If callable,
        it takes a string as argument and will trigger an assertion error
        if it returns `False`.

    func : callable
        Calable object to trigger warnings.

    *args : the positional arguments to `func`.

    **kw : the keyword arguments to `func`.

    Returns
    -------
    result : the return value of `func`
    """
    # very important to avoid uncontrolled state propagation
    clean_warning_registry()
    with warnings.catch_warnings(record=True) as w:
        # Cause all warnings to always be triggered.
        warnings.simplefilter("always")
        if hasattr(np, 'VisibleDeprecationWarning'):
            # Let's not catch the numpy internal DeprecationWarnings
            warnings.simplefilter('ignore', np.VisibleDeprecationWarning)
        # Trigger a warning.
        result = func(*args, **kw)
        # Verify some things
        if not len(w) > 0:
            raise AssertionError("No warning raised when calling %s"
                                 % func.__name__)

        # Unlike assert_warns above, subclasses of warning_class DO count.
        found = [issubclass(warning.category, warning_class) for warning in w]
        if not any(found):
            raise AssertionError("No warning raised for %s with class "
                                 "%s"
                                 % (func.__name__, warning_class))

        message_found = False
        # Checks the message of all warnings belong to warning_class
        for index in [i for i, x in enumerate(found) if x]:
            # substring will match, the entire message with typo won't
            msg = w[index].message  # For Python 3 compatibility
            msg = str(msg.args[0] if hasattr(msg, 'args') else msg)
            if callable(message):  # add support for certain tests
                check_in_message = message
            else:
                check_in_message = lambda msg: message in msg

            if check_in_message(msg):
                message_found = True
                break

        if not message_found:
            raise AssertionError("Did not receive the message you expected "
                                 "('%s') for <%s>, got: '%s'"
                                 % (message, func.__name__, msg))

    return result
# To remove when we support numpy 1.7
def assert_no_warnings(func, *args, **kw):
    """Fail if calling ``func(*args, **kw)`` emits any warning.

    Returns the value returned by ``func``.  Numpy's
    VisibleDeprecationWarning (numpy >= 1.9) is ignored.
    """
    # XXX: once we may depend on python >= 2.6, this can be replaced by the
    # warnings module context manager.
    # very important to avoid uncontrolled state propagation
    clean_warning_registry()
    with warnings.catch_warnings(record=True) as w:
        warnings.simplefilter('always')

        result = func(*args, **kw)
        if hasattr(np, 'VisibleDeprecationWarning'):
            # Filter out numpy-specific warnings in numpy >= 1.9
            w = [e for e in w
                 if e.category is not np.VisibleDeprecationWarning]

        if len(w) > 0:
            raise AssertionError("Got warnings when calling %s: %s"
                                 % (func.__name__, w))
    return result
def ignore_warnings(obj=None):
    """ Context manager and decorator to ignore warnings

    Note. Using this (in both variants) will clear all warnings
    from all python modules loaded. In case you need to test
    cross-module-warning-logging this is not your tool of choice.

    Examples
    --------
    >>> with ignore_warnings():
    ...     warnings.warn('buhuhuhu')

    >>> def nasty_warn():
    ...    warnings.warn('buhuhuhu')
    ...    print(42)

    >>> ignore_warnings(nasty_warn)()
    42
    """
    # Used as ``@ignore_warnings`` (obj is the decorated function) -> wrap it;
    # used as ``with ignore_warnings():`` (obj is None) -> context manager.
    if not callable(obj):
        return _IgnoreWarnings()
    return _ignore_warnings(obj)
def _ignore_warnings(fn):
    """Decorator to catch and hide warnings without visual nesting.

    Wraps ``fn`` so that every warning emitted during the call is recorded
    (and therefore suppressed) instead of being shown, and returns ``fn``'s
    result unchanged.
    """
    @wraps(fn)
    def wrapper(*args, **kwargs):
        # very important to avoid uncontrolled state propagation
        clean_warning_registry()
        with warnings.catch_warnings(record=True):
            warnings.simplefilter('always')
            return fn(*args, **kwargs)
        # NOTE: the original body ended with ``w[:] = []`` here, but that
        # statement was unreachable (it followed an unconditional return
        # inside the ``with`` block) and has been removed.

    return wrapper
class _IgnoreWarnings(object):
    """Improved and simplified Python warnings context manager

    Copied from Python 2.7.5 and modified as required.
    """

    def __init__(self):
        """
        Parameters
        ==========
        category : warning class
            The category to filter. Defaults to Warning. If None,
            all categories will be muted.
        """
        # NOTE(review): the documented ``category`` parameter is not actually
        # accepted; this manager always records (and thereby mutes) every
        # warning category.
        self._record = True
        self._module = sys.modules['warnings']
        self._entered = False
        self.log = []

    def __repr__(self):
        args = []
        if self._record:
            args.append("record=True")
        if self._module is not sys.modules['warnings']:
            args.append("module=%r" % self._module)
        name = type(self).__name__
        return "%s(%s)" % (name, ", ".join(args))

    def __enter__(self):
        # Entering swaps the warnings module's filters and showwarning hook;
        # both are restored in __exit__.  Re-entering is a programming error.
        clean_warning_registry()  # be safe and not propagate state + chaos
        warnings.simplefilter('always')
        if self._entered:
            raise RuntimeError("Cannot enter %r twice" % self)
        self._entered = True
        self._filters = self._module.filters
        self._module.filters = self._filters[:]
        self._showwarning = self._module.showwarning
        if self._record:
            self.log = []

            def showwarning(*args, **kwargs):
                # Capture each warning into self.log instead of displaying it.
                self.log.append(warnings.WarningMessage(*args, **kwargs))
            self._module.showwarning = showwarning
            return self.log
        else:
            return None

    def __exit__(self, *exc_info):
        if not self._entered:
            raise RuntimeError("Cannot exit %r without entering first" % self)
        # Restore the filters and hook saved in __enter__, then drop the
        # recorded warnings so state does not leak between uses.
        self._module.filters = self._filters
        self._module.showwarning = self._showwarning
        self.log[:] = []
        clean_warning_registry()  # be safe and not propagate state + chaos
# Prefer nose's assert_less/assert_greater when available; otherwise fall
# back to the local shims defined above.
try:
    from nose.tools import assert_less
except ImportError:
    assert_less = _assert_less

try:
    from nose.tools import assert_greater
except ImportError:
    assert_greater = _assert_greater
def _assert_allclose(actual, desired, rtol=1e-7, atol=0,
err_msg='', verbose=True):
actual, desired = np.asanyarray(actual), np.asanyarray(desired)
if np.allclose(actual, desired, rtol=rtol, atol=atol):
return
msg = ('Array not equal to tolerance rtol=%g, atol=%g: '
'actual %s, desired %s') % (rtol, atol, actual, desired)
raise AssertionError(msg)
# Use numpy's own assert_allclose when the installed numpy provides it;
# otherwise fall back to the simplified local implementation above.
if hasattr(np.testing, 'assert_allclose'):
    assert_allclose = np.testing.assert_allclose
else:
    assert_allclose = _assert_allclose
def assert_raise_message(exceptions, message, function, *args, **kwargs):
    """Helper function to test error messages in exceptions

    Calls ``function(*args, **kwargs)`` and fails unless it raises one of
    ``exceptions`` whose message contains ``message``.

    Parameters
    ----------
    exceptions : exception or tuple of exception
        Name of the estimator

    func : callable
        Calable object to raise error

    *args : the positional arguments to `func`.

    **kw : the keyword arguments to `func`
    """
    try:
        function(*args, **kwargs)
    except exceptions as e:
        observed = str(e)
        if message not in observed:
            raise AssertionError("Error message does not include the expected"
                                 " string: %r. Observed error message: %r" %
                                 (message, observed))
    else:
        # Nothing was raised: report which exception name(s) were expected.
        names = (" or ".join(exc.__name__ for exc in exceptions)
                 if isinstance(exceptions, tuple) else exceptions.__name__)
        raise AssertionError("%s not raised by %s" %
                             (names, function.__name__))
def fake_mldata(columns_dict, dataname, matfile, ordering=None):
    """Create a fake mldata data set.

    Writes a Matlab .mat file shaped like an mldata.org download.

    Parameters
    ----------
    columns_dict : dict, keys=str, values=ndarray
        Contains data as columns_dict[column_name] = array of data.

    dataname : string
        Name of data set.

    matfile : string or file object
        The file name string or the file-like object of the output file.

    ordering : list, default None
        List of column_names, determines the ordering in the data set.

    Notes
    -----
    This function transposes all arrays, while fetch_mldata only transposes
    'data', keep that into account in the tests.
    """
    datasets = dict(columns_dict)

    # transpose all variables
    for name in datasets:
        datasets[name] = datasets[name].T

    if ordering is None:
        ordering = sorted(list(datasets.keys()))
    # NOTE: setting up this array is tricky, because of the way Matlab
    # re-packages 1D arrays
    datasets['mldata_descr_ordering'] = sp.empty((1, len(ordering)),
                                                 dtype='object')
    for i, name in enumerate(ordering):
        datasets['mldata_descr_ordering'][0, i] = name

    scipy.io.savemat(matfile, datasets, oned_as='column')
class mock_mldata_urlopen(object):
    """Callable standing in for ``urlopen`` that serves fake mldata sets."""

    def __init__(self, mock_datasets):
        """Object that mocks the urlopen function to fake requests to mldata.

        `mock_datasets` is a dictionary of {dataset_name: data_dict}, or
        {dataset_name: (data_dict, ordering).
        `data_dict` itself is a dictionary of {column_name: data_array},
        and `ordering` is a list of column_names to determine the ordering
        in the data set (see `fake_mldata` for details).

        When requesting a dataset with a name that is in mock_datasets,
        this object creates a fake dataset in a StringIO object and
        returns it. Otherwise, it raises an HTTPError.
        """
        self.mock_datasets = mock_datasets

    def __call__(self, urlname):
        # The dataset name is the final path component of the URL.
        dataset_name = urlname.split('/')[-1]
        if dataset_name in self.mock_datasets:
            resource_name = '_' + dataset_name
            from io import BytesIO
            matfile = BytesIO()

            dataset = self.mock_datasets[dataset_name]
            ordering = None
            if isinstance(dataset, tuple):
                dataset, ordering = dataset
            fake_mldata(dataset, resource_name, matfile, ordering)

            # Rewind so the caller can read the fake .mat payload from the top.
            matfile.seek(0)
            return matfile
        else:
            # Mimic mldata.org's behavior for unknown data sets.
            raise HTTPError(urlname, 404, dataset_name + " is not available",
                            [], None)
def install_mldata_mock(mock_datasets):
    """Replace ``sklearn.datasets.mldata.urlopen`` with a mock server.

    ``mock_datasets`` has the layout expected by `mock_mldata_urlopen`.
    """
    # Lazy import to avoid mutually recursive imports
    from sklearn import datasets as sklearn_datasets
    sklearn_datasets.mldata.urlopen = mock_mldata_urlopen(mock_datasets)
def uninstall_mldata_mock():
    """Restore the genuine ``urlopen`` on ``sklearn.datasets.mldata``."""
    # Lazy import to avoid mutually recursive imports
    from sklearn import datasets as sklearn_datasets
    sklearn_datasets.mldata.urlopen = urlopen
# Meta estimators need another estimator to be instantiated.
META_ESTIMATORS = ["OneVsOneClassifier",
                   "OutputCodeClassifier", "OneVsRestClassifier", "RFE",
                   "RFECV", "BaseEnsemble"]
# estimators that there is no way to default-construct sensibly
OTHER = ["Pipeline", "FeatureUnion", "GridSearchCV",
         "RandomizedSearchCV"]
# some strange ones (label transformers, vectorizers, dummy estimators, ...)
# excluded from the common estimator checks by default
DONT_TEST = ['SparseCoder', 'EllipticEnvelope', 'DictVectorizer',
             'LabelBinarizer', 'LabelEncoder',
             'MultiLabelBinarizer', 'TfidfTransformer',
             'TfidfVectorizer', 'IsotonicRegression',
             'OneHotEncoder', 'RandomTreesEmbedding',
             'FeatureHasher', 'DummyClassifier', 'DummyRegressor',
             'TruncatedSVD', 'PolynomialFeatures',
             'GaussianRandomProjectionHash', 'HashingVectorizer',
             'CheckingClassifier', 'PatchExtractor', 'CountVectorizer',
             # GradientBoosting base estimators, maybe should
             # exclude them in another way
             'ZeroEstimator', 'ScaledLogOddsEstimator',
             'QuantileEstimator', 'MeanEstimator',
             'LogOddsEstimator', 'PriorProbabilityEstimator',
             '_SigmoidCalibration', 'VotingClassifier']
def all_estimators(include_meta_estimators=False,
                   include_other=False, type_filter=None,
                   include_dont_test=False):
    """Get a list of all estimators from sklearn.

    This function crawls the module and gets all classes that inherit
    from BaseEstimator. Classes that are defined in test-modules are not
    included.
    By default meta_estimators such as GridSearchCV are also not included.

    Parameters
    ----------
    include_meta_estimators : boolean, default=False
        Whether to include meta-estimators that can be constructed using
        an estimator as their first argument. These are currently
        BaseEnsemble, OneVsOneClassifier, OutputCodeClassifier,
        OneVsRestClassifier, RFE, RFECV.
    include_other : boolean, default=False
        Whether to include meta-estimators that are somehow special and can
        not be default-constructed sensibly. These are currently
        Pipeline, FeatureUnion and GridSearchCV
    include_dont_test : boolean, default=False
        Whether to include "special" label estimator or test processors.
    type_filter : string, list of string, or None, default=None
        Which kind of estimators should be returned. If None, no filter is
        applied and all estimators are returned. Possible values are
        'classifier', 'regressor', 'cluster' and 'transformer' to get
        estimators only of these specific types, or a list of these to
        get the estimators that fit at least one of the types.

    Returns
    -------
    estimators : list of tuples
        List of (name, class), where ``name`` is the class name as string
        and ``class`` is the actual type of the class.
    """
    def is_abstract(c):
        # A class is abstract iff it declares unimplemented abstractmethods.
        return bool(getattr(c, '__abstractmethods__', False))

    all_classes = []
    # Walk every sklearn submodule, skipping test modules and swallowing
    # import errors (onerror) so optional backends do not break the crawl.
    path = sklearn.__path__
    for importer, modname, ispkg in pkgutil.walk_packages(
            path=path, prefix='sklearn.', onerror=lambda x: None):
        if ".tests." in modname:
            continue
        module = __import__(modname, fromlist="dummy")
        all_classes.extend(inspect.getmembers(module, inspect.isclass))

    all_classes = set(all_classes)

    estimators = [c for c in all_classes
                  if (issubclass(c[1], BaseEstimator)
                      and c[0] != 'BaseEstimator')]
    # get rid of abstract base classes
    estimators = [c for c in estimators if not is_abstract(c[1])]

    if not include_dont_test:
        estimators = [c for c in estimators if c[0] not in DONT_TEST]
    if not include_other:
        estimators = [c for c in estimators if c[0] not in OTHER]
    # possibly get rid of meta estimators
    if not include_meta_estimators:
        estimators = [c for c in estimators if c[0] not in META_ESTIMATORS]

    if type_filter is not None:
        if not isinstance(type_filter, list):
            type_filter = [type_filter]
        else:
            type_filter = list(type_filter)  # copy
        filtered_estimators = []
        filters = {'classifier': ClassifierMixin,
                   'regressor': RegressorMixin,
                   'transformer': TransformerMixin,
                   'cluster': ClusterMixin}
        for name, mixin in filters.items():
            if name in type_filter:
                type_filter.remove(name)
                filtered_estimators.extend([est for est in estimators
                                            if issubclass(est[1], mixin)])
        estimators = filtered_estimators
        if type_filter:
            # Anything left over was not a recognized filter name.
            raise ValueError("Parameter type_filter must be 'classifier', "
                             "'regressor', 'transformer', 'cluster' or "
                             "None, got %s." % repr(type_filter))

    # drop duplicates, sort for reproducibility
    return sorted(set(estimators))
def set_random_state(estimator, random_state=0):
    """Set ``random_state`` on *estimator* if it exposes such a parameter.

    Estimators without a ``random_state`` parameter are left untouched.

    Parameters
    ----------
    estimator : estimator object
        Any object implementing the scikit-learn ``get_params`` /
        ``set_params`` protocol.
    random_state : int, default=0
        The seed to assign.
    """
    # Membership tests work directly on the dict; no need for ``.keys()``.
    if "random_state" in estimator.get_params():
        estimator.set_params(random_state=random_state)
def if_matplotlib(func):
    """Test decorator that skips the test when matplotlib is unusable."""
    @wraps(func)
    def run_test(*args, **kwargs):
        try:
            import matplotlib
            matplotlib.use('Agg', warn=False)
            # Creating a figure fails when no $DISPLAY is available.
            import matplotlib.pyplot as plt
            plt.figure()
        except ImportError:
            raise SkipTest('Matplotlib not available.')
        # Run the wrapped test only after the backend check succeeded.
        return func(*args, **kwargs)
    return run_test
def if_not_mac_os(versions=('10.7', '10.8', '10.9'),
                  message='Multi-process bug in Mac OS X >= 10.7 '
                          '(see issue #636)'):
    """Test decorator that skips a test when the OS is Mac OS X and its
    major version is one of ``versions``.

    .. deprecated:: 0.17
        Use ``if_safe_multiprocessing_with_blas`` instead.
    """
    warnings.warn("if_not_mac_os is deprecated in 0.17 and will be removed"
                  " in 0.19: use the safer and more generic"
                  " if_safe_multiprocessing_with_blas instead",
                  DeprecationWarning)
    mac_version, _, _ = platform.mac_ver()
    # Compare only the major.minor prefix against the blacklist.
    skip = '.'.join(mac_version.split('.')[:2]) in versions

    def decorator(func):
        if not skip:
            # Not an affected platform: hand the test back unchanged.
            return func

        @wraps(func)
        def skipper(*args, **kwargs):
            raise SkipTest(message)
        return skipper
    return decorator
def if_safe_multiprocessing_with_blas(func):
    """Decorator for tests involving both BLAS calls and multiprocessing.

    Under Python < 3.4 and POSIX (e.g. Linux or OSX), using multiprocessing
    in conjunction with some implementation of BLAS (or other libraries
    managing an internal posix thread pool) can cause a crash or a freeze
    of the Python process.  Under Python 3.4 and later, joblib uses the
    forkserver mode of multiprocessing, which does not trigger this problem.

    In practice all known packaged distributions (from Linux distros or
    Anaconda) of BLAS under Linux seem to be safe, so the problem appears
    to impact OSX users only; this wrapper skips the test on OSX running
    Python < 3.4.
    """
    @wraps(func)
    def run_test(*args, **kwargs):
        on_osx = sys.platform == 'darwin'
        if on_osx and sys.version_info[:2] < (3, 4):
            raise SkipTest(
                "Possible multi-process bug with some BLAS under Python < 3.4")
        return func(*args, **kwargs)
    return run_test
def clean_warning_registry():
    """Safe way to reset warnings.

    Resets the global warning filters and clears the hidden
    ``__warningregistry__`` of every imported module.  ``six.moves``
    lazy modules are skipped since touching them triggers imports.
    """
    warnings.resetwarnings()
    registry_attr = "__warningregistry__"
    for module_name, module in list(sys.modules.items()):
        if 'six.moves' in module_name:
            continue
        if hasattr(module, registry_attr):
            getattr(module, registry_attr).clear()
def check_skip_network():
    """Raise SkipTest when SKLEARN_SKIP_NETWORK_TESTS is set to a true value."""
    flag = os.environ.get('SKLEARN_SKIP_NETWORK_TESTS', 0)
    if int(flag):
        raise SkipTest("Text tutorial requires large dataset download")
def check_skip_travis():
    """Skip test if being run on Travis."""
    on_travis = os.environ.get('TRAVIS') == "true"
    if on_travis:
        raise SkipTest("This test needs to be skipped on Travis")
def _delete_folder(folder_path, warn=False):
"""Utility function to cleanup a temporary folder if still existing.
Copy from joblib.pool (for independance)"""
try:
if os.path.exists(folder_path):
# This can fail under windows,
# but will succeed when called by atexit
shutil.rmtree(folder_path)
except WindowsError:
if warn:
warnings.warn("Could not delete temporary folder %s" % folder_path)
class TempMemmap(object):
    """Context manager exposing *data* as a read-only memmap.

    On entry the data is dumped with joblib into a private temporary
    directory and re-loaded with ``mmap_mode``; the directory is removed
    on exit, with an atexit hook as a fallback cleanup.
    """

    def __init__(self, data, mmap_mode='r'):
        self.temp_folder = tempfile.mkdtemp(prefix='sklearn_testing_')
        self.mmap_mode = mmap_mode
        self.data = data

    def __enter__(self):
        data_path = op.join(self.temp_folder, 'data.pkl')
        joblib.dump(self.data, data_path)
        data_read_only = joblib.load(data_path, mmap_mode=self.mmap_mode)
        # Fallback cleanup in case __exit__ never runs (e.g. hard crash).
        atexit.register(lambda: _delete_folder(self.temp_folder, warn=True))
        return data_read_only

    def __exit__(self, exc_type, exc_val, exc_tb):
        _delete_folder(self.temp_folder)
# nose-style decorators: run the corresponding skip check before each test.
with_network = with_setup(check_skip_network)
with_travis = with_setup(check_skip_travis)
| bsd-3-clause |
lukauskas/scipy | scipy/stats/_binned_statistic.py | 26 | 17723 | from __future__ import division, print_function, absolute_import
import warnings
import numpy as np
from scipy._lib.six import callable
from collections import namedtuple
__all__ = ['binned_statistic',
'binned_statistic_2d',
'binned_statistic_dd']
def binned_statistic(x, values, statistic='mean',
                     bins=10, range=None):
    """
    Compute a binned statistic for a set of data.

    This is a generalization of a histogram function.  A histogram divides
    the space into bins and returns the count of points per bin; this
    function can instead compute the sum, mean, median, or an arbitrary
    statistic of the values falling within each bin.

    Parameters
    ----------
    x : array_like
        A sequence of values to be binned.
    values : array_like
        The values on which the statistic will be computed.  This must be
        the same shape as `x`.
    statistic : string or callable, optional
        The statistic to compute (default is 'mean').  One of 'mean',
        'median', 'count', 'sum', or a user-defined function taking a 1D
        array of values and returning a single numerical statistic.  Empty
        bins are represented by NaN ('mean'/'median'), zero
        ('count'/'sum'), or ``function([])`` (NaN if that raises).
    bins : int or sequence of scalars, optional
        If an int, the number of equal-width bins in the given range (10
        by default).  If a sequence, the bin edges including the rightmost
        edge, allowing for non-uniform bin widths.  Values in `x` smaller
        than the lowest edge are assigned to bin number 0; values beyond
        the highest bin are assigned to ``bins[-1]``.
    range : (float, float) or [(float, float)], optional
        The lower and upper range of the bins; defaults to
        ``(x.min(), x.max())``.  Values outside the range are ignored.

    Returns
    -------
    statistic : array
        The values of the selected statistic in each bin.
    bin_edges : array of dtype float
        The bin edges, of length ``len(statistic) + 1``.
    binnumber : 1-D ndarray of ints
        The bin index assigned to each observation; same length as
        `values`.

    See Also
    --------
    numpy.histogram, binned_statistic_2d, binned_statistic_dd

    Notes
    -----
    All but the last (righthand-most) bin are half-open: with bin edges
    ``[1, 2, 3, 4]`` the first bin is ``[1, 2)`` (including 1 but
    excluding 2), while the last, ``[3, 4]``, *includes* 4.

    .. versionadded:: 0.11.0
    """
    # Normalize `bins`: a scalar is handled downstream as a bin count; an
    # explicit edge sequence is wrapped for the D-dimensional routine.
    try:
        nbins = len(bins)
    except TypeError:
        nbins = 1
    if nbins != 1:
        bins = [np.asarray(bins, float)]

    # A single (lo, hi) pair becomes a one-element list of pairs.
    if range is not None and len(range) == 2:
        range = [range]

    medians, edges, xy = binned_statistic_dd([x], values, statistic,
                                             bins, range)

    BinnedStatisticResult = namedtuple('BinnedStatisticResult',
                                       ('statistic', 'bin_edges',
                                        'binnumber'))
    return BinnedStatisticResult(medians, edges[0], xy)
def binned_statistic_2d(x, y, values, statistic='mean',
                        bins=10, range=None):
    """
    Compute a bidimensional binned statistic for a set of data.

    This is a generalization of a histogram2d function: instead of only
    counting the points per bin, it can compute the sum, mean, median, or
    an arbitrary statistic of the values in each two-dimensional bin.

    Parameters
    ----------
    x : (N,) array_like
        A sequence of values to be binned along the first dimension.
    y : (M,) array_like
        A sequence of values to be binned along the second dimension.
    values : (N,) array_like
        The values on which the statistic will be computed.  This must be
        the same shape as `x`.
    statistic : string or callable, optional
        The statistic to compute (default is 'mean').  One of 'mean',
        'median', 'count', 'sum', or a user-defined function taking a 1D
        array of values and returning a single numerical statistic.  Empty
        bins are represented by NaN ('mean'/'median'), zero
        ('count'/'sum'), or ``function([])`` (NaN if that raises).
    bins : int or [int, int] or array_like or [array, array], optional
        The bin specification:

        * the number of bins for the two dimensions (nx=ny=bins),
        * the number of bins in each dimension (nx, ny = bins),
        * the bin edges for the two dimensions (x_edges = y_edges = bins),
        * the bin edges in each dimension (x_edges, y_edges = bins).
    range : (2,2) array_like, optional
        The leftmost and rightmost edges of the bins along each dimension
        (if not specified explicitly in the `bins` parameters):
        [[xmin, xmax], [ymin, ymax]].  All values outside of this range
        are considered outliers and not tallied in the histogram.

    Returns
    -------
    statistic : (nx, ny) ndarray
        The values of the selected statistic in each two-dimensional bin.
    x_edges : (nx + 1) ndarray
        The bin edges along the first dimension.
    y_edges : (ny + 1) ndarray
        The bin edges along the second dimension.
    binnumber : 1-D ndarray of ints
        The bin index assigned to each observation; same length as
        `values`.

    See Also
    --------
    numpy.histogram2d, binned_statistic, binned_statistic_dd

    Notes
    -----
    .. versionadded:: 0.11.0
    """
    # This code is based on np.histogram2d
    try:
        nbins = len(bins)
    except TypeError:
        nbins = 1
    # A single sequence of edges is shared between both dimensions.
    if nbins not in (1, 2):
        xedges = yedges = np.asarray(bins, float)
        bins = [xedges, yedges]

    medians, edges, xy = binned_statistic_dd([x, y], values, statistic,
                                             bins, range)

    BinnedStatistic2dResult = namedtuple('BinnedStatistic2dResult',
                                         ('statistic', 'x_edge', 'y_edge',
                                          'binnumber'))
    return BinnedStatistic2dResult(medians, edges[0], edges[1], xy)
def binned_statistic_dd(sample, values, statistic='mean',
                        bins=10, range=None):
    """
    Compute a multidimensional binned statistic for a set of data.

    This is a generalization of a histogramdd function.  A histogram
    divides the space into bins and returns the count of points per bin;
    this function can instead compute the sum, mean, median, or an
    arbitrary statistic of the values within each bin.

    Parameters
    ----------
    sample : array_like
        Data to histogram passed as a sequence of D arrays of length N, or
        as an (N,D) array.
    values : array_like
        The values on which the statistic will be computed.  This must be
        the same shape as x.
    statistic : string or callable, optional
        The statistic to compute (default is 'mean').  One of 'mean',
        'median', 'count', 'sum', 'std', or a user-defined function taking
        a 1D array of values and returning a single numerical statistic.
        Empty bins are represented by NaN ('mean'/'median'), zero
        ('count'/'sum'/'std'), or ``function([])`` (NaN if that raises).
    bins : sequence or int, optional
        The bin specification:

        * A sequence of arrays describing the bin edges along each
          dimension.
        * The number of bins for each dimension (nx, ny, ... = bins)
        * The number of bins for all dimensions (nx=ny=...=bins).
    range : sequence, optional
        A sequence of lower and upper bin edges to be used if the edges
        are not given explicitly in `bins`.  Defaults to the minimum and
        maximum values along each dimension.

    Returns
    -------
    statistic : ndarray, shape(nx1, nx2, nx3,...)
        The values of the selected statistic in each bin.
    bin_edges : list of ndarrays
        A list of D arrays describing the (nxi + 1) bin edges for each
        dimension.
    binnumber : 1-D ndarray of ints
        The (flattened) bin index assigned to each observation; same
        length as `values`.

    Raises
    ------
    ValueError
        If `statistic` is neither callable nor one of the known names.

    See Also
    --------
    np.histogramdd, binned_statistic, binned_statistic_2d

    Notes
    -----
    .. versionadded:: 0.11.0
    """
    known_stats = ['mean', 'median', 'count', 'sum', 'std']
    if not callable(statistic) and statistic not in known_stats:
        raise ValueError('invalid statistic %r' % (statistic,))

    # This code is based on np.histogramdd
    try:
        # Sample is an ND-array.
        N, D = sample.shape
    except (AttributeError, ValueError):
        # Sample is a sequence of 1D arrays.
        sample = np.atleast_2d(sample).T
        N, D = sample.shape

    nbin = np.empty(D, int)
    edges = D * [None]
    dedges = D * [None]

    try:
        M = len(bins)
        if M != D:
            raise AttributeError('The dimension of bins must be equal '
                                 'to the dimension of the sample x.')
    except TypeError:
        bins = D * [bins]

    # Select range for each dimension
    # Used only if number of bins is given.
    if range is None:
        smin = np.atleast_1d(np.array(sample.min(0), float))
        smax = np.atleast_1d(np.array(sample.max(0), float))
    else:
        smin = np.zeros(D)
        smax = np.zeros(D)
        for i in np.arange(D):
            smin[i], smax[i] = range[i]

    # Make sure the bins have a finite width.
    for i in np.arange(len(smin)):
        if smin[i] == smax[i]:
            smin[i] = smin[i] - .5
            smax[i] = smax[i] + .5

    # Create edge arrays
    for i in np.arange(D):
        if np.isscalar(bins[i]):
            nbin[i] = bins[i] + 2  # +2 for outlier bins
            edges[i] = np.linspace(smin[i], smax[i], nbin[i] - 1)
        else:
            edges[i] = np.asarray(bins[i], float)
            nbin[i] = len(edges[i]) + 1  # +1 for outlier bins
        dedges[i] = np.diff(edges[i])

    nbin = np.asarray(nbin)

    # Compute the bin number each sample falls into.
    Ncount = {}
    for i in np.arange(D):
        Ncount[i] = np.digitize(sample[:, i], edges[i])

    # Using digitize, values that fall on an edge are put in the right bin.
    # For the rightmost bin, we want values equal to the right
    # edge to be counted in the last bin, and not as an outlier.
    for i in np.arange(D):
        # Rounding precision
        decimal = int(-np.log10(dedges[i].min())) + 6
        # Find which points are on the rightmost edge.
        on_edge = np.where(np.around(sample[:, i], decimal)
                           == np.around(edges[i][-1], decimal))[0]
        # Shift these points one bin to the left.
        Ncount[i][on_edge] -= 1

    # Compute the sample indices in the flattened statistic matrix.
    ni = nbin.argsort()
    xy = np.zeros(N, int)
    for i in np.arange(0, D - 1):
        xy += Ncount[ni[i]] * nbin[ni[i + 1:]].prod()
    xy += Ncount[ni[-1]]

    result = np.empty(nbin.prod(), float)

    if statistic == 'mean':
        result.fill(np.nan)
        flatcount = np.bincount(xy, None)
        flatsum = np.bincount(xy, values)
        a = flatcount.nonzero()
        result[a] = flatsum[a] / flatcount[a]
    elif statistic == 'std':
        result.fill(0)
        flatcount = np.bincount(xy, None)
        flatsum = np.bincount(xy, values)
        flatsum2 = np.bincount(xy, values ** 2)
        a = flatcount.nonzero()
        result[a] = np.sqrt(flatsum2[a] / flatcount[a]
                            - (flatsum[a] / flatcount[a]) ** 2)
    elif statistic == 'count':
        result.fill(0)
        flatcount = np.bincount(xy, None)
        a = np.arange(len(flatcount))
        result[a] = flatcount
    elif statistic == 'sum':
        result.fill(0)
        flatsum = np.bincount(xy, values)
        a = np.arange(len(flatsum))
        result[a] = flatsum
    elif statistic == 'median':
        result.fill(np.nan)
        for i in np.unique(xy):
            result[i] = np.median(values[xy == i])
    elif callable(statistic):
        with warnings.catch_warnings():
            # Numpy generates a warnings for mean/std/... with empty list
            warnings.filterwarnings('ignore', category=RuntimeWarning)
            old = np.seterr(invalid='ignore')
            try:
                null = statistic([])
            except Exception:
                # Was a bare ``except:``; any failure of the user callable
                # on an empty input just means empty bins are NaN.
                null = np.nan
            np.seterr(**old)
        result.fill(null)
        for i in np.unique(xy):
            result[i] = statistic(values[xy == i])

    # Shape into a proper matrix
    result = result.reshape(np.sort(nbin))
    for i in np.arange(nbin.size):
        j = ni.argsort()[i]
        result = result.swapaxes(i, j)
        ni[i], ni[j] = ni[j], ni[i]

    # Remove outliers (indices 0 and -1 for each dimension).
    core = D * [slice(1, -1)]
    # Index with a tuple: indexing an ndarray with a *list* of slices is an
    # error on modern numpy.
    result = result[tuple(core)]

    if (result.shape != nbin - 2).any():
        raise RuntimeError('Internal Shape Error')

    BinnedStatisticddResult = namedtuple('BinnedStatisticddResult',
                                         ('statistic', 'bin_edges',
                                          'binnumber'))
    return BinnedStatisticddResult(result, edges, xy)
| bsd-3-clause |
wdurhamh/statsmodels | statsmodels/datasets/tests/test_utils.py | 26 | 1697 | import os
import sys
from statsmodels.datasets import get_rdataset, webuse, check_internet
from numpy.testing import assert_, assert_array_equal, dec
cur_dir = os.path.dirname(os.path.abspath(__file__))
def test_get_rdataset():
    """Smoke test: get_rdataset serves the Duncan data from the on-disk cache."""
    # smoke test
    if sys.version_info[0] >= 3:
        #NOTE: there's no way to test both since the cached files were
        #created with Python 2.x, they're strings, but Python 3 expects
        #bytes and the index file path is hard-coded so both can't live
        #side by side
        pass
        #duncan = get_rdataset("Duncan-py3", "car", cache=cur_dir)
    else:
        duncan = get_rdataset("Duncan", "car", cache=cur_dir)
        assert_(duncan.from_cache)

#internet_available = check_internet()
#@dec.skipif(not internet_available)
def t_est_webuse():
    """Disabled (renamed) network test: webuse against a hosted Stata file."""
    # test copied and adjusted from iolib/tests/test_foreign
    from statsmodels.iolib.tests.results.macrodata import macrodata_result as res2
    #base_gh = "http://github.com/statsmodels/statsmodels/raw/master/statsmodels/datasets/macrodata/"
    base_gh = "http://statsmodels.sourceforge.net/devel/_static/"
    res1 = webuse('macrodata', baseurl=base_gh, as_df=False)
    assert_array_equal(res1 == res2, True)

#@dec.skipif(not internet_available)
def t_est_webuse_pandas():
    """Disabled (renamed) network test: webuse returning a pandas DataFrame."""
    # test copied and adjusted from iolib/tests/test_foreign
    from pandas.util.testing import assert_frame_equal
    from statsmodels.datasets import macrodata
    dta = macrodata.load_pandas().data
    base_gh = "http://github.com/statsmodels/statsmodels/raw/master/statsmodels/datasets/macrodata/"
    res1 = webuse('macrodata', baseurl=base_gh)
    res1 = res1.astype(float)
    assert_frame_equal(res1, dta)
| bsd-3-clause |
kagayakidan/scikit-learn | examples/model_selection/plot_validation_curve.py | 229 | 1823 | """
==========================
Plotting Validation Curves
==========================
In this plot you can see the training scores and validation scores of an SVM
for different values of the kernel parameter gamma. For very low values of
gamma, you can see that both the training score and the validation score are
low. This is called underfitting. Medium values of gamma will result in high
values for both scores, i.e. the classifier is performing fairly well. If gamma
is too high, the classifier will overfit, which means that the training score
is good but the validation score is poor.
"""
print(__doc__)

import matplotlib.pyplot as plt
import numpy as np

from sklearn.datasets import load_digits
from sklearn.svm import SVC
from sklearn.learning_curve import validation_curve

digits = load_digits()
X, y = digits.data, digits.target

# Sweep gamma over five orders of magnitude, cross-validating at each value.
param_range = np.logspace(-6, -1, 5)
train_scores, test_scores = validation_curve(
    SVC(), X, y, param_name="gamma", param_range=param_range,
    cv=10, scoring="accuracy", n_jobs=1)
train_scores_mean = np.mean(train_scores, axis=1)
train_scores_std = np.std(train_scores, axis=1)
test_scores_mean = np.mean(test_scores, axis=1)
test_scores_std = np.std(test_scores, axis=1)

plt.title("Validation Curve with SVM")
# Raw string: "\g" is an invalid escape sequence in a plain string literal.
plt.xlabel(r"$\gamma$")
plt.ylabel("Score")
plt.ylim(0.0, 1.1)
plt.semilogx(param_range, train_scores_mean, label="Training score", color="r")
# Shade one standard deviation around each mean curve.
plt.fill_between(param_range, train_scores_mean - train_scores_std,
                 train_scores_mean + train_scores_std, alpha=0.2, color="r")
plt.semilogx(param_range, test_scores_mean, label="Cross-validation score",
             color="g")
plt.fill_between(param_range, test_scores_mean - test_scores_std,
                 test_scores_mean + test_scores_std, alpha=0.2, color="g")
plt.legend(loc="best")
plt.show()
| bsd-3-clause |
Dannnno/odo | odo/backends/tests/test_csv.py | 1 | 12718 | from __future__ import absolute_import, division, print_function
import pytest
import sys
import os
import pandas as pd
import pandas.util.testing as tm
import gzip
import datashape
from datashape import Option, string
from collections import Iterator
from odo.backends.csv import (CSV, append, convert, resource,
csv_to_dataframe, CSV_to_chunks_of_dataframes,
infer_header)
from odo.utils import tmpfile, filetext, filetexts, raises
from odo import (into, append, convert, resource, discover, dshape, Temp,
chunks, odo)
from odo.temp import _Temp
from odo.compatibility import unicode
def test_csv():
    """CSV resources record the delimiter passed at construction."""
    with tmpfile('.csv') as fn:
        csv = CSV(
            fn, dshape='var * {name: string, amount: int}', delimiter=',')
        assert csv.dialect['delimiter'] == ','


def test_csv_append():
    """Appending tuples to a CSV writes rows that round-trip back out."""
    with tmpfile('.csv') as fn:
        csv = CSV(fn, has_header=False)

        data = [('Alice', 100), ('Bob', 200)]
        append(csv, data)

        assert list(convert(Iterator, csv)) == data

        with open(fn) as f:
            s = f.read()
        assert 'Alice' in s
        assert '100' in s
def test_pandas_read():
    """csv_to_dataframe honors the provided datashape for names and types."""
    with filetext('Alice,1\nBob,2') as fn:
        ds = datashape.dshape('var * {name: string, amount: int}')
        csv = CSV(fn)
        df = csv_to_dataframe(csv, dshape=ds)
        assert isinstance(df, pd.DataFrame)
        assert convert(list, df) == [('Alice', 1), ('Bob', 2)]
        assert list(df.columns) == ['name', 'amount']


def test_pandas_read_supports_datetimes():
    """Date-typed columns are parsed into a datetime64 dtype."""
    with filetext('Alice,2014-01-02\nBob,2014-01-03') as fn:
        ds = datashape.dshape('var * {name: string, when: date}')
        csv = CSV(fn)
        df = csv_to_dataframe(csv, dshape=ds)
        assert isinstance(df, pd.DataFrame)
        assert list(df.columns) == ['name', 'when']
        assert df.dtypes['when'] == 'M8[ns]'


def test_pandas_read_supports_missing_integers():
    """Optional integer columns fall back to a float dtype to hold NaNs."""
    with filetext('Alice,1\nBob,') as fn:
        ds = datashape.dshape('var * {name: string, val: ?int32}')
        csv = CSV(fn)
        df = csv_to_dataframe(csv, dshape=ds)
        assert isinstance(df, pd.DataFrame)
        assert list(df.columns) == ['name', 'val']
        assert df.dtypes['val'] == 'f4'


@pytest.mark.xfail(sys.platform == 'win32' and sys.version_info[0] < 3,
                   reason="Doesn't work on Windows")
def test_pandas_read_supports_gzip():
    """Gzip-compressed CSV files are read transparently."""
    with filetext('Alice,1\nBob,2', open=gzip.open,
                  mode='wt', extension='.csv.gz') as fn:
        ds = datashape.dshape('var * {name: string, amount: int}')
        csv = CSV(fn)
        df = csv_to_dataframe(csv, dshape=ds)
        assert isinstance(df, pd.DataFrame)
        assert convert(list, df) == [('Alice', 1), ('Bob', 2)]
        assert list(df.columns) == ['name', 'amount']


def test_pandas_read_supports_read_csv_kwargs():
    """Extra keyword arguments are forwarded to pandas.read_csv."""
    with filetext('Alice,1\nBob,2') as fn:
        ds = datashape.dshape('var * {name: string, amount: int}')
        csv = CSV(fn)
        df = csv_to_dataframe(csv, dshape=ds, usecols=['name'])
        assert isinstance(df, pd.DataFrame)
        assert convert(list, df) == [('Alice',), ('Bob',)]
def test_pandas_write():
    """Appending writes the header once and never duplicates it."""
    with tmpfile('.csv') as fn:
        ds = datashape.dshape('var * {name: string, amount: int}')
        data = [('Alice', 1), ('Bob', 2)]
        csv = CSV(fn, has_header=True)
        append(csv, data, dshape=ds)

        with open(fn) as f:
            assert 'name' in f.read()

        # Doesn't write header twice
        append(csv, data, dshape=ds)
        with open(fn) as f:
            s = f.read()
            assert s.count('name') == 1


def test_pandas_writes_header_by_default():
    """A header row is emitted even when has_header is not specified."""
    with tmpfile('.csv') as fn:
        ds = datashape.dshape('var * {name: string, amount: int}')
        data = [('Alice', 1), ('Bob', 2)]
        csv = CSV(fn)
        append(csv, data, dshape=ds)

        with open(fn) as f:
            assert 'name' in f.read()


@pytest.mark.xfail(sys.version_info[0] == 3, reason="Doesn't work on Python 3")
def test_pandas_write_gzip():
    """Appending to a .csv.gz target produces gzip-compressed output."""
    with tmpfile('.csv.gz') as fn:
        ds = datashape.dshape('var * {name: string, amount: int}')
        data = [('Alice', 1), ('Bob', 2)]
        csv = CSV(fn, has_header=True)
        append(csv, data, dshape=ds)

        f = gzip.open(fn)
        s = f.read()
        assert 'name' in s
        assert 'Alice,1' in s
        f.close()
def test_pandas_loads_in_datetimes_naively():
    """Datetime columns are discovered and loaded timezone-naively."""
    with filetext('name,when\nAlice,2014-01-01\nBob,2014-02-02') as fn:
        csv = CSV(fn, has_header=True)
        ds = datashape.dshape('var * {name: ?string, when: ?datetime}')
        assert discover(csv) == ds

        df = convert(pd.DataFrame, csv)
        assert df.dtypes['when'] == 'M8[ns]'


@pytest.mark.xfail(sys.platform == 'win32' and sys.version_info[0] < 3,
                   reason="Doesn't work on Windows")
def test_pandas_discover_on_gzipped_files():
    """Datashape discovery works on gzip-compressed CSV files."""
    with filetext('name,when\nAlice,2014-01-01\nBob,2014-02-02',
                  open=gzip.open, mode='wt', extension='.csv.gz') as fn:
        csv = CSV(fn, has_header=True)
        ds = datashape.dshape('var * {name: ?string, when: ?datetime}')
        assert discover(csv) == ds


def test_csv_into_list():
    """into(list, filename) materializes the CSV rows as tuples."""
    with filetext('name,val\nAlice,100\nBob,200', extension='csv') as fn:
        L = into(list, fn)
        assert L == [('Alice', 100), ('Bob', 200)]
def test_discover_csv_files_without_header():
with filetext('Alice,2014-01-01\nBob,2014-02-02') as fn:
csv = CSV(fn, has_header=False)
df = convert(pd.DataFrame, csv)
assert len(df) == 2
assert 'Alice' not in list(df.columns)
def test_discover_csv_yields_string_on_totally_empty_columns():
expected = dshape('var * {a: int64, b: ?string, c: int64}')
with filetext('a,b,c\n1,,3\n4,,6\n7,,9') as fn:
csv = CSV(fn, has_header=True)
assert discover(csv) == expected
def test_glob():
    """Glob patterns resolve to the concatenation of all matching CSVs."""
    d = {'accounts1.csv': 'name,when\nAlice,100\nBob,200',
         'accounts2.csv': 'name,when\nAlice,300\nBob,400'}
    with filetexts(d) as fns:
        r = resource('accounts*.csv', has_header=True)
        assert convert(list, r) == [('Alice', 100), ('Bob', 200),
                                    ('Alice', 300), ('Bob', 400)]
        # A glob resource is represented as chunks of CSV objects.
        r = resource('*.csv')
        assert isinstance(r, chunks(CSV))
def test_pandas_csv_naive_behavior_results_in_columns():
    """Writing a DataFrame to CSV emits the column names as the first line."""
    df = pd.DataFrame([[1, 'Alice', 100],
                       [2, 'Bob', -200],
                       [3, 'Charlie', 300],
                       [4, 'Denis', 400],
                       [5, 'Edith', -500]], columns=['id', 'name', 'amount'])
    with tmpfile('.csv') as fn:
        into(fn, df)
        with open(fn) as f:
            assert next(f).strip() == 'id,name,amount'
def test_discover_csv_without_columns():
    """Header inference should not mistake data values for column names."""
    with filetext('Alice,100\nBob,200', extension='csv') as fn:
        csv = CSV(fn)
        ds = discover(csv)
        # If '100' appeared in the dshape it would have been used as a name.
        assert '100' not in str(ds)
def test_header_argument_set_with_or_without_header():
    """into(list, path) yields the same rows whether a header exists or not."""
    with filetext('name,val\nAlice,100\nBob,200', extension='csv') as fn:
        assert into(list, fn) == [('Alice', 100), ('Bob', 200)]
    with filetext('Alice,100\nBob,200', extension='csv') as fn:
        assert into(list, fn) == [('Alice', 100), ('Bob', 200)]
def test_first_csv_establishes_consistent_dshape():
    """The dshape discovered on the first glob member applies to the rest.

    accounts1.csv has string values in `when`, so the ints in accounts2.csv
    must also come back as strings.
    """
    d = {'accounts1.csv': 'name,when\nAlice,one\nBob,two',
         'accounts2.csv': 'name,when\nAlice,300\nBob,400'}
    with filetexts(d) as fns:
        result = into(list, 'accounts*.csv')
        assert len(result) == 4
        # NOTE: `unicode` makes this test Python 2 only.
        assert all(isinstance(val, (str, unicode)) for name, val in result)
def test_discover_csv_with_spaces_in_header():
    """Leading/trailing whitespace in header cells is stripped from names."""
    with filetext(' name, val\nAlice,100\nBob,200', extension='csv') as fn:
        ds = discover(CSV(fn, has_header=True))
        assert ds.measure.names == ['name', 'val']
def test_header_disagrees_with_dshape():
    """An explicit dshape overrides the column names found in the file."""
    ds = datashape.dshape('var * {name: string, bal: int64}')
    with filetext('name,val\nAlice,100\nBob,200', extension='csv') as fn:
        csv = CSV(fn, header=True)
        assert convert(list, csv) == [('Alice', 100), ('Bob', 200)]
        # Without a dshape the file's own header wins ...
        assert list(convert(pd.DataFrame, csv).columns) == ['name', 'val']
        # ... but a user-supplied dshape renames the columns.
        assert list(convert(pd.DataFrame, csv, dshape=ds).columns) == [
            'name', 'bal']
def test_raise_errors_quickly_on_into_chunks_dataframe():
    """A value that cannot be coerced ('foo' as int) fails eagerly."""
    with filetext('name,val\nAlice,100\nBob,foo', extension='csv') as fn:
        ds = datashape.dshape('var * {name: string, val: int}')
        csv = CSV(fn, header=True)
        assert raises(Exception,
                      lambda: CSV_to_chunks_of_dataframes(csv, dshape=ds))
def test_unused_datetime_columns():
    """usecols can drop a datetime column without triggering datetime parsing."""
    ds = datashape.dshape('var * {val: string, when: datetime}')
    with filetext("val,when\na,2000-01-01\nb,2000-02-02") as fn:
        csv = CSV(fn, has_header=True)
        assert convert(list, csv_to_dataframe(csv, usecols=['val'],
                                              squeeze=True, dshape=ds)) == ['a', 'b']
def test_empty_dataframe():
    """A header-only CSV converts to an (empty) DataFrame, not an error."""
    with filetext('name,val', extension='csv') as fn:
        csv = CSV(fn, has_header=True)
        df = convert(pd.DataFrame, csv)
        assert isinstance(df, pd.DataFrame)
def test_csv_missing_values():
    """Columns containing NA markers are discovered as Option (nullable)."""
    with filetext('name,val\nAlice,100\nNA,200', extension='csv') as fn:
        csv = CSV(fn)
        assert discover(csv).measure.dict['name'] == Option(string)
def test_csv_separator_header():
    """A custom delimiter ('|') is honoured when parsing header and rows."""
    with filetext('a|b|c\n1|2|3\n4|5|6', extension='csv') as fn:
        csv = CSV(fn, delimiter='|', has_header=True)
        assert convert(list, csv) == [(1, 2, 3), (4, 5, 6)]
# Module-level fixture frame used by the Temp(CSV) round-trip tests below.
df = pd.DataFrame([['Alice', 100],
                   ['Bob', 200],
                   ['Charlie', 300]],
                  columns=['name', 'balance'])
def test_temp_csv():
    """A Temp(CSV) resource removes its backing file when garbage collected."""
    csv = into(Temp(CSV)('_test_temp_csv.csv'), df)
    assert isinstance(csv, CSV)
    assert into(list, csv) == into(list, df)
    del csv
    # Force collection so the finalizer runs deterministically here.
    import gc
    gc.collect()
    assert not os.path.exists('_test_temp_csv.csv')
def test_convert_to_csv():
    """into(Temp(CSV), df) yields a CSV that is also tagged as temporary."""
    csv = into(Temp(CSV), df)
    assert isinstance(csv, CSV)
    assert into(list, csv) == into(list, df)
    assert isinstance(csv, _Temp)
def test_unicode_column_names():
    """UTF-8 (non-ASCII, 2-byte) header names round-trip through CSV loading."""
    with filetext(b'f\xc3\xbc,a\n1,2\n3,4', extension='csv', mode='wb') as fn:
        df = into(pd.DataFrame, CSV(fn, has_header=True))
        expected = pd.DataFrame([(1, 2), (3, 4)],
                                columns=[b'f\xc3\xbc'.decode('utf8'), u'a'])
        tm.assert_frame_equal(df, expected)
def test_more_unicode_column_names():
    """Another non-ASCII header case (c with acute accent) also round-trips."""
    with filetext(b'foo\xc4\x87,a\n1,2\n3,4', extension='csv',
                  mode='wb') as fn:
        df = into(pd.DataFrame, CSV(fn, has_header=True))
        expected = pd.DataFrame([(1, 2), (3, 4)],
                                columns=[b'foo\xc4\x87'.decode('utf8'), u'a'])
        tm.assert_frame_equal(df, expected)
def test_infer_header():
    """infer_header detects whether the first row is a header.

    Uses truthiness assertions instead of the original ``== True`` /
    ``== False`` comparisons (flake8 E712); the behavior pinned is the same.
    """
    with filetext('name,val\nAlice,100\nNA,200', extension='csv') as fn:
        assert infer_header(CSV(fn).path, 100)
    with filetext('Alice,100\nNA,200', extension='csv') as fn:
        assert not infer_header(CSV(fn).path, 100)
def test_csv_supports_sep():
    """The pandas-style `sep` keyword is an alias for `delimiter`."""
    assert CSV('foo.csv', sep=';').dialect['delimiter'] == ';'
def test_csv_to_compressed_csv():
    """odo can convert a plain CSV into a gzip-compressed one losslessly."""
    with tmpfile('.csv') as fn:
        with open(fn, 'w') as f:
            f.write('a,1\nb,2\nc,3')
        with tmpfile('.csv.gz') as gfn:
            result = odo(fn, gfn)
            assert odo(result, list) == odo(fn, list)
def test_has_header_on_tsv():
    """Header sniffing also works for tab-separated content."""
    with tmpfile('.csv') as fn:
        with open(fn, 'wb') as f:
            f.write(b'a\tb\n1\t2\n3\t4')
        csv = CSV(fn)
        assert csv.has_header
def test_header_with_quotes():
    """Quoted, latin1-encoded headers in a real fixture file are discovered."""
    csv = CSV(os.path.join(os.path.dirname(__file__), 'encoding.csv'),
              encoding='latin1')
    expected = dshape("""var * {
        D_PROC: ?string,
        NUM_SEQ: int64,
        COD_TIP_RELAC: ?float64,
        COMPL: ?string,
        COD_ASSUNTO: int64
        }
    """)
    assert discover(csv) == expected
def test_encoding_is_none():
    """encoding=None should fall back to the default 'utf-8'."""
    with tmpfile('.csv') as fn:
        with open(fn, 'w') as f:
            # The original `.encode('utf-8').decode('utf-8')` round-trip was
            # a no-op on this ASCII literal; write it directly.
            f.write('a,1\nb,2\nc,3')
        assert CSV(fn, encoding=None).encoding == 'utf-8'
def test_discover_with_dotted_names():
    """Column names containing dots are quoted in the dshape, not split."""
    with tmpfile('.csv') as fn:
        with open(fn, 'w') as f:
            f.write('a.b,c.d\n1,2\n3,4')
        dshape = discover(resource(fn))
    assert dshape == datashape.dshape('var * {"a.b": int64, "c.d": int64}')
    assert dshape.measure.names == [u'a.b', u'c.d']
| bsd-3-clause |
bfelbo/deepmoji | deepmoji/finetuning.py | 2 | 23552 | """ Finetuning functions for doing transfer learning to new datasets.
"""
from __future__ import print_function
import sys
import uuid
from time import sleep
import h5py
import math
import pickle
import numpy as np
from keras.layers.wrappers import Bidirectional, TimeDistributed
from sklearn.metrics import f1_score
from keras.callbacks import ModelCheckpoint, EarlyStopping, CSVLogger
from keras.optimizers import Adam
from keras.utils.np_utils import to_categorical
from keras.models import model_from_json
from global_variables import (
FINETUNING_METHODS,
FINETUNING_METRICS,
WEIGHTS_DIR)
from tokenizer import tokenize
from sentence_tokenizer import SentenceTokenizer
from attlayer import AttentionWeightedAverage
def load_benchmark(path, vocab, extend_with=0):
    """ Loads the given benchmark dataset.

        Tokenizes the texts using the provided vocabulary, extending it with
        words from the training dataset if extend_with > 0. Splits them into
        three lists: training, validation and testing (in that order).

        Also calculates the maximum length of the texts and the
        suggested batch_size.

    # Arguments:
        path: Path to the dataset to be loaded.
        vocab: Vocabulary to be used for tokenizing texts.
        extend_with: If > 0, the vocabulary will be extended with up to
            extend_with tokens from the training set before tokenizing.

    # Returns:
        A dictionary with the following fields:
            texts: List of three lists, containing tokenized inputs for
                training, validation and testing (in that order).
            labels: List of three lists, containing labels for training,
                validation and testing (in that order).
            added: Number of tokens added to the vocabulary.
            batch_size: Batch size.
            maxlen: Maximum length of an input.
    """
    # Pre-processing dataset.
    # Bug fix: pickle streams are binary data -- open in 'rb' so this works
    # on Windows and with binary pickle protocols (text mode can corrupt
    # them or fail outright).
    with open(path, 'rb') as dataset:
        data = pickle.load(dataset)
    # Decode data
    try:
        texts = [unicode(x) for x in data['texts']]
    except UnicodeDecodeError:
        texts = [x.decode('utf-8') for x in data['texts']]
    # Extract labels
    labels = [x['label'] for x in data['info']]
    batch_size, maxlen = calculate_batchsize_maxlen(texts)
    st = SentenceTokenizer(vocab, maxlen)
    # Split up dataset. Extend the existing vocabulary with up to extend_with
    # tokens from the training dataset.
    texts, labels, added = st.split_train_val_test(texts,
                                                   labels,
                                                   [data['train_ind'],
                                                    data['val_ind'],
                                                    data['test_ind']],
                                                   extend_with=extend_with)
    return {'texts': texts,
            'labels': labels,
            'added': added,
            'batch_size': batch_size,
            'maxlen': maxlen}
def calculate_batchsize_maxlen(texts):
    """ Picks a maximum sequence length and a matching batch size.

        maxlen is the 80th percentile of the tokenized lengths, rounded up
        to the nearest multiple of ten; the batch size shrinks for longer
        sequences to avoid GPU memory overflow.

    # Arguments:
        texts: List of inputs.

    # Returns:
        Batch size,
        max length
    """
    def round_up_to_ten(value):
        return int(math.ceil(value / 10.0)) * 10

    token_counts = [len(tokenize(text)) for text in texts]
    maxlen = round_up_to_ten(np.percentile(token_counts, 80.0))
    # Longer sequences need smaller batches to fit in GPU memory.
    batch_size = 250 if maxlen <= 100 else 50
    return batch_size, maxlen
def finetuning_callbacks(checkpoint_path, patience, verbose):
    """ Builds the Keras callbacks used while finetuning.

    # Arguments:
        checkpoint_path: Where weight checkpoints should be saved.
        patience: Number of epochs with no improvement after which
            training will be stopped.
        verbose: Verbosity flag; callbacks print only when verbose >= 2.

    # Returns:
        Array with training callbacks that can be passed straight into
        model.fit() or similar.
    """
    callbacks_verbose = verbose >= 2
    return [
        # Keep only the weights that achieved the best validation loss.
        ModelCheckpoint(monitor='val_loss', filepath=checkpoint_path,
                        save_best_only=True, verbose=callbacks_verbose),
        # Halt once validation loss stops improving for `patience` epochs.
        EarlyStopping(monitor='val_loss', patience=patience,
                      verbose=callbacks_verbose),
    ]
def freeze_layers(model, unfrozen_types=None, unfrozen_keyword=None):
    """ Freezes all layers in the given model, except for ones that are
        explicitly specified to not be frozen.

    # Arguments:
        model: Model whose layers should be modified.
        unfrozen_types: List of layer types which shouldn't be frozen.
        unfrozen_keyword: Name keywords of layers that shouldn't be frozen.

    # Returns:
        Model with the selected layers frozen.
    """
    # Bug fix: avoid a mutable default argument (previously `[]`), which is
    # shared across calls. Behavior is unchanged since it was never mutated.
    if unfrozen_types is None:
        unfrozen_types = []
    for l in model.layers:
        # Layers without trainable weights (e.g. dropout) are left alone.
        if len(l.trainable_weights):
            trainable = (type(l) in unfrozen_types or
                         (unfrozen_keyword is not None and unfrozen_keyword in l.name))
            change_trainable(l, trainable, verbose=False)
    return model
def change_trainable(layer, trainable, verbose=False):
    """ Helper method that fixes some of Keras' issues with wrappers and
        trainability. Freezes or unfreezes a given layer and, for wrapper
        layers, the layer(s) it wraps.

    # Arguments:
        layer: Layer to be modified.
        trainable: Whether the layer should be frozen or unfrozen.
        verbose: Verbosity flag.
    """
    layer.trainable = trainable

    if type(layer) == Bidirectional:
        layer.backward_layer.trainable = trainable
        layer.forward_layer.trainable = trainable

    if type(layer) == TimeDistributed:
        # Bug fix: TimeDistributed wraps a single inner layer exposed as
        # `.layer`; it has no `backward_layer` attribute (that belongs to
        # Bidirectional), so the old code raised AttributeError here.
        layer.layer.trainable = trainable

    if verbose:
        action = 'Unfroze' if trainable else 'Froze'
        print("{} {}".format(action, layer.name))
def find_f1_threshold(y_val, y_pred_val, y_test, y_pred_test,
                      average='binary'):
    """ Choose a threshold for F1 based on the validation dataset
        (see https://www.ncbi.nlm.nih.gov/pmc/articles/PMC4442797/
        for details on why to find another threshold than simply 0.5)

    # Arguments:
        y_val: Outputs of the validation dataset.
        y_pred_val: Predicted outputs of the validation dataset.
        y_test: Outputs of the testing dataset.
        y_pred_test: Predicted outputs of the testing dataset.
        average: Averaging strategy forwarded to sklearn's f1_score.

    # Returns:
        F1 score for the given data and
        the corresponding F1 threshold
    """
    # Sweep candidate thresholds on validation data only, then apply the
    # winner to the test predictions so the threshold is not overfit to test.
    candidate_thresholds = np.arange(0.01, 0.5, step=0.01)
    val_scores = [f1_score(y_val, (y_pred_val > t), average=average)
                  for t in candidate_thresholds]
    best_threshold = candidate_thresholds[np.argmax(val_scores)]
    test_f1 = f1_score(y_test, (y_pred_test > best_threshold), average=average)
    return test_f1, best_threshold
def relabel(y, current_label_nr, nb_classes):
    """ Makes a binary classification for a specific class in a
        multi-class dataset (one-vs-rest projection).

    # Arguments:
        y: Outputs to be relabelled.
        current_label_nr: Current label number.
        nb_classes: Total number of classes.

    # Returns:
        Relabelled outputs of a given multi-class dataset into a binary
        classification dataset.
    """
    # A flat binary label vector is already in the desired form.
    if nb_classes == 2 and len(y.shape) == 1:
        return y
    column = y[:, current_label_nr]
    relabelled = np.zeros(len(y))
    relabelled[np.where(column == 1)[0]] = 1
    return relabelled
def sampling_generator(X_in, y_in, batch_size, epoch_size=25000,
                       upsample=False, seed=42):
    """ Returns a generator that enables larger epochs on small datasets and
        has upsampling functionality.

    # Arguments:
        X_in: Inputs of the given dataset.
        y_in: Outputs of the given dataset.
        batch_size: Batch size.
        epoch_size: Number of samples in an epoch.
        upsample: Whether upsampling should be done. This flag should only be
            set on binary class problems.
        seed: Random number generator seed.

    # Returns:
        Sample generator.
    """
    np.random.seed(seed)
    if upsample:
        # Upsampling is only defined for flat binary label vectors.
        assert len(y_in.shape) == 1
        neg_idx = np.where(y_in == 0)[0]
        pos_idx = np.where(y_in == 1)[0]
        assert epoch_size % 2 == 0
        per_class = int(epoch_size / 2)
    else:
        all_idx = range(len(X_in))
    # Loop forever; Keras stops consuming when training halts.
    while True:
        if upsample:
            # Draw the same number of samples from each class ...
            chosen_neg = np.random.choice(neg_idx, per_class, replace=True)
            chosen_pos = np.random.choice(pos_idx, per_class, replace=True)
            X = np.concatenate((X_in[chosen_neg], X_in[chosen_pos]), axis=0)
            y = np.concatenate((y_in[chosen_neg], y_in[chosen_pos]), axis=0)
            # ... then shuffle so batches aren't all-negative-then-positive.
            order = np.random.permutation(len(X))
            X, y = X[order], y[order]
            balance = np.mean(y)
            assert balance > 0.45
            assert balance < 0.55
        else:
            # Uniform resampling (with replacement) up to epoch_size.
            picks = np.random.choice(all_idx, epoch_size, replace=True)
            X, y = X_in[picks], y_in[picks]
        # Hand off the epoch in batch_size chunks.
        n_batches = int(epoch_size / batch_size)
        for start in range(0, n_batches * batch_size, batch_size):
            stop = min(start + batch_size, epoch_size)
            yield (X[start:stop], y[start:stop])
def finetune(model, texts, labels, nb_classes, batch_size, method,
             metric='acc', epoch_size=5000, nb_epochs=1000,
             error_checking=True, verbose=1):
    """ Compiles and finetunes the given model.

    # Arguments:
        model: Model to be finetuned
        texts: List of three lists, containing tokenized inputs for training,
            validation and testing (in that order).
        labels: List of three lists, containing labels for training,
            validation and testing (in that order).
        nb_classes: Number of classes in the dataset.
        batch_size: Batch size.
        method: Finetuning method to be used. For available methods, see
            FINETUNING_METHODS in global_variables.py.
        epoch_size: Number of samples in an epoch.
        nb_epochs: Number of epochs. Doesn't matter much as early stopping is used.
        metric: Evaluation metric to be used. For available metrics, see
            FINETUNING_METRICS in global_variables.py.
        error_checking: If set to True, warnings will be printed when the label
            list has the wrong dimensions.
        verbose: Verbosity flag.

    # Returns:
        Model after finetuning,
        score after finetuning using the provided metric.
    """
    # Validate parameters before touching the model.
    if method not in FINETUNING_METHODS:
        raise ValueError('ERROR (finetune): Invalid method parameter. '
                         'Available options: {}'.format(FINETUNING_METHODS))
    if metric not in FINETUNING_METRICS:
        raise ValueError('ERROR (finetune): Invalid metric parameter. '
                         'Available options: {}'.format(FINETUNING_METRICS))
    (X_train, y_train) = (texts[0], labels[0])
    (X_val, y_val) = (texts[1], labels[1])
    (X_test, y_test) = (texts[2], labels[2])
    # Unique checkpoint file per run so concurrent finetunings don't clash.
    checkpoint_path = '{}/deepmoji-checkpoint-{}.hdf5' \
        .format(WEIGHTS_DIR, str(uuid.uuid4()))
    # Check dimension of labels
    if error_checking:
        for ls in [y_train, y_val, y_test]:
            if not ls.ndim == 1:
                print('WARNING (finetune): The dimension of the '
                      'provided label list does not match the expected '
                      'value. When using the \'{}\' metric, the labels '
                      'should be a 1-dimensional array. '
                      'Input shape was {}'.format(metric, ls.shape))
                break
    # Methods training only a few weights ('last'/'new') tolerate a larger
    # learning rate than full-model finetuning.
    if method in ['last', 'new']:
        lr = 0.001
    elif method in ['full', 'chain-thaw']:
        lr = 0.0001
    loss = 'binary_crossentropy' if nb_classes <= 2 \
        else 'categorical_crossentropy'
    # Freeze layers if using last
    if method == 'last':
        model = freeze_layers(model, unfrozen_keyword='softmax')
    # Compile model, for chain-thaw we compile it later (after freezing)
    if method != 'chain-thaw':
        adam = Adam(clipnorm=1, lr=lr)
        model.compile(loss=loss, optimizer=adam, metrics=['accuracy'])
    # Training
    if verbose:
        print('Method: {}'.format(method))
        print('Metric: {}'.format(metric))
        print('Classes: {}'.format(nb_classes))
    if method == 'chain-thaw':
        result = chain_thaw(model, nb_classes=nb_classes,
                            train=(X_train, y_train),
                            val=(X_val, y_val),
                            test=(X_test, y_test),
                            batch_size=batch_size, loss=loss,
                            epoch_size=epoch_size,
                            nb_epochs=nb_epochs,
                            checkpoint_weight_path=checkpoint_path,
                            evaluate=metric, verbose=verbose)
    else:
        result = tune_trainable(model, nb_classes=nb_classes,
                                train=(X_train, y_train),
                                val=(X_val, y_val),
                                test=(X_test, y_test),
                                epoch_size=epoch_size,
                                nb_epochs=nb_epochs,
                                batch_size=batch_size,
                                checkpoint_weight_path=checkpoint_path,
                                evaluate=metric, verbose=verbose)
    return model, result
def tune_trainable(model, nb_classes, train, val, test, epoch_size,
                   nb_epochs, batch_size, checkpoint_weight_path,
                   patience=5, evaluate='acc', verbose=1):
    """ Finetunes the given model using the accuracy measure.

    # Arguments:
        model: Model to be finetuned.
        nb_classes: Number of classes in the given dataset.
        train: Training data, given as a tuple of (inputs, outputs)
        val: Validation data, given as a tuple of (inputs, outputs)
        test: Testing data, given as a tuple of (inputs, outputs)
        epoch_size: Number of samples in an epoch.
        nb_epochs: Number of epochs.
        batch_size: Batch size.
        checkpoint_weight_path: Filepath where weights will be checkpointed to
            during training. This file will be rewritten by the function.
        patience: Patience for callback methods.
        evaluate: Evaluation method to use. Can be 'acc' or 'weighted_f1'.
        verbose: Verbosity flag.

    # Returns:
        Accuracy of the trained model, ONLY if 'evaluate' is set.
        NOTE(review): an unrecognized `evaluate` value silently returns None.
    """
    # Unpack args
    X_train, y_train = train
    X_val, y_val = val
    X_test, y_test = test
    # Multi-class problems need one-hot labels for categorical crossentropy.
    if nb_classes > 2:
        y_train = to_categorical(y_train)
        y_val = to_categorical(y_val)
        y_test = to_categorical(y_test)
    if verbose:
        print("Trainable weights: {}".format(model.trainable_weights))
        print("Training..")
    # Use sample generator for fixed-size epoch
    train_gen = sampling_generator(X_train, y_train,
                                   batch_size, upsample=False)
    callbacks = finetuning_callbacks(checkpoint_weight_path, patience, verbose)
    steps = int(epoch_size / batch_size)
    model.fit_generator(train_gen, steps_per_epoch=steps,
                        epochs=nb_epochs,
                        validation_data=(X_val, y_val),
                        validation_steps=steps,
                        callbacks=callbacks, verbose=(verbose >= 2))
    # Reload the best weights found to avoid overfitting
    # Wait a bit to allow proper closing of weights file
    sleep(1)
    model.load_weights(checkpoint_weight_path, by_name=False)
    if verbose >= 2:
        print("Loaded weights from {}".format(checkpoint_weight_path))
    if evaluate == 'acc':
        return evaluate_using_acc(model, X_test, y_test, batch_size=batch_size)
    elif evaluate == 'weighted_f1':
        return evaluate_using_weighted_f1(model, X_test, y_test, X_val, y_val,
                                          batch_size=batch_size)
def evaluate_using_weighted_f1(model, X_test, y_test, X_val, y_val,
                               batch_size):
    """ Evaluation function using macro weighted F1 score.

    # Arguments:
        model: Model to be evaluated.
        X_test: Inputs of the testing set.
        y_test: Outputs of the testing set.
        X_val: Inputs of the validation set.
        y_val: Outputs of the validation set.
        batch_size: Batch size.

    # Returns:
        Weighted F1 score of the given model.
    """
    y_pred_test = np.array(model.predict(X_test, batch_size=batch_size))
    y_pred_val = np.array(model.predict(X_val, batch_size=batch_size))
    # Bug fix: sklearn's f1_score only accepts 'binary', 'micro', 'macro',
    # 'weighted' or 'samples' for `average`; 'weighted_f1' is not a valid
    # option and made every call raise ValueError.
    f1_test, _ = find_f1_threshold(y_val, y_pred_val, y_test, y_pred_test,
                                   average='weighted')
    return f1_test
def evaluate_using_acc(model, X_test, y_test, batch_size):
    """ Evaluation function using accuracy.

    # Arguments:
        model: Model to be evaluated.
        X_test: Inputs of the testing set.
        y_test: Outputs of the testing set.
        batch_size: Batch size.

    # Returns:
        Accuracy of the given model.
    """
    # Keras returns (loss, accuracy) for a model compiled with
    # metrics=['accuracy']; only the accuracy is of interest here.
    scores = model.evaluate(X_test, y_test, batch_size=batch_size, verbose=0)
    return scores[1]
def chain_thaw(model, nb_classes, train, val, test, batch_size,
               loss, epoch_size, nb_epochs, checkpoint_weight_path,
               patience=5,
               initial_lr=0.001, next_lr=0.0001, seed=None,
               verbose=1, evaluate='acc'):
    """ Finetunes given model using chain-thaw and evaluates using accuracy.

    # Arguments:
        model: Model to be finetuned.
        nb_classes: Number of classes in the given dataset.
        train: Training data, given as a tuple of (inputs, outputs)
        val: Validation data, given as a tuple of (inputs, outputs)
        test: Testing data, given as a tuple of (inputs, outputs)
        batch_size: Batch size.
        loss: Loss function to be used during training.
        epoch_size: Number of samples in an epoch.
        nb_epochs: Number of epochs.
        checkpoint_weight_path: Filepath where weights will be checkpointed to
            during training. This file will be rewritten by the function.
        patience: Patience for the early-stopping callback.
        initial_lr: Initial learning rate. Will only be used for the first
            training step (i.e. the softmax layer)
        next_lr: Learning rate for every subsequent step.
        seed: Random number generator seed.
        verbose: Verbosity flag.
        evaluate: Evaluation method to use. Can be 'acc' or 'weighted_f1'.

    # Returns:
        Accuracy of the finetuned model.
        NOTE(review): an unrecognized `evaluate` value silently returns None.
    """
    # Unpack args
    X_train, y_train = train
    X_val, y_val = val
    X_test, y_test = test
    # Multi-class problems need one-hot labels for categorical crossentropy.
    if nb_classes > 2:
        y_train = to_categorical(y_train)
        y_val = to_categorical(y_val)
        y_test = to_categorical(y_test)
    if verbose:
        print('Training..')
    # Use sample generator for fixed-size epoch
    train_gen = sampling_generator(X_train, y_train, batch_size,
                                   upsample=False, seed=seed)
    callbacks = finetuning_callbacks(checkpoint_weight_path, patience, verbose)
    # Train using chain-thaw
    train_by_chain_thaw(model=model, train_gen=train_gen,
                        val_data=(X_val, y_val), loss=loss, callbacks=callbacks,
                        epoch_size=epoch_size, nb_epochs=nb_epochs,
                        checkpoint_weight_path=checkpoint_weight_path,
                        batch_size=batch_size, verbose=verbose)
    if evaluate == 'acc':
        return evaluate_using_acc(model, X_test, y_test, batch_size=batch_size)
    elif evaluate == 'weighted_f1':
        return evaluate_using_weighted_f1(model, X_test, y_test, X_val, y_val,
                                          batch_size=batch_size)
def train_by_chain_thaw(model, train_gen, val_data, loss, callbacks, epoch_size,
                        nb_epochs, checkpoint_weight_path, batch_size,
                        initial_lr=0.001, next_lr=0.0001, verbose=1):
    """ Finetunes model using the chain-thaw method.
        This is done as follows:
        1) Freeze every layer except the last (softmax) layer and train it.
        2) Freeze every layer except the first layer and train it.
        3) Freeze every layer except the second etc., until the second last layer.
        4) Unfreeze all layers and train entire model.

    # Arguments:
        model: Model to be trained.
        train_gen: Training sample generator.
        val_data: Validation data.
        loss: Loss function to be used.
        callbacks: Training callbacks to be used.
        epoch_size: Number of samples in an epoch.
        nb_epochs: Number of epochs.
        checkpoint_weight_path: Where weight checkpoints should be saved.
        batch_size: Batch size.
        initial_lr: Initial learning rate. Will only be used for the first
            training step (i.e. the softmax layer)
        next_lr: Learning rate for every subsequent step.
        verbose: Verbosity flag.
    """
    # Get trainable layers
    layers = [layer for layer in model.layers
              if len(layer.trainable_weights)]
    # Bring last layer to front
    # (the softmax output layer is trained first in chain-thaw).
    layers.insert(0, layers.pop(len(layers) - 1))
    # Add None to the end to signify finetuning all layers
    layers.append(None)
    lr = None
    # Finetune each layer one by one and finetune all of them at once
    # at the end
    for layer in layers:
        # First pass uses initial_lr; every later pass drops to next_lr.
        if lr is None:
            lr = initial_lr
        elif lr == initial_lr:
            lr = next_lr
        adam = Adam(clipnorm=1, lr=lr)
        # Freeze all except current layer
        # (layer is None on the final pass, which unfreezes everything).
        for _layer in layers:
            if _layer is not None:
                trainable = _layer == layer or layer is None
                change_trainable(_layer, trainable=trainable, verbose=False)
        # Verify we froze the right layers
        for _layer in model.layers:
            if _layer is not None and len(_layer.trainable_weights):
                assert _layer.trainable == (_layer == layer) or layer is None
        # NOTE(review): `model.cache` is not a standard Keras attribute --
        # presumably a project-specific flag toggled around compile; confirm.
        model.cache = False
        # Recompile so the new trainable flags take effect.
        model.compile(loss=loss, optimizer=adam, metrics=['accuracy'])
        model.cache = True
        if verbose:
            if layer is None:
                print('Finetuning all layers')
            else:
                print('Finetuning {}'.format(layer.name))
        steps = int(epoch_size / batch_size)
        model.fit_generator(train_gen, steps_per_epoch=steps,
                            epochs=nb_epochs, validation_data=val_data,
                            callbacks=callbacks, verbose=(verbose >= 2))
        # Reload the best weights found to avoid overfitting
        # Wait a bit to allow proper closing of weights file
        sleep(1)
        model.load_weights(checkpoint_weight_path, by_name=False)
        if verbose >= 2:
            print("Loaded weights from {}".format(checkpoint_weight_path))
| mit |
johndpope/tensorflow | tensorflow/python/estimator/inputs/pandas_io_test.py | 89 | 8340 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for pandas_io."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.estimator.inputs import pandas_io
from tensorflow.python.framework import errors
from tensorflow.python.platform import test
from tensorflow.python.training import coordinator
from tensorflow.python.training import queue_runner_impl
try:
# pylint: disable=g-import-not-at-top
import pandas as pd
HAS_PANDAS = True
except IOError:
# Pandas writes a temporary file during import. If it fails, don't use pandas.
HAS_PANDAS = False
except ImportError:
HAS_PANDAS = False
class PandasIoTest(test.TestCase):
  """Tests for `pandas_io.pandas_input_fn`: batching, epochs, shuffling
  and error handling. Every test silently no-ops when pandas is missing."""
  def makeTestDataFrame(self):
    """Return a small (DataFrame, Series) pair sharing a 100-based index."""
    index = np.arange(100, 104)
    a = np.arange(4)
    b = np.arange(32, 36)
    x = pd.DataFrame({'a': a, 'b': b}, index=index)
    y = pd.Series(np.arange(-32, -28), index=index)
    return x, y
  def callInputFnOnce(self, input_fn, session):
    """Start queue runners, fetch one batch from input_fn, then shut down."""
    results = input_fn()
    coord = coordinator.Coordinator()
    threads = queue_runner_impl.start_queue_runners(session, coord=coord)
    result_values = session.run(results)
    coord.request_stop()
    coord.join(threads)
    return result_values
  def testPandasInputFn_IndexMismatch(self):
    """x and y with mismatched indices must be rejected with ValueError."""
    if not HAS_PANDAS:
      return
    x, _ = self.makeTestDataFrame()
    y_noindex = pd.Series(np.arange(-32, -28))
    with self.assertRaises(ValueError):
      pandas_io.pandas_input_fn(
          x, y_noindex, batch_size=2, shuffle=False, num_epochs=1)
  def testPandasInputFn_NonBoolShuffle(self):
    """shuffle must be an explicit boolean; leaving it None raises."""
    if not HAS_PANDAS:
      return
    x, _ = self.makeTestDataFrame()
    y_noindex = pd.Series(np.arange(-32, -28))
    with self.assertRaisesRegexp(TypeError,
                                 'shuffle must be explicitly set as boolean'):
      # Default shuffle is None
      pandas_io.pandas_input_fn(x, y_noindex)
  def testPandasInputFn_ProducesExpectedOutputs(self):
    if not HAS_PANDAS:
      return
    with self.test_session() as session:
      x, y = self.makeTestDataFrame()
      input_fn = pandas_io.pandas_input_fn(
          x, y, batch_size=2, shuffle=False, num_epochs=1)
      features, target = self.callInputFnOnce(input_fn, session)
      self.assertAllEqual(features['a'], [0, 1])
      self.assertAllEqual(features['b'], [32, 33])
      self.assertAllEqual(target, [-32, -31])
  def testPandasInputFn_ProducesOutputsForLargeBatchAndMultipleEpochs(self):
    """A batch larger than the data spans epoch boundaries in one fetch."""
    if not HAS_PANDAS:
      return
    with self.test_session() as session:
      index = np.arange(100, 102)
      a = np.arange(2)
      b = np.arange(32, 34)
      x = pd.DataFrame({'a': a, 'b': b}, index=index)
      y = pd.Series(np.arange(-32, -30), index=index)
      input_fn = pandas_io.pandas_input_fn(
          x, y, batch_size=128, shuffle=False, num_epochs=2)
      results = input_fn()
      coord = coordinator.Coordinator()
      threads = queue_runner_impl.start_queue_runners(session, coord=coord)
      features, target = session.run(results)
      self.assertAllEqual(features['a'], [0, 1, 0, 1])
      self.assertAllEqual(features['b'], [32, 33, 32, 33])
      self.assertAllEqual(target, [-32, -31, -32, -31])
      with self.assertRaises(errors.OutOfRangeError):
        session.run(results)
      coord.request_stop()
      coord.join(threads)
  def testPandasInputFn_ProducesOutputsWhenDataSizeNotDividedByBatchSize(self):
    """The final partial batch (5 rows, batch 2) is still produced."""
    if not HAS_PANDAS:
      return
    with self.test_session() as session:
      index = np.arange(100, 105)
      a = np.arange(5)
      b = np.arange(32, 37)
      x = pd.DataFrame({'a': a, 'b': b}, index=index)
      y = pd.Series(np.arange(-32, -27), index=index)
      input_fn = pandas_io.pandas_input_fn(
          x, y, batch_size=2, shuffle=False, num_epochs=1)
      results = input_fn()
      coord = coordinator.Coordinator()
      threads = queue_runner_impl.start_queue_runners(session, coord=coord)
      features, target = session.run(results)
      self.assertAllEqual(features['a'], [0, 1])
      self.assertAllEqual(features['b'], [32, 33])
      self.assertAllEqual(target, [-32, -31])
      features, target = session.run(results)
      self.assertAllEqual(features['a'], [2, 3])
      self.assertAllEqual(features['b'], [34, 35])
      self.assertAllEqual(target, [-30, -29])
      features, target = session.run(results)
      self.assertAllEqual(features['a'], [4])
      self.assertAllEqual(features['b'], [36])
      self.assertAllEqual(target, [-28])
      with self.assertRaises(errors.OutOfRangeError):
        session.run(results)
      coord.request_stop()
      coord.join(threads)
  def testPandasInputFn_OnlyX(self):
    """y=None yields features only (no target tensor)."""
    if not HAS_PANDAS:
      return
    with self.test_session() as session:
      x, _ = self.makeTestDataFrame()
      input_fn = pandas_io.pandas_input_fn(
          x, y=None, batch_size=2, shuffle=False, num_epochs=1)
      features = self.callInputFnOnce(input_fn, session)
      self.assertAllEqual(features['a'], [0, 1])
      self.assertAllEqual(features['b'], [32, 33])
  def testPandasInputFn_ExcludesIndex(self):
    """The DataFrame index must not appear as a feature column."""
    if not HAS_PANDAS:
      return
    with self.test_session() as session:
      x, y = self.makeTestDataFrame()
      input_fn = pandas_io.pandas_input_fn(
          x, y, batch_size=2, shuffle=False, num_epochs=1)
      features, _ = self.callInputFnOnce(input_fn, session)
      self.assertFalse('index' in features)
  def assertInputsCallableNTimes(self, input_fn, session, n):
    """Assert input_fn can be run exactly n times before OutOfRangeError."""
    inputs = input_fn()
    coord = coordinator.Coordinator()
    threads = queue_runner_impl.start_queue_runners(session, coord=coord)
    for _ in range(n):
      session.run(inputs)
    with self.assertRaises(errors.OutOfRangeError):
      session.run(inputs)
    coord.request_stop()
    coord.join(threads)
  def testPandasInputFn_RespectsEpoch_NoShuffle(self):
    if not HAS_PANDAS:
      return
    with self.test_session() as session:
      x, y = self.makeTestDataFrame()
      input_fn = pandas_io.pandas_input_fn(
          x, y, batch_size=4, shuffle=False, num_epochs=1)
      self.assertInputsCallableNTimes(input_fn, session, 1)
  def testPandasInputFn_RespectsEpoch_WithShuffle(self):
    if not HAS_PANDAS:
      return
    with self.test_session() as session:
      x, y = self.makeTestDataFrame()
      input_fn = pandas_io.pandas_input_fn(
          x, y, batch_size=4, shuffle=True, num_epochs=1)
      self.assertInputsCallableNTimes(input_fn, session, 1)
  def testPandasInputFn_RespectsEpoch_WithShuffleAutosize(self):
    if not HAS_PANDAS:
      return
    with self.test_session() as session:
      x, y = self.makeTestDataFrame()
      input_fn = pandas_io.pandas_input_fn(
          x, y, batch_size=2, shuffle=True, queue_capacity=None, num_epochs=2)
      self.assertInputsCallableNTimes(input_fn, session, 4)
  def testPandasInputFn_RespectsEpochUnevenBatches(self):
    if not HAS_PANDAS:
      return
    x, y = self.makeTestDataFrame()
    with self.test_session() as session:
      input_fn = pandas_io.pandas_input_fn(
          x, y, batch_size=3, shuffle=False, num_epochs=1)
      # Before the last batch, only one element of the epoch should remain.
      self.assertInputsCallableNTimes(input_fn, session, 2)
  def testPandasInputFn_Idempotent(self):
    """Building and invoking the same input_fn repeatedly must not error."""
    if not HAS_PANDAS:
      return
    x, y = self.makeTestDataFrame()
    for _ in range(2):
      pandas_io.pandas_input_fn(
          x, y, batch_size=2, shuffle=False, num_epochs=1)()
    for _ in range(2):
      pandas_io.pandas_input_fn(
          x, y, batch_size=2, shuffle=True, num_epochs=1)()
# Standard TensorFlow test entry point.
if __name__ == '__main__':
  test.main()
| apache-2.0 |
aaichsmn/tacc_stats | setup.py | 1 | 18706 | #!/usr/bin/env python
"""
Parts of this file were taken from the pyzmq project
(https://github.com/zeromq/pyzmq) which have been permitted for use under the
BSD license. Parts are from lxml (https://github.com/lxml/lxml)
"""
import os
import sys
import shutil
import warnings
import re
import ConfigParser
import multiprocessing
from setuptools import setup, Command, find_packages
from setuptools.command.build_ext import build_ext
setuptools_kwargs = {}
from distutils.extension import Extension
from distutils.command.build import build
from distutils.command.bdist_rpm import bdist_rpm
from distutils.command.sdist import sdist
from os.path import join as pjoin
DESCRIPTION = ("A job-level performance monitoring and analysis package for \
High Performance Computing Platforms")
LONG_DESCRIPTION = """
TACC Stats unifies and extends the measurements taken by Linux monitoring utilities such as systat/SAR, iostat, etc.~and resolves measurements by job and hardware device so that individual job/applications can be analyzed separately. It also provides a set of analysis and reporting tools which analyze TACC Stats resource use data and report jobs/applications with low resource use efficiency. TACC Stats initializes at the beginning of a job and collects data at specified intervals during job execution and at the end of a job. When executed at the default interval (every 10 minutes), the overhead is less than 0.1\%. This low overhead enables TACC Stats to be active on all nodes at all times. This data can then be used to generate analyses and reports such as average cycles per instruction (CPI), average and peak memory use, average and peak memory bandwidth use, and more on each job and over arbitrary sets of jobs. These reports enable systematic identification of jobs or application codes which could benefit from architectural adaptation and performance tuning or catch user mistakes such as allocating multiple nodes to a single-node shared-memory parallelized application.
"""
DISTNAME = 'tacc_stats'
LICENSE = 'LGPL'
AUTHOR = "Texas Advanced Computing Center"
EMAIL = "rtevans@tacc.utexas.edu"
URL = "http://www.tacc.utexas.edu"
DOWNLOAD_URL = 'https://github.com/TACC/tacc_stats'
CLASSIFIERS = [
'Development Status :: 1 - Beta',
'Environment :: Console',
'Operating System :: Linux',
'Intended Audience :: Science/Research',
'Programming Language :: Python',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3.2',
'Programming Language :: Python :: 3.3',
'Topic :: Scientific/Engineering',
]
MAJOR = 2
MINOR = 1
MICRO = 1
ISRELEASED = True
VERSION = '%d.%d.%d' % (MAJOR, MINOR, MICRO)
QUALIFIER = ''
FULLVERSION = VERSION
write_version = True
# For development (unreleased) builds, derive FULLVERSION from `git describe`.
# Falls back to an existing tacc_stats/version.py, or to the generic version
# string, when git or the repository metadata is unavailable.
if not ISRELEASED:
    import subprocess
    FULLVERSION += '.dev'
    pipe = None
    # Try plain `git` first, then the Windows wrapper `git.cmd`.
    for cmd in ['git','git.cmd']:
        try:
            pipe = subprocess.Popen([cmd, "describe", "--always", "--match", "v[0-9]*"],
                                    stdout=subprocess.PIPE)
            (so,serr) = pipe.communicate()
            if pipe.returncode == 0:
                break
        except:
            pass
    if pipe is None or pipe.returncode != 0:
        # no git, or not in git dir
        if os.path.exists('tacc_stats/version.py'):
            warnings.warn("WARNING: Couldn't get git revision, using existing tacc_stats/version.py")
            write_version = False
        else:
            warnings.warn("WARNING: Couldn't get git revision, using generic version string")
    else:
        # have git, in git dir, but may have used a shallow clone (travis does this)
        rev = so.strip()
        # makes distutils blow up on Python 2.7
        if sys.version_info[0] >= 3:
            rev = rev.decode('ascii')
        if not rev.startswith('v') and re.match("[a-zA-Z0-9]{7,9}",rev):
            # partial clone, manually construct version string
            # this is the format before we started using git-describe
            # to get an ordering on dev version strings.
            rev ="v%s.dev-%s" % (VERSION, rev)
        # Strip leading v from tags format "vx.y.z" to get the version string
        FULLVERSION = rev.lstrip('v')
else:
    FULLVERSION += QUALIFIER
def write_version_py(filename=None):
    """Write a version.py exposing the full and short version strings.

    Uses the module-level FULLVERSION and VERSION values. *filename*
    defaults to tacc_stats/version.py next to this setup script.
    """
    template = "version = '%s'\nshort_version = '%s'\n"
    if not filename:
        filename = os.path.join(
            os.path.dirname(__file__), 'tacc_stats', 'version.py')
    fh = open(filename, 'w')
    try:
        fh.write(template % (FULLVERSION, VERSION))
    finally:
        fh.close()
# Regenerate version.py unless a git-less checkout already provided one.
if write_version:
    write_version_py()
def read_site_cfg():
    """Read build configuration in two stages.

    setup.cfg supplies the [OPTIONS] section (including which site cfg to
    use); cfg/<site_cfg> supplies [PATHS] and [TYPES].

    Returns (paths, types, options) as plain dicts.
    """
    main_cfg = ConfigParser.ConfigParser()
    main_cfg.read(os.path.abspath('setup.cfg'))
    options = dict(main_cfg.items('OPTIONS'))

    site = ConfigParser.ConfigParser()
    site.read(os.path.abspath(os.path.join('cfg', options['site_cfg'])))
    return dict(site.items('PATHS')), dict(site.items('TYPES')), options
def write_stats_x(types):
filename = os.path.join(
os.path.dirname(__file__), 'tacc_stats','src','monitor', 'stats.x')
a = open(filename, 'w')
import operator
try:
for t,val in sorted(types.iteritems(), key=operator.itemgetter(0)):
if val == 'True':
print '>>>>>>>>>>>>>>>>>>>>>>',t,val
a.write('X('+t+') ')
finally:
a.write('\n')
a.close()
def write_cfg_file(paths):
    """Write tacc_stats/cfg.py exposing each configured path plus seek = 0."""
    target = pjoin(os.path.dirname(__file__), 'tacc_stats',
                   'cfg.py')
    out = open(target, 'w')
    try:
        for key, value in paths.iteritems():
            out.write(key + " = " + "\"" + value + "\"" + "\n")
        out.write("seek = 0\n")
    finally:
        out.close()
def cfg_sh(filename_in, paths):
    """Instantiate a *.in shell template.

    Every occurrence of a path name is replaced with its configured value,
    and the result (filename minus the .in suffix) is written into the
    tacc_stats package directory.
    """
    text = open(filename_in, 'r').read()
    for key, value in paths.iteritems():
        text = text.replace(key, value)
    out_name = pjoin(os.path.dirname(__file__), 'tacc_stats',
                     os.path.basename(filename_in.split('.in')[0]))
    out = open(out_name, 'w')
    out.write(text)
    out.close()
class CleanCommand(Command):
    """Custom distutils command removing build artifacts (.pyc/.so/.o/etc.)."""
    user_options = [("all", "a", "")]

    def initialize_options(self):
        self.all = True
        self._clean_me = []
        self._clean_trees = []
        self._clean_exclude = []
        artifact_suffixes = ('.pyc', '.so', '.o', '.pyo', '.x', '.pyd')
        # Collect generated files and __pycache__ dirs under the package.
        for root, dirs, files in os.walk('tacc_stats'):
            for fname in files:
                if fname in self._clean_exclude:
                    continue
                if os.path.splitext(fname)[-1] in artifact_suffixes:
                    self._clean_me.append(pjoin(root, fname))
            for dname in dirs:
                if dname == '__pycache__':
                    self._clean_trees.append(pjoin(root, dname))
        # Egg metadata plus the build/dist trees at the repo root.
        for entry in os.listdir(os.getcwd()):
            if '.egg' in entry:
                self._clean_trees.append(entry)
        for entry in ('build', 'dist'):
            if os.path.exists(entry):
                self._clean_trees.append(entry)

    def finalize_options(self):
        pass

    def run(self):
        # Best-effort removal: ignore anything that cannot be deleted.
        for path in self._clean_me:
            try:
                os.unlink(path)
            except Exception:
                pass
        for tree in self._clean_trees:
            try:
                shutil.rmtree(tree)
            except Exception:
                pass
# Read the site configuration and generate the derived source/config files.
paths,types,options = read_site_cfg()
write_stats_x(types)
write_cfg_file(paths)
# Core monitor sources; per-device sources are appended below.
root='tacc_stats/src/monitor/'
sources=[
    pjoin(root,'schema.c'),pjoin(root,'dict.c'),pjoin(root,'collect.c'),
    pjoin(root,"pci_busid_map.c"),
    pjoin(root,'stats_file.c'),pjoin(root,'stats_buffer.c'),pjoin(root,'stats.c')
    ]
# RabbitMQ transport is optional; enabled via the 'rmq' option.
RMQ = False
if options['rmq'] == 'True':
    RMQ = True
if RMQ: sources.append(pjoin(root,'amqp_listen.c'))
# MODE selects the entry point: long-running daemon vs cron-driven binary.
MODE = options['mode']
if MODE == "DAEMON":
    print "Building a monitoring daemon."
    sources.append(pjoin(root,'monitor.c'))
elif MODE == "CRON":
    print "Building an executable to be called by cron."
    sources.append(pjoin(root,'main.c'))
else:
    print "BUILD ERROR: Set mode to either DAEMON or CRON"
SERVER = options['server']
FREQUENCY = options['frequency']
# Add one .c file per device type enabled in the [TYPES] section.
for root,dirs,files in os.walk('tacc_stats/src/monitor/'):
    for f in files:
        name,ext = os.path.splitext(f)
        if ext == '.c' and name in types.keys():
            if types[name] == 'True':
                sources.append(pjoin(root,f))
include_dirs = []
library_dirs = []
libraries = []
# InfiniBand counters need the OFED headers and libibmad.
if types['ib'] == 'True' or types['ib_sw'] == 'True' or types['ib_ext'] == 'True':
    include_dirs=['/opt/ofed/include']
    library_dirs=['/opt/ofed/lib64']
    libraries=['ibmad']
# Xeon Phi (MIC) counters need the SCIF and micmgmt libraries.
if types['mic'] == 'True':
    library_dirs += ['/usr/lib64']
    libraries += ['scif', 'micmgmt']
# Any Lustre-related type pulls in the shared OBD-to-mount helper.
if types['llite'] == 'True' or types ['lnet'] == 'True' or \
   types['mdc'] == 'True' or types['osc'] == 'True':
    sources.append('tacc_stats/src/monitor/lustre_obd_to_mnt.c')
# Compile-time configuration baked into the monitor binary.
define_macros=[('STATS_DIR_PATH','\"'+paths['stats_dir']+'\"'),
               ('STATS_VERSION','\"'+VERSION+'\"'),
               ('STATS_PROGRAM','\"tacc_stats\"'),
               ('STATS_LOCK_PATH','\"'+paths['stats_lock']+'\"'),
               ('JOBID_FILE_PATH','\"'+paths['jobid_file']+'\"'),
               ('HOST_NAME_EXT','\"'+paths['host_name_ext']+'\"'),
               ('FREQUENCY',FREQUENCY)]
if RMQ:
    define_macros.append(('RMQ',True))
    libraries.append("rabbitmq")
flags = ['-D_GNU_SOURCE', '-Wp,-U_FORTIFY_SOURCE',
         '-O3', '-Wall', '-g', '-UDEBUG']
# Keyword arguments for the Extension built by MyBuildExt below.
ext_data=dict(sources=sources,
              include_dirs=['tacc_stats/src/monitor/'] + include_dirs,
              library_dirs=library_dirs,
              runtime_library_dirs=library_dirs,
              libraries=libraries,
              extra_compile_args = flags,
              define_macros=define_macros
              )
extensions = []
cmd = {}
class MyBDist_RPM(bdist_rpm):
    """bdist_rpm variant that writes the RPM spec scriptlets itself.

    Generates %prep/%build/%install/%post/%preun/%postun scripts under
    build/, tailored to the selected MODE (DAEMON vs CRON) and transport
    (RMQ). Also works around a Python 2 distutils bug where RPMs with C
    extensions fail to build unless get_python_version is a builtin.
    """
    def run(self):
        # Just a Python distutils bug fix.
        # Very frustrating, rpms cannot build with extensions
        # without this hack.
        try:
            from distutils.sysconfig import get_python_version
            import __builtin__
            __builtin__.get_python_version = get_python_version
        except:
            # Supposedly corrected in Python3 where __builtin__ -> builtin
            pass
        bdist_rpm.run(self)

    # Make the spec file my way!
    def initialize_options(self):
        bdist_rpm.initialize_options(self)
        try: os.stat('build')
        except: os.mkdir('build')

        # %prep: common macros plus mode/transport specific defines.
        self.prep_script = "build/bdist_rpm_prep"
        prep = """
%define _bindir /opt/%{name}
%define crontab_file /etc/cron.d/%{name}
"""
        if RMQ:
            prep += "%define server " + "-s "+SERVER
        else:
            prep += "%define server \"\""
        if MODE == "DAEMON":
            prep += "\n%define pidfile " + paths['stats_lock']
        if MODE == "CRON":
            prep += """
%define stats_dir /var/log/%{name}
%define archive_dir /scratch/projects/%{name}/archive
"""
        prep += """
%setup -n %{name}-%{unmangled_version}
"""
        open(self.prep_script,"w").write(prep)

        # %build: compile the extension and helper binaries.
        self.build_script = "build/bdist_rpm_build"
        build_cmds = """
rm -rf %{buildroot}
python setup.py build_ext
"""
        open(self.build_script,"w").write(build_cmds)

        # %install: stage the monitor binary plus mode-specific helpers.
        self.install_script = "build/bdist_rpm_install"
        install_cmds = """
install -m 0755 -d %{buildroot}/%{_bindir}
install -m 6755 build/bin/monitor %{buildroot}/%{_bindir}/%{name}
echo %{_bindir}/%{name} >> %{_builddir}/%{name}-%{unmangled_version}/INSTALLED_FILES
"""
        if MODE == "CRON":
            install_cmds += """
install -m 0755 tacc_stats/archive.sh %{buildroot}/%{_bindir}/archive
echo %{_bindir}/archive >> %{_builddir}/%{name}-%{unmangled_version}/INSTALLED_FILES
"""
        if MODE == "DAEMON":
            install_cmds += """
install -m 0755 tacc_stats/taccstats %{buildroot}/%{_bindir}/taccstats
echo %{_bindir}/taccstats >> %{_builddir}/%{name}-%{unmangled_version}/INSTALLED_FILES
"""
        open(self.install_script,"w").write(install_cmds)
        self.clean_script = None
        self.verify_script = None

        # %post: install crontab entries (CRON) or an init script (DAEMON).
        self.post_install = "build/bdist_rpm_postinstall"
        if MODE == "CRON":
            post_install_cmds = """
(
archive_min=$(( ((RANDOM * 60) / 32768) %% 60 ))
archive_hour=$(( (RANDOM %% 2) + 2 ))
echo \"MAILTO=\\"\\"\"
echo \"*/10 * * * * root %{_bindir}/%{name} collect %{server}\"
echo \"55 23 * * * root %{_bindir}/%{name} rotate %{server}\"
"""
            self.pre_uninstall = "build/bdist_rpm_preuninstall"
            open(self.pre_uninstall,"w").write("""
if [ $1 == 0 ]; then
rm %{crontab_file} || :
fi
""")
            if not RMQ:
                post_install_cmds += """
echo \"${archive_min} ${archive_hour} * * * root %{_bindir}/archive %{stats_dir} %{archive_dir}\"
"""
            post_install_cmds += """
) > %{crontab_file}
/sbin/service crond restart || :
%{_bindir}/%{name} rotate %{server}
"""
        if MODE == "DAEMON":
            post_install_cmds = """
cp %{_bindir}/taccstats /etc/init.d/
chkconfig --add taccstats
/sbin/service taccstats restart
"""
        open(self.post_install,"w").write(post_install_cmds)
        self.pre_install = None

        # %postun: in DAEMON mode, stop and deregister the service.
        # BUG FIX: previously the DAEMON commands were unconditionally
        # overwritten with an empty script, so the daemon was never
        # stopped or removed from chkconfig on uninstall.
        self.post_uninstall = "build/bdist_rpm_post_uninstall"
        if MODE == "DAEMON":
            post_uninstall_cmds = """
/sbin/service taccstats stop
chkconfig --del taccstats
rm /etc/init.d/taccstats
"""
        else:
            post_uninstall_cmds = """ """
            #rm -rf %{_bindir}
            #"""
        open(self.post_uninstall,"w").write(post_uninstall_cmds)
# Make executable
# C extensions
class MyBuildExt(build_ext):
    """build_ext that also links standalone executables from the same
    object files: build/bin/monitor and, when RMQ is enabled,
    build/bin/amqp_listend, in addition to the normal extension module."""
    def build_extension(self,ext):
        sources = ext.sources
        sources = list(sources)
        ext_path = self.get_ext_fullpath(ext.name)
        depends = sources + ext.depends
        extra_args = ext.extra_compile_args or []
        macros = ext.define_macros[:]
        for undef in ext.undef_macros:
            macros.append((undef,))
        # Compile all sources into build_temp once; the objects are reused
        # for every link below.
        objects = self.compiler.compile(sources,
                                        output_dir=self.build_temp,
                                        macros=macros,
                                        include_dirs=ext.include_dirs,
                                        extra_postargs=extra_args,
                                        depends=ext.depends)
        self._built_objects = objects[:]
        if ext.extra_objects:
            objects.extend(ext.extra_objects)
        extra_args = ext.extra_link_args or []
        language = ext.language or self.compiler.detect_language(sources)
        if RMQ:
            # Link the AMQP listener daemon from its single object file...
            self.compiler.link_executable([pjoin(self.build_temp,
                                          'tacc_stats','src','monitor',
                                          'amqp_listen.o')],
                                          'build/bin/amqp_listend',
                                          libraries=ext.libraries,
                                          library_dirs=ext.library_dirs,
                                          runtime_library_dirs=ext.runtime_library_dirs,
                                          extra_postargs=extra_args,
                                          target_lang=language)
            # ...and keep that object out of the monitor link below.
            objects.remove(pjoin(self.build_temp,'tacc_stats','src',
                                 'monitor','amqp_listen.o'))
        self.compiler.link_executable(objects,
                                      'build/bin/monitor',
                                      libraries=ext.libraries,
                                      library_dirs=ext.library_dirs,
                                      runtime_library_dirs=ext.runtime_library_dirs,
                                      extra_postargs=extra_args,
                                      target_lang=language)
        # Finally produce the normal importable extension module.
        self.compiler.link_shared_object(objects,
                                         ext_path,
                                         libraries=ext.libraries,
                                         library_dirs=ext.library_dirs,
                                         runtime_library_dirs=ext.runtime_library_dirs,
                                         extra_postargs=extra_args,
                                         debug=self.debug,
                                         build_temp=self.build_temp,
                                         target_lang=language)
extensions.append(Extension('tacc_stats.monitor', **ext_data))

# Instantiate the shell-script templates required by the chosen mode.
if not RMQ:
    cfg_sh(pjoin(os.path.dirname(__file__), 'tacc_stats',
                 'src','monitor','archive.sh.in'),paths)
if MODE == "DAEMON":
    cfg_sh(pjoin(os.path.dirname(__file__), 'tacc_stats',
                 'src','monitor','taccstats.in'),dict(paths.items()+options.items()))

# Scripts installed alongside the package.
scripts=['build/bin/monitor',
         'tacc_stats/analysis/job_sweeper.py',
         'tacc_stats/analysis/job_plotter.py',
         'tacc_stats/site/machine/update_db.py',
         'tacc_stats/site/machine/update_thresholds.py',
         'tacc_stats/site/machine/thresholds.cfg',
         'tacc_stats/pickler/job_pickles.py']
if RMQ: scripts += ['build/bin/amqp_listend']
if MODE == "CRON":
    scripts += ['tacc_stats/archive.sh']
    # BUG FIX: a stray trailing comma previously turned this dict into a
    # one-element tuple.
    package_data = {'' : ['*.sh.in'] }
else:
    DISTNAME += "d"
    scripts += ['tacc_stats/taccstats']
# Hand everything assembled above to setuptools.
setup(name=DISTNAME,
      version=FULLVERSION,
      maintainer=AUTHOR,
      package_dir={'':'.'},
      packages=find_packages(),
      package_data = {'' : ['*.in','*.cfg','*.html','*.png','*.jpg','*.h'] },
      scripts=scripts,
      ext_modules=extensions,
      setup_requires=['nose'],
      install_requires=['argparse','numpy','matplotlib','scipy'],
      test_suite = 'nose.collector',
      maintainer_email=EMAIL,
      description=DESCRIPTION,
      zip_safe=False,
      license=LICENSE,
      cmdclass={'build_ext' : MyBuildExt,
                'clean' : CleanCommand,
                'bdist_rpm' : MyBDist_RPM},
      url=URL,
      download_url=DOWNLOAD_URL,
      long_description=LONG_DESCRIPTION,
      classifiers=CLASSIFIERS,
      platforms='any',
      **setuptools_kwargs)
# Post-install sanity report: warn about configured paths that don't exist.
for name,path in paths.iteritems():
    if os.path.exists(path): print ">>>", path, 'exists'
    else: print ">>>", path, 'does not exist'
| lgpl-2.1 |
jcmgray/xarray | xarray/core/computation.py | 1 | 42875 | """
Functions for applying functions that act on arrays to xarray's labeled data.
"""
from __future__ import absolute_import, division, print_function
from distutils.version import LooseVersion
import functools
import itertools
import operator
from collections import Counter
import numpy as np
from . import duck_array_ops, utils, dtypes
from .alignment import deep_align
from .merge import expand_and_merge_variables
from .pycompat import OrderedDict, dask_array_type, basestring
from .utils import is_dict_like
# Shared default for `exclude_dims` arguments (avoids re-allocating).
_DEFAULT_FROZEN_SET = frozenset()
# Sentinel distinguishing "no fill value supplied" from an explicit None.
_NO_FILL_VALUE = utils.ReprObject('<no-fill-value>')
# Sentinel used by result_name() for objects lacking a `name` attribute.
_DEFAULT_NAME = utils.ReprObject('<default-name>')
# Join methods that never introduce missing values, so need no fill_value.
_JOINS_WITHOUT_FILL_VALUES = frozenset({'inner', 'exact'})
class _UFuncSignature(object):
"""Core dimensions signature for a given function.
Based on the signature provided by generalized ufuncs in NumPy.
Attributes
----------
input_core_dims : tuple[tuple]
Core dimension names on each input variable.
output_core_dims : tuple[tuple]
Core dimension names on each output variable.
"""
def __init__(self, input_core_dims, output_core_dims=((),)):
self.input_core_dims = tuple(tuple(a) for a in input_core_dims)
self.output_core_dims = tuple(tuple(a) for a in output_core_dims)
self._all_input_core_dims = None
self._all_output_core_dims = None
self._all_core_dims = None
@property
def all_input_core_dims(self):
if self._all_input_core_dims is None:
self._all_input_core_dims = frozenset(
dim for dims in self.input_core_dims for dim in dims)
return self._all_input_core_dims
@property
def all_output_core_dims(self):
if self._all_output_core_dims is None:
self._all_output_core_dims = frozenset(
dim for dims in self.output_core_dims for dim in dims)
return self._all_output_core_dims
@property
def all_core_dims(self):
if self._all_core_dims is None:
self._all_core_dims = (self.all_input_core_dims |
self.all_output_core_dims)
return self._all_core_dims
@property
def num_inputs(self):
return len(self.input_core_dims)
@property
def num_outputs(self):
return len(self.output_core_dims)
def __eq__(self, other):
try:
return (self.input_core_dims == other.input_core_dims and
self.output_core_dims == other.output_core_dims)
except AttributeError:
return False
def __ne__(self, other):
return not self == other
def __repr__(self):
return ('%s(%r, %r)'
% (type(self).__name__,
list(self.input_core_dims),
list(self.output_core_dims)))
def __str__(self):
lhs = ','.join('({})'.format(','.join(dims))
for dims in self.input_core_dims)
rhs = ','.join('({})'.format(','.join(dims))
for dims in self.output_core_dims)
return '{}->{}'.format(lhs, rhs)
def to_gufunc_string(self):
"""Create an equivalent signature string for a NumPy gufunc.
Unlike __str__, handles dimensions that don't map to Python
identifiers.
"""
all_dims = self.all_core_dims
dims_map = dict(zip(sorted(all_dims), range(len(all_dims))))
input_core_dims = [['dim%d' % dims_map[dim] for dim in core_dims]
for core_dims in self.input_core_dims]
output_core_dims = [['dim%d' % dims_map[dim] for dim in core_dims]
for core_dims in self.output_core_dims]
alt_signature = type(self)(input_core_dims, output_core_dims)
return str(alt_signature)
def result_name(objects):
    # type: List[object] -> Any
    """Pick an output name following pandas' heuristic: if every input
    that has a name agrees on it, use that name; otherwise None."""
    # https://github.com/blaze/blaze/issues/458#issuecomment-51936356
    candidates = {getattr(obj, 'name', _DEFAULT_NAME) for obj in objects}
    candidates.discard(_DEFAULT_NAME)
    if len(candidates) == 1:
        (name,) = candidates
    else:
        name = None
    return name
def _get_coord_variables(args):
input_coords = []
for arg in args:
try:
coords = arg.coords
except AttributeError:
pass # skip this argument
else:
coord_vars = getattr(coords, 'variables', coords)
input_coords.append(coord_vars)
return input_coords
def build_output_coords(args, signature, exclude_dims=frozenset()):
    # type: (list, _UFuncSignature, set) -> List[OrderedDict[Any, Variable]]
    """Build output coordinates for an operation.

    Parameters
    ----------
    args : list
        Raw operation arguments; any valid xarray operation input is OK
        (scalars, Variable, DataArray, Dataset).
    signature : _UFuncSignature
        Core dimensions signature for the operation.
    exclude_dims : set, optional
        Dimensions excluded from the operation; coordinates along these
        dimensions are dropped.

    Returns
    -------
    One OrderedDict of merged coordinate Variables per output.
    """
    input_coords = _get_coord_variables(args)

    if exclude_dims:
        # drop any coordinate touching an excluded dimension
        input_coords = [
            OrderedDict((name, var) for name, var in coord_vars.items()
                        if exclude_dims.isdisjoint(var.dims))
            for coord_vars in input_coords]

    if len(input_coords) == 1:
        # single input: skip the expensive merge
        merged = OrderedDict(input_coords[0])
    else:
        merged = expand_and_merge_variables(input_coords)

    output_coords = []
    for output_dims in signature.output_core_dims:
        dropped_dims = signature.all_input_core_dims - set(output_dims)
        if dropped_dims:
            filtered = OrderedDict(
                (name, var) for name, var in merged.items()
                if dropped_dims.isdisjoint(var.dims))
        else:
            filtered = merged
        output_coords.append(filtered)
    return output_coords
def apply_dataarray_ufunc(func, *args, **kwargs):
    """apply_dataarray_ufunc(func, *args, signature, join='inner',
    exclude_dims=frozenset())

    Apply `func` over the variables of DataArray arguments, aligning
    them first and rebuilding coordinates and names on the result.
    """
    from .dataarray import DataArray

    signature = kwargs.pop('signature')
    join = kwargs.pop('join', 'inner')
    exclude_dims = kwargs.pop('exclude_dims', _DEFAULT_FROZEN_SET)
    if kwargs:
        raise TypeError('apply_dataarray_ufunc() got unexpected keyword '
                        'arguments: %s' % list(kwargs))

    if len(args) > 1:
        args = deep_align(args, join=join, copy=False, exclude=exclude_dims,
                          raise_on_invalid=False)

    name = result_name(args)
    result_coords = build_output_coords(args, signature, exclude_dims)

    data_vars = [getattr(a, 'variable', a) for a in args]
    result_var = func(*data_vars)

    if signature.num_outputs > 1:
        return tuple(
            DataArray(variable, coords, name=name, fastpath=True)
            for variable, coords in zip(result_var, result_coords))
    (coords,) = result_coords
    return DataArray(result_var, coords, name=name, fastpath=True)
def ordered_set_union(all_keys):
    # type: List[Iterable] -> Iterable
    """Union of all key iterables, preserving first-seen order."""
    seen = OrderedDict()
    for keys in all_keys:
        for key in keys:
            seen[key] = None
    return seen.keys()
def ordered_set_intersection(all_keys):
    # type: List[Iterable] -> Iterable
    """Intersection of all key iterables, ordered like the first one."""
    common = set(all_keys[0])
    for keys in all_keys[1:]:
        common.intersection_update(keys)
    return [key for key in all_keys[0] if key in common]
def assert_and_return_exact_match(all_keys):
    """Require every key collection to equal the first one; return it.

    Raises ValueError on the first mismatch.
    """
    reference = all_keys[0]
    for keys in all_keys[1:]:
        if keys != reference:
            raise ValueError(
                'exact match required for all data variable names, '
                'but %r != %r' % (keys, reference))
    return reference
# Maps each ``join``/``dataset_join`` option name to a function reducing a
# list of key collections to the single collection of result keys.
_JOINERS = {
    'inner': ordered_set_intersection,
    'outer': ordered_set_union,
    'left': operator.itemgetter(0),
    'right': operator.itemgetter(-1),
    'exact': assert_and_return_exact_match,
}
def join_dict_keys(objects, how='inner'):
    # type: (Iterable[Union[Mapping, Any]], str) -> Iterable
    """Combine the keys of all mapping-like *objects* with join rule *how*;
    non-mappings (no .keys) are ignored."""
    key_collections = [obj.keys() for obj in objects if hasattr(obj, 'keys')]
    return _JOINERS[how](key_collections)
def collect_dict_values(objects, keys, fill_value=None):
    # type: (Iterable[Union[Mapping, Any]], Iterable, Any) -> List[list]
    """For each key, gather one value per object: mappings are indexed
    (with fill_value as the default); non-mappings contribute themselves."""
    collected = []
    for key in keys:
        row = []
        for obj in objects:
            if is_dict_like(obj):
                row.append(obj.get(key, fill_value))
            else:
                row.append(obj)
        collected.append(row)
    return collected
def _as_variables_or_variable(arg):
try:
return arg.variables
except AttributeError:
try:
return arg.variable
except AttributeError:
return arg
def _unpack_dict_tuples(result_vars, num_outputs):
    # type: (Mapping[Any, Tuple[Variable]], int) -> Tuple[Dict[Any, Variable]]
    """Transpose a mapping of name -> tuple-of-values into a tuple of
    *num_outputs* mappings of name -> value."""
    unpacked = tuple(OrderedDict() for _ in range(num_outputs))
    for name, values in result_vars.items():
        for target, value in zip(unpacked, values):
            target[name] = value
    return unpacked
def apply_dict_of_variables_ufunc(func, *args, **kwargs):
    """apply_dict_of_variables_ufunc(func, *args, signature, join='inner',
    fill_value=None):

    Apply `func` key-wise over mapping arguments (e.g. Dataset.data_vars).
    """
    signature = kwargs.pop('signature')
    join = kwargs.pop('join', 'inner')
    fill_value = kwargs.pop('fill_value', None)
    if kwargs:
        raise TypeError('apply_dict_of_variables_ufunc() got unexpected '
                        'keyword arguments: %s' % list(kwargs))

    dict_args = [_as_variables_or_variable(arg) for arg in args]
    names = join_dict_keys(dict_args, how=join)
    grouped_by_name = collect_dict_values(dict_args, names, fill_value)

    result_vars = OrderedDict(
        (name, func(*variable_args))
        for name, variable_args in zip(names, grouped_by_name))

    if signature.num_outputs > 1:
        return _unpack_dict_tuples(result_vars, signature.num_outputs)
    return result_vars
def _fast_dataset(variables, coord_variables):
    # type: (OrderedDict[Any, Variable], Mapping[Any, Variable]) -> Dataset
    """Create a Dataset with minimal overhead.

    Beware: mutates the *variables* OrderedDict in place.
    """
    from .dataset import Dataset

    variables.update(coord_variables)
    return Dataset._from_vars_and_coord_names(variables,
                                              set(coord_variables))
def apply_dataset_ufunc(func, *args, **kwargs):
    """apply_dataset_ufunc(func, *args, signature, join='inner',
    dataset_join='inner', fill_value=None,
    exclude_dims=frozenset(), keep_attrs=False):

    Apply `func` over the data variables of Dataset arguments.

    If dataset_join != 'inner', a non-default fill_value must be supplied
    by the user. Otherwise a TypeError is raised.
    """
    from .dataset import Dataset
    signature = kwargs.pop('signature')
    join = kwargs.pop('join', 'inner')
    dataset_join = kwargs.pop('dataset_join', 'inner')
    fill_value = kwargs.pop('fill_value', None)
    exclude_dims = kwargs.pop('exclude_dims', _DEFAULT_FROZEN_SET)
    keep_attrs = kwargs.pop('keep_attrs', False)
    first_obj = args[0]  # we'll copy attrs from this in case keep_attrs=True
    # NOTE(review): fill_value defaults to None here, so this sentinel check
    # only fires when a caller passes _NO_FILL_VALUE explicitly — confirm
    # that is the intended contract with apply_ufunc.
    if (dataset_join not in _JOINS_WITHOUT_FILL_VALUES and
            fill_value is _NO_FILL_VALUE):
        raise TypeError('to apply an operation to datasets with different '
                        'data variables with apply_ufunc, you must supply the '
                        'dataset_fill_value argument.')
    if kwargs:
        raise TypeError('apply_dataset_ufunc() got unexpected keyword '
                        'arguments: %s' % list(kwargs))
    # Align all arguments before building the output coordinates.
    if len(args) > 1:
        args = deep_align(args, join=join, copy=False, exclude=exclude_dims,
                          raise_on_invalid=False)
    list_of_coords = build_output_coords(args, signature, exclude_dims)
    args = [getattr(arg, 'data_vars', arg) for arg in args]
    result_vars = apply_dict_of_variables_ufunc(
        func, *args, signature=signature, join=dataset_join,
        fill_value=fill_value)
    if signature.num_outputs > 1:
        out = tuple(_fast_dataset(*args)
                    for args in zip(result_vars, list_of_coords))
    else:
        coord_vars, = list_of_coords
        out = _fast_dataset(result_vars, coord_vars)
    # Optionally carry over the attrs of the first Dataset argument.
    if keep_attrs and isinstance(first_obj, Dataset):
        if isinstance(out, tuple):
            out = tuple(ds._copy_attrs_from(first_obj) for ds in out)
        else:
            out._copy_attrs_from(first_obj)
    return out
def _iter_over_selections(obj, dim, values):
    """Yield ``obj.sel(dim=value)`` for each value in order, substituting a
    lazily-created dummy copy whenever the selection fails."""
    from .groupby import _dummy_copy

    dummy = None
    for value in values:
        try:
            selected = obj.sel(**{dim: value})
        except (KeyError, IndexError):
            if dummy is None:
                dummy = _dummy_copy(obj)  # create at most once
            selected = dummy
        yield selected
def apply_groupby_ufunc(func, *args):
    """Apply `func` group-wise over arguments that include GroupBy objects.

    All GroupBy arguments must be grouped identically; array arguments that
    share the grouped dimension are iterated in step, and everything else
    is repeated for every group.
    """
    from .groupby import GroupBy, peek_at
    from .variable import Variable
    groupbys = [arg for arg in args if isinstance(arg, GroupBy)]
    assert groupbys, 'must have at least one groupby to iterate over'
    first_groupby = groupbys[0]
    if any(not first_groupby._group.equals(gb._group) for gb in groupbys[1:]):
        raise ValueError('apply_ufunc can only perform operations over '
                         'multiple GroupBy objets at once if they are all '
                         'grouped the same way')
    grouped_dim = first_groupby._group.name
    unique_values = first_groupby._unique_coord.values
    # Build one iterator per argument, each yielding one item per group.
    iterators = []
    for arg in args:
        if isinstance(arg, GroupBy):
            iterator = (value for _, value in arg)
        elif hasattr(arg, 'dims') and grouped_dim in arg.dims:
            if isinstance(arg, Variable):
                raise ValueError(
                    'groupby operations cannot be performed with '
                    'xarray.Variable objects that share a dimension with '
                    'the grouped dimension')
            # select along the grouped dimension in group order
            iterator = _iter_over_selections(arg, grouped_dim, unique_values)
        else:
            # scalar-like argument: repeat it for every group
            iterator = itertools.repeat(arg)
        iterators.append(iterator)
    applied = (func(*zipped_args) for zipped_args in zip(*iterators))
    # Peek at the first result to decide single vs multiple outputs.
    applied_example, applied = peek_at(applied)
    combine = first_groupby._combine
    if isinstance(applied_example, tuple):
        combined = tuple(combine(output) for output in zip(*applied))
    else:
        combined = combine(applied)
    return combined
def unified_dim_sizes(variables, exclude_dims=frozenset()):
    # type: Iterable[Variable] -> OrderedDict[Any, int]
    """Map each non-excluded dimension to its size, checking that every
    variable agrees; raises ValueError on duplicates or size mismatches."""
    dim_sizes = OrderedDict()
    for var in variables:
        if len(set(var.dims)) < len(var.dims):
            raise ValueError('broadcasting cannot handle duplicate '
                             'dimensions on a variable: %r' % list(var.dims))
        for dim, size in zip(var.dims, var.shape):
            if dim in exclude_dims:
                continue
            if dim not in dim_sizes:
                dim_sizes[dim] = size
            elif dim_sizes[dim] != size:
                raise ValueError('operands cannot be broadcast together '
                                 'with mismatched lengths for dimension '
                                 '%r: %s vs %s'
                                 % (dim, dim_sizes[dim], size))
    return dim_sizes
SLICE_NONE = slice(None)

# A = TypeVar('A', numpy.ndarray, dask.array.Array)


def broadcast_compat_data(variable, broadcast_dims, core_dims):
    # type: (Variable[A], tuple, tuple) -> A
    """Return *variable*'s data transposed/expanded so its dimensions are
    ``broadcast_dims + core_dims`` (new broadcast axes added as needed)."""
    data = variable.data

    current_dims = variable.dims
    target_dims = broadcast_dims + core_dims

    if target_dims == current_dims:
        # fast path: already in the requested layout
        return data

    current_set = set(current_dims)

    missing_core_dims = [d for d in core_dims if d not in current_set]
    if missing_core_dims:
        raise ValueError(
            'operand to apply_ufunc has required core dimensions %r, but '
            'some of these are missing on the input variable: %r'
            % (list(core_dims), missing_core_dims))

    target_set = set(target_dims)
    unexpected_dims = [d for d in current_dims if d not in target_set]
    if unexpected_dims:
        raise ValueError('operand to apply_ufunc encountered unexpected '
                         'dimensions %r on an input variable: these are core '
                         'dimensions on other input or output variables'
                         % unexpected_dims)

    # for consistency with numpy, keep broadcast dimensions to the left
    reordered_dims = tuple(
        d for d in broadcast_dims if d in current_set) + core_dims
    if reordered_dims != current_dims:
        axes = tuple(current_dims.index(d) for d in reordered_dims)
        data = duck_array_ops.transpose(data, axes)

    if target_dims != reordered_dims:
        key_parts = []
        for dim in target_dims:
            if dim in current_set:
                key_parts.append(SLICE_NONE)
            elif key_parts:
                # no need to insert new axes at the beginning that are
                # already handled by broadcasting
                key_parts.append(np.newaxis)
        data = data[tuple(key_parts)]

    return data
def apply_variable_ufunc(func, *args, **kwargs):
    """apply_variable_ufunc(func, *args, signature, exclude_dims=frozenset())

    Apply `func` to the data of Variable arguments, broadcasting them
    against each other along non-core dimensions first. The ``dask``
    keyword controls handling of dask-backed inputs ('forbidden',
    'allowed' or 'parallelized').
    """
    from .variable import Variable
    signature = kwargs.pop('signature')
    exclude_dims = kwargs.pop('exclude_dims', _DEFAULT_FROZEN_SET)
    dask = kwargs.pop('dask', 'forbidden')
    output_dtypes = kwargs.pop('output_dtypes', None)
    output_sizes = kwargs.pop('output_sizes', None)
    keep_attrs = kwargs.pop('keep_attrs', False)
    if kwargs:
        raise TypeError('apply_variable_ufunc() got unexpected keyword '
                        'arguments: %s' % list(kwargs))
    # Broadcast dimensions are all shared dimensions that are not core.
    dim_sizes = unified_dim_sizes((a for a in args if hasattr(a, 'dims')),
                                  exclude_dims=exclude_dims)
    broadcast_dims = tuple(dim for dim in dim_sizes
                           if dim not in signature.all_core_dims)
    output_dims = [broadcast_dims + out for out in signature.output_core_dims]
    # Reshape Variable inputs to broadcast_dims + core_dims; leave scalars.
    input_data = [broadcast_compat_data(arg, broadcast_dims, core_dims)
                  if isinstance(arg, Variable)
                  else arg
                  for arg, core_dims in zip(args, signature.input_core_dims)]
    if any(isinstance(array, dask_array_type) for array in input_data):
        if dask == 'forbidden':
            raise ValueError('apply_ufunc encountered a dask array on an '
                             'argument, but handling for dask arrays has not '
                             'been enabled. Either set the ``dask`` argument '
                             'or load your data into memory first with '
                             '``.load()`` or ``.compute()``')
        elif dask == 'parallelized':
            input_dims = [broadcast_dims + dims
                          for dims in signature.input_core_dims]
            numpy_func = func
            # Rebind `func` so the call below dispatches through dask.atop
            # with the original function applied per chunk.
            def func(*arrays):
                return _apply_with_dask_atop(
                    numpy_func, arrays, input_dims, output_dims,
                    signature, output_dtypes, output_sizes)
        elif dask == 'allowed':
            pass
        else:
            raise ValueError('unknown setting for dask array handling in '
                             'apply_ufunc: {}'.format(dask))
    result_data = func(*input_data)
    # Wrap the raw result(s) back into Variables, optionally keeping attrs
    # from the first Variable argument.
    if signature.num_outputs > 1:
        output = []
        for dims, data in zip(output_dims, result_data):
            var = Variable(dims, data)
            if keep_attrs and isinstance(args[0], Variable):
                var.attrs.update(args[0].attrs)
            output.append(var)
        return tuple(output)
    else:
        dims, = output_dims
        var = Variable(dims, result_data)
        if keep_attrs and isinstance(args[0], Variable):
            var.attrs.update(args[0].attrs)
        return var
def _apply_with_dask_atop(func, args, input_dims, output_dims, signature,
                          output_dtypes, output_sizes=None):
    """Build a lazy dask graph that applies ``func`` blockwise via ``da.atop``.

    Used by apply_variable_ufunc when dask='parallelized'. Validates that
    exactly one output is requested, that an output dtype is supplied, and
    that core dimensions are not split across chunks, then delegates to
    ``dask.array.atop``.
    """
    import dask.array as da

    # Only a single output is supported in the parallelized path.
    if signature.num_outputs > 1:
        raise NotImplementedError('multiple outputs from apply_ufunc not yet '
                                  "supported with dask='parallelized'")

    # NOTE(review): message says "apply_func" — likely meant "apply_ufunc".
    if output_dtypes is None:
        raise ValueError('output dtypes (output_dtypes) must be supplied to '
                         "apply_func when using dask='parallelized'")

    if not isinstance(output_dtypes, list):
        raise TypeError('output_dtypes must be a list of objects coercible to '
                        'numpy dtypes, got {}'.format(output_dtypes))

    if len(output_dtypes) != signature.num_outputs:
        raise ValueError('apply_ufunc arguments output_dtypes and '
                         'output_core_dims must have the same length: {} vs {}'
                         .format(len(output_dtypes), signature.num_outputs))

    (dtype,) = output_dtypes

    if output_sizes is None:
        output_sizes = {}

    # Any core dimension that appears only on outputs must have an explicit
    # size, since it cannot be inferred from the inputs.
    new_dims = signature.all_output_core_dims - signature.all_input_core_dims
    if any(dim not in output_sizes for dim in new_dims):
        raise ValueError("when using dask='parallelized' with apply_ufunc, "
                         'output core dimensions not found on inputs must '
                         'have explicitly set sizes with ``output_sizes``: {}'
                         .format(new_dims))

    for n, (data, core_dims) in enumerate(
            zip(args, signature.input_core_dims)):
        if isinstance(data, dask_array_type):
            # core dimensions cannot span multiple chunks
            for axis, dim in enumerate(core_dims, start=-len(core_dims)):
                if len(data.chunks[axis]) != 1:
                    raise ValueError(
                        'dimension {!r} on {}th function argument to '
                        "apply_ufunc with dask='parallelized' consists of "
                        'multiple chunks, but is also a core dimension. To '
                        'fix, rechunk into a single dask array chunk along '
                        'this dimension, i.e., ``.rechunk({})``, but beware '
                        'that this may significantly increase memory usage.'
                        .format(dim, n, {dim: -1}))

    (out_ind,) = output_dims
    # atop expects (array, index, array, index, ...) pairs flattened into
    # positional arguments.
    atop_args = []
    for arg, dims in zip(args, input_dims):
        # skip leading dimensions that are implicitly added by broadcasting
        ndim = getattr(arg, 'ndim', 0)
        trimmed_dims = dims[-ndim:] if ndim else ()
        atop_args.extend([arg, trimmed_dims])

    return da.atop(func, out_ind, *atop_args, dtype=dtype, concatenate=True,
                   new_axes=output_sizes)
def apply_array_ufunc(func, *args, **kwargs):
    """apply_array_ufunc(func, *args, dask='forbidden')

    Apply ``func`` directly to bare (numpy/dask) arrays, enforcing the
    requested policy for dask inputs. The ``dask`` value is only validated
    when at least one argument actually is a dask array.
    """
    dask = kwargs.pop('dask', 'forbidden')
    if kwargs:
        raise TypeError('apply_array_ufunc() got unexpected keyword '
                        'arguments: %s' % list(kwargs))

    has_dask = any(isinstance(arg, dask_array_type) for arg in args)
    if has_dask:
        if dask == 'allowed':
            pass
        elif dask == 'forbidden':
            raise ValueError('apply_ufunc encountered a dask array on an '
                             'argument, but handling for dask arrays has not '
                             'been enabled. Either set the ``dask`` argument '
                             'or load your data into memory first with '
                             '``.load()`` or ``.compute()``')
        elif dask == 'parallelized':
            raise ValueError("cannot use dask='parallelized' for apply_ufunc "
                             'unless at least one input is an xarray object')
        else:
            raise ValueError('unknown setting for dask array handling: {}'
                             .format(dask))
    return func(*args)
def apply_ufunc(func, *args, **kwargs):
    """apply_ufunc(func : Callable,
                   *args : Any,
                   input_core_dims : Optional[Sequence[Sequence]] = None,
                   output_core_dims : Optional[Sequence[Sequence]] = ((),),
                   exclude_dims : Collection = frozenset(),
                   vectorize : bool = False,
                   join : str = 'exact',
                   dataset_join : str = 'exact',
                   dataset_fill_value : Any = _NO_FILL_VALUE,
                   keep_attrs : bool = False,
                   kwargs : Mapping = None,
                   dask : str = 'forbidden',
                   output_dtypes : Optional[Sequence] = None,
                   output_sizes : Optional[Mapping[Any, int]] = None)

    Apply a vectorized function for unlabeled arrays on xarray objects.

    The function will be mapped over the data variable(s) of the input
    arguments using xarray's standard rules for labeled computation, including
    alignment, broadcasting, looping over GroupBy/Dataset variables, and
    merging of coordinates.

    Parameters
    ----------
    func : callable
        Function to call like ``func(*args, **kwargs)`` on unlabeled arrays
        (``.data``) that returns an array or tuple of arrays. If multiple
        arguments with non-matching dimensions are supplied, this function is
        expected to vectorize (broadcast) over axes of positional arguments in
        the style of NumPy universal functions [1]_ (if this is not the case,
        set ``vectorize=True``). If this function returns multiple outputs, you
        must set ``output_core_dims`` as well.
    *args : Dataset, DataArray, GroupBy, Variable, numpy/dask arrays or scalars
        Mix of labeled and/or unlabeled arrays to which to apply the function.
    input_core_dims : Sequence[Sequence], optional
        List of the same length as ``args`` giving the list of core dimensions
        on each input argument that should not be broadcast. By default, we
        assume there are no core dimensions on any input arguments.

        For example, ``input_core_dims=[[], ['time']]`` indicates that all
        dimensions on the first argument and all dimensions other than 'time'
        on the second argument should be broadcast.

        Core dimensions are automatically moved to the last axes of input
        variables before applying ``func``, which facilitates using NumPy style
        generalized ufuncs [2]_.
    output_core_dims : List[tuple], optional
        List of the same length as the number of output arguments from
        ``func``, giving the list of core dimensions on each output that were
        not broadcast on the inputs. By default, we assume that ``func``
        outputs exactly one array, with axes corresponding to each broadcast
        dimension.

        Core dimensions are assumed to appear as the last dimensions of each
        output in the provided order.
    exclude_dims : set, optional
        Core dimensions on the inputs to exclude from alignment and
        broadcasting entirely. Any input coordinates along these dimensions
        will be dropped. Each excluded dimension must also appear in
        ``input_core_dims`` for at least one argument.
    vectorize : bool, optional
        If True, then assume ``func`` only takes arrays defined over core
        dimensions as input and vectorize it automatically with
        :py:func:`numpy.vectorize`. This option exists for convenience, but is
        almost always slower than supplying a pre-vectorized function.
        Using this option requires NumPy version 1.12 or newer.
    join : {'outer', 'inner', 'left', 'right', 'exact'}, optional
        Method for joining the indexes of the passed objects along each
        dimension, and the variables of Dataset objects with mismatched
        data variables:

        - 'outer': use the union of object indexes
        - 'inner': use the intersection of object indexes
        - 'left': use indexes from the first object with each dimension
        - 'right': use indexes from the last object with each dimension
        - 'exact': raise `ValueError` instead of aligning when indexes to be
          aligned are not equal
    dataset_join : {'outer', 'inner', 'left', 'right', 'exact'}, optional
        Method for joining variables of Dataset objects with mismatched
        data variables.

        - 'outer': take variables from both Dataset objects
        - 'inner': take only overlapped variables
        - 'left': take only variables from the first object
        - 'right': take only variables from the last object
        - 'exact': data variables on all Dataset objects must match exactly
    dataset_fill_value : optional
        Value used in place of missing variables on Dataset inputs when the
        datasets do not share the exact same ``data_vars``. Required if
        ``dataset_join not in {'inner', 'exact'}``, otherwise ignored.
    keep_attrs: boolean, Optional
        Whether to copy attributes from the first argument to the output.
    kwargs: dict, optional
        Optional keyword arguments passed directly on to call ``func``.
    dask: 'forbidden', 'allowed' or 'parallelized', optional
        How to handle applying to objects containing lazy data in the form of
        dask arrays:

        - 'forbidden' (default): raise an error if a dask array is encountered.
        - 'allowed': pass dask arrays directly on to ``func``.
        - 'parallelized': automatically parallelize ``func`` if any of the
          inputs are a dask array. If used, the ``output_dtypes`` argument must
          also be provided. Multiple output arguments are not yet supported.
    output_dtypes : list of dtypes, optional
        Optional list of output dtypes. Only used if dask='parallelized'.
    output_sizes : dict, optional
        Optional mapping from dimension names to sizes for outputs. Only used
        if dask='parallelized' and new dimensions (not found on inputs) appear
        on outputs.

    Returns
    -------
    Single value or tuple of Dataset, DataArray, Variable, dask.array.Array or
    numpy.ndarray, the first type on that list to appear on an input.

    Examples
    --------
    For illustrative purposes only, here are examples of how you could use
    ``apply_ufunc`` to write functions to (very nearly) replicate existing
    xarray functionality:

    Calculate the vector magnitude of two arguments::

        def magnitude(a, b):
            func = lambda x, y: np.sqrt(x ** 2 + y ** 2)
            return xr.apply_ufunc(func, a, b)

    Compute the mean (``.mean``) over one dimension::

        def mean(obj, dim):
            # note: apply always moves core dimensions to the end
            return apply_ufunc(np.mean, obj,
                               input_core_dims=[[dim]],
                               kwargs={'axis': -1})

    Inner product over a specific dimension::

        def _inner(x, y):
            result = np.matmul(x[..., np.newaxis, :], y[..., :, np.newaxis])
            return result[..., 0, 0]

        def inner_product(a, b, dim):
            return apply_ufunc(_inner, a, b, input_core_dims=[[dim], [dim]])

    Stack objects along a new dimension (like ``xr.concat``)::

        def stack(objects, dim, new_coord):
            # note: this version does not stack coordinates
            func = lambda *x: np.stack(x, axis=-1)
            result = apply_ufunc(func, *objects,
                                 output_core_dims=[[dim]],
                                 join='outer',
                                 dataset_fill_value=np.nan)
            result[dim] = new_coord
            return result

    If your function is not vectorized but can be applied only to core
    dimensions, you can use ``vectorize=True`` to turn into a vectorized
    function. This wraps :py:func:`numpy.vectorize`, so the operation isn't
    terribly fast. Here we'll use it to calculate the distance between
    empirical samples from two probability distributions, using a scipy
    function that needs to be applied to vectors::

        import scipy.stats

        def earth_mover_distance(first_samples,
                                 second_samples,
                                 dim='ensemble'):
            return apply_ufunc(scipy.stats.wasserstein_distance,
                               first_samples, second_samples,
                               input_core_dims=[[dim], [dim]],
                               vectorize=True)

    Most of NumPy's builtin functions already broadcast their inputs
    appropriately for use in `apply`. You may find helper functions such as
    numpy.broadcast_arrays helpful in writing your function. `apply_ufunc` also
    works well with numba's vectorize and guvectorize. Further explanation with
    examples are provided in the xarray documentation [3].

    See also
    --------
    numpy.broadcast_arrays
    numba.vectorize
    numba.guvectorize

    References
    ----------
    .. [1] http://docs.scipy.org/doc/numpy/reference/ufuncs.html
    .. [2] http://docs.scipy.org/doc/numpy/reference/c-api.generalized-ufuncs.html
    .. [3] http://xarray.pydata.org/en/stable/computation.html#wrapping-custom-computation
    """  # noqa: E501  # don't error on that URL one line up
    from .groupby import GroupBy
    from .dataarray import DataArray
    from .variable import Variable

    # Pop keyword-only options by hand (Python 2 compatible signature);
    # anything left in kwargs afterwards is an unknown keyword.
    input_core_dims = kwargs.pop('input_core_dims', None)
    output_core_dims = kwargs.pop('output_core_dims', ((),))
    vectorize = kwargs.pop('vectorize', False)
    join = kwargs.pop('join', 'exact')
    dataset_join = kwargs.pop('dataset_join', 'exact')
    keep_attrs = kwargs.pop('keep_attrs', False)
    exclude_dims = kwargs.pop('exclude_dims', frozenset())
    dataset_fill_value = kwargs.pop('dataset_fill_value', _NO_FILL_VALUE)
    kwargs_ = kwargs.pop('kwargs', None)
    dask = kwargs.pop('dask', 'forbidden')
    output_dtypes = kwargs.pop('output_dtypes', None)
    output_sizes = kwargs.pop('output_sizes', None)
    if kwargs:
        raise TypeError('apply_ufunc() got unexpected keyword arguments: %s'
                        % list(kwargs))

    # Default: no core dimensions on any input.
    if input_core_dims is None:
        input_core_dims = ((),) * (len(args))

    signature = _UFuncSignature(input_core_dims, output_core_dims)

    if exclude_dims and not exclude_dims <= signature.all_core_dims:
        raise ValueError('each dimension in `exclude_dims` must also be a '
                         'core dimension in the function signature')

    # Bake the user's extra keyword arguments into func.
    if kwargs_:
        func = functools.partial(func, **kwargs_)

    if vectorize:
        if signature.all_core_dims:
            # we need the signature argument
            if LooseVersion(np.__version__) < '1.12':  # pragma: no cover
                raise NotImplementedError(
                    'numpy 1.12 or newer required when using vectorize=True '
                    'in xarray.apply_ufunc with non-scalar output core '
                    'dimensions.')
            # NOTE(review): kwargs has been emptied by the pops above and the
            # `if kwargs: raise` check, so excluded=set(kwargs) is always the
            # empty set here — confirm intent.
            func = np.vectorize(func,
                                otypes=output_dtypes,
                                signature=signature.to_gufunc_string(),
                                excluded=set(kwargs))
        else:
            func = np.vectorize(func,
                                otypes=output_dtypes,
                                excluded=set(kwargs))

    # Variable-level applicator shared by the Dataset/DataArray/Variable
    # dispatch branches below.
    variables_ufunc = functools.partial(apply_variable_ufunc, func,
                                        signature=signature,
                                        exclude_dims=exclude_dims,
                                        keep_attrs=keep_attrs,
                                        dask=dask,
                                        output_dtypes=output_dtypes,
                                        output_sizes=output_sizes)

    # Dispatch on the "highest" input type: GroupBy > Dataset (dict-like) >
    # DataArray > Variable > bare array.
    if any(isinstance(a, GroupBy) for a in args):
        # kwargs has already been added into func
        # NOTE(review): vectorize/output_dtypes/output_sizes are not forwarded
        # here; func has already been vectorized above, but output_dtypes is
        # lost for the recursive dask='parallelized' path — confirm upstream.
        this_apply = functools.partial(apply_ufunc, func,
                                       input_core_dims=input_core_dims,
                                       output_core_dims=output_core_dims,
                                       exclude_dims=exclude_dims,
                                       join=join,
                                       dataset_join=dataset_join,
                                       dataset_fill_value=dataset_fill_value,
                                       keep_attrs=keep_attrs,
                                       dask=dask)
        return apply_groupby_ufunc(this_apply, *args)
    elif any(is_dict_like(a) for a in args):
        return apply_dataset_ufunc(variables_ufunc, *args,
                                   signature=signature,
                                   join=join,
                                   exclude_dims=exclude_dims,
                                   fill_value=dataset_fill_value,
                                   dataset_join=dataset_join,
                                   keep_attrs=keep_attrs)
    elif any(isinstance(a, DataArray) for a in args):
        return apply_dataarray_ufunc(variables_ufunc, *args,
                                     signature=signature,
                                     join=join,
                                     exclude_dims=exclude_dims)
    elif any(isinstance(a, Variable) for a in args):
        return variables_ufunc(*args)
    else:
        return apply_array_ufunc(func, *args, dask=dask)
def dot(*arrays, **kwargs):
    """ dot(*arrays, dims=None)

    Generalized dot product for xarray objects. Like np.einsum, but
    provides a simpler interface based on array dimensions.

    Parameters
    ----------
    arrays: DataArray (or Variable) objects
        Arrays to compute.
    dims: str or tuple of strings, optional
        Which dimensions to sum over.
        If not specified, then all the common dimensions are summed over.
    **kwargs: dict
        Additional keyword arguments passed to numpy.einsum or
        dask.array.einsum

    Returns
    -------
    dot: DataArray

    Examples
    --------
    >>> da_a = xr.DataArray(np.arange(3 * 4).reshape(3, 4), dims=['a', 'b'])
    >>> da_b = xr.DataArray(np.arange(3 * 4 * 5).reshape(3, 4, 5),
    ...                     dims=['a', 'b', 'c'])
    >>> da_c = xr.DataArray(np.arange(5 * 6).reshape(5, 6), dims=['c', 'd'])
    >>>
    >>> xr.dot(da_a, da_b, dims=['a', 'b']).dims
    ('c', )
    >>> xr.dot(da_a, da_b, dims=['a']).dims
    ('b', 'c')
    >>> xr.dot(da_a, da_b, da_c, dims=['b', 'c']).dims
    ('a', 'd')
    """
    from .dataarray import DataArray
    from .variable import Variable

    dims = kwargs.pop('dims', None)

    # NOTE(review): the two literals below concatenate without a space
    # ("supported.Given"); candidate typo fix in the runtime message.
    if any(not isinstance(arr, (Variable, DataArray)) for arr in arrays):
        raise TypeError('Only xr.DataArray and xr.Variable are supported.'
                        'Given {}.'.format([type(arr) for arr in arrays]))

    if len(arrays) == 0:
        raise TypeError('At least one array should be given.')

    # basestring: this module still supports Python 2.
    if isinstance(dims, basestring):
        dims = (dims, )

    common_dims = set.intersection(*[set(arr.dims) for arr in arrays])
    # all_dims preserves first-seen order across all inputs (unlike a set).
    all_dims = []
    for arr in arrays:
        all_dims += [d for d in arr.dims if d not in all_dims]

    # Map each dimension name to a single einsum subscript letter.
    einsum_axes = 'abcdefghijklmnopqrstuvwxyz'
    dim_map = {d: einsum_axes[i] for i, d in enumerate(all_dims)}

    if dims is None:
        # default: sum over every dimension that occurs more than once
        dim_counts = Counter()
        for arr in arrays:
            dim_counts.update(arr.dims)
        dims = tuple(d for d, c in dim_counts.items() if c > 1)

    dims = tuple(dims)  # make dims a tuple

    # dimensions to be parallelized
    broadcast_dims = tuple(d for d in all_dims
                           if d in common_dims and d not in dims)
    input_core_dims = [[d for d in arr.dims if d not in broadcast_dims]
                       for arr in arrays]
    output_core_dims = [tuple(d for d in all_dims if d not in
                        dims + broadcast_dims)]

    # construct einsum subscripts, such as '...abc,...ab->...c'
    # Note: input_core_dims are always moved to the last position
    subscripts_list = ['...' + ''.join([dim_map[d] for d in ds]) for ds
                       in input_core_dims]
    subscripts = ','.join(subscripts_list)
    subscripts += '->...' + ''.join([dim_map[d] for d in output_core_dims[0]])

    # subscripts should be passed to np.einsum as arg, not as kwargs. We need
    # to construct a partial function for apply_ufunc to work.
    func = functools.partial(duck_array_ops.einsum, subscripts, **kwargs)
    result = apply_ufunc(func, *arrays,
                         input_core_dims=input_core_dims,
                         output_core_dims=output_core_dims,
                         dask='allowed')
    # Restore the original first-seen dimension order on the result.
    return result.transpose(*[d for d in all_dims if d in result.dims])
def where(cond, x, y):
    """Return elements from `x` or `y` depending on `cond`.

    Performs xarray-like broadcasting across input arguments.

    Parameters
    ----------
    cond : scalar, array, Variable, DataArray or Dataset with boolean dtype
        When True, return values from `x`, otherwise returns values from `y`.
    x, y : scalar, array, Variable, DataArray or Dataset
        Values from which to choose. All dimension coordinates on these objects
        must be aligned with each other and with `cond`.

    Returns
    -------
    In priority order: Dataset, DataArray, Variable or array, whichever
    type appears as an input argument.

    Examples
    --------

    >>> cond = xr.DataArray([True, False], dims=['x'])
    >>> x = xr.DataArray([1, 2], dims=['y'])
    >>> xr.where(cond, x, 0)
    <xarray.DataArray (x: 2, y: 2)>
    array([[1, 2],
           [0, 0]])
    Dimensions without coordinates: x, y

    See also
    --------
    numpy.where : corresponding numpy function
    Dataset.where, DataArray.where : equivalent methods
    """
    # Aligning three arguments is complicated, so only exact joins are
    # supported for now.
    options = dict(join='exact', dataset_join='exact', dask='allowed')
    return apply_ufunc(duck_array_ops.where, cond, x, y, **options)
| apache-2.0 |
gietal/Stocker | sandbox/udacity/1.py | 1 | 1188 | import pandas as pd
import matplotlib.pyplot as plt
def testRun():
    """Smoke test: load the MSFT CSV and print its first rows (Python 2)."""
    df = pd.read_csv("data/MSFT.csv")
    print df.head()
def getMaxClose(symbol):
    """Return the highest 'Close' value found in data/<symbol>.csv."""
    frame = pd.read_csv("data/{}.csv".format(symbol))  # read data
    return frame['Close'].max()
def getMeanVolume(symbol):
    """Return the average 'Volume' value found in data/<symbol>.csv."""
    frame = pd.read_csv("data/{}.csv".format(symbol))
    return frame['Volume'].mean()
def showData():
    """Print max close and mean volume for a fixed set of symbols (Python 2)."""
    for symbol in ['MSFT', 'TSLA']:
        print "{}, max close: {}, mean volume: {} ".format(symbol, getMaxClose(symbol), getMeanVolume(symbol))
        #print symbol, getMaxClose(symbol)
def plotHigh(symbol):
    """Chart the 'High' price history for the given ticker symbol."""
    frame = pd.read_csv("data/{}.csv".format(symbol))
    frame['High'].plot()
    plt.title('{} High price'.format(symbol))
    plt.xlabel('time')
    plt.ylabel('price')
    plt.show()  # must be called to show plot
def plotClose(symbol):
    """Chart raw and dividend/split-adjusted closing prices for one symbol."""
    df = pd.read_csv("data/{}.csv".format(symbol))
    df[['Close', 'Adj Close']].plot()
    plt.xlabel('time')
    plt.ylabel('price')
    plt.title('{} Close and Adj Close price'.format(symbol))
    plt.show() # must be called to show plot
def plotData():
    """Entry point for the demo: chart TSLA closing prices."""
    plotClose('TSLA')

if __name__ == "__main__":
    plotData()
| mit |
gheshu/synth | src/plot.py | 1 | 1372 | import math
import matplotlib.pyplot as plt
# Waveform constants.
tau = 6.2831853  # one full cycle in radians; NOTE(review): lower precision than math.tau
pi = 3.141592  # half cycle; NOTE(review): lower precision than math.pi
samples = 10  # samples taken per cycle
dphase = tau / samples  # phase increment between consecutive samples
def lerp(a, b, alpha):
    """Linear interpolation: returns a at alpha == 0 and b at alpha == 1."""
    weight_a = 1.0 - alpha
    weight_b = alpha
    return weight_a * a + weight_b * b
def saw_wave(phase):
    """Rising sawtooth: maps phase in [0, tau) linearly onto [-1, 1)."""
    return lerp(-1.0, 1.0, phase / tau)
def sine_wave(phase):
    """Pure sine oscillator sampled at the given phase (radians)."""
    return math.sin(phase)
def square_wave(phase):
    """Square wave: +1 during the first half cycle, -1 during the second."""
    return 1.0 if phase < pi else -1.0
def triangle_wave(phase):
    """Triangle wave: ramps -1 -> 1 over the first half cycle, 1 -> -1 over the second."""
    if phase < pi:
        return lerp(-1.0, 1.0, phase / pi)
    return lerp(1.0, -1.0, (phase - pi) / pi)
def clamp(x, a, b):
    """Restrict x to the range [a, b] (assuming a <= b)."""
    bounded_below = max(x, a)
    return min(bounded_below, b)
def quadratic_bezier(t, p):
    """Evaluate a quadratic Bezier curve with control points p[0..2] at t in [0, 1]."""
    u = 1.0 - t
    term0 = u * u * p[0]
    term1 = 2.0 * u * t * p[1]
    term2 = t * t * p[2]
    return term0 + term1 + term2
def envelope(t, durations, beziers, num_states):
    """Evaluate a piecewise quadratic-Bezier amplitude envelope at time t."""
    # Walk forward through the stages, consuming each completed stage's
    # duration, until we land in the stage containing t (or the last stage).
    state = 0
    while state < num_states - 1 and t >= durations[state]:
        t -= durations[state]
        state += 1
    normalized_time = clamp(t / durations[state], 0.0, 1.0)
    return quadratic_bezier(normalized_time, beziers[state])
# Demo: sample a 4-stage (attack/decay/sustain/release-like) envelope over
# 4 seconds at 100 Hz and plot the resulting amplitude curve.
t = 0.0
dt = 0.01
# Each stage lasts 0.5s; NOTE(review): the 4 stages sum to 2.0s while the
# loop runs to 4.0s, so the tail clamps to the final stage — confirm intent.
durations = [0.5, 0.5, 0.5, 0.5]
# One quadratic Bezier (start, control, end amplitude) per stage.
beziers = [
    [0.0, 0.0, 1.0],
    [1.0, 0.5, 0.5],
    [0.5, 0.5, 0.5],
    [0.5, 0.0, 0.0]
]
num_states = 4

values = []
while t < 4.0:
    values.append(envelope(t, durations, beziers, num_states))
    t += dt

plt.plot(values)
plt.ylabel("Amplitude")
plt.xlabel("Sample")
plt.show() | apache-2.0 |
samuel1208/scikit-learn | examples/exercises/plot_cv_diabetes.py | 231 | 2527 | """
===============================================
Cross-validation on diabetes Dataset Exercise
===============================================
A tutorial exercise which uses cross-validation with linear models.
This exercise is used in the :ref:`cv_estimators_tut` part of the
:ref:`model_selection_tut` section of the :ref:`stat_learn_tut_index`.
"""
from __future__ import print_function
print(__doc__)

import numpy as np
import matplotlib.pyplot as plt

# NOTE(review): sklearn.cross_validation is the pre-0.18 API (replaced by
# sklearn.model_selection in modern releases).
from sklearn import cross_validation, datasets, linear_model

# Use the first 150 samples of the diabetes regression dataset.
diabetes = datasets.load_diabetes()
X = diabetes.data[:150]
y = diabetes.target[:150]

lasso = linear_model.Lasso()
# 30 regularization strengths, log-spaced between 1e-4 and 10^-0.5.
alphas = np.logspace(-4, -.5, 30)

scores = list()
scores_std = list()

# Cross-validate the Lasso at each alpha and record mean/std of the CV score.
for alpha in alphas:
    lasso.alpha = alpha
    this_scores = cross_validation.cross_val_score(lasso, X, y, n_jobs=1)
    scores.append(np.mean(this_scores))
    scores_std.append(np.std(this_scores))

plt.figure(figsize=(4, 3))
plt.semilogx(alphas, scores)
# plot error lines showing +/- std. errors of the scores
plt.semilogx(alphas, np.array(scores) + np.array(scores_std) / np.sqrt(len(X)),
             'b--')
plt.semilogx(alphas, np.array(scores) - np.array(scores_std) / np.sqrt(len(X)),
             'b--')
plt.ylabel('CV score')
plt.xlabel('alpha')
plt.axhline(np.max(scores), linestyle='--', color='.5')

##############################################################################
# Bonus: how much can you trust the selection of alpha?

# To answer this question we use the LassoCV object that sets its alpha
# parameter automatically from the data by internal cross-validation (i.e. it
# performs cross-validation on the training data it receives).
# We use external cross-validation to see how much the automatically obtained
# alphas differ across different cross-validation folds.
lasso_cv = linear_model.LassoCV(alphas=alphas)
k_fold = cross_validation.KFold(len(X), 3)

print("Answer to the bonus question:",
      "how much can you trust the selection of alpha?")
print()
print("Alpha parameters maximising the generalization score on different")
print("subsets of the data:")
for k, (train, test) in enumerate(k_fold):
    lasso_cv.fit(X[train], y[train])
    print("[fold {0}] alpha: {1:.5f}, score: {2:.5f}".
          format(k, lasso_cv.alpha_, lasso_cv.score(X[test], y[test])))
print()
print("Answer: Not very much since we obtained different alphas for different")
print("subsets of the data and moreover, the scores for these alphas differ")
print("quite substantially.")
plt.show()
| bsd-3-clause |
YuanGunGun/zeppelin | python/src/main/resources/python/bootstrap_sql.py | 60 | 1189 | # Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Setup SQL over Pandas DataFrames.
# It requires these dependencies to be installed:
#  - pandas
#  - pandasql
from __future__ import print_function

# Expose pysqldf(query), which runs `query` against DataFrames found in the
# caller's globals via pandasql. If pandasql (or pandas) is missing, pysqldf
# degrades to printing an installation hint instead of raising.
try:
    from pandasql import sqldf
    pysqldf = lambda q: sqldf(q, globals())
except ImportError:
    # Fix: the two literals previously concatenated without a separator,
    # producing "...DataFrameMake sure...".
    pysqldf = lambda q: print("Can not run SQL over Pandas DataFrame. " +
                              "Make sure 'pandas' and 'pandasql' libraries are installed")
| apache-2.0 |
CERNDocumentServer/invenio | modules/bibauthorid/lib/bibauthorid_tortoise.py | 3 | 16189 | # -*- coding: utf-8 -*-
#
# This file is part of Invenio.
# Copyright (C) 2011, 2012 CERN.
#
# Invenio is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# Invenio is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Invenio; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
from invenio import bibauthorid_config as bconfig
from datetime import datetime
import os
#import cPickle as SER
import msgpack as SER
import gzip as filehandler
import gc
import numpy as np
#This is supposed to defeat a bit of the python vm performance losses:
import sys
sys.setcheckinterval(1000000)
try:
from collections import defaultdict
except:
from invenio.containerutils import defaultdict
from itertools import groupby, chain, repeat
from invenio.bibauthorid_general_utils import update_status, update_status_final, override_stdout_config, override_stdout_config
override_stdout_config(fileout=True, stdout=False)
from invenio.bibauthorid_cluster_set import delayed_cluster_sets_from_marktables
from invenio.bibauthorid_cluster_set import delayed_cluster_sets_from_personid
from invenio.bibauthorid_wedge import wedge
from invenio.bibauthorid_name_utils import generate_last_name_cluster_str
from invenio.bibauthorid_backinterface import empty_tortoise_results_table
from invenio.bibauthorid_backinterface import remove_clusters_by_name
from invenio.bibauthorid_general_utils import bibauthor_print
from invenio.bibauthorid_prob_matrix import prepare_matirx
#Scheduler is [temporarily] deprecated in favour of the much simpler schedule_workers
#from invenio.bibauthorid_scheduler import schedule, matrix_coefs
from invenio.bibauthorid_least_squares import to_function as create_approx_func
from invenio.bibauthorid_general_utils import schedule_workers
#python2.4 compatibility
from invenio.bibauthorid_general_utils import bai_all as all
'''
There are three main entry points to tortoise
i) tortoise
Performs disambiguation iteration.
The arguemnt pure indicates whether to use
the claims and the rejections or not.
Use pure=True only to test the accuracy of tortoise.
ii) tortoise_from_scratch
NOT RECOMMENDED!
Use this function only if you have just
installed invenio and this is your first
disambiguation or if personid is broken.
iii) tortoise_last_name
Computes the clusters for only one last name
group. Is is primary used for testing. It
may also be used to fix a broken last name
cluster. It does not involve multiprocessing
so it is convinient to debug with pdb.
'''
# Exit codes:
# The standard ones are not well documented
# so we are using random numbers.
def tortoise_from_scratch():
    """Full rebuild: wipe previous results and disambiguate every last-name
    cluster derived from the mark tables.

    WARNING (per module docstring): only for fresh installs or a broken
    personid table.
    """
    bibauthor_print("Preparing cluster sets.")
    cluster_sets, _lnames, sizes = delayed_cluster_sets_from_marktables()
    bibauthor_print("Building all matrices.")
    # NOTE(review): force_create_matrix is not among the imports visible in
    # this chunk — presumably defined later in this module; verify.
    schedule_workers(lambda x: force_create_matrix(x, force=True), cluster_sets)

    empty_tortoise_results_table()

    # Cluster sets are lazy/consumed, so they are rebuilt for the second pass.
    bibauthor_print("Preparing cluster sets.")
    cluster_sets, _lnames, sizes = delayed_cluster_sets_from_marktables()
    bibauthor_print("Starting disambiguation.")
    schedule_workers(wedge, cluster_sets)
def tortoise(pure=False,
             force_matrix_creation=False,
             skip_matrix_creation=False,
             last_run=None):
    """Run one disambiguation iteration over the clusters in personid.

    pure: ignore claims/rejections (testing only; forces matrix creation).
    force_matrix_creation / skip_matrix_creation: mutually exclusive matrix
    handling flags. last_run: timestamp passed through to cluster selection.
    """
    assert not force_matrix_creation or not skip_matrix_creation

    # The computation must be forced in case we want
    # to compute pure results
    force_matrix_creation = force_matrix_creation or pure

    if not skip_matrix_creation:
        bibauthor_print("Preparing cluster sets.")
        clusters, _lnames, sizes = delayed_cluster_sets_from_personid(pure, last_run)
        bibauthor_print("Building all matrices.")
        schedule_workers(lambda x: force_create_matrix(x, force=force_matrix_creation), clusters)

    # Rebuild the lazy cluster sets before the wedge pass.
    bibauthor_print("Preparing cluster sets.")
    clusters, _lnames, sizes = delayed_cluster_sets_from_personid(pure, last_run)
    bibauthor_print("Starting disambiguation.")
    schedule_workers(wedge_and_store, clusters)
def tortoise_last_name(name, from_mark=True, pure=False):
    """Disambiguate a single last-name cluster, without multiprocessing.

    Intended for testing/debugging (pdb-friendly) or repairing one broken
    cluster. from_mark selects mark tables as the source; pure (personid
    source only) ignores claims/rejections.
    """
    bibauthor_print('Start working on %s' % name)
    assert not(from_mark and pure)

    lname = generate_last_name_cluster_str(name)

    if from_mark:
        bibauthor_print(' ... from mark!')
        clusters, lnames, sizes = delayed_cluster_sets_from_marktables([lname])
        bibauthor_print(' ... delayed done')
    else:
        bibauthor_print(' ... from pid, pure=%s'%str(pure))
        clusters, lnames, sizes = delayed_cluster_sets_from_personid(pure)
        bibauthor_print(' ... delayed pure done!')

    # NOTE(review): the try/except that used to swallow a missing last name
    # is commented out below, so a miss now raises ValueError/IndexError.
#    try:
    idx = lnames.index(lname)
    cluster = clusters[idx]
    size = sizes[idx]
    cluster_set = cluster()
    bibauthor_print("Found, %s(%s). Total number of bibs: %d." % (name, lname, size))
    create_matrix(cluster_set, False)
    wedge_and_store(cluster_set)
#    except (IndexError, ValueError), e:
#        print e
#        raise e
#        bibauthor_print("Sorry, %s(%s) not found in the last name clusters" % (name, lname))
def tortoise_last_names(names_list):
    """Disambiguate several last-name clusters, one worker per name."""
    schedule_workers(tortoise_last_name, names_list)
def _collect_statistics_lname_coeff(params):
    """Worker: run wedge on one last-name cluster with a fixed coefficient.

    params is a (lname, coeff) pair (packed so it can be dispatched through
    schedule_workers). Results are discarded from the DB afterwards — this
    only collects statistics.
    """
    lname = params[0]
    coeff = params[1]

    clusters, lnames, sizes = delayed_cluster_sets_from_marktables([lname])
    try:
        idx = lnames.index(lname)
        cluster = clusters[idx]
        size = sizes[idx]
        bibauthor_print("Found, %s. Total number of bibs: %d." % (lname, size))
        cluster_set = cluster()
        create_matrix(cluster_set, False)

        bibs = cluster_set.num_all_bibs
        # n*(n-1)/2 pairwise comparisons in the worst case.
        expected = bibs * (bibs - 1) / 2
        bibauthor_print("Start working on %s. Total number of bibs: %d, "
                        "maximum number of comparisons: %d"
                        % (cluster_set.last_name, bibs, expected))

        # report_cluster_status=True: wedge dumps per-coefficient statistics.
        wedge(cluster_set, True, coeff)
        # Statistics run only: do not keep the produced clusters around.
        remove_clusters_by_name(cluster_set.last_name)
    except (IndexError, ValueError):
        bibauthor_print("Sorry, %s not found in the last name clusters," % (lname))
def _create_matrix(lname):
    """Worker: build and store the probability matrix for one last name.

    Silently skips names that do not resolve to a mark-table cluster.
    """
    clusters, lnames, sizes = delayed_cluster_sets_from_marktables([lname])
    try:
        idx = lnames.index(lname)
        cluster = clusters[idx]
        size = sizes[idx]
        bibauthor_print("Found, %s. Total number of bibs: %d." % (lname, size))
        cluster_set = cluster()
        create_matrix(cluster_set, False)

        bibs = cluster_set.num_all_bibs
        # n*(n-1)/2 pairwise comparisons in the worst case.
        expected = bibs * (bibs - 1) / 2
        bibauthor_print("Start working on %s. Total number of bibs: %d, "
                        "maximum number of comparisons: %d"
                        % (cluster_set.last_name, bibs, expected))

        cluster_set.store()
    except (IndexError, ValueError):
        bibauthor_print("Sorry, %s not found in the last name clusters, not creating matrix" % (lname))
def tortoise_tweak_coefficient(lastnames, min_coef, max_coef, stepping, build_matrix=True):
    """Sweep wedge coefficients over [min_coef, max_coef) in `stepping`
    increments for the given last names, collecting statistics per pair.

    Coefficients are generated on an integer grid of hundredths to avoid
    float-accumulation drift.
    """
    bibauthor_print('Coefficient tweaking!')
    bibauthor_print('Cluster sets from mark...')

    lnames = set([generate_last_name_cluster_str(n) for n in lastnames])
    coefficients = [x/100. for x in range(int(min_coef*100),int(max_coef*100),int(stepping*100))]

    if build_matrix:
        schedule_workers(_create_matrix, lnames)
    # Every (lname, coefficient) combination becomes one worker task.
    schedule_workers(_collect_statistics_lname_coeff, ((x,y) for x in lnames for y in coefficients ))
def tortoise_coefficient_statistics(pickle_output=None, generate_graphs=True):
    """Aggregate wedge-coefficient statistics from the per-process report
    files found under /tmp/baistats/ and optionally plot / pickle them.

    :param pickle_output: path to dump the aggregated statistics to
        (serialized with SER); nothing is written when None.
    :param generate_graphs: when True, SVG/PNG graphs are rendered under
        /tmp/graphs/ while the reports are processed.
    """
    import matplotlib.pyplot as plt
    plt.ioff()  # batch mode: figures are only saved to disk, never shown
    def _gen_plot(data, filename):
        # Render one stats mapping {coefficient: [count, avg, min, max,
        # nclusters, normalized nclusters]} as labelled curves and save it.
        plt.clf()
        ax = plt.subplot(111)
        ax.grid(visible=True)
        x = sorted(data.keys())
        w = [data[k][0] for k in x]
        try:
            wscf = max(w)
        except:
            wscf = 0
        # normalize the sample counts so they fit on the shared [0, 1] axis
        w = [float(i)/wscf for i in w]
        y = [data[k][1] for k in x]
        maxi = [data[k][3] for k in x]
        mini = [data[k][2] for k in x]
        lengs = [data[k][4] for k in x]
        try:
            ml = float(max(lengs))
        except:
            ml = 1
        lengs = [k/ml for k in lengs]
        normalengs = [data[k][5] for k in x]
        ax.plot(x,y,'-o',label='avg')
        ax.plot(x,maxi,'-o', label='max')
        ax.plot(x,mini,'-o', label='min')
        ax.plot(x,w, '-x', label='norm %s' % str(wscf))
        ax.plot(x,lengs,'-o',label='acl %s' % str(int(ml)))
        ax.plot(x,normalengs, '-o', label='ncl')
        plt.ylim(ymax = 1., ymin = -0.01)
        plt.xlim(xmax = 1., xmin = -0.01)
        ax.legend(bbox_to_anchor=(0., 1.02, 1., .102), loc=3,ncol=6, mode="expand", borderaxespad=0.)
        plt.savefig(filename)
    override_stdout_config(stdout=True)
    # every worker process leaves one 'cluster_status_report_pid*' file behind
    files = ['/tmp/baistats/'+x for x in os.listdir('/tmp/baistats/') if x.startswith('cluster_status_report_pid')]
    fnum = float(len(files))
    quanta = .1/fnum  # progress-bar share allotted to a single file
    total_stats = 0
    used_coeffs = set()
    used_clusters = set()
    #av_counter, avg, min, max, nclus, normalized_avg
    cluster_stats = defaultdict(lambda : defaultdict(lambda : [0.,0.,0.,0.,0.,0.]))
    coeff_stats = defaultdict(lambda : [0.,0.,0.,0.,0.,0.])
    # NOTE(review): the min slot defaults to 0., so min(default, cur_min)
    # can never rise above 0 — confirm this is the intended semantics.
    def gen_graphs(only_synthetic=False):
        # Plot the global per-coefficient graph and, unless only_synthetic,
        # one graph per last-name cluster as well.
        update_status(0, 'Generating coefficients graph...')
        _gen_plot(coeff_stats, '/tmp/graphs/AAAAA-coefficients.svg')
        if not only_synthetic:
            cn = cluster_stats.keys()
            l = float(len(cn))
            for i,c in enumerate(cn):
                update_status(i/l, 'Generating name graphs... %s' % str(c))
                _gen_plot(cluster_stats[c], '/tmp/graphs/CS-%s.png' % str(c))
    for i,fi in enumerate(files):
        if generate_graphs:
            # refresh the synthetic (global) graph every 1000 files
            if i%1000 ==0:
                gen_graphs(True)
        f = filehandler.open(fi,'r')
        status = i/fnum
        update_status(status, 'Loading '+ fi[fi.find('lastname')+9:])
        contents = SER.load(f)
        f.close()
        # report layout: [coefficient, cluster name, rows, max cluster length]
        cur_coef = contents[0]
        cur_clust = contents[1]
        cur_maxlen = float(contents[3])
        if cur_coef:
            total_stats += 1
            used_coeffs.add(cur_coef)
            used_clusters.add(cur_clust)
            update_status(status+0.2*quanta, ' Computing averages...')
            cur_clen = len(contents[2])
            cur_coeffs = [x[2] for x in contents[2]]
            cur_clustnumber = float(len(set([x[0] for x in contents[2]])))
            assert cur_clustnumber > 0 and cur_clustnumber < cur_maxlen, "Error, found log with strange clustnumber! %s %s %s %s" % (str(cur_clust), str(cur_coef), str(cur_maxlen),
                                                                                                                                    str(cur_clustnumber))
            if cur_coeffs:
                assert len(cur_coeffs) == cur_clen and cur_coeffs, "Error, there is a cluster witohut stuff? %s %s %s"% (str(cur_clust), str(cur_coef), str(cur_coeffs))
                assert all([x >= 0 and x <= 1 for x in cur_coeffs]), "Error, a coefficient is wrong here! Check me! %s %s %s" % (str(cur_clust), str(cur_coef), str(cur_coeffs))
                cur_min = min(cur_coeffs)
                cur_max = max(cur_coeffs)
                cur_avg = sum(cur_coeffs)/cur_clen
                update_status(status+0.4*quanta, ' comulative per coeff...')
                # running (incremental) averages keyed by coefficient
                avi = coeff_stats[cur_coef][0]
                #number of points
                coeff_stats[cur_coef][0] = avi+1
                #average of coefficients
                coeff_stats[cur_coef][1] = (coeff_stats[cur_coef][1]*avi + cur_avg)/(avi+1)
                #min coeff
                coeff_stats[cur_coef][2] = min(coeff_stats[cur_coef][2], cur_min)
                #max coeff
                coeff_stats[cur_coef][3] = max(coeff_stats[cur_coef][3], cur_max)
                #avg number of clusters
                coeff_stats[cur_coef][4] = (coeff_stats[cur_coef][4]*avi + cur_clustnumber)/(avi+1)
                #normalized avg number of clusters
                coeff_stats[cur_coef][5] = (coeff_stats[cur_coef][5]*avi + cur_clustnumber/cur_maxlen)/(avi+1)
                update_status(status+0.6*quanta, ' comulative per cluster per coeff...')
                # same running stats, but per (cluster, coefficient) pair
                avi = cluster_stats[cur_clust][cur_coef][0]
                cluster_stats[cur_clust][cur_coef][0] = avi+1
                cluster_stats[cur_clust][cur_coef][1] = (cluster_stats[cur_clust][cur_coef][1]*avi + cur_avg)/(avi+1)
                cluster_stats[cur_clust][cur_coef][2] = min(cluster_stats[cur_clust][cur_coef][2], cur_min)
                cluster_stats[cur_clust][cur_coef][3] = max(cluster_stats[cur_clust][cur_coef][3], cur_max)
                cluster_stats[cur_clust][cur_coef][4] = (cluster_stats[cur_clust][cur_coef][4]*avi + cur_clustnumber)/(avi+1)
                cluster_stats[cur_clust][cur_coef][5] = (cluster_stats[cur_clust][cur_coef][5]*avi + cur_clustnumber/cur_maxlen)/(avi+1)
    update_status_final('Done!')
    if generate_graphs:
        gen_graphs()
    if pickle_output:
        update_status(0,'Dumping to file...')
        f = open(pickle_output,'w')
        SER.dump({'cluster_stats':dict((x,dict(cluster_stats[x])) for x in cluster_stats.iterkeys()), 'coeff_stats':dict((coeff_stats))}, f)
        f.close()
def create_matrix(cluster_set, force):
    """Build the probability matrix for *cluster_set*.

    Logs the job size (number of bibs and the worst-case number of pairwise
    comparisons) and delegates to ``prepare_matirx`` (sic — the helper is
    really spelled that way elsewhere in the module).
    """
    total_bibs = cluster_set.num_all_bibs
    max_comparisons = total_bibs * (total_bibs - 1) / 2
    message = ("Start building matrix for %s. Total number of bibs: %d, "
               "maximum number of comparisons: %d"
               % (cluster_set.last_name, total_bibs, max_comparisons))
    bibauthor_print(message)
    return prepare_matirx(cluster_set, force)
def force_create_matrix(cluster_set, force):
    """Instantiate the cluster set (passed in as a zero-argument factory)
    and build its matrix."""
    bibauthor_print("Building a cluster set.")
    fresh_set = cluster_set()
    return create_matrix(fresh_set, force)
def wedge_and_store(cluster_set):
    """Run the wedge algorithm on *cluster_set*, replace the previously
    stored clusters for that last name, and persist the new result.

    Always returns True so schedulers can mark the job as completed.
    """
    total_bibs = cluster_set.num_all_bibs
    max_comparisons = total_bibs * (total_bibs - 1) / 2
    message = ("Start working on %s. Total number of bibs: %d, "
               "maximum number of comparisons: %d"
               % (cluster_set.last_name, total_bibs, max_comparisons))
    bibauthor_print(message)
    wedge(cluster_set)
    remove_clusters_by_name(cluster_set.last_name)
    cluster_set.store()
    return True
def force_wedge_and_store(cluster_set):
    """Instantiate the cluster set (passed in as a zero-argument factory)
    and run wedge_and_store on it."""
    bibauthor_print("Building a cluster set.")
    fresh_set = cluster_set()
    return wedge_and_store(fresh_set)
#[temporarily] deprecated
#def schedule_create_matrix(cluster_sets, sizes, force):
# def create_job(cluster):
# def ret():
# return force_create_matrix(cluster, force)
# return ret
#
# memfile_path = None
# if bconfig.DEBUG_PROCESS_PEAK_MEMORY:
# tt = datetime.now()
# tt = (tt.hour, tt.minute, tt.day, tt.month, tt.year)
# memfile_path = ('%smatrix_memory_%d:%d_%d-%d-%d.log' %
# ((bconfig.TORTOISE_FILES_PATH,) + tt))
#
# return schedule(map(create_job, cluster_sets),
# sizes,
# create_approx_func(matrix_coefs),
# memfile_path)
#
#
#def schedule_wedge_and_store(cluster_sets, sizes):
# def create_job(cluster):
# def ret():
# return force_wedge_and_store(cluster)
# return ret
#
# memfile_path = None
# if bconfig.DEBUG_PROCESS_PEAK_MEMORY:
# tt = datetime.now()
# tt = (tt.hour, tt.minute, tt.day, tt.month, tt.year)
# memfile_path = ('%swedge_memory_%d:%d_%d-%d-%d.log' %
# ((bconfig.TORTOISE_FILES_PATH,) + tt))
#
# return schedule(map(create_job, cluster_sets),
# sizes,
# create_approx_func(matrix_coefs),
# memfile_path)
| gpl-2.0 |
rsignell-usgs/notebook | People/csherwood/read_garmin_gpx_calc_effort.py | 1 | 5449 |
# coding: utf-8
# # Read Garmin GPX with heartrate
#
# In[1]:
import numpy as np
import matplotlib.pyplot as plt
from datetime import datetime
import pandas as pd
from lxml import etree
get_ipython().magic(u'matplotlib inline')
# In[2]:
# Parse the Garmin GPX export (an XML file) once up front.
fn = "activity_721671330.gpx"
tree = etree.parse(fn)
# In[3]:
# XML namespaces used by Garmin GPX files; 'def' is the default GPX schema.
namespace = {'def': 'http://www.topografix.com/GPX/1/1',
             'gpxtpx': 'http://www.garmin.com/xmlschemas/TrackPointExtension/v1',
             'gpxx': 'http://www.garmin.com/xmlschemas/GpxExtensions/v3',
             'xsi': 'http://www.w3.org/2001/XMLSchema-instance'
             }
# Within `trk` tags, find `trkpt` elements get element values
# In[4]:
elist = tree.xpath('./def:trk//def:trkpt',namespaces=namespace)
# each trkpt element carries (lon, lat) as its attribute values
lonlat = [e.values() for e in elist]
lon = np.array([float(i[0]) for i in lonlat])
lat = np.array([float(i[1]) for i in lonlat])
print lon[0],lat[0],np.shape(lon)
# Within `trk` tags, find `time` elements and get element text
# In[5]:
elist = tree.xpath('./def:trk//def:time',namespaces=namespace)
fmt = '%Y-%m-%dT%H:%M:%S.%fZ'
time = [datetime.strptime(d.text, fmt) for d in elist]
print time[0], np.shape(time)
# Within `trk` tags, find `hr` elements and get element text. CRS changed this to return an array of floats.
# In[6]:
elist = tree.xpath("./def:trk//gpxtpx:hr", namespaces=namespace)
hr = np.array([float(e.text) for e in elist])
print hr[0], np.shape(hr)
# Make the dataframe, indexed by the timestamp of each track point.
# In[7]:
df = pd.DataFrame.from_dict(dict(time=time, lon=lon, lat=lat, hr=hr))
df.set_index('time', drop=True, inplace=True)
# In[8]:
df.head(5)
# Plot the heartrate
# In[9]:
df['hr'].plot(figsize=(12,4));
# Calculate speed, effort, and efficiency. I have not figured out how to do this in Pandas, or how to avoid the loop when calculating time differences. The .total_seconds() conversion does not work on np arrays of datetime.deltatime objects.
# In[10]:
latr = np.radians(lat)
lonr = np.radians(lon)
dlatr = np.diff(latr)
dlonr = np.diff(lonr)
# Haversine formula for great circle distance between consecutive points
a = np.sin(dlatr/2.)**2 + np.cos(latr[0:-1]) * np.cos(latr[1:]) * np.sin(dlonr/2.)**2
c = 2. * np.arcsin(np.sqrt(a))
distm = 6367e3 * c
# distm is in meters (6367e3 is an average Earth radius in meters).
print "distm",distm[0], np.shape(distm)
# this produces an array of datetime.deltatime objects
difft = np.diff(time)
print "np.diff(time)",difft[5].total_seconds(), np.shape(difft)
# there must be a better way:
# convert each timedelta to float seconds, element by element
dt = np.zeros_like(difft)
for i in np.arange(len(difft)):
    dt[i]= float(difft[i].total_seconds())
etime = np.cumsum(dt)  # elapsed time in seconds
speed = distm/dt
# calculate effort as fraction of usable hr range
hr_rest = 68. # resting rate
hr_ana = 162. # anaerobic threshold
effort = (hr-hr_rest)/(hr_ana-hr_rest)
# calculate efficiency; effort[1:] aligns the per-point heart rate with the
# first-difference arrays (speed/distm), which are one sample shorter
eff = speed/effort[1:]
print "Effort: ",effort[0], type(effort), np.shape(effort)
fig = plt.figure(figsize=(12,4))
plt.plot(etime/60.,speed,label='Speed')
plt.plot(etime/60.,eff,label='Efficiency')
plt.ylabel('m/s; m/s/effort')
plt.xlabel('Elapsed time (minutes)')
plt.legend()
# Plot lon/lat with Cartopy
# In[11]:
import cartopy.crs as ccrs
from cartopy.io.img_tiles import MapQuestOpenAerial
geodetic = ccrs.Geodetic(globe=ccrs.Globe(datum='WGS84'))
# bounding box of the track: [west, south, east, north]
b=np.array([lon.min(), lat.min(), lon.max(), lat.max()])
plt.figure(figsize=(12,12))
# Open Source Imagery from MapQuest (max zoom = 16?)
tiler = MapQuestOpenAerial()
# Open Street Map (max zoom = 18?)
#tiler = OSM()
ax = plt.axes(projection=tiler.crs)
dx=b[2]-b[0]
dy=b[3]-b[1]
# pad the map extent by 10% of the track span on every side
extent = (b[0]-0.1*dx,b[2]+0.1*dx,b[1]-0.1*dy,b[3]+0.1*dy)
ax.set_extent(extent, geodetic)
ax.add_image(tiler, 14)
plt.plot(lon[1:],lat[1:],'m-',transform=ccrs.PlateCarree());
# sheesh, this is embarassing
# 1) clip lat/lon and hr to length of other stuff (should actually interpolate to centerpoint)
lons = lon[1:]
lats = lat[1:]
# this does not work:
# plt.plot(lons,lats,transform=ccrs.PlateCarree(),marker='o',c=eff);
# nor does this:
# for i in np.arange(len(lons)):
#     plt.plot(lons[i],lats[i],transform=ccrs.PlateCarree(),marker='o',c=eff[i]);
# ax.scatter(lons,lats,transform=ccrs.PlateCarree(),marker='o',c=eff);
# ax.scatter(lons,lats,marker='o',c=eff);
gl=ax.gridlines(draw_labels=True)
gl.xlabels_top = False
gl.ylabels_right = False
# In[12]:
# Same map again, this time coloring the track points by efficiency.
eff = speed/effort[1:]
import cartopy.crs as ccrs
from cartopy.io.img_tiles import MapQuestOpenAerial
geodetic = ccrs.Geodetic(globe=ccrs.Globe(datum='WGS84'))
b=np.array([lon.min(), lat.min(), lon.max(), lat.max()])
plt.figure(figsize=(12,12))
# Open Source Imagery from MapQuest (max zoom = 16?)
tiler = MapQuestOpenAerial()
# Open Street Map (max zoom = 18?)
#tiler = OSM()
ax = plt.axes(projection=tiler.crs)
dx=b[2]-b[0]
dy=b[3]-b[1]
extent = (b[0]-0.1*dx,b[2]+0.1*dx,b[1]-0.1*dy,b[3]+0.1*dy)
ax.set_extent(extent, geodetic)
ax.add_image(tiler, 14)
# sheesh, this is embarassing
# 1) clip lat/lon and hr to length of other stuff (should actually interpolate to centerpoint)
lons = lon[1:]
lats = lat[1:]
# this does not work:
# plt.plot(lons,lats,transform=ccrs.PlateCarree(),marker='o',c=eff);
# nor does this:
# for i in np.arange(len(lons)):
#     plt.plot(lons[i],lats[i],transform=ccrs.PlateCarree(),marker='o',c=eff[i]);
kw = dict(alpha=0.5, lw=0 )
# passing eff as a plain list makes scatter treat it as a color array
ax.scatter(lons,lats,transform=ccrs.PlateCarree(),marker='o',c=eff.tolist(),**kw);
# ax.scatter(lons,lats,marker='o',c=eff);
gl=ax.gridlines(draw_labels=True)
gl.xlabels_top = False
gl.ylabels_right = False
# In[13]:
# rescale efficiency to [0, 1] (min-max normalization)
se = (eff - eff.min()) / eff.ptp()
| mit |
amitjamadagni/sympy | sympy/plotting/plot.py | 1 | 58450 | """Plotting module for Sympy.
A plot is represented by the ``Plot`` class that contains a reference to the
backend and a list of the data series to be plotted. The data series are
instances of classes meant to simplify getting points and meshes from sympy
expressions. ``plot_backends`` is a dictionary with all the backends.
This module gives only the essential. For all the fancy stuff use directly
the backend. You can get the backend wrapper for every plot from the
``_backend`` attribute. Moreover the data series classes have various useful
methods like ``get_points``, ``get_segments``, ``get_meshes``, etc, that may
be useful if you wish to use another plotting library.
Especially if you need publication ready graphs and this module is not enough
for you - just get the ``_backend`` attribute and add whatever you want
directly to it. In the case of matplotlib (the common way to graph data in
python) just copy ``_backend.fig`` which is the figure and ``_backend.ax``
which is the axis and work on them as you would on any other matplotlib object.
Simplicity of code takes much greater importance than performance. Don't use it
if you care at all about performance. A new backend instance is initialized
every time you call ``show()`` and the old one is left to the garbage collector.
"""
from inspect import getargspec
from itertools import chain
from sympy import sympify, Expr, Tuple, Dummy
from sympy.external import import_module
from sympy.core.compatibility import set_union
import warnings
from experimental_lambdify import (vectorized_lambdify, lambdify)
#TODO probably all of the imports after this line can be put inside function to
# speed up the `from sympy import *` command.
np = import_module('numpy')
# Backend specific imports - matplotlib
matplotlib = import_module('matplotlib',
__import__kwargs={'fromlist': ['pyplot', 'cm', 'collections']},
min_module_version='1.0.0', catch=(RuntimeError,))
if matplotlib:
plt = matplotlib.pyplot
cm = matplotlib.cm
LineCollection = matplotlib.collections.LineCollection
mpl_toolkits = import_module('mpl_toolkits',
__import__kwargs={'fromlist': ['mplot3d']})
Axes3D = mpl_toolkits.mplot3d.Axes3D
art3d = mpl_toolkits.mplot3d.art3d
ListedColormap = matplotlib.colors.ListedColormap
# Backend specific imports - textplot
from sympy.plotting.textplot import textplot
# Global variable
# Set to False when running tests / doctests so that the plots don't
# show.
_show = True


def unset_show():
    """Globally disable showing of plots (used by the test suite so that
    doctests do not open figure windows)."""
    global _show
    _show = False
##############################################################################
# The public interface
##############################################################################
class Plot(object):
    """The central class of the plotting module.

    For interactive work the function ``plot`` is better suited.

    This class permits the plotting of sympy expressions using numerous
    backends (matplotlib, textplot, the old pyglet module for sympy, Google
    charts api, etc).

    The figure can contain an arbitrary number of plots of sympy expressions,
    lists of coordinates of points, etc. Plot has a private attribute _series that
    contains all data series to be plotted (expressions for lines or surfaces,
    lists of points, etc (all subclasses of BaseSeries)). Those data series are
    instances of classes not imported by ``from sympy import *``.

    The customization of the figure is on two levels. Global options that
    concern the figure as a whole (eg title, xlabel, scale, etc) and
    per-data series options (eg name) and aesthetics (eg. color, point shape,
    line type, etc.).

    The difference between options and aesthetics is that an aesthetic can be
    a function of the coordinates (or parameters in a parametric plot). The
    supported values for an aesthetic are:
    - None (the backend uses default values)
    - a constant
    - a function of one variable (the first coordinate or parameter)
    - a function of two variables (the first and second coordinate or
    parameters)
    - a function of three variables (only in nonparametric 3D plots)
    Their implementation depends on the backend so they may not work in some
    backends.

    If the plot is parametric and the arity of the aesthetic function permits
    it the aesthetic is calculated over parameters and not over coordinates.
    If the arity does not permit calculation over parameters the calculation is
    done over coordinates.

    Only cartesian coordinates are supported for the moment, but you can use
    the parametric plots to plot in polar, spherical and cylindrical
    coordinates.

    The arguments for the constructor Plot must be subclasses of BaseSeries.

    Any global option can be specified as a keyword argument.

    The global options for a figure are:
    - title : str
    - xlabel : str
    - ylabel : str
    - legend : bool
    - xscale : {'linear', 'log'}
    - yscale : {'linear', 'log'}
    - axis : bool
    - axis_center : tuple of two floats or {'center', 'auto'}
    - xlim : tuple of two floats
    - ylim : tuple of two floats
    - aspect_ratio : tuple of two floats or {'auto'}
    - autoscale : bool
    - margin : float in [0, 1]

    The per data series options and aesthetics are:
    There are none in the base series. See below for options for subclasses.

    Some data series support additional aesthetics or options:

    ListSeries, LineOver1DRangeSeries, Parametric2DLineSeries,
    Parametric3DLineSeries support the following:

    Aesthetics:
    - line_color : function which returns a float.

    options:
    - label : str
    - steps : bool
    - integers_only : bool

    SurfaceOver2DRangeSeries, ParametricSurfaceSeries support the following:

    aesthetics:
    - surface_color : function which returns a float.
    """

    def __init__(self, *args, **kwargs):
        super(Plot, self).__init__()

        # Options for the graph as a whole.
        # The possible values for each option are described in the docstring of
        # Plot. They are based purely on convention, no checking is done.
        self.title = None
        self.xlabel = None
        self.ylabel = None
        self.aspect_ratio = 'auto'
        self.xlim = None
        self.ylim = None
        self.axis_center = 'auto'
        self.axis = True
        self.xscale = 'linear'
        self.yscale = 'linear'
        self.legend = False
        self.autoscale = True
        self.margin = 0

        # Contains the data objects to be plotted. The backend should be smart
        # enough to iterate over this list.
        self._series = []
        self._series.extend(args)

        # The backend type. On every show() a new backend instance is created
        # in self._backend which is tightly coupled to the Plot instance
        # (thanks to the parent attribute of the backend).
        self.backend = DefaultBackend

        # The keyword arguments should only contain options for the plot.
        for key, val in kwargs.iteritems():
            if hasattr(self, key):
                setattr(self, key, val)

    def show(self):
        """Create a fresh backend instance and display the figure,
        closing any previously created backend first."""
        # TODO move this to the backend (also for save)
        if hasattr(self, '_backend'):
            self._backend.close()
        self._backend = self.backend(self)
        self._backend.show()

    def save(self, path):
        """Save the figure to ``path`` using a fresh backend instance."""
        if hasattr(self, '_backend'):
            self._backend.close()
        self._backend = self.backend(self)
        self._backend.save(path)

    def __str__(self):
        series_strs = [('[%d]: ' % i) + str(s)
                       for i, s in enumerate(self._series)]
        return 'Plot object containing:\n' + '\n'.join(series_strs)

    def __getitem__(self, index):
        return self._series[index]

    def __setitem__(self, index, *args):
        """Replace the data series at ``index`` with a new BaseSeries.

        Bug fix: the original stored the ``args`` tuple itself instead of
        the single series it contains.
        """
        if len(args) == 1 and isinstance(args[0], BaseSeries):
            self._series[index] = args[0]

    def __delitem__(self, index):
        del self._series[index]

    def append(self, *args):
        """Adds one more graph to the figure."""
        if len(args) == 1 and isinstance(args[0], BaseSeries):
            self._series.append(*args)
        else:
            # NOTE(review): ``Series`` is not defined anywhere in this
            # module, so this fallback raises NameError for non-BaseSeries
            # input — confirm whether a TypeError was intended instead.
            self._series.append(Series(*args))

    def extend(self, arg):
        """Adds the series from another plot or a list of series."""
        if isinstance(arg, Plot):
            self._series.extend(arg._series)
        else:
            self._series.extend(arg)
##############################################################################
# Data Series
##############################################################################
#TODO more general way to calculate aesthetics (see get_color_array)
### The base class for all series
class BaseSeries(object):
    """Base class for the data objects containing stuff to be plotted.

    The backend should check if it supports the data series that it's given.
    (eg TextBackend supports only LineOver1DRange).
    It's the backend responsibility to know how to use the class of
    data series that it's given.

    Series classes are grouped through boolean class flags (is_2Dline,
    is_3Dsurface, ...) rather than through inheritance: setting several
    flags on one class is simpler than multiple inheritance. The flags
    describe, by convention only, which accessor methods a backend may
    expect on the series (get_points, get_segments, get_meshes,
    get_color_array, ...); a backend is free to ignore that api.
    """

    # 2D lines: backends may expect get_points (two 1D arrays),
    # get_segments and get_color_array (colors at the points of get_points).
    is_2Dline = False

    # 3D lines: same protocol as 2D lines, with three coordinate arrays.
    is_3Dline = False

    # 3D surfaces: backends may expect get_meshes returning three 2D
    # arrays; get_points is an alias for get_meshes.
    is_3Dsurface = False

    # Contour plots: same accessors as surfaces.
    is_contour = False

    # Implicit plots: get_meshes returns two 1D arrays and one 2D array.
    # Kept distinct from is_contour because backends pick another colormap.
    is_implicit = False

    # Parametric series: backends may expect get_parameter_points returning
    # one or two arrays, used when computing aesthetics.
    is_parametric = False

    def __init__(self):
        super(BaseSeries, self).__init__()

    @property
    def is_3D(self):
        """True for any series living in three dimensions."""
        return self.is_3Dline or self.is_3Dsurface

    @property
    def is_line(self):
        """True for any line-like series, 2D or 3D."""
        return self.is_2Dline or self.is_3Dline
### 2D lines
class Line2DBaseSeries(BaseSeries):
    """A base class for 2D lines.

    - adding the label, steps and only_integers options
    - making is_2Dline true
    - defining get_segments and get_color_array
    """

    is_2Dline = True

    _dim = 2

    def __init__(self):
        super(Line2DBaseSeries, self).__init__()
        self.label = None
        self.steps = False
        self.only_integers = False
        self.line_color = None

    def get_segments(self):
        """Return a masked array of consecutive point pairs, shaped
        (n-1, 2, _dim), suitable for a matplotlib LineCollection."""
        points = self.get_points()
        if self.steps is True:
            # Duplicate every sample and shift x against y by one entry to
            # turn the line into a staircase ("steps") shape.
            x = np.array((points[0], points[0])).T.flatten()[1:]
            y = np.array((points[1], points[1])).T.flatten()[:-1]
            points = (x, y)
        points = np.ma.array(points).T.reshape(-1, 1, self._dim)
        return np.ma.concatenate([points[:-1], points[1:]], axis=1)

    def get_color_array(self):
        """Evaluate the ``line_color`` aesthetic.

        A callable color is vectorized and evaluated at segment centers —
        of the parameter when the series is parametric and the callable
        takes one argument, of the coordinates otherwise. A non-callable
        (constant) color is broadcast over ``nb_of_points``.
        """
        c = self.line_color
        if hasattr(c, '__call__'):
            f = np.vectorize(c)
            # arity = number of positional parameters of the color function
            # (getargspec is the Python-2-era inspect API used here)
            arity = len(getargspec(c)[0])
            if arity == 1 and self.is_parametric:
                x = self.get_parameter_points()
                return f(centers_of_segments(x))
            else:
                # Python 2 map() returns a list, so indexing below is valid.
                variables = map(centers_of_segments, self.get_points())
                if arity == 1:
                    return f(variables[0])
                elif arity == 2:
                    return f(*variables[:2])
                else:  # only if the line is 3D (otherwise raises an error)
                    return f(*variables)
        else:
            return c*np.ones(self.nb_of_points)
class List2DSeries(Line2DBaseSeries):
    """A 2D line defined by explicit lists of x and y coordinates."""

    def __init__(self, list_x, list_y):
        super(List2DSeries, self).__init__()
        self.label = 'list'
        self.list_x = np.array(list_x)
        self.list_y = np.array(list_y)

    def __str__(self):
        return 'list plot'

    def get_points(self):
        """Return the stored coordinates as an (x_array, y_array) pair."""
        return (self.list_x, self.list_y)
class LineOver1DRangeSeries(Line2DBaseSeries):
    """Representation for a line consisting of a sympy expression over a range."""

    def __init__(self, expr, var_start_end, **kwargs):
        super(LineOver1DRangeSeries, self).__init__()
        self.expr = sympify(expr)
        self.label = str(self.expr)
        self.var = sympify(var_start_end[0])
        self.start = float(var_start_end[1])
        self.end = float(var_start_end[2])
        self.nb_of_points = kwargs.get('nb_of_points', 300)
        self.adaptive = kwargs.get('adaptive', True)
        self.depth = kwargs.get('depth', 12)
        self.line_color = kwargs.get('line_color', None)

    def __str__(self):
        return 'cartesian line: %s for %s over %s' % (
            str(self.expr), str(self.var), str((self.start, self.end)))

    def get_segments(self):
        """
        Adaptively gets segments for plotting.

        The adaptive sampling is done by recursively checking if three
        points are almost collinear. If they are not collinear, then more
        points are added between those points.

        References
        ==========
        [1] Adaptive polygonal approximation of parametric curves,
            Luiz Henrique de Figueiredo.
        """
        if self.only_integers or not self.adaptive:
            return super(LineOver1DRangeSeries, self).get_segments()
        else:
            f = lambdify([self.var], self.expr)
            list_segments = []

            def sample(p, q, depth):
                """ Samples recursively if three points are almost collinear.
                For depth < 6, points are added irrespective of whether they
                satisfy the collinearity condition or not. The maximum depth
                allowed is 12.
                """
                #Randomly sample to avoid aliasing.
                random = 0.45 + np.random.rand() * 0.1
                xnew = p[0] + random * (q[0] - p[0])
                ynew = f(xnew)
                new_point = np.array([xnew, ynew])

                #Maximum depth
                if depth > self.depth:
                    list_segments.append([p, q])

                #Sample irrespective of whether the line is flat till the
                #depth of 6. We are not using linspace to avoid aliasing.
                elif depth < 6:
                    sample(p, new_point, depth + 1)
                    sample(new_point, q, depth + 1)

                #Sample ten points if complex values are encountered
                #at both ends. If there is a real value in between, then
                #sample those points further.
                elif p[1] is None and q[1] is None:
                    xarray = np.linspace(p[0], q[0], 10)
                    yarray = map(f, xarray)
                    if any(y is not None for y in yarray):
                        # Bug fix: the original looped over the bare integer
                        # ``len(yarray) - 1`` (a TypeError); iterate over the
                        # index range instead.
                        for i in range(len(yarray) - 1):
                            if yarray[i] is not None or yarray[i + 1] is not None:
                                sample([xarray[i], yarray[i]],
                                       [xarray[i + 1], yarray[i + 1]], depth + 1)

                #Sample further if one of the end points in None( i.e. a complex
                #value) or the three points are not almost collinear.
                elif (p[1] is None or q[1] is None or new_point[1] is None
                        or not flat(p, new_point, q)):
                    sample(p, new_point, depth + 1)
                    sample(new_point, q, depth + 1)
                else:
                    list_segments.append([p, q])

            f_start = f(self.start)
            f_end = f(self.end)
            sample([self.start, f_start], [self.end, f_end], 0)

            return list_segments

    def get_points(self):
        """Uniformly sample the expression over [start, end]; only the
        integer abscissae are used when ``only_integers`` is set."""
        if self.only_integers is True:
            list_x = np.linspace(int(self.start), int(self.end),
                                 num=int(self.end) - int(self.start) + 1)
        else:
            list_x = np.linspace(self.start, self.end, num=self.nb_of_points)
        f = vectorized_lambdify([self.var], self.expr)
        list_y = f(list_x)
        return (list_x, list_y)
class Parametric2DLineSeries(Line2DBaseSeries):
    """Representation for a line consisting of two parametric sympy expressions
    over a range."""

    is_parametric = True

    def __init__(self, expr_x, expr_y, var_start_end, **kwargs):
        super(Parametric2DLineSeries, self).__init__()
        self.expr_x = sympify(expr_x)
        self.expr_y = sympify(expr_y)
        self.label = "(%s, %s)" % (str(self.expr_x), str(self.expr_y))
        self.var = sympify(var_start_end[0])
        self.start = float(var_start_end[1])
        self.end = float(var_start_end[2])
        self.nb_of_points = kwargs.get('nb_of_points', 300)
        self.adaptive = kwargs.get('adaptive', True)
        self.depth = kwargs.get('depth', 12)
        self.line_color = kwargs.get('line_color', None)

    def __str__(self):
        return 'parametric cartesian line: (%s, %s) for %s over %s' % (
            str(self.expr_x), str(self.expr_y), str(self.var),
            str((self.start, self.end)))

    def get_parameter_points(self):
        """Uniform samples of the parameter interval."""
        return np.linspace(self.start, self.end, num=self.nb_of_points)

    def get_points(self):
        """Evaluate both coordinate expressions over the parameter samples
        and return (x, y) arrays."""
        param = self.get_parameter_points()
        fx = vectorized_lambdify([self.var], self.expr_x)
        fy = vectorized_lambdify([self.var], self.expr_y)
        list_x = fx(param)
        list_y = fy(param)
        return (list_x, list_y)

    def get_segments(self):
        """
        Adaptively gets segments for plotting.

        The adaptive sampling is done by recursively checking if three
        points are almost collinear. If they are not collinear, then more
        points are added between those points.

        References
        ==========
        [1] Adaptive polygonal approximation of parametric curves,
            Luiz Henrique de Figueiredo.
        """
        if not self.adaptive:
            return super(Parametric2DLineSeries, self).get_segments()

        f_x = lambdify([self.var], self.expr_x)
        f_y = lambdify([self.var], self.expr_y)
        list_segments = []

        def sample(param_p, param_q, p, q, depth):
            """ Samples recursively if three points are almost collinear.
            For depth < 6, points are added irrespective of whether they
            satisfy the collinearity condition or not. The maximum depth
            allowed is 12.
            """
            #Randomly sample to avoid aliasing.
            random = 0.45 + np.random.rand() * 0.1
            param_new = param_p + random * (param_q - param_p)
            xnew = f_x(param_new)
            ynew = f_y(param_new)
            new_point = np.array([xnew, ynew])

            #Maximum depth
            if depth > self.depth:
                list_segments.append([p, q])

            #Sample irrespective of whether the line is flat till the
            #depth of 6. We are not using linspace to avoid aliasing.
            elif depth < 6:
                sample(param_p, param_new, p, new_point, depth + 1)
                sample(param_new, param_q, new_point, q, depth + 1)

            #Sample ten points if complex values are encountered
            #at both ends. If there is a real value in between, then
            #sample those points further.
            # NOTE(review): the first clause tests p[0]/q[1]; a symmetric
            # p[0]/q[0] check may have been intended — confirm before
            # changing the condition.
            elif ((p[0] is None and q[1] is None) or
                    (p[1] is None and q[1] is None)):
                param_array = np.linspace(param_p, param_q, 10)
                x_array = map(f_x, param_array)
                y_array = map(f_y, param_array)
                if any(x is not None and y is not None
                        for x, y in zip(x_array, y_array)):
                    # Bug fix: the original looped over the bare integer
                    # ``len(y_array) - 1`` (a TypeError); iterate over the
                    # index range instead.
                    for i in range(len(y_array) - 1):
                        if ((x_array[i] is not None and y_array[i] is not None) or
                                (x_array[i + 1] is not None and y_array[i + 1] is not None)):
                            point_a = [x_array[i], y_array[i]]
                            point_b = [x_array[i + 1], y_array[i + 1]]
                            # Bug fix: recurse on the interval
                            # [param_array[i], param_array[i + 1]]; the
                            # original passed the same parameter twice,
                            # making the recursion degenerate.
                            sample(param_array[i], param_array[i + 1], point_a,
                                   point_b, depth + 1)

            #Sample further if one of the end points in None( ie a complex
            #value) or the three points are not almost collinear.
            elif (p[0] is None or p[1] is None
                    or q[1] is None or q[0] is None
                    or not flat(p, new_point, q)):
                sample(param_p, param_new, p, new_point, depth + 1)
                sample(param_new, param_q, new_point, q, depth + 1)
            else:
                list_segments.append([p, q])

        f_start_x = f_x(self.start)
        f_start_y = f_y(self.start)
        start = [f_start_x, f_start_y]
        f_end_x = f_x(self.end)
        f_end_y = f_y(self.end)
        end = [f_end_x, f_end_y]
        sample(self.start, self.end, start, end, 0)

        return list_segments
### 3D lines
class Line3DBaseSeries(Line2DBaseSeries):
    """A base class for 3D lines.

    Most of the stuff is derived from Line2DBaseSeries; only the
    dimensionality flags and the segment dimension change."""

    is_2Dline = False
    is_3Dline = True
    _dim = 3

    def __init__(self):
        super(Line3DBaseSeries, self).__init__()
class Parametric3DLineSeries(Line3DBaseSeries):
    """Representation for a 3D line consisting of two parametric sympy
    expressions and a range."""

    def __init__(self, expr_x, expr_y, expr_z, var_start_end, **kwargs):
        super(Parametric3DLineSeries, self).__init__()
        self.expr_x = sympify(expr_x)
        self.expr_y = sympify(expr_y)
        self.expr_z = sympify(expr_z)
        # Bug fix: include the z expression in the label; the original
        # formatted only (expr_x, expr_y) although the line is 3D (compare
        # __str__ below, which prints all three expressions).
        self.label = "(%s, %s, %s)" % (str(self.expr_x), str(self.expr_y),
                                       str(self.expr_z))
        self.var = sympify(var_start_end[0])
        self.start = float(var_start_end[1])
        self.end = float(var_start_end[2])
        self.nb_of_points = kwargs.get('nb_of_points', 300)
        self.line_color = kwargs.get('line_color', None)

    def __str__(self):
        return '3D parametric cartesian line: (%s, %s, %s) for %s over %s' % (
            str(self.expr_x), str(self.expr_y), str(self.expr_z),
            str(self.var), str((self.start, self.end)))

    def get_parameter_points(self):
        """Uniform samples of the parameter interval."""
        return np.linspace(self.start, self.end, num=self.nb_of_points)

    def get_points(self):
        """Evaluate the three coordinate expressions over the parameter
        samples and return (x, y, z) arrays."""
        param = self.get_parameter_points()
        fx = vectorized_lambdify([self.var], self.expr_x)
        fy = vectorized_lambdify([self.var], self.expr_y)
        fz = vectorized_lambdify([self.var], self.expr_z)
        list_x = fx(param)
        list_y = fy(param)
        list_z = fz(param)
        return (list_x, list_y, list_z)
### Surfaces
class SurfaceBaseSeries(BaseSeries):
    """A base class for 3D surfaces."""

    is_3Dsurface = True

    def __init__(self):
        super(SurfaceBaseSeries, self).__init__()
        self.surface_color = None

    def get_color_array(self):
        """Evaluate the ``surface_color`` aesthetic.

        A callable is vectorized over face centers — of the parameter
        meshes when the surface is parametric (for arity 1 or 2), of the
        coordinate meshes otherwise.
        """
        c = self.surface_color
        if callable(c):
            f = np.vectorize(c)
            # arity = number of positional parameters of the color function
            arity = len(getargspec(c)[0])
            if self.is_parametric:
                variables = map(centers_of_faces, self.get_parameter_meshes())
                if arity == 1:
                    return f(variables[0])
                elif arity == 2:
                    return f(*variables)
            # fall through: arity 3 on a parametric surface, or any
            # non-parametric surface, evaluates over the coordinate meshes
            variables = map(centers_of_faces, self.get_meshes())
            if arity == 1:
                return f(variables[0])
            elif arity == 2:
                return f(*variables[:2])
            else:
                return f(*variables)
        else:
            # NOTE(review): the surface subclasses define nb_of_points_x/y
            # (or _u/_v) but never ``nb_of_points`` — a constant
            # surface_color likely raises AttributeError here; confirm.
            return c*np.ones(self.nb_of_points)
class SurfaceOver2DRangeSeries(SurfaceBaseSeries):
    """Representation for a 3D surface consisting of a sympy expression and 2D
    range."""

    def __init__(self, expr, var_start_end_x, var_start_end_y, **kwargs):
        super(SurfaceOver2DRangeSeries, self).__init__()
        self.expr = sympify(expr)
        # first free variable and its range
        self.var_x = sympify(var_start_end_x[0])
        self.start_x = float(var_start_end_x[1])
        self.end_x = float(var_start_end_x[2])
        # second free variable and its range
        self.var_y = sympify(var_start_end_y[0])
        self.start_y = float(var_start_end_y[1])
        self.end_y = float(var_start_end_y[2])
        self.nb_of_points_x = kwargs.get('nb_of_points_x', 50)
        self.nb_of_points_y = kwargs.get('nb_of_points_y', 50)
        self.surface_color = kwargs.get('surface_color', None)

    def __str__(self):
        template = ('cartesian surface: %s for'
                    ' %s over %s and %s over %s')
        return template % (
            str(self.expr),
            str(self.var_x),
            str((self.start_x, self.end_x)),
            str(self.var_y),
            str((self.start_y, self.end_y)))

    def get_meshes(self):
        """Sample the expression over a regular (x, y) grid and return the
        (mesh_x, mesh_y, mesh_z) arrays."""
        x_samples = np.linspace(self.start_x, self.end_x,
                                num=self.nb_of_points_x)
        y_samples = np.linspace(self.start_y, self.end_y,
                                num=self.nb_of_points_y)
        mesh_x, mesh_y = np.meshgrid(x_samples, y_samples)
        f = vectorized_lambdify((self.var_x, self.var_y), self.expr)
        return (mesh_x, mesh_y, f(mesh_x, mesh_y))
class ParametricSurfaceSeries(SurfaceBaseSeries):
    """Representation for a 3D surface consisting of three parametric sympy
    expressions and a range."""

    is_parametric = True

    def __init__(
        self, expr_x, expr_y, expr_z, var_start_end_u, var_start_end_v,
            **kwargs):
        super(ParametricSurfaceSeries, self).__init__()
        self.expr_x = sympify(expr_x)
        self.expr_y = sympify(expr_y)
        self.expr_z = sympify(expr_z)
        # Each range argument is a 3-tuple: (symbol, lower, upper).
        self.var_u = sympify(var_start_end_u[0])
        self.start_u = float(var_start_end_u[1])
        self.end_u = float(var_start_end_u[2])
        self.var_v = sympify(var_start_end_v[0])
        self.start_v = float(var_start_end_v[1])
        self.end_v = float(var_start_end_v[2])
        self.nb_of_points_u = kwargs.get('nb_of_points_u', 50)
        self.nb_of_points_v = kwargs.get('nb_of_points_v', 50)
        self.surface_color = kwargs.get('surface_color', None)

    def __str__(self):
        description = ('parametric cartesian surface: (%s, %s, %s) for'
                       ' %s over %s and %s over %s')
        return description % (str(self.expr_x),
                              str(self.expr_y),
                              str(self.expr_z),
                              str(self.var_u),
                              str((self.start_u, self.end_u)),
                              str(self.var_v),
                              str((self.start_v, self.end_v)))

    def get_parameter_meshes(self):
        """2D grids of the (u, v) parameter samples."""
        u_samples = np.linspace(self.start_u, self.end_u,
                                num=self.nb_of_points_u)
        v_samples = np.linspace(self.start_v, self.end_v,
                                num=self.nb_of_points_v)
        return np.meshgrid(u_samples, v_samples)

    def get_meshes(self):
        """Evaluate the three coordinate expressions on the (u, v) grid."""
        mesh_u, mesh_v = self.get_parameter_meshes()
        def evaluate(expr):
            return vectorized_lambdify(
                (self.var_u, self.var_v), expr)(mesh_u, mesh_v)
        return (evaluate(self.expr_x),
                evaluate(self.expr_y),
                evaluate(self.expr_z))
### Contours
class ContourSeries(BaseSeries):
    """Representation for a contour plot."""
    #The code is mostly repetition of SurfaceOver2DRange.
    #XXX: Presently not used in any of those functions.
    #XXX: Add contour plot and use this seties.

    is_contour = True

    def __init__(self, expr, var_start_end_x, var_start_end_y):
        super(ContourSeries, self).__init__()
        self.nb_of_points_x = 50
        self.nb_of_points_y = 50
        self.expr = sympify(expr)
        # Each range argument is a 3-tuple: (symbol, lower, upper).
        self.var_x = sympify(var_start_end_x[0])
        self.start_x = float(var_start_end_x[1])
        self.end_x = float(var_start_end_x[2])
        self.var_y = sympify(var_start_end_y[0])
        self.start_y = float(var_start_end_y[1])
        self.end_y = float(var_start_end_y[2])
        # A contour is drawn from the same data a surface would use.
        self.get_points = self.get_meshes

    def __str__(self):
        description = ('contour: %s for '
                       '%s over %s and %s over %s')
        return description % (str(self.expr),
                              str(self.var_x),
                              str((self.start_x, self.end_x)),
                              str(self.var_y),
                              str((self.start_y, self.end_y)))

    def get_meshes(self):
        """Sample expr on a uniform grid; returns (mesh_x, mesh_y, values)."""
        x_samples = np.linspace(self.start_x, self.end_x,
                                num=self.nb_of_points_x)
        y_samples = np.linspace(self.start_y, self.end_y,
                                num=self.nb_of_points_y)
        mesh_x, mesh_y = np.meshgrid(x_samples, y_samples)
        f = vectorized_lambdify((self.var_x, self.var_y), self.expr)
        return (mesh_x, mesh_y, f(mesh_x, mesh_y))
##############################################################################
# Backends
##############################################################################
class BaseBackend(object):
    """Common base for plotting backends.

    A backend renders the series of one ``Plot`` instance, which it
    keeps as ``self.parent``.
    """

    def __init__(self, parent):
        super(BaseBackend, self).__init__()
        # The owning Plot instance whose _series this backend draws.
        self.parent = parent
class MatplotlibBackend(BaseBackend):
    """Render a Plot's series with matplotlib (2D axes or a 3D Axes3D)."""

    def __init__(self, parent):
        super(MatplotlibBackend, self).__init__(parent)
        # A single figure holds either only 2D series or only 3D series;
        # mixing the two on one Axes is rejected below.
        are_3D = [s.is_3D for s in self.parent._series]
        if any(are_3D) and not all(are_3D):
            raise ValueError('The matplotlib backend can not mix 2D and 3D.')
        elif not any(are_3D):
            self.fig = plt.figure()
            self.ax = self.fig.add_subplot(111)
            # Draw the axes through the origin ("school-book" style)
            # instead of matplotlib's default box frame.
            self.ax.spines['left'].set_position('zero')
            self.ax.spines['right'].set_color('none')
            self.ax.spines['bottom'].set_position('zero')
            self.ax.spines['top'].set_color('none')
            self.ax.spines['left'].set_smart_bounds(True)
            self.ax.spines['bottom'].set_smart_bounds(True)
            self.ax.xaxis.set_ticks_position('bottom')
            self.ax.yaxis.set_ticks_position('left')
        elif all(are_3D):
            self.fig = plt.figure()
            self.ax = self.fig.add_subplot(111, projection='3d')

    def process_series(self):
        """Translate every series of the parent Plot into matplotlib
        artists, then apply the Plot-wide options (scales, limits,
        axis center, labels, legend, margins, title)."""
        parent = self.parent
        for s in self.parent._series:
            # Create the collections
            if s.is_2Dline:
                collection = LineCollection(s.get_segments())
                self.ax.add_collection(collection)
            elif s.is_contour:
                self.ax.contour(*s.get_meshes())
            elif s.is_3Dline:
                # TODO too complicated, I blame matplotlib
                collection = art3d.Line3DCollection(s.get_segments())
                self.ax.add_collection(collection)
                x, y, z = s.get_points()
                # Axes3D does not autoscale added Line3DCollections, so
                # the limits are set explicitly from the data.
                self.ax.set_xlim((min(x), max(x)))
                self.ax.set_ylim((min(y), max(y)))
                self.ax.set_zlim((min(z), max(z)))
            elif s.is_3Dsurface:
                x, y, z = s.get_meshes()
                collection = self.ax.plot_surface(x, y, z, cmap=cm.jet,
                                                  rstride=1, cstride=1,
                                                  linewidth=0.1)
            elif s.is_implicit:
                #Smart bounds have to be set to False for implicit plots.
                self.ax.spines['left'].set_smart_bounds(False)
                self.ax.spines['bottom'].set_smart_bounds(False)
                points = s.get_raster()
                if len(points) == 2:
                    #interval math plotting
                    x, y = _matplotlib_list(points[0])
                    self.ax.fill(x, y, facecolor='b', edgecolor='None' )
                else:
                    # use contourf or contour depending on whether it is
                    # an inequality or equality.
                    #XXX: ``contour`` plots multiple lines. Should be fixed.
                    colormap = ListedColormap(["white", "blue"])
                    xarray, yarray, zarray, plot_type = points
                    if plot_type == 'contour':
                        self.ax.contour(xarray, yarray, zarray,
                                        contours=(0, 0), fill=False, cmap=colormap)
                    else:
                        self.ax.contourf(xarray, yarray, zarray, cmap=colormap)
            else:
                raise ValueError('The matplotlib backend supports only '
                                 'is_2Dline, is_3Dline, is_3Dsurface and '
                                 'is_contour objects.')
            # Customise the collections with the corresponding per-series
            # options.
            if hasattr(s, 'label'):
                collection.set_label(s.label)
            if s.is_line and s.line_color:
                if isinstance(s.line_color, (float, int)) or callable(s.line_color):
                    # Numeric/callable colors are mapped through the
                    # colormap via a per-segment scalar array.
                    color_array = s.get_color_array()
                    collection.set_array(color_array)
                else:
                    collection.set_color(s.line_color)
            if s.is_3Dsurface and s.surface_color:
                if matplotlib.__version__ < "1.2.0":  # TODO in the distant future remove this check
                    warnings.warn('The version of matplotlib is too old to use surface coloring.')
                elif isinstance(s.surface_color, (float, int)) or callable(s.surface_color):
                    color_array = s.get_color_array()
                    # plot_surface wants a flat array, one value per face.
                    color_array = color_array.reshape(color_array.size)
                    collection.set_array(color_array)
                else:
                    collection.set_color(s.surface_color)

        # Set global options.
        # TODO The 3D stuff
        # XXX The order of those is important.
        if parent.xscale and not isinstance(self.ax, Axes3D):
            self.ax.set_xscale(parent.xscale)
        if parent.yscale and not isinstance(self.ax, Axes3D):
            self.ax.set_yscale(parent.yscale)
        if parent.xlim:
            self.ax.set_xlim(parent.xlim)
        if parent.ylim:
            self.ax.set_ylim(parent.ylim)
        if not isinstance(self.ax, Axes3D) or matplotlib.__version__ >= '1.2.0':  # XXX in the distant future remove this check
            self.ax.set_autoscale_on(parent.autoscale)
        if parent.axis_center:
            val = parent.axis_center
            if isinstance(self.ax, Axes3D):
                pass
            elif val == 'center':
                self.ax.spines['left'].set_position('center')
                self.ax.spines['bottom'].set_position('center')
            elif val == 'auto':
                xl, xh = self.ax.get_xlim()
                yl, yh = self.ax.get_ylim()
                # Run a spine through the origin only when 0 actually
                # lies inside the corresponding axis range.
                pos_left = ('data', 0) if xl*xh <= 0 else 'center'
                pos_bottom = ('data', 0) if yl*yh <= 0 else 'center'
                self.ax.spines['left'].set_position(pos_left)
                self.ax.spines['bottom'].set_position(pos_bottom)
            else:
                self.ax.spines['left'].set_position(('data', val[0]))
                self.ax.spines['bottom'].set_position(('data', val[1]))
        if not parent.axis:
            self.ax.set_axis_off()
        if parent.legend:
            self.ax.legend()
            self.ax.legend_.set_visible(parent.legend)
        if parent.margin:
            self.ax.set_xmargin(parent.margin)
            self.ax.set_ymargin(parent.margin)
        if parent.title:
            self.ax.set_title(parent.title)
        if parent.xlabel:
            self.ax.set_xlabel(parent.xlabel, position=(1, 0))
        if parent.ylabel:
            self.ax.set_ylabel(parent.ylabel, position=(0, 1))

    def show(self):
        """Draw all series and display the figure with pyplot."""
        self.process_series()
        #TODO after fixing https://github.com/ipython/ipython/issues/1255
        # you can uncomment the next line and remove the pyplot.show() call
        #self.fig.show()
        if _show:
            plt.show()

    def save(self, path):
        """Draw all series and write the figure to *path*."""
        self.process_series()
        self.fig.savefig(path)

    def close(self):
        """Close the underlying matplotlib figure."""
        plt.close(self.fig)
class TextBackend(BaseBackend):
    """Fallback backend that draws a single 1D expression as an ASCII plot."""

    def __init__(self, parent):
        super(TextBackend, self).__init__(parent)

    def show(self):
        """Render the parent's single LineOver1DRangeSeries via textplot."""
        all_series = self.parent._series
        if len(all_series) != 1:
            raise ValueError(
                'The TextBackend supports only one graph per Plot.')
        if not isinstance(all_series[0], LineOver1DRangeSeries):
            raise ValueError(
                'The TextBackend supports only expressions over a 1D range')
        ser = all_series[0]
        textplot(ser.expr, ser.start, ser.end)

    def close(self):
        """Nothing to release for a text plot."""
        pass
class DefaultBackend(BaseBackend):
    """Factory: picks MatplotlibBackend when matplotlib imported cleanly,
    otherwise falls back to TextBackend."""

    def __new__(cls, parent):
        # __new__ returns an instance of a *different* class, so
        # DefaultBackend itself is never actually instantiated.
        chosen = MatplotlibBackend if matplotlib else TextBackend
        return chosen(parent)
# Registry mapping the user-facing backend name to its class; 'default'
# resolves to matplotlib when available, otherwise the text backend.
plot_backends = {
    'matplotlib': MatplotlibBackend,
    'text': TextBackend,
    'default': DefaultBackend
}
##############################################################################
# Finding the centers of line segments or mesh faces
##############################################################################
def centers_of_segments(array):
    """Return the midpoints of consecutive elements of a 1D array."""
    # Stack each element with its successor and average column-wise.
    shifted_pairs = np.vstack((array[:-1], array[1:]))
    return np.average(shifted_pairs, 0)
def centers_of_faces(array):
    """Return the center value of each mesh face of a 2D array.

    For an (m, n) mesh the result is (m-1, n-1): each entry is the mean
    of the four corner values of the corresponding face.
    """
    # Bug fix: the fourth stacked corner was ``array[:-1, :-1]`` again,
    # which double-counted one corner and omitted the opposite one
    # (``array[1:, 1:]``).
    return np.average(np.dstack((array[:-1, :-1],
                                 array[1:, :-1],
                                 array[:-1, 1:],
                                 array[1:, 1:],
                                 )), 2)
def flat(x, y, z, eps=1e-3):
    """Checks whether three points are almost collinear"""
    # The points are collinear iff the vectors y->x and y->z point in
    # exactly opposite directions, i.e. the cosine of the angle between
    # them is -1 (within eps).
    vec_a = x - y
    vec_b = z - y
    norms_product = np.linalg.norm(vec_a) * np.linalg.norm(vec_b)
    cos_theta = np.dot(vec_a, vec_b) / norms_product
    return abs(cos_theta + 1) < eps
def _matplotlib_list(interval_list):
    """
    Returns lists for matplotlib ``fill`` command from a list of bounding
    rectangular intervals
    """
    xlist = []
    ylist = []
    if len(interval_list):
        for intervals in interval_list:
            intervalx = intervals[0]
            intervaly = intervals[1]
            # Four corners of the rectangle (counter-clockwise), then
            # None to break the fill path between rectangles.
            xlist += [intervalx.start, intervalx.start,
                      intervalx.end, intervalx.end, None]
            ylist += [intervaly.start, intervaly.end,
                      intervaly.end, intervaly.start, None]
    else:
        #XXX Ugly hack. Matplotlib does not accept empty lists for ``fill``
        xlist += [None, None, None, None]
        ylist += [None, None, None, None]
    return xlist, ylist
####New API for plotting module ####
# TODO: Add color arrays for plots.
# TODO: Add more plotting options for 3d plots.
# TODO: Adaptive sampling for 3D plots.
def plot(*args, **kwargs):
    """
    Plots a function of a single variable.

    The plotting uses an adaptive algorithm which samples recursively to
    accurately plot the plot. The adaptive algorithm uses a random point near
    the midpoint of two points that has to be further sampled. Hence the same
    plots can appear slightly different.

    Usage
    =====

    Single Plot

    ``plot(expr, range, **kwargs)``

    If the range is not specified, then a default range of (-10, 10) is used.

    Multiple plots with same range.

    ``plot(expr1, expr2, ..., range, **kwargs)``

    If the range is not specified, then a default range of (-10, 10) is used.

    Multiple plots with different ranges.

    ``plot((expr1, range), (expr2, range), ..., **kwargs)``

    Range has to be specified for every expression.

    Default range may change in the future if a more advanced default range
    detection algorithm is implemented.

    Arguments
    =========

    ``expr`` : Expression representing the function of single variable

    ``range``: (x, 0, 5), A 3-tuple denoting the range of the free variable.

    Keyword Arguments
    =================

    Arguments for ``LineOver1DRangeSeries`` class:

    ``adaptive``: Boolean. The default value is set to True. Set adaptive to
    False and specify ``nb_of_points`` if uniform sampling is required.

    ``depth``: int. Recursion depth of the adaptive algorithm. A depth of
    value ``n`` samples a maximum of `2^{n}` points.

    ``nb_of_points``: int. Used when the ``adaptive`` is set to False. The
    function is uniformly sampled at ``nb_of_points`` number of points.

    Aesthetics options:

    ``line_color``: float. Specifies the color for the plot.
    See ``Plot`` to see how to set color for the plots.

    If there are multiple plots, then the same series arguments are applied to
    all the plots. If you want to set these options separately, you can index
    the ``Plot`` object returned and set it.

    Arguments for ``Plot`` class:

    ``title`` : str. Title of the plot. It is set to the latex representation
    of the expression, if the plot has only one expression.

    ``xlabel`` : str. Label for the x - axis.

    ``ylabel`` : str. Label for the y - axis.

    ``xscale``: {'linear', 'log'} Sets the scaling of the x - axis.

    ``yscale``: {'linear', 'log'} Sets the scaling of the y - axis.

    ``axis_center``: tuple of two floats denoting the coordinates of the
    center or {'center', 'auto'}

    ``xlim`` : tuple of two floats, denoting the x - axis limits.

    ``ylim`` : tuple of two floats, denoting the y - axis limits.

    Examples
    ========

    >>> from sympy import symbols
    >>> from sympy.plotting import plot
    >>> x = symbols('x')

    Single Plot

    >>> plot(x**2, (x, -5, 5))# doctest: +SKIP

    Multiple plots with single range.

    >>> plot(x, x**2, x**3, (x, -5, 5))# doctest: +SKIP

    Multiple plots with different ranges.

    >>> plot((x**2, (x, -6, 6)), (x, (x, -5, 5)))# doctest: +SKIP

    No adaptive sampling.

    >>> plot(x**2, adaptive=False, nb_of_points=400)# doctest: +SKIP

    See Also
    ========

    Plot, LineOver1DRangeSeries.
    """
    # list(...) so check_arguments can len()/index the arguments; on
    # Python 3 ``map`` returns a one-shot iterator.
    args = list(map(sympify, args))
    show = kwargs.pop('show', True)
    plot_expr = check_arguments(args, 1, 1)
    # kwargs serve double duty: per-series options (adaptive, depth, ...)
    # and Plot-wide options (title, xlim, ...); each consumer picks what
    # it understands and ignores the rest.
    series = [LineOver1DRangeSeries(*arg, **kwargs) for arg in plot_expr]
    plots = Plot(*series, **kwargs)
    if show:
        plots.show()
    return plots
def plot_parametric(*args, **kwargs):
    """
    Plots a 2D parametric plot.

    The plotting uses an adaptive algorithm which samples recursively to
    accurately plot the plot. The adaptive algorithm uses a random point near
    the midpoint of two points that has to be further sampled. Hence the same
    plots can appear slightly different.

    Usage
    =====

    Single plot.

    ``plot_parametric(expr_x, expr_y, range, **kwargs)``

    If the range is not specified, then a default range of (-10, 10) is used.

    Multiple plots with same range.

    ``plot_parametric((expr1_x, expr1_y), (expr2_x, expr2_y), range, **kwargs)``

    If the range is not specified, then a default range of (-10, 10) is used.

    Multiple plots with different ranges.

    ``plot_parametric((expr_x, expr_y, range), ..., **kwargs)``

    Range has to be specified for every expression.

    Default range may change in the future if a more advanced default range
    detection algorithm is implemented.

    Arguments
    =========

    ``expr_x`` : Expression representing the function along x.

    ``expr_y`` : Expression representing the function along y.

    ``range``: (u, 0, 5), A 3-tuple denoting the range of the parameter
    variable.

    Keyword Arguments
    =================

    Arguments for ``Parametric2DLineSeries`` class:

    ``adaptive``: Boolean. The default value is set to True. Set adaptive to
    False and specify ``nb_of_points`` if uniform sampling is required.

    ``depth``: int. Recursion depth of the adaptive algorithm. A depth of
    value ``n`` samples a maximum of `2^{n}` points.

    ``nb_of_points``: int. Used when the ``adaptive`` is set to False. The
    function is uniformly sampled at ``nb_of_points`` number of points.

    Aesthetics
    ----------

    ``line_color``: function which returns a float. Specifies the color for
    the plot. See ``sympy.plotting.Plot`` for more details.

    If there are multiple plots, then the same Series arguments are applied to
    all the plots. If you want to set these options separately, you can index
    the returned ``Plot`` object and set it.

    Arguments for ``Plot`` class:

    ``xlabel`` : str. Label for the x - axis.

    ``ylabel`` : str. Label for the y - axis.

    ``xscale``: {'linear', 'log'} Sets the scaling of the x - axis.

    ``yscale``: {'linear', 'log'} Sets the scaling of the y - axis.

    ``axis_center``: tuple of two floats denoting the coordinates of the
    center or {'center', 'auto'}

    ``xlim`` : tuple of two floats, denoting the x - axis limits.

    ``ylim`` : tuple of two floats, denoting the y - axis limits.

    Examples
    ========

    >>> from sympy import symbols, cos, sin
    >>> from sympy.plotting import plot_parametric
    >>> u = symbols('u')

    Single Parametric plot

    >>> plot_parametric(cos(u), sin(u), (u, -5, 5))# doctest: +SKIP

    Multiple parametric plot with single range.

    >>> plot_parametric((cos(u), sin(u)), (u, cos(u))) # doctest: +SKIP

    Multiple parametric plots.

    >>> plot_parametric((cos(u), sin(u), (u, -5, 5)),
    ...     (cos(u), u, (u, -5, 5))) # doctest: +SKIP

    See Also
    ========
    Plot, Parametric2DLineSeries
    """
    # list(...) so check_arguments can len()/index the arguments; on
    # Python 3 ``map`` returns a one-shot iterator.
    args = list(map(sympify, args))
    show = kwargs.pop('show', True)
    plot_expr = check_arguments(args, 2, 1)
    # Bug fix: forward **kwargs to the series as well (consistent with
    # plot()); previously per-series options such as adaptive,
    # nb_of_points and line_color were silently ignored here.
    series = [Parametric2DLineSeries(*arg, **kwargs) for arg in plot_expr]
    plots = Plot(*series, **kwargs)
    if show:
        plots.show()
    return plots
def plot3d_parametric_line(*args, **kwargs):
    """
    Plots a 3D parametric line plot.

    Usage
    =====

    Single plot:

    ``plot3d_parametric_line(expr_x, expr_y, expr_z, range, **kwargs)``

    If the range is not specified, then a default range of (-10, 10) is used.

    Multiple plots.

    ``plot3d_parametric_line((expr_x, expr_y, expr_z, range), ..., **kwargs)``

    Ranges have to be specified for every expression.

    Default range may change in the future if a more advanced default range
    detection algorithm is implemented.

    Arguments
    =========

    ``expr_x`` : Expression representing the function along x.

    ``expr_y`` : Expression representing the function along y.

    ``expr_z`` : Expression representing the function along z.

    ``range``: ``(u, 0, 5)``, A 3-tuple denoting the range of the parameter
    variable.

    Keyword Arguments
    =================

    Arguments for ``Parametric3DLineSeries`` class.

    ``nb_of_points``: The range is uniformly sampled at ``nb_of_points``
    number of points.

    Aesthetics:

    ``line_color``: function which returns a float. Specifies the color for
    the plot. See ``sympy.plotting.Plot`` for more details.

    If there are multiple plots, then the same series arguments are applied to
    all the plots. If you want to set these options separately, you can index
    the returned ``Plot`` object and set it.

    Arguments for ``Plot`` class.

    ``title`` : str. Title of the plot.

    Examples
    ========

    >>> from sympy import symbols, cos, sin
    >>> from sympy.plotting import plot3d_parametric_line
    >>> u = symbols('u')

    Single plot.

    >>> plot3d_parametric_line(cos(u), sin(u), u, (u, -5, 5)) # doctest: +SKIP

    Multiple plots.

    >>> plot3d_parametric_line((cos(u), sin(u), u, (u, -5, 5)),
    ...     (sin(u), u**2, u, (u, -5, 5))) # doctest: +SKIP

    See Also
    ========

    Plot, Parametric3DLineSeries
    """
    # list(...) so check_arguments can len()/index the arguments; on
    # Python 3 ``map`` returns a one-shot iterator.
    args = list(map(sympify, args))
    show = kwargs.pop('show', True)
    plot_expr = check_arguments(args, 3, 1)
    # Bug fix: forward **kwargs to the series as well (consistent with
    # plot()); previously options such as nb_of_points and line_color
    # were silently ignored here.
    series = [Parametric3DLineSeries(*arg, **kwargs) for arg in plot_expr]
    plots = Plot(*series, **kwargs)
    if show:
        plots.show()
    return plots
def plot3d(*args, **kwargs):
    """
    Plots a 3D surface plot.

    Usage
    =====

    Single plot

    ``plot3d(expr, range_x, range_y, **kwargs)``

    If the ranges are not specified, then a default range of (-10, 10) is
    used.

    Multiple plot with the same range.

    ``plot3d(expr1, expr2, range_x, range_y, **kwargs)``

    If the ranges are not specified, then a default range of (-10, 10) is
    used.

    Multiple plots with different ranges.

    ``plot3d((expr1, range_x, range_y), (expr2, range_x, range_y), ...,
    **kwargs)``

    Ranges have to be specified for every expression.

    Default range may change in the future if a more advanced default range
    detection algorithm is implemented.

    Arguments
    =========

    ``expr`` : Expression representing the function along x.

    ``range_x``: (x, 0, 5), A 3-tuple denoting the range of the x variable.

    ``range_y``: (y, 0, 5), A 3-tuple denoting the range of the y variable.

    Keyword Arguments
    =================

    Arguments for ``SurfaceOver2DRangeSeries`` class:

    ``nb_of_points_x``: int. The x range is sampled uniformly at
    ``nb_of_points_x`` of points.

    ``nb_of_points_y``: int. The y range is sampled uniformly at
    ``nb_of_points_y`` of points.

    Aesthetics:

    ``surface_color``: Function which returns a float. Specifies the color
    for the surface of the plot. See ``sympy.plotting.Plot`` for more details.

    If there are multiple plots, then the same series arguments are applied to
    all the plots. If you want to set these options separately, you can index
    the returned ``Plot`` object and set it.

    Arguments for ``Plot`` class:

    ``title`` : str. Title of the plot.

    Examples
    ========

    >>> from sympy import symbols
    >>> from sympy.plotting import plot3d
    >>> x, y = symbols('x y')

    Single plot

    >>> plot3d(x*y, (x, -5, 5), (y, -5, 5)) # doctest: +SKIP

    Multiple plots with same range

    >>> plot3d(x*y, -x*y, (x, -5, 5), (y, -5, 5)) # doctest: +SKIP

    Multiple plots with different ranges.

    >>> plot3d((x**2 + y**2, (x, -5, 5), (y, -5, 5)),
    ...     (x*y, (x, -3, 3), (y, -3, 3))) # doctest: +SKIP

    See Also
    ========
    Plot, SurfaceOver2DRangeSeries
    """
    # list(...) so check_arguments can len()/index the arguments; on
    # Python 3 ``map`` returns a one-shot iterator.
    args = list(map(sympify, args))
    show = kwargs.pop('show', True)
    plot_expr = check_arguments(args, 1, 2)
    # Bug fix: forward **kwargs to the series as well (consistent with
    # plot()); previously options such as nb_of_points_x/_y and
    # surface_color were silently ignored here.
    series = [SurfaceOver2DRangeSeries(*arg, **kwargs) for arg in plot_expr]
    plots = Plot(*series, **kwargs)
    if show:
        plots.show()
    return plots
def plot3d_parametric_surface(*args, **kwargs):
    """
    Plots a 3D parametric surface plot.

    Usage
    =====

    Single plot.

    ``plot3d_parametric_surface(expr_x, expr_y, expr_z, range_u, range_v,
    **kwargs)``

    If the ranges is not specified, then a default range of (-10, 10) is used.

    Multiple plots.

    ``plot3d_parametric_surface((expr_x, expr_y, expr_z, range_u, range_v),
    ..., **kwargs)``

    Ranges have to be specified for every expression.

    Default range may change in the future if a more advanced default range
    detection algorithm is implemented.

    Arguments
    =========

    ``expr_x``: Expression representing the function along ``x``.

    ``expr_y``: Expression representing the function along ``y``.

    ``expr_z``: Expression representing the function along ``z``.

    ``range_u``: ``(u, 0, 5)``, A 3-tuple denoting the range of the ``u``
    variable.

    ``range_v``: ``(v, 0, 5)``, A 3-tuple denoting the range of the ``v``
    variable.

    Keyword Arguments
    =================

    Arguments for ``ParametricSurfaceSeries`` class:

    ``nb_of_points_u``: int. The ``u`` range is sampled uniformly at
    ``nb_of_points_u`` of points.

    ``nb_of_points_v``: int. The ``v`` range is sampled uniformly at
    ``nb_of_points_v`` of points.

    Aesthetics:

    ``surface_color``: Function which returns a float. Specifies the color
    for the surface of the plot. See ``sympy.plotting.Plot`` for more details.

    If there are multiple plots, then the same series arguments are applied
    for all the plots. If you want to set these options separately, you can
    index the returned ``Plot`` object and set it.

    Arguments for ``Plot`` class:

    ``title`` : str. Title of the plot.

    Examples
    ========

    >>> from sympy import symbols, cos, sin
    >>> from sympy.plotting import plot3d_parametric_surface
    >>> u, v = symbols('u v')

    Single plot.

    >>> plot3d_parametric_surface(cos(u + v), sin(u - v), u - v,
    ...     (u, -5, 5), (v, -5, 5)) # doctest: +SKIP

    See Also
    ========
    Plot, ParametricSurfaceSeries
    """
    # list(...) so check_arguments can len()/index the arguments; on
    # Python 3 ``map`` returns a one-shot iterator.
    args = list(map(sympify, args))
    show = kwargs.pop('show', True)
    plot_expr = check_arguments(args, 3, 2)
    # Bug fix: forward **kwargs to the series as well (consistent with
    # plot()); previously options such as nb_of_points_u/_v and
    # surface_color were silently ignored here.
    series = [ParametricSurfaceSeries(*arg, **kwargs) for arg in plot_expr]
    plots = Plot(*series, **kwargs)
    if show:
        plots.show()
    return plots
def check_arguments(args, expr_len, nb_of_free_symbols):
    """
    Checks the arguments and converts into tuples of the
    form (exprs, ranges)

    >>> from sympy import plot, cos, sin, symbols
    >>> from sympy.plotting.plot import check_arguments
    >>> x,y,u,v = symbols('x y u v')
    >>> check_arguments([cos(x), sin(x)], 2, 1)
    [(cos(x), sin(x), (x, -10, 10))]

    >>> check_arguments([x, x**2], 1, 1)
    [(x, (x, -10, 10)), (x**2, (x, -10, 10))]
    """
    if expr_len > 1 and isinstance(args[0], Expr):
        # Multiple expressions same range.
        # The arguments are tuples when the expression length is
        # greater than 1.
        assert len(args) >= expr_len
        # Find the first Tuple argument: everything before it is an
        # expression, everything from it on is a range.
        for i in range(len(args)):
            if isinstance(args[i], Tuple):
                break
        else:
            # No ranges at all; the out-of-bounds sentinel makes the
            # args[:i] slice below take every argument as an expression.
            i = len(args) + 1
        exprs = Tuple(*args[:i])
        free_symbols = list(set_union(*[e.free_symbols for e in exprs]))
        if len(args) == expr_len + nb_of_free_symbols:
            #Ranges given
            plots = [exprs + Tuple(*args[expr_len:])]
        else:
            default_range = Tuple(-10, 10)
            ranges = []
            for symbol in free_symbols:
                ranges.append(Tuple(symbol) + default_range)
            # Pad with Dummy-variable ranges when the expressions use
            # fewer symbols than the plot type needs.
            for i in range(len(free_symbols) - nb_of_free_symbols):
                ranges.append(Tuple(Dummy()) + default_range)
            plots = [exprs + Tuple(*ranges)]
        return plots

    if isinstance(args[0], Expr) or (isinstance(args[0], Tuple) and
                                     len(args[0]) == expr_len and
                                     expr_len != 3):
        # Cannot handle expressions with number of expression = 3. It is
        # not possible to differentiate between expressions and ranges.
        #Series of plots with same range
        for i in range(len(args)):
            if isinstance(args[i], Tuple) and len(args[i]) != expr_len:
                break
            if not isinstance(args[i], Tuple):
                # Normalize a bare expression to a 1-tuple, in place.
                args[i] = Tuple(args[i])
        else:
            # All arguments were expressions; sentinel as above.
            i = len(args) + 1
        exprs = args[:i]
        assert all(isinstance(e, Expr) for expr in exprs for e in expr)
        free_symbols = list(set_union(*[e.free_symbols for expr in exprs
                                        for e in expr]))

        if len(free_symbols) > nb_of_free_symbols:
            raise ValueError("The number of free_symbols in the expression"
                             "is greater than %d" % nb_of_free_symbols)
        if len(args) == i + nb_of_free_symbols and isinstance(args[i], Tuple):
            # Explicit ranges follow the expressions.
            ranges = Tuple(*[range_expr for range_expr in args[
                           i:i + nb_of_free_symbols]])
            plots = [expr + ranges for expr in exprs]
            return plots
        else:
            #Use default ranges.
            default_range = Tuple(-10, 10)
            ranges = []
            for symbol in free_symbols:
                ranges.append(Tuple(symbol) + default_range)
            # Pad with Dummy-variable ranges when fewer symbols than
            # needed are present.
            for i in range(len(free_symbols) - nb_of_free_symbols):
                ranges.append(Tuple(Dummy()) + default_range)
            ranges = Tuple(*ranges)
            plots = [expr + ranges for expr in exprs]
            return plots

    elif isinstance(args[0], Tuple) and len(args[0]) == expr_len + nb_of_free_symbols:
        #Multiple plots with different ranges.
        # Each argument already bundles its expressions and ranges;
        # only validate the shape and pass them through.
        for arg in args:
            for i in range(expr_len):
                if not isinstance(arg[i], Expr):
                    raise ValueError("Expected an expression, given %s" %
                                     str(arg[i]))
            for i in range(nb_of_free_symbols):
                if not len(arg[i + expr_len]) == 3:
                    raise ValueError("The ranges should be a tuple of"
                                     "length 3, got %s" % str(arg[i + expr_len]))
        return args
| bsd-3-clause |
h2educ/scikit-learn | sklearn/linear_model/tests/test_ransac.py | 216 | 13290 | import numpy as np
from numpy.testing import assert_equal, assert_raises
from numpy.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_raises_regexp
from scipy import sparse
from sklearn.utils.testing import assert_less
from sklearn.linear_model import LinearRegression, RANSACRegressor
from sklearn.linear_model.ransac import _dynamic_max_trials
# Generate coordinates of line
X = np.arange(-200, 200)
y = 0.2 * X + 20
data = np.column_stack([X, y])

# Add some faulty data
# Indices of the rows overwritten with gross outliers below; the tests
# use this array as the ground-truth outlier mask.
outliers = np.array((10, 30, 200))
data[outliers[0], :] = (1000, 1000)
data[outliers[1], :] = (-1000, -1000)
data[outliers[2], :] = (-100, -50)

# Rebind X (as a column vector) and y from the corrupted data.
X = data[:, 0][:, np.newaxis]
y = data[:, 1]
def test_ransac_inliers_outliers():
    """RANSAC must flag exactly the three corrupted samples as outliers."""
    model = RANSACRegressor(LinearRegression(), min_samples=2,
                            residual_threshold=5, random_state=0)
    # Estimate parameters of corrupted data
    model.fit(X, y)
    # Ground truth / reference inlier mask
    expected_mask = np.ones_like(model.inlier_mask_).astype(np.bool_)
    expected_mask[outliers] = False
    assert_equal(model.inlier_mask_, expected_mask)
def test_ransac_is_data_valid():
    """fit() must raise when is_data_valid rejects every random subsample."""
    def is_data_valid(X, y):
        # Each candidate subsample has exactly min_samples rows.
        assert_equal(X.shape[0], 2)
        assert_equal(y.shape[0], 2)
        return False

    rand_X = np.random.rand(10, 2)
    rand_y = np.random.rand(10, 1)
    model = RANSACRegressor(LinearRegression(), min_samples=2,
                            residual_threshold=5,
                            is_data_valid=is_data_valid,
                            random_state=0)
    assert_raises(ValueError, model.fit, rand_X, rand_y)
def test_ransac_is_model_valid():
    """fit() must raise when is_model_valid rejects every candidate model."""
    def is_model_valid(estimator, X, y):
        # Each candidate model is fit on exactly min_samples rows.
        assert_equal(X.shape[0], 2)
        assert_equal(y.shape[0], 2)
        return False

    model = RANSACRegressor(LinearRegression(), min_samples=2,
                            residual_threshold=5,
                            is_model_valid=is_model_valid,
                            random_state=0)
    assert_raises(ValueError, model.fit, X, y)
def test_ransac_max_trials():
    """max_trials=0 is rejected; n_trials_ exists only after fitting."""
    base = LinearRegression()

    zero_trials = RANSACRegressor(base, min_samples=2,
                                  residual_threshold=5, max_trials=0,
                                  random_state=0)
    assert_raises(ValueError, zero_trials.fit, X, y)

    model = RANSACRegressor(base, min_samples=2,
                            residual_threshold=5, max_trials=11,
                            random_state=0)
    # Not fitted yet -> no n_trials_ attribute.
    assert getattr(model, 'n_trials_', None) is None
    model.fit(X, y)
    assert_equal(model.n_trials_, 2)
def test_ransac_stop_n_inliers():
    """A tiny stop_n_inliers target ends the search after one trial."""
    model = RANSACRegressor(LinearRegression(), min_samples=2,
                            residual_threshold=5, stop_n_inliers=2,
                            random_state=0)
    model.fit(X, y)
    assert_equal(model.n_trials_, 1)
def test_ransac_stop_score():
    """A trivially low stop_score ends the search after one trial."""
    model = RANSACRegressor(LinearRegression(), min_samples=2,
                            residual_threshold=5, stop_score=0,
                            random_state=0)
    model.fit(X, y)
    assert_equal(model.n_trials_, 1)
def test_ransac_score():
    """Score is perfect on the inliers and degraded on the two outliers."""
    X_line = np.arange(100)[:, None]
    y_line = np.zeros((100, ))
    # Corrupt the first two samples only.
    y_line[0] = 1
    y_line[1] = 100

    model = RANSACRegressor(LinearRegression(), min_samples=2,
                            residual_threshold=0.5, random_state=0)
    model.fit(X_line, y_line)

    assert_equal(model.score(X_line[2:], y_line[2:]), 1)
    assert_less(model.score(X_line[:2], y_line[:2]), 1)
def test_ransac_predict():
    """Predictions come from the inlier model, ignoring the two outliers."""
    X_line = np.arange(100)[:, None]
    y_line = np.zeros((100, ))
    # Corrupt the first two samples only.
    y_line[0] = 1
    y_line[1] = 100

    model = RANSACRegressor(LinearRegression(), min_samples=2,
                            residual_threshold=0.5, random_state=0)
    model.fit(X_line, y_line)
    assert_equal(model.predict(X_line), np.zeros(100))
def test_ransac_resid_thresh_no_inliers():
    # When residual_threshold=0.0 there are no inliers and a
    # ValueError with a message should be raised
    model = RANSACRegressor(LinearRegression(), min_samples=2,
                            residual_threshold=0.0, random_state=0)
    assert_raises_regexp(ValueError,
                         "No inliers.*residual_threshold.*0\.0",
                         model.fit, X, y)
def test_ransac_sparse_coo():
    """Fitting on a scipy COO matrix flags the same outliers as dense X."""
    model = RANSACRegressor(LinearRegression(), min_samples=2,
                            residual_threshold=5, random_state=0)
    model.fit(sparse.coo_matrix(X), y)

    expected_mask = np.ones_like(model.inlier_mask_).astype(np.bool_)
    expected_mask[outliers] = False
    assert_equal(model.inlier_mask_, expected_mask)
def test_ransac_sparse_csr():
    """Fitting on a scipy CSR matrix flags the same outliers as dense X."""
    model = RANSACRegressor(LinearRegression(), min_samples=2,
                            residual_threshold=5, random_state=0)
    model.fit(sparse.csr_matrix(X), y)

    expected_mask = np.ones_like(model.inlier_mask_).astype(np.bool_)
    expected_mask[outliers] = False
    assert_equal(model.inlier_mask_, expected_mask)
def test_ransac_sparse_csc():
    """Fitting on a scipy CSC matrix flags the same outliers as dense X."""
    model = RANSACRegressor(LinearRegression(), min_samples=2,
                            residual_threshold=5, random_state=0)
    model.fit(sparse.csc_matrix(X), y)

    expected_mask = np.ones_like(model.inlier_mask_).astype(np.bool_)
    expected_mask[outliers] = False
    assert_equal(model.inlier_mask_, expected_mask)
def test_ransac_none_estimator():
    """base_estimator=None defaults to LinearRegression and must match it."""
    explicit = RANSACRegressor(LinearRegression(), min_samples=2,
                               residual_threshold=5, random_state=0)
    implicit = RANSACRegressor(None, 2, 5, random_state=0)

    explicit.fit(X, y)
    implicit.fit(X, y)

    assert_array_almost_equal(explicit.predict(X), implicit.predict(X))
def test_ransac_min_n_samples():
    """min_samples accepts an absolute count or a fraction of n_samples.

    Equivalent specifications must select identical models, while
    out-of-range values must raise a ValueError at fit time.
    """
    base_estimator = LinearRegression()
    kwargs = dict(residual_threshold=5, random_state=0)

    reference = RANSACRegressor(base_estimator, min_samples=2, **kwargs)
    reference.fit(X, y)

    # Three equivalent ways of requesting two samples per subset
    # (fractional, float count, and the estimator default).
    equivalent = [
        RANSACRegressor(base_estimator, min_samples=2. / X.shape[0], **kwargs),
        RANSACRegressor(base_estimator, min_samples=2.0, **kwargs),
        RANSACRegressor(base_estimator, **kwargs),
    ]
    for estimator in equivalent:
        estimator.fit(X, y)
        assert_array_almost_equal(reference.predict(X), estimator.predict(X))

    # Negative, non-integral > 1, and larger-than-dataset values are invalid.
    invalid = [
        RANSACRegressor(base_estimator, min_samples=-1, **kwargs),
        RANSACRegressor(base_estimator, min_samples=5.2, **kwargs),
        RANSACRegressor(base_estimator, min_samples=X.shape[0] + 1, **kwargs),
    ]
    for estimator in invalid:
        assert_raises(ValueError, estimator.fit, X, y)
def test_ransac_multi_dimensional_targets():
    """RANSAC handles 2-D targets (three stacked copies of y)."""
    estimator = RANSACRegressor(LinearRegression(), min_samples=2,
                                residual_threshold=5, random_state=0)

    # Estimate parameters of corrupted data with a (n_samples, 3) target.
    estimator.fit(X, np.column_stack([y, y, y]))

    # The injected outliers are the only samples flagged as outliers.
    expected_mask = np.ones_like(estimator.inlier_mask_).astype(np.bool_)
    expected_mask[outliers] = False
    assert_equal(estimator.inlier_mask_, expected_mask)
def test_ransac_residual_metric():
    """Custom residual metrics must agree with the default selection."""
    metric_abs = lambda dy: np.sum(np.abs(dy), axis=1)
    metric_squared = lambda dy: np.sum(dy ** 2, axis=1)
    yyy = np.column_stack([y, y, y])

    base_estimator = LinearRegression()
    default_estimator = RANSACRegressor(base_estimator, min_samples=2,
                                        residual_threshold=5, random_state=0)
    abs_estimator = RANSACRegressor(base_estimator, min_samples=2,
                                    residual_threshold=5, random_state=0,
                                    residual_metric=metric_abs)
    squared_estimator = RANSACRegressor(base_estimator, min_samples=2,
                                        residual_threshold=5, random_state=0,
                                        residual_metric=metric_squared)

    # multi-dimensional targets
    default_estimator.fit(X, yyy)
    abs_estimator.fit(X, yyy)
    squared_estimator.fit(X, yyy)
    assert_array_almost_equal(default_estimator.predict(X),
                              abs_estimator.predict(X))
    assert_array_almost_equal(default_estimator.predict(X),
                              squared_estimator.predict(X))

    # one-dimensional targets
    default_estimator.fit(X, y)
    squared_estimator.fit(X, y)
    assert_array_almost_equal(default_estimator.predict(X),
                              squared_estimator.predict(X))
def test_ransac_default_residual_threshold():
    """Without an explicit threshold the default still isolates outliers."""
    estimator = RANSACRegressor(LinearRegression(), min_samples=2,
                                random_state=0)

    # Estimate parameters of corrupted data.
    estimator.fit(X, y)

    # The injected outliers are the only samples flagged as outliers.
    expected_mask = np.ones_like(estimator.inlier_mask_).astype(np.bool_)
    expected_mask[outliers] = False
    assert_equal(estimator.inlier_mask_, expected_mask)
def test_ransac_dynamic_max_trials():
    # Reference values hand-calculated and confirmed on page 119
    # (Table 4.3) in Hartley, R.~I. and Zisserman, A., 2004,
    # Multiple View Geometry in Computer Vision, Second Edition,
    # Cambridge University Press, ISBN: 0521540518.
    # Each tuple is (n_inliers, min_samples, expected trials) for
    # n_samples=100 and probability 0.99.
    expected_trials = [
        (100, 2, 1),    # e = 0%
        (95, 2, 2),     # e = 5%
        (90, 2, 3),     # e = 10%
        (70, 2, 7),     # e = 30%
        (50, 2, 17),    # e = 50%
        (95, 8, 5),     # e = 5%
        (90, 8, 9),     # e = 10%
        (70, 8, 78),    # e = 30%
        (50, 8, 1177),  # e = 50%
    ]
    for n_inliers, min_samples, expected in expected_trials:
        assert_equal(_dynamic_max_trials(n_inliers, 100, min_samples, 0.99),
                     expected)

    # Degenerate probabilities: 0 needs no trials, 1 needs infinitely many.
    assert_equal(_dynamic_max_trials(1, 100, 10, 0), 0)
    assert_equal(_dynamic_max_trials(1, 100, 10, 1), float('inf'))

    # stop_probability outside [0, 1] must raise at fit time.
    base_estimator = LinearRegression()
    for bad_probability in (-0.1, 1.1):
        ransac_estimator = RANSACRegressor(base_estimator, min_samples=2,
                                           stop_probability=bad_probability)
        assert_raises(ValueError, ransac_estimator.fit, X, y)
| bsd-3-clause |
MartinIsoz/MuPhFInCE | 00_utilities/procLogOnlineSS.py | 2 | 12534 | #!/usr/bin/python
#FILE DESCRIPTION=======================================================
#
# Simple python script to see the residuals evolution and other
# simulation characteristics of the steady state OpenFOAM runs
#
# Required:
# - file log.*Foam
# - file log.blockMesh or log.snappyHexMesh or direct specification
# of the number of the cells in the mesh
#USAGE==================================================================
# 1. Copy the script to a clean folder (or specify checkDir)
# 2. Run the script
#LICENSE================================================================
# procLog.py
#
# Copyright 2016 Martin Isoz <martin@Poctar>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
# MA 02110-1301, USA.
#
#
#IMPORT BLOCK===========================================================
# -- communication with the remote server
import os
# -- plot updating
import time
# -- math and other operations on the data
import math
import glob
import sys
import numpy as np
import re #regexp
# -- plotting
import matplotlib.pyplot as plt
import matplotlib
import textwrap
from itertools import cycle,chain
#CUSTOM FUNCTIONS=======================================================
# A. auxiliary visualization functions----------------------------------
def createCyclers():
    """Build endless (line, marker, color) style cyclers for plot styling."""
    line_styles = ('-', '--', '-.', ':')
    marker_styles = ('o', 'v', 'h', 'd', 's', '^')
    color_names = (
        'dodgerblue',
        'limegreen',
        'darkmagenta',
        'saddlebrown',
        'firebrick',
        'teal',
        'gold',
    )
    return cycle(line_styles), cycle(marker_styles), cycle(color_names)
# B. auxiliary server contact functions---------------------------------
def avServers(rName):
    """Return the connection description of a known remote server.

    Parameters
    ----------
    rName : str
        Server nickname ('Altix' or 'Poctar').

    Returns
    -------
    list
        [hostname, username, [path prefix, path infix], plot title] --
        exactly the four fields the caller unpacks.  Unknown nicknames
        fall back to the 'Altix' entry.
    """
    servers = {
        'Altix' : [#Altix UV 2000 at UCT prague
            'altix.vscht.cz',
            'isozm',
            ['/scratch/','/'],
            'Altix UV 2000',
        ],
        'Poctar': [#my personnal computer at UCT prague
            '413-C407-Poctar.vscht.cz',
            'martin',
            ['/media/','/Data_2/05_TextPlate/10_noTextureV2/'],
            #~ ['/media/','/Data_2/05_TextPlate/40_pyrTextureV2/'],
            #~ ['/media/','/Data_2/05_TextPlate/50_transTextureV2/'],
            #~ ['/media/','/Data_2/05_TextPlate/60_longTextureV2/'],
            'Intel Xeon E3',
        ],
    }
    # Bug fix: dict.get(rName, 'Altix') returned the *string* 'Altix' for
    # unknown names, which the caller then failed to unpack into the four
    # fields (rServer, rUser, caseDir, sTitle).  Return the entry instead.
    return servers.get(rName, servers['Altix'])
#INPUT VARIABLES========================================================
# -- for which variables I want to see the residuals (solver log names)
#~ varInt = ['alpha.liquid','p_rgh']
#~ varInt = ['p_rgh']
varInt = ['Ux','Uy','Uz','p']
# -- how many last iterations do I want to see?
# TBD
# -- which residuals I want to get (final or initial)
finRes = False
# -- checked case
#~ caseName= 'testCase'
#~ caseName= 'testCaseV2'
caseName= 'testCaseGas6'
# -- remote server
rName = 'Altix'
#~ rName = 'Poctar'
# -- which file to get
fileList= ['log.blockMesh', 'log.simpleFoam']
# -- periodic updating of the figures
updatePl= False #update the plot?
updInt = 20 #update interval in seconds
# -- graphical output parameters
eMS = 10 #marker size to show the current timestep
#OUTPUT VARIABLE========================================================
fileNm = 'simAnalysisData'
#DO NOT MODIFY (PREFERABLY)=============================================
# -- remote locations
rServer,rUser,caseDir,sTitle = avServers(rName) #get server description
# remote case path = <prefix> + <user> + <infix> + <case name>/
caseDir = caseDir[0] + rUser + caseDir[1] + caseName + '/' #set proper caseDir
# -- local location
checkDir= os.getcwd() + '/' #get current directory
# -- figure window parameters
fig = plt.figure(num=None, figsize=(20, 12), dpi=80, facecolor='w', edgecolor='k')
plt.show(block=False) # non-blocking so the figure can be redrawn in the loop
# -- colors for plots with unknown number of lines
plotCols = [np.random.rand(3,1) for i in range(len(varInt))]
font = {
    #~ 'family' : 'normal',
    #~ 'weight' : 'normal',
    'size'   : 22
}
matplotlib.rc('font', **font)
#PROGRAM ITSELF=========================================================
# Main monitoring loop: rsync the logs from the remote case directory,
# parse mesh size / run metadata / residuals / execution times, redraw
# the figure and (if updatePl) repeat every updInt seconds.
while True:
    #GET THE CURRENT DATA===============================================
    # -- pull each requested log file from the remote case directory
    for rFile in fileList:
        os.system('rsync -v "%s:%s" "%s"' % (rUser + '@' + rServer,
            caseDir + rFile,
            checkDir) )
    #PROCESS OTHER LOG FILES (MESH CHARACTERISTICS & OTHER)=============
    with open(checkDir + fileList[0], 'r') as file:
        # read a list of lines into data
        data = file.readlines()
    idStr = ['nCells: '] #get number of cells in the mesh
    for j in range(len(idStr)):
        for i in range(len(data)):
            fInd = data[i].find(idStr[j])
            if fInd>=0:
                # cell count in millions, rounded to one decimal place
                nCells = round(float(data[i][fInd+len(idStr[j])::])/1e6,1)
                break
    #LOAD THE FILE INTO MEMORY==========================================
    with open(checkDir + fileList[1], 'r') as file:
        # read a list of lines into data
        data = file.readlines()
    #GET NUMBER OF CORES USED (AND POSSIBLY OTHER DESCRIPTION)==========
    # -- header fields printed at the top of the solver log
    idStr = [
        'nProcs : ',
        'Date : ',
        'Time : ',
        'Case : ',
        'Exec : ',
        'Build : ',
    ]
    out = []
    for j in range(len(idStr)):
        for i in range(len(data)):
            fInd = data[i].find(idStr[j])
            if fInd==0:
                out.append(data[i][fInd+len(idStr[j])::])
                break
    nProcs = int(out[0]) #get number of cores
    Date,Time,Case,Exec,Build = out[1::] #get start time and date and case
    # strip the trailing newline from each header field
    Date,Time,Case,Exec,Build = Date[0:-1],Time[0:-1],Case[0:-1],Exec[0:-1],Build[0:-1]
    Case = Case.split('/') #extract relevant data from the case
    Case = Case[-1] # keep only the case folder name
    ttlStr = ("%s, %d cores, %.1fMM cells, case: %s, "%(sTitle, nProcs, nCells, Case) +
        "solver: %s, version: %s"%(Exec, Build))
    plt.suptitle("\n".join(textwrap.wrap(ttlStr, 100)),
        fontsize=24)
    #PLOT TIMESTEPEVOLUTION=============================================
    # -- not applicable for the steady state simulations
    #PLOT THE RESIDUALS=================================================
    # TBD: modification for being able to choose how many timesteps I
    # want to see
    partData = data
    #~ partData.reverse() #flip the list
    idStr = ['Solving for %s'%varName for varName in varInt] #in which variables am I interested
    vec = [[] for i in range(len(varInt))] #residual history, one list per variable
    lVarInt = len(vec)
    # Note: I suspect, that the bellow is not exactly pretty (but it works)
    updLst = [False]*lVarInt #create list which tells me, if the variables were updated
    for i in range(len(partData)): #go through the partData
        if partData[i].find('Time = ') == 0: #if I reach a new iteration
            k = 0
            cVecLen = len(vec[0]) #get current veclength
            aVecLen = cVecLen #auxiliary variable
            # NOTE(review): this loop only terminates once every variable
            # of interest appears below the current 'Time =' line; if one
            # is missing from the log, i+k runs past the list end and
            # raises an IndexError.
            while any(itm == False for itm in updLst): #if any of the variables is still not updated
                for j in range(len(idStr)): #go throught the variables of interest
                    fInd = partData[i+k].find(idStr[j])
                    if fInd>-1 and not updLst[j]:
                        auxStr = partData[i+k][fInd::].split('residual = ') #split on 'residual = '; 2 occurences yield 3 substrings
                        if not finRes:
                            vec[j].append(float(auxStr[1].split(',')[0])) #process the first occurence
                        else:
                            vec[j].append(float(auxStr[2].split(',')[0])) #process the last occurence
                        updLst[j] = True
                k += 1
            updLst = [False]*lVarInt #reset the updates
    host = plt.subplot2grid((1,1),(0,0), colspan = 1)
    plt.cla()
    plt.xlim([0,max([len(vv) for vv in vec])])
    #~ plt.grid(True, which='major')
    mLen = max([len(v) for v in vec]) #maximal vector length
    linecycler,markercycler,colorcycler=createCyclers() #restart the cyclers
    for i in range(len(varInt)):
        cLen = len(vec[i])
        host.semilogy(np.linspace(0,mLen,cLen),vec[i],'-',c=next(colorcycler),label=varInt[i],lw=3)
    host.set_ylabel('Reziduals')
    host.set_xlabel('Number of iterations')
    # NOTE(review): min(min(vec)) first picks the lexicographically
    # smallest residual list -- presumably intended as the global minimum
    # over all variables; verify for lists that differ in element [0].
    host.set_ylim([10**(math.floor(math.log10(min(min(vec))))),1])
    host.legend(bbox_to_anchor=(0.7, 0.95), loc=2, borderaxespad=0.)
    #PLOT CALCULATION TIME==============================================
    idStr = [
        'ExecutionTime = ',
    ]
    # vec[0]: cumulative execution time [s], vec[1]: per-iteration time,
    # vec[2]: iteration counter
    vec = [[0.0],[0.0],[0]]
    for j in range(len(idStr)):
        for i in range(len(data)):
            fInd = data[i].find(idStr[j])
            if fInd>-1:
                curTime = data[i].split(' ') #split the relevant line by spaces
                curTime = float(curTime[2]) #hardcoded position (solver specific)
                vec[j].append(curTime) #current execution time
                vec[j+1].append(curTime-vec[j][-2]) #current timestep difference
                vec[j+2].append(vec[j+2][-1]+1) #timestep number
    meanVal = np.mean(vec[1]) #get mean value
    vec.append([absVal/meanVal for absVal in vec[1]]) #relative per-step cost
    hExecTime = [execTime/3600 for execTime in vec[0]] #execution time in hours
    par = host.twinx()
    par.fill_between(
        np.linspace(0,len(vec[1]),len(vec[1])),0,vec[1],
        color = next(colorcycler),
        alpha = 0.3,
        zorder = -1,
    )
    par.set_ylabel('Computation time / time step, [s]')
    par.set_ylim([min(vec[1])*0.9,max(vec[1])*1.1])
    plt.axis('tight')
    #PLOT COURANT AND INTERFACE COURANT NUMBERS=========================
    # -- not applicable for the steady state simulations
    #PLOT RESIDUALS OVER LAST M TIME STEPS==============================
    # -- not applicable for the steady state simulations
    #SAVE THE RESULTING FIGURE AND UPDATE GRAPHS========================
    # -- draw the figure
    plt.tight_layout(rect=[0, 0.03, 1, 0.90])
    plt.savefig('runAnalysis.png', dpi=160)
    plt.draw()
    if not updatePl:
        break
    time.sleep(updInt)
plt.show()
| gpl-2.0 |
henridwyer/scikit-learn | examples/manifold/plot_compare_methods.py | 259 | 4031 | """
=========================================
Comparison of Manifold Learning methods
=========================================
An illustration of dimensionality reduction on the S-curve dataset
with various manifold learning methods.
For a discussion and comparison of these algorithms, see the
:ref:`manifold module page <manifold>`
For a similar example, where the methods are applied to a
sphere dataset, see :ref:`example_manifold_plot_manifold_sphere.py`
Note that the purpose of the MDS is to find a low-dimensional
representation of the data (here 2D) in which the distances respect well
the distances in the original high-dimensional space, unlike other
manifold-learning algorithms, it does not seeks an isotropic
representation of the data in the low-dimensional space.
"""
# Author: Jake Vanderplas -- <vanderplas@astro.washington.edu>
print(__doc__)
from time import time
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from matplotlib.ticker import NullFormatter
from sklearn import manifold, datasets
# Next line to silence pyflakes. This import is needed.
Axes3D
n_points = 1000
X, color = datasets.samples_generator.make_s_curve(n_points, random_state=0)
n_neighbors = 10
n_components = 2  # target dimensionality of every embedding
fig = plt.figure(figsize=(15, 8))
plt.suptitle("Manifold Learning with %i points, %i neighbors"
             % (1000, n_neighbors), fontsize=14)
try:
    # compatibility matplotlib < 1.0
    ax = fig.add_subplot(251, projection='3d')
    ax.scatter(X[:, 0], X[:, 1], X[:, 2], c=color, cmap=plt.cm.Spectral)
    ax.view_init(4, -72)
except:
    # fallback: plot a 2-D projection of the S-curve instead
    ax = fig.add_subplot(251, projection='3d')
    plt.scatter(X[:, 0], X[:, 2], c=color, cmap=plt.cm.Spectral)
# Four variants of Locally Linear Embedding, plotted side by side.
methods = ['standard', 'ltsa', 'hessian', 'modified']
labels = ['LLE', 'LTSA', 'Hessian LLE', 'Modified LLE']
for i, method in enumerate(methods):
    t0 = time()
    Y = manifold.LocallyLinearEmbedding(n_neighbors, n_components,
                                        eigen_solver='auto',
                                        method=method).fit_transform(X)
    t1 = time()
    print("%s: %.2g sec" % (methods[i], t1 - t0))
    # panels 2-5 of the 2x5 grid
    ax = fig.add_subplot(252 + i)
    plt.scatter(Y[:, 0], Y[:, 1], c=color, cmap=plt.cm.Spectral)
    plt.title("%s (%.2g sec)" % (labels[i], t1 - t0))
    ax.xaxis.set_major_formatter(NullFormatter())
    ax.yaxis.set_major_formatter(NullFormatter())
    plt.axis('tight')
# Isomap embedding, panel 7.
t0 = time()
Y = manifold.Isomap(n_neighbors, n_components).fit_transform(X)
t1 = time()
print("Isomap: %.2g sec" % (t1 - t0))
ax = fig.add_subplot(257)
plt.scatter(Y[:, 0], Y[:, 1], c=color, cmap=plt.cm.Spectral)
plt.title("Isomap (%.2g sec)" % (t1 - t0))
ax.xaxis.set_major_formatter(NullFormatter())
ax.yaxis.set_major_formatter(NullFormatter())
plt.axis('tight')
# Multi-dimensional scaling, panel 8.
t0 = time()
mds = manifold.MDS(n_components, max_iter=100, n_init=1)
Y = mds.fit_transform(X)
t1 = time()
print("MDS: %.2g sec" % (t1 - t0))
ax = fig.add_subplot(258)
plt.scatter(Y[:, 0], Y[:, 1], c=color, cmap=plt.cm.Spectral)
plt.title("MDS (%.2g sec)" % (t1 - t0))
ax.xaxis.set_major_formatter(NullFormatter())
ax.yaxis.set_major_formatter(NullFormatter())
plt.axis('tight')
# Spectral embedding, panel 9.
t0 = time()
se = manifold.SpectralEmbedding(n_components=n_components,
                                n_neighbors=n_neighbors)
Y = se.fit_transform(X)
t1 = time()
print("SpectralEmbedding: %.2g sec" % (t1 - t0))
ax = fig.add_subplot(259)
plt.scatter(Y[:, 0], Y[:, 1], c=color, cmap=plt.cm.Spectral)
plt.title("SpectralEmbedding (%.2g sec)" % (t1 - t0))
ax.xaxis.set_major_formatter(NullFormatter())
ax.yaxis.set_major_formatter(NullFormatter())
plt.axis('tight')
# t-SNE embedding, final panel of the 2x5 grid.
t0 = time()
tsne = manifold.TSNE(n_components=n_components, init='pca', random_state=0)
Y = tsne.fit_transform(X)
t1 = time()
print("t-SNE: %.2g sec" % (t1 - t0))
# Bug fix: fig.add_subplot(250) requests subplot index 0 of a 2x5 grid,
# but subplot indices are 1-based -- the t-SNE panel belongs in the
# last (10th) cell, which needs the three-argument form.
ax = fig.add_subplot(2, 5, 10)
plt.scatter(Y[:, 0], Y[:, 1], c=color, cmap=plt.cm.Spectral)
plt.title("t-SNE (%.2g sec)" % (t1 - t0))
ax.xaxis.set_major_formatter(NullFormatter())
ax.yaxis.set_major_formatter(NullFormatter())
plt.axis('tight')
plt.show()
| bsd-3-clause |
eclee25/flu-SDI-exploratory-age | scripts/create_fluseverity_figs_v5/ILINet_incid_time_v5.py | 1 | 4261 | #!/usr/bin/python
##############################################
###Python template
###Author: Elizabeth Lee
###Date: 11/4/14
###Function: Incidence per 100,000 vs. week number for flu weeks (wks 40-20). Incidence is per 100,000 for the US population in the second calendar year of the flu season. ILINet data
## 11/4/14: Adjust for visits and care-seeking behavior.
###Import data: CDC_Source/Import_Data/all_cdc_source_data.csv, Census/Import_Data/totalpop_age_Census_98-14.csv
###Command Line: python ILINet_incid_time_v5.py
##############################################
### notes ###
# Incidence per 100,000 is normalized by total population by second calendar year of the flu season
# 2013-14 ILINet data is normalized by estimated population size from December 2013 because 2014 estimates are not available at this time
### packages/modules ###
import csv
import matplotlib.pyplot as plt
## local modules ##
import functions_v5 as fxn
### data structures ###
### functions ###
### data files ###
incidin = open('/home/elee/Dropbox/Elizabeth_Bansal_Lab/CDC_Source/Import_Data/all_cdc_source_data.csv','r')
incidin.readline() # remove header
incid = csv.reader(incidin, delimiter=',')
popin = open('/home/elee/Dropbox/Elizabeth_Bansal_Lab/Census/Import_Data/totalpop_age_Census_98-14.csv', 'r')
pop = csv.reader(popin, delimiter=',')
### called/local plotting parameters ###
ps = fxn.pseasons # seasons to plot
fw = fxn.gp_fluweeks # number of flu weeks (wks 40-20, per the header note)
sl = fxn.gp_ILINet_seasonlabels
colvec = fxn.gp_ILINet_colors
wklab = fxn.gp_weeklabels
fs = 24 # axis-label font size
fssml = 16
### program ###
# import data
d_wk, d_pop, d_totILI53ls, d_totILIadj53ls, d_ageILIadj_season = fxn.ILINet_week_RR_processing(incid, pop)
d_totIncid53ls, d_totIncidAdj53ls, d_RR53ls, d_zRR53ls = fxn.week_RR_processing_part2(d_pop, d_totILI53ls, d_totILIadj53ls, d_ageILIadj_season)
# dict_indices[(snum, classif period)] = [wk index 1, wk index 2, etc.]
d_indices = fxn.identify_retro_early_weeks(d_wk, d_totIncidAdj53ls)
# plot values
# Figure 1: unadjusted ILI visits per 100,000 by week
fig = plt.figure()
ax = plt.subplot(111)
for s, i in zip(ps, xrange(len(ps))):
    ax.plot(xrange(fw), d_totIncid53ls[s][:fw], marker = fxn.gp_marker, color = colvec[i], label = sl[i], linewidth = fxn.gp_linewidth)
# overlay the retrospective ('r') and early-warning ('e') classification weeks
for s in ps:
    beg_retro, end_retro = d_indices[(s, 'r')]
    beg_early, end_early = d_indices[(s, 'e')]
    plt.plot(range(beg_retro, end_retro), d_totIncid53ls[s][beg_retro:end_retro], marker = 'o', color = fxn.gp_retro_early_colors[0], linewidth = 2)
    plt.plot(range(beg_early, end_early), d_totIncid53ls[s][beg_early:end_early], marker = 'o', color = fxn.gp_retro_early_colors[1], linewidth = 2)
plt.xlim([0, fw-1])
plt.xticks(range(fw)[::5], wklab[:fw:5])
plt.ylim([0, 15])
plt.xlabel('Week Number', fontsize=fs)
plt.ylabel('ILI Visits per 100,000', fontsize=fs)
# shrink the axes width by 10% so the legend fits outside the plot
box = ax.get_position()
ax.set_position([box.x0, box.y0, box.width*0.9, box.height])
ax.legend(loc='center left', bbox_to_anchor=(1, 0.5))
plt.savefig('/home/elee/Dropbox/Elizabeth_Bansal_Lab/Manuscripts/Age_Severity/fluseverity_figs_v5/ILINet/ILINet_incid_time.png', transparent=False, bbox_inches='tight', pad_inches=0)
plt.close()
# Figure 2: adjusted ILI per 100,000 (visits/care-seeking adjusted) by week
fig2 = plt.figure()
ax2 = plt.subplot(111)
for s, i in zip(ps, xrange(len(ps))):
    ax2.plot(xrange(fw), d_totIncidAdj53ls[s][:fw], marker = fxn.gp_marker, color = colvec[i], label = sl[i], linewidth = fxn.gp_linewidth)
for s in ps:
    beg_retro, end_retro = d_indices[(s, 'r')]
    beg_early, end_early = d_indices[(s, 'e')]
    plt.plot(range(beg_retro, end_retro), d_totIncidAdj53ls[s][beg_retro:end_retro], marker = 'o', color = fxn.gp_retro_early_colors[0], linewidth = 2)
    plt.plot(range(beg_early, end_early), d_totIncidAdj53ls[s][beg_early:end_early], marker = 'o', color = fxn.gp_retro_early_colors[1], linewidth = 2)
plt.xlim([0, fw-1])
plt.xticks(range(fw)[::5], wklab[:fw:5])
plt.ylim([0, 60])
plt.xlabel('Week Number', fontsize=fs)
plt.ylabel(fxn.gp_adjILI, fontsize=fs)
# shrink the axes width by 10% so the legend fits outside the plot
box = ax2.get_position()
ax2.set_position([box.x0, box.y0, box.width*0.9, box.height])
ax2.legend(loc='center left', bbox_to_anchor=(1, 0.5))
plt.savefig('/home/elee/Dropbox/Elizabeth_Bansal_Lab/Manuscripts/Age_Severity/fluseverity_figs_v5/ILINet/ILINet_incidAdj_time.png', transparent=False, bbox_inches='tight', pad_inches=0)
plt.close()
| mit |
zehpunktbarron/iOSMAnalyzer | scripts/c3_highway_actuality.py | 1 | 6584 | # -*- coding: utf-8 -*-
#!/usr/bin/python2.7
#description :This file creates a plot: Calculates the actuality of the total OSM highway. Additionally plots the first version for comparison purposes
#author :Christopher Barron @ http://giscience.uni-hd.de/
#date :19.01.2013
#version :0.1
#usage :python pyscript.py
#==============================================================================
import psycopg2
import matplotlib.pyplot as plt
import matplotlib.dates as mdates
import numpy as np
import pylab
# import db connection parameters
import db_conn_para as db
###
### Connect to database with psycopg2. Add arguments from parser to the connection-string
###
try:
    conn_string="dbname= %s user= %s host= %s password= %s" %(db.g_my_dbname, db.g_my_username, db.g_my_hostname, db.g_my_dbpassword)
    print "Connecting to database\n->%s" % (conn_string)
    # establish the DB connection via psycopg2
    conn = psycopg2.connect(conn_string)
    print "Connection to database was established succesfully"
except:
    # NOTE(review): bare except hides the real error (and also catches
    # KeyboardInterrupt); the script then fails later on undefined `conn`.
    print "Connection to database failed"
###
### Execute SQL query
###
# SQL queries can be issued through this cursor object
cur = conn.cursor()
# Execute SQL query. For more than one row use three '"'
# Query 1: per-month length [km] and object count of the *currently valid*
# highway versions (latest version/minor still valid at CURRENT_TIMESTAMP).
try:
    cur.execute("""
    -- Currently valid version of the road
    SELECT
    coalesce(SUM -- Lenght of these highway-objects
    (ST_Length
    (ST_GeographyFromText
    (ST_AsText
    (ST_Transform(geom, 4326))
    )
    )
    /1000)
    , 0) AS length_spheroid,
    date_trunc('month', valid_from)::date,
    count(valid_from)::int -- Amount of highway-objects
    FROM
    hist_line
    WHERE
    visible = 'true' AND
    ((version = (SELECT max(version) from hist_line as h where h.id = hist_line.id AND
    valid_from <= CURRENT_TIMESTAMP AND (valid_to >= CURRENT_TIMESTAMP OR valid_to is null)))
    AND minor = (SELECT max(minor) from hist_line as h where h.id = hist_line.id AND h.version = hist_line.version AND
    (valid_from <= CURRENT_TIMESTAMP AND (valid_to >= CURRENT_TIMESTAMP OR valid_to is null)))
    AND
    ( -- Total OSM-road-network
    ((tags->'highway') = 'motorway') OR
    ((tags->'highway') = 'motorway_link') OR
    ((tags->'highway') = 'trunk') OR
    ((tags->'highway') = 'trunk_link') OR
    ((tags->'highway') = 'primary') OR
    ((tags->'highway') = 'primary_link') OR
    ((tags->'highway') = 'secondary') OR
    ((tags->'highway') = 'secondary_link') OR
    ((tags->'highway') = 'tertiary') OR
    ((tags->'highway') = 'tertiary_link') OR
    ((tags->'highway') = 'unclassified') OR
    ((tags->'highway') = 'residential') OR
    ((tags->'highway') = 'road') OR
    ((tags->'highway') = 'living_street') OR
    ((tags->'highway') = 'service') OR
    ((tags->'highway') = 'track') OR
    ((tags->'highway') = 'path') OR
    ((tags->'highway') = 'pedestrian') OR
    ((tags->'highway') = 'footway') OR
    ((tags->'highway') = 'cycleway') OR
    ((tags->'highway') = 'steps') OR
    ((tags->'highway') = 'platform') OR
    ((tags->'highway') = 'bridleway'))
    )
    GROUP BY date_trunc
    ORDER BY date_trunc ASC;
    """)
    # Getting a list of tuples from the database-cursor (cur)
    data_tuples = []
    for row in cur:
        data_tuples.append(row)
except:
    # NOTE(review): bare except hides the real DB error; data_tuples may
    # then be undefined further down.
    print "Query could not be executed"
###
### Plot (Multiline-Chart)
###
# Datatypes of the returning data: column 1(col1) --> integer, column 2(date) --> string
datatypes = [('col1', 'double'), ('date', 'S20'), ('count', 'int')]
# Data-tuple and datatype
data = np.array(data_tuples, dtype=datatypes)
# Date comes from 'col1'
col1 = data['col1']
###
### Execute SQL query2
###
# SQL queries can be issued through this cursor object
cur2 = conn.cursor()
# Execute SQL query. For more than one row use three '"'
# Query 2: same monthly aggregation, but for the *first created* version
# of each highway object (version = 1, minor = 0).
try:
    cur2.execute("""
    -- First created version of the road
    SELECT
    coalesce(SUM -- Lenght of these highway-objects
    (ST_Length
    (ST_GeographyFromText
    (ST_AsText
    (ST_Transform(geom, 4326))
    )
    )
    /1000)
    , 0) AS length_spheroid,
    date_trunc('month', valid_from)::date,
    count(valid_from)::int -- Amount of highway-objects
    FROM
    hist_line
    WHERE
    visible = 'true' AND
    version = 1 AND minor = 0
    AND
    ( -- Total OSM-road-network
    ((tags->'highway') = 'motorway') OR
    ((tags->'highway') = 'motorway_link') OR
    ((tags->'highway') = 'trunk') OR
    ((tags->'highway') = 'trunk_link') OR
    ((tags->'highway') = 'primary') OR
    ((tags->'highway') = 'primary_link') OR
    ((tags->'highway') = 'secondary') OR
    ((tags->'highway') = 'secondary_link') OR
    ((tags->'highway') = 'tertiary') OR
    ((tags->'highway') = 'tertiary_link') OR
    ((tags->'highway') = 'unclassified') OR
    ((tags->'highway') = 'residential') OR
    ((tags->'highway') = 'road') OR
    ((tags->'highway') = 'living_street') OR
    ((tags->'highway') = 'service') OR
    ((tags->'highway') = 'track') OR
    ((tags->'highway') = 'path') OR
    ((tags->'highway') = 'pedestrian') OR
    ((tags->'highway') = 'footway') OR
    ((tags->'highway') = 'cycleway') OR
    ((tags->'highway') = 'steps') OR
    ((tags->'highway') = 'platform') OR
    ((tags->'highway') = 'bridleway'))
    GROUP BY date_trunc
    ORDER BY date_trunc ASC;
    """)
    # Getting a list of tuples from the database-cursor (cur)
    data_tuples2 = []
    for row2 in cur2:
        data_tuples2.append(row2)
except:
    print "Query could not be executed"
###
### Plot (Multiline-Chart)
###
# Datatypes of the returning data: column 1(col1) --> integer, column 2(date) --> string
datatypes2 = [('col1_2', 'double'), ('date_2', 'S20'), ('count_2', 'int')]
# Data-tuple and datatype
data2 = np.array(data_tuples2, dtype=datatypes2)
# Date comes from 'col1'
col1_2 = data2['col1_2']
# Converts date to a manageable date-format for matplotlib
dates = mdates.num2date(mdates.datestr2num(data['date']))
dates2 = mdates.num2date(mdates.datestr2num(data2['date_2']))
fig, ax = plt.subplots()
# Create linechart
plt.plot(dates, col1, color = '#2dd700', linewidth=2, label='currently valid version') # date of currently valid roads
plt.plot(dates2, col1_2, color = '#ff6700', linewidth=2, label='first version') # date of the first created roads
# Place a gray dashed grid behind the thicks (only for y-axis)
ax.yaxis.grid(color='gray', linestyle='dashed')
# Set this grid behind the thicks
ax.set_axisbelow(True)
# Rotate x-labels on the x-axis
fig.autofmt_xdate()
# Label x and y axis
plt.xlabel('Date')
plt.ylabel('Length [km]')
# place legend
ax.legend(loc='upper right', prop={'size':12})
# Plot-title
plt.title("Actuality of the total Highway Length")
# Save plot to *.jpeg-file
plt.savefig('pics/c3_highway_actuality.jpeg')
plt.clf()
| gpl-3.0 |
oztalha/News-Commentary-Tweets-of-Elites | scrapers/scrape-theplazz.py | 2 | 1332 | # -*- coding: utf-8 -*-
"""
Created on Thu Jan 08 15:41:01 2015
@author: Talha
"""
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
import pandas as pd
import time
#initialize variables
df = pd.DataFrame(columns=('title', 'twcount', 'href'))
driver = webdriver.Chrome()
# thePlazz.com Headlines 'http://theplazz.com/category/headlines/'
# (loaded here from a locally saved copy of the fully scrolled page)
driver.get('file:///Users/toz/Documents/workspace/TR-2014/data/thePlazz.html')
time.sleep(60) #have to wait till the page is loaded completely
# This is how I retrieved thePlazz.html file, i.e.
# I added the two lines above for your convenience, myself never used
#driver.get('http://theplazz.com/category/headlines/')
#for i in range(600):
#    driver.execute_script("window.scrollTo(0, document.body.scrollHeight);")
#    time.sleep(3)
# one <div class="post-top"> element per headline
news = driver.find_elements_by_xpath("//div[@class='post-top']")
for i,haber in enumerate(news):
    # headline anchor: its text is the title, its href the article link
    title = haber.find_element_by_tag_name("h3").find_element_by_tag_name("a")
    # presumably the tweet count (first anchor in post-meta) -- verify against the page markup
    twcount = int(haber.find_element_by_xpath("div[@class='post-meta']").find_element_by_tag_name("a").text)
    print i, title.text , twcount, title.get_attribute("href")
    df.loc[len(df)+1]=[title.text , twcount, title.get_attribute("href")]
df['twcount']=df['twcount'].astype(int)
df.to_csv("US-news-org.csv",encoding='utf-8',index=False) | mit |
ngoix/OCRF | sklearn/gaussian_process/gaussian_process.py | 16 | 34896 | # -*- coding: utf-8 -*-
# Author: Vincent Dubourg <vincent.dubourg@gmail.com>
# (mostly translation, see implementation details)
# Licence: BSD 3 clause
from __future__ import print_function
import numpy as np
from scipy import linalg, optimize
from ..base import BaseEstimator, RegressorMixin
from ..metrics.pairwise import manhattan_distances
from ..utils import check_random_state, check_array, check_X_y
from ..utils.validation import check_is_fitted
from . import regression_models as regression
from . import correlation_models as correlation
from ..utils import deprecated
MACHINE_EPSILON = np.finfo(np.double).eps
@deprecated("l1_cross_distances is deprecated and will be removed in 0.20.")
def l1_cross_distances(X):
    """
    Computes the nonzero componentwise L1 cross-distances between the vectors
    in X.

    Parameters
    ----------

    X: array_like
        An array with shape (n_samples, n_features)

    Returns
    -------

    D: array with shape (n_samples * (n_samples - 1) / 2, n_features)
        The array of componentwise L1 cross-distances.

    ij: arrays with shape (n_samples * (n_samples - 1) / 2, 2)
        The indices i and j of the vectors in X associated to the cross-
        distances in D: D[k] = np.abs(X[ij[k, 0]] - Y[ij[k, 1]]).
    """
    X = check_array(X)
    n_samples, n_features = X.shape
    n_nonzero_cross_dist = n_samples * (n_samples - 1) // 2
    # Bug fix: np.int was only a deprecated alias of the builtin `int`
    # (removed in NumPy 1.24); using `int` keeps the identical dtype.
    ij = np.zeros((n_nonzero_cross_dist, 2), dtype=int)
    D = np.zeros((n_nonzero_cross_dist, n_features))
    ll_1 = 0
    # Fill the pair list row-block by row-block: block k holds the
    # distances from sample k to every later sample.
    for k in range(n_samples - 1):
        ll_0 = ll_1
        ll_1 = ll_0 + n_samples - k - 1
        ij[ll_0:ll_1, 0] = k
        ij[ll_0:ll_1, 1] = np.arange(k + 1, n_samples)
        D[ll_0:ll_1] = np.abs(X[k] - X[(k + 1):n_samples])

    return D, ij
@deprecated("GaussianProcess is deprecated and will be removed in 0.20. "
"Use the GaussianProcessRegressor instead.")
class GaussianProcess(BaseEstimator, RegressorMixin):
"""The legacy Gaussian Process model class.
Note that this class is deprecated and will be removed in 0.20.
Use the GaussianProcessRegressor instead.
Read more in the :ref:`User Guide <gaussian_process>`.
Parameters
----------
regr : string or callable, optional
A regression function returning an array of outputs of the linear
regression functional basis. The number of observations n_samples
should be greater than the size p of this basis.
Default assumes a simple constant regression trend.
Available built-in regression models are::
'constant', 'linear', 'quadratic'
corr : string or callable, optional
A stationary autocorrelation function returning the autocorrelation
between two points x and x'.
Default assumes a squared-exponential autocorrelation model.
Built-in correlation models are::
'absolute_exponential', 'squared_exponential',
'generalized_exponential', 'cubic', 'linear'
beta0 : double array_like, optional
The regression weight vector to perform Ordinary Kriging (OK).
Default assumes Universal Kriging (UK) so that the vector beta of
regression weights is estimated using the maximum likelihood
principle.
storage_mode : string, optional
A string specifying whether the Cholesky decomposition of the
correlation matrix should be stored in the class (storage_mode =
'full') or not (storage_mode = 'light').
Default assumes storage_mode = 'full', so that the
Cholesky decomposition of the correlation matrix is stored.
This might be a useful parameter when one is not interested in the
MSE and only plan to estimate the BLUP, for which the correlation
matrix is not required.
verbose : boolean, optional
A boolean specifying the verbose level.
Default is verbose = False.
theta0 : double array_like, optional
An array with shape (n_features, ) or (1, ).
The parameters in the autocorrelation model.
If thetaL and thetaU are also specified, theta0 is considered as
the starting point for the maximum likelihood estimation of the
best set of parameters.
Default assumes isotropic autocorrelation model with theta0 = 1e-1.
thetaL : double array_like, optional
An array with shape matching theta0's.
Lower bound on the autocorrelation parameters for maximum
likelihood estimation.
Default is None, so that it skips maximum likelihood estimation and
it uses theta0.
thetaU : double array_like, optional
An array with shape matching theta0's.
Upper bound on the autocorrelation parameters for maximum
likelihood estimation.
Default is None, so that it skips maximum likelihood estimation and
it uses theta0.
normalize : boolean, optional
Input X and observations y are centered and reduced wrt
means and standard deviations estimated from the n_samples
observations provided.
Default is normalize = True so that data is normalized to ease
maximum likelihood estimation.
nugget : double or ndarray, optional
Introduce a nugget effect to allow smooth predictions from noisy
data. If nugget is an ndarray, it must be the same length as the
number of data points used for the fit.
The nugget is added to the diagonal of the assumed training covariance;
in this way it acts as a Tikhonov regularization in the problem. In
the special case of the squared exponential correlation function, the
nugget mathematically represents the variance of the input values.
Default assumes a nugget close to machine precision for the sake of
robustness (nugget = 10. * MACHINE_EPSILON).
optimizer : string, optional
A string specifying the optimization algorithm to be used.
Default uses 'fmin_cobyla' algorithm from scipy.optimize.
Available optimizers are::
'fmin_cobyla', 'Welch'
'Welch' optimizer is dued to Welch et al., see reference [WBSWM1992]_.
It consists in iterating over several one-dimensional optimizations
instead of running one single multi-dimensional optimization.
random_start : int, optional
The number of times the Maximum Likelihood Estimation should be
performed from a random starting point.
The first MLE always uses the specified starting point (theta0),
the next starting points are picked at random according to an
exponential distribution (log-uniform on [thetaL, thetaU]).
Default does not use random starting point (random_start = 1).
random_state: integer or numpy.RandomState, optional
The generator used to shuffle the sequence of coordinates of theta in
the Welch optimizer. If an integer is given, it fixes the seed.
Defaults to the global numpy random number generator.
Attributes
----------
theta_ : array
Specified theta OR the best set of autocorrelation parameters (the \
sought maximizer of the reduced likelihood function).
reduced_likelihood_function_value_ : array
The optimal reduced likelihood function value.
Examples
--------
>>> import numpy as np
>>> from sklearn.gaussian_process import GaussianProcess
>>> X = np.array([[1., 3., 5., 6., 7., 8.]]).T
>>> y = (X * np.sin(X)).ravel()
>>> gp = GaussianProcess(theta0=0.1, thetaL=.001, thetaU=1.)
>>> gp.fit(X, y) # doctest: +ELLIPSIS
GaussianProcess(beta0=None...
...
Notes
-----
The presentation implementation is based on a translation of the DACE
Matlab toolbox, see reference [NLNS2002]_.
References
----------
.. [NLNS2002] `H.B. Nielsen, S.N. Lophaven, H. B. Nielsen and J.
Sondergaard. DACE - A MATLAB Kriging Toolbox.` (2002)
http://www2.imm.dtu.dk/~hbn/dace/dace.pdf
.. [WBSWM1992] `W.J. Welch, R.J. Buck, J. Sacks, H.P. Wynn, T.J. Mitchell,
and M.D. Morris (1992). Screening, predicting, and computer
experiments. Technometrics, 34(1) 15--25.`
http://www.jstor.org/pss/1269548
"""
_regression_types = {
'constant': regression.constant,
'linear': regression.linear,
'quadratic': regression.quadratic}
_correlation_types = {
'absolute_exponential': correlation.absolute_exponential,
'squared_exponential': correlation.squared_exponential,
'generalized_exponential': correlation.generalized_exponential,
'cubic': correlation.cubic,
'linear': correlation.linear}
_optimizer_types = [
'fmin_cobyla',
'Welch']
    def __init__(self, regr='constant', corr='squared_exponential', beta0=None,
                 storage_mode='full', verbose=False, theta0=1e-1,
                 thetaL=None, thetaU=None, optimizer='fmin_cobyla',
                 random_start=1, normalize=True,
                 nugget=10. * MACHINE_EPSILON, random_state=None):
        # Per scikit-learn convention, __init__ only stores the constructor
        # parameters verbatim; all validation and canonicalization happens in
        # _check_params() (called from fit), so that get_params/set_params
        # round-trip the user-supplied values unchanged.
        self.regr = regr
        self.corr = corr
        self.beta0 = beta0
        self.storage_mode = storage_mode
        self.verbose = verbose
        self.theta0 = theta0
        self.thetaL = thetaL
        self.thetaU = thetaU
        self.normalize = normalize
        self.nugget = nugget
        self.optimizer = optimizer
        self.random_start = random_start
        self.random_state = random_state
    def fit(self, X, y):
        """
        The Gaussian Process model fitting method.

        Parameters
        ----------
        X : double array_like
            An array with shape (n_samples, n_features) with the input at which
            observations were made.

        y : double array_like
            An array with shape (n_samples, ) or shape (n_samples, n_targets)
            with the observations of the output to be predicted.

        Returns
        -------
        gp : self
            A fitted Gaussian Process model object awaiting data to perform
            predictions.
        """
        # Run input checks
        self._check_params()

        self.random_state = check_random_state(self.random_state)

        # Force data to 2D numpy.array
        X, y = check_X_y(X, y, multi_output=True, y_numeric=True)
        # Remember the original dimensionality of y so predict() can return
        # an array of the matching shape.
        self.y_ndim_ = y.ndim
        if y.ndim == 1:
            y = y[:, np.newaxis]

        # Check shapes of DOE & observations
        n_samples, n_features = X.shape
        _, n_targets = y.shape

        # Run input checks (again, now that n_samples is known, so that a
        # per-sample nugget array can be validated against it)
        self._check_params(n_samples)

        # Normalize data or don't
        if self.normalize:
            X_mean = np.mean(X, axis=0)
            X_std = np.std(X, axis=0)
            y_mean = np.mean(y, axis=0)
            y_std = np.std(y, axis=0)
            # Guard against division by zero for constant features/targets.
            X_std[X_std == 0.] = 1.
            y_std[y_std == 0.] = 1.
            # center and scale X if necessary
            X = (X - X_mean) / X_std
            y = (y - y_mean) / y_std
        else:
            # Identity normalization so predict() can always un-normalize.
            X_mean = np.zeros(1)
            X_std = np.ones(1)
            y_mean = np.zeros(1)
            y_std = np.ones(1)

        # Calculate matrix of distances D between samples
        D, ij = l1_cross_distances(X)
        if (np.min(np.sum(D, axis=1)) == 0.
                and self.corr != correlation.pure_nugget):
            # Duplicate sample points make the correlation matrix singular
            # (unless a pure-nugget correlation absorbs the noise).
            raise Exception("Multiple input features cannot have the same"
                            " target value.")

        # Regression matrix and parameters
        F = self.regr(X)
        n_samples_F = F.shape[0]
        if F.ndim > 1:
            p = F.shape[1]
        else:
            p = 1
        if n_samples_F != n_samples:
            raise Exception("Number of rows in F and X do not match. Most "
                            "likely something is going wrong with the "
                            "regression model.")
        if p > n_samples_F:
            raise Exception(("Ordinary least squares problem is undetermined "
                             "n_samples=%d must be greater than the "
                             "regression model size p=%d.") % (n_samples, p))
        if self.beta0 is not None:
            if self.beta0.shape[0] != p:
                raise Exception("Shapes of beta0 and F do not match.")

        # Set attributes
        self.X = X
        self.y = y
        self.D = D
        self.ij = ij
        self.F = F
        self.X_mean, self.X_std = X_mean, X_std
        self.y_mean, self.y_std = y_mean, y_std

        # Determine Gaussian Process model parameters
        if self.thetaL is not None and self.thetaU is not None:
            # Maximum Likelihood Estimation of the parameters
            if self.verbose:
                print("Performing Maximum Likelihood Estimation of the "
                      "autocorrelation parameters...")
            self.theta_, self.reduced_likelihood_function_value_, par = \
                self._arg_max_reduced_likelihood_function()
            if np.isinf(self.reduced_likelihood_function_value_):
                raise Exception("Bad parameter region. "
                                "Try increasing upper bound")
        else:
            # Given parameters (no bounds supplied, so no MLE is performed)
            if self.verbose:
                print("Given autocorrelation parameters. "
                      "Computing Gaussian Process model parameters...")
            self.theta_ = self.theta0
            self.reduced_likelihood_function_value_, par = \
                self.reduced_likelihood_function()
            if np.isinf(self.reduced_likelihood_function_value_):
                raise Exception("Bad point. Try increasing theta0.")

        self.beta = par['beta']
        self.gamma = par['gamma']
        self.sigma2 = par['sigma2']
        self.C = par['C']
        self.Ft = par['Ft']
        self.G = par['G']

        if self.storage_mode == 'light':
            # Delete heavy data (it will be computed again if required)
            # (it is required only when MSE is wanted in self.predict)
            if self.verbose:
                print("Light storage mode specified. "
                      "Flushing autocorrelation matrix...")
            self.D = None
            self.ij = None
            self.F = None
            self.C = None
            self.Ft = None
            self.G = None

        return self
def predict(self, X, eval_MSE=False, batch_size=None):
"""
This function evaluates the Gaussian Process model at x.
Parameters
----------
X : array_like
An array with shape (n_eval, n_features) giving the point(s) at
which the prediction(s) should be made.
eval_MSE : boolean, optional
A boolean specifying whether the Mean Squared Error should be
evaluated or not.
Default assumes evalMSE = False and evaluates only the BLUP (mean
prediction).
batch_size : integer, optional
An integer giving the maximum number of points that can be
evaluated simultaneously (depending on the available memory).
Default is None so that all given points are evaluated at the same
time.
Returns
-------
y : array_like, shape (n_samples, ) or (n_samples, n_targets)
An array with shape (n_eval, ) if the Gaussian Process was trained
on an array of shape (n_samples, ) or an array with shape
(n_eval, n_targets) if the Gaussian Process was trained on an array
of shape (n_samples, n_targets) with the Best Linear Unbiased
Prediction at x.
MSE : array_like, optional (if eval_MSE == True)
An array with shape (n_eval, ) or (n_eval, n_targets) as with y,
with the Mean Squared Error at x.
"""
check_is_fitted(self, "X")
# Check input shapes
X = check_array(X)
n_eval, _ = X.shape
n_samples, n_features = self.X.shape
n_samples_y, n_targets = self.y.shape
# Run input checks
self._check_params(n_samples)
if X.shape[1] != n_features:
raise ValueError(("The number of features in X (X.shape[1] = %d) "
"should match the number of features used "
"for fit() "
"which is %d.") % (X.shape[1], n_features))
if batch_size is None:
# No memory management
# (evaluates all given points in a single batch run)
# Normalize input
X = (X - self.X_mean) / self.X_std
# Initialize output
y = np.zeros(n_eval)
if eval_MSE:
MSE = np.zeros(n_eval)
# Get pairwise componentwise L1-distances to the input training set
dx = manhattan_distances(X, Y=self.X, sum_over_features=False)
# Get regression function and correlation
f = self.regr(X)
r = self.corr(self.theta_, dx).reshape(n_eval, n_samples)
# Scaled predictor
y_ = np.dot(f, self.beta) + np.dot(r, self.gamma)
# Predictor
y = (self.y_mean + self.y_std * y_).reshape(n_eval, n_targets)
if self.y_ndim_ == 1:
y = y.ravel()
# Mean Squared Error
if eval_MSE:
C = self.C
if C is None:
# Light storage mode (need to recompute C, F, Ft and G)
if self.verbose:
print("This GaussianProcess used 'light' storage mode "
"at instantiation. Need to recompute "
"autocorrelation matrix...")
reduced_likelihood_function_value, par = \
self.reduced_likelihood_function()
self.C = par['C']
self.Ft = par['Ft']
self.G = par['G']
rt = linalg.solve_triangular(self.C, r.T, lower=True)
if self.beta0 is None:
# Universal Kriging
u = linalg.solve_triangular(self.G.T,
np.dot(self.Ft.T, rt) - f.T,
lower=True)
else:
# Ordinary Kriging
u = np.zeros((n_targets, n_eval))
MSE = np.dot(self.sigma2.reshape(n_targets, 1),
(1. - (rt ** 2.).sum(axis=0)
+ (u ** 2.).sum(axis=0))[np.newaxis, :])
MSE = np.sqrt((MSE ** 2.).sum(axis=0) / n_targets)
# Mean Squared Error might be slightly negative depending on
# machine precision: force to zero!
MSE[MSE < 0.] = 0.
if self.y_ndim_ == 1:
MSE = MSE.ravel()
return y, MSE
else:
return y
else:
# Memory management
if type(batch_size) is not int or batch_size <= 0:
raise Exception("batch_size must be a positive integer")
if eval_MSE:
y, MSE = np.zeros(n_eval), np.zeros(n_eval)
for k in range(max(1, n_eval / batch_size)):
batch_from = k * batch_size
batch_to = min([(k + 1) * batch_size + 1, n_eval + 1])
y[batch_from:batch_to], MSE[batch_from:batch_to] = \
self.predict(X[batch_from:batch_to],
eval_MSE=eval_MSE, batch_size=None)
return y, MSE
else:
y = np.zeros(n_eval)
for k in range(max(1, n_eval / batch_size)):
batch_from = k * batch_size
batch_to = min([(k + 1) * batch_size + 1, n_eval + 1])
y[batch_from:batch_to] = \
self.predict(X[batch_from:batch_to],
eval_MSE=eval_MSE, batch_size=None)
return y
def reduced_likelihood_function(self, theta=None):
"""
This function determines the BLUP parameters and evaluates the reduced
likelihood function for the given autocorrelation parameters theta.
Maximizing this function wrt the autocorrelation parameters theta is
equivalent to maximizing the likelihood of the assumed joint Gaussian
distribution of the observations y evaluated onto the design of
experiments X.
Parameters
----------
theta : array_like, optional
An array containing the autocorrelation parameters at which the
Gaussian Process model parameters should be determined.
Default uses the built-in autocorrelation parameters
(ie ``theta = self.theta_``).
Returns
-------
reduced_likelihood_function_value : double
The value of the reduced likelihood function associated to the
given autocorrelation parameters theta.
par : dict
A dictionary containing the requested Gaussian Process model
parameters:
sigma2
Gaussian Process variance.
beta
Generalized least-squares regression weights for
Universal Kriging or given beta0 for Ordinary
Kriging.
gamma
Gaussian Process weights.
C
Cholesky decomposition of the correlation matrix [R].
Ft
Solution of the linear equation system : [R] x Ft = F
G
QR decomposition of the matrix Ft.
"""
check_is_fitted(self, "X")
if theta is None:
# Use built-in autocorrelation parameters
theta = self.theta_
# Initialize output
reduced_likelihood_function_value = - np.inf
par = {}
# Retrieve data
n_samples = self.X.shape[0]
D = self.D
ij = self.ij
F = self.F
if D is None:
# Light storage mode (need to recompute D, ij and F)
D, ij = l1_cross_distances(self.X)
if (np.min(np.sum(D, axis=1)) == 0.
and self.corr != correlation.pure_nugget):
raise Exception("Multiple X are not allowed")
F = self.regr(self.X)
# Set up R
r = self.corr(theta, D)
R = np.eye(n_samples) * (1. + self.nugget)
R[ij[:, 0], ij[:, 1]] = r
R[ij[:, 1], ij[:, 0]] = r
# Cholesky decomposition of R
try:
C = linalg.cholesky(R, lower=True)
except linalg.LinAlgError:
return reduced_likelihood_function_value, par
# Get generalized least squares solution
Ft = linalg.solve_triangular(C, F, lower=True)
try:
Q, G = linalg.qr(Ft, econ=True)
except:
#/usr/lib/python2.6/dist-packages/scipy/linalg/decomp.py:1177:
# DeprecationWarning: qr econ argument will be removed after scipy
# 0.7. The economy transform will then be available through the
# mode='economic' argument.
Q, G = linalg.qr(Ft, mode='economic')
sv = linalg.svd(G, compute_uv=False)
rcondG = sv[-1] / sv[0]
if rcondG < 1e-10:
# Check F
sv = linalg.svd(F, compute_uv=False)
condF = sv[0] / sv[-1]
if condF > 1e15:
raise Exception("F is too ill conditioned. Poor combination "
"of regression model and observations.")
else:
# Ft is too ill conditioned, get out (try different theta)
return reduced_likelihood_function_value, par
Yt = linalg.solve_triangular(C, self.y, lower=True)
if self.beta0 is None:
# Universal Kriging
beta = linalg.solve_triangular(G, np.dot(Q.T, Yt))
else:
# Ordinary Kriging
beta = np.array(self.beta0)
rho = Yt - np.dot(Ft, beta)
sigma2 = (rho ** 2.).sum(axis=0) / n_samples
# The determinant of R is equal to the squared product of the diagonal
# elements of its Cholesky decomposition C
detR = (np.diag(C) ** (2. / n_samples)).prod()
# Compute/Organize output
reduced_likelihood_function_value = - sigma2.sum() * detR
par['sigma2'] = sigma2 * self.y_std ** 2.
par['beta'] = beta
par['gamma'] = linalg.solve_triangular(C.T, rho)
par['C'] = C
par['Ft'] = Ft
par['G'] = G
return reduced_likelihood_function_value, par
    def _arg_max_reduced_likelihood_function(self):
        """
        This function estimates the autocorrelation parameters theta as the
        maximizer of the reduced likelihood function.
        (Minimization of the opposite reduced likelihood function is used for
        convenience)

        Parameters
        ----------
        self : All parameters are stored in the Gaussian Process model object.

        Returns
        -------
        optimal_theta : array_like
            The best set of autocorrelation parameters (the sought maximizer of
            the reduced likelihood function).

        optimal_reduced_likelihood_function_value : double
            The optimal reduced likelihood function value.

        optimal_par : dict
            The BLUP parameters associated to thetaOpt.
        """
        # Initialize output
        best_optimal_theta = []
        best_optimal_rlf_value = []
        best_optimal_par = []

        if self.verbose:
            print("The chosen optimizer is: " + str(self.optimizer))
            if self.random_start > 1:
                print(str(self.random_start) + " random starts are required.")

        percent_completed = 0.

        # Force optimizer to fmin_cobyla if the model is meant to be isotropic
        if self.optimizer == 'Welch' and self.theta0.size == 1:
            self.optimizer = 'fmin_cobyla'

        if self.optimizer == 'fmin_cobyla':
            # COBYLA minimizes, so we negate the reduced likelihood and
            # optimize in log10-space to keep theta strictly positive.
            def minus_reduced_likelihood_function(log10t):
                return - self.reduced_likelihood_function(
                    theta=10. ** log10t)[0]

            # Box constraints thetaL <= theta <= thetaU, expressed as
            # non-negative constraint functions in log10-space, one pair per
            # dimension.  ``i=i`` binds the loop variable at definition time
            # (avoids the late-binding closure pitfall).
            constraints = []
            for i in range(self.theta0.size):
                constraints.append(lambda log10t, i=i:
                                   log10t[i] - np.log10(self.thetaL[0, i]))
                constraints.append(lambda log10t, i=i:
                                   np.log10(self.thetaU[0, i]) - log10t[i])

            for k in range(self.random_start):

                if k == 0:
                    # Use specified starting point as first guess
                    theta0 = self.theta0
                else:
                    # Generate a random starting point log10-uniformly
                    # distributed between bounds
                    log10theta0 = (np.log10(self.thetaL)
                                   + self.random_state.rand(*self.theta0.shape)
                                   * np.log10(self.thetaU / self.thetaL))
                    theta0 = 10. ** log10theta0

                # Run Cobyla
                try:
                    log10_optimal_theta = \
                        optimize.fmin_cobyla(minus_reduced_likelihood_function,
                                             np.log10(theta0).ravel(), constraints,
                                             iprint=0)
                except ValueError as ve:
                    print("Optimization failed. Try increasing the ``nugget``")
                    raise ve

                optimal_theta = 10. ** log10_optimal_theta
                optimal_rlf_value, optimal_par = \
                    self.reduced_likelihood_function(theta=optimal_theta)

                # Compare the new optimizer to the best previous one
                if k > 0:
                    if optimal_rlf_value > best_optimal_rlf_value:
                        best_optimal_rlf_value = optimal_rlf_value
                        best_optimal_par = optimal_par
                        best_optimal_theta = optimal_theta
                else:
                    # First restart: unconditionally seed the running best.
                    best_optimal_rlf_value = optimal_rlf_value
                    best_optimal_par = optimal_par
                    best_optimal_theta = optimal_theta
                if self.verbose and self.random_start > 1:
                    if (20 * k) / self.random_start > percent_completed:
                        percent_completed = (20 * k) / self.random_start
                        print("%s completed" % (5 * percent_completed))

            optimal_rlf_value = best_optimal_rlf_value
            optimal_par = best_optimal_par
            optimal_theta = best_optimal_theta

        elif self.optimizer == 'Welch':
            # Backup of the given attributes.  This branch temporarily
            # mutates self.theta0/thetaL/thetaU/corr/optimizer/verbose to
            # drive nested fmin_cobyla runs, and restores them at the end.
            theta0, thetaL, thetaU = self.theta0, self.thetaL, self.thetaU
            corr = self.corr
            verbose = self.verbose

            # This will iterate over fmin_cobyla optimizer
            self.optimizer = 'fmin_cobyla'
            self.verbose = False

            # Initialize under isotropy assumption
            if verbose:
                print("Initialize under isotropy assumption...")
            self.theta0 = check_array(self.theta0.min())
            self.thetaL = check_array(self.thetaL.min())
            self.thetaU = check_array(self.thetaU.max())
            theta_iso, optimal_rlf_value_iso, par_iso = \
                self._arg_max_reduced_likelihood_function()
            optimal_theta = theta_iso + np.zeros(theta0.shape)

            # Iterate over all dimensions of theta allowing for anisotropy
            if verbose:
                print("Now improving allowing for anisotropy...")
            for i in self.random_state.permutation(theta0.size):
                if verbose:
                    print("Proceeding along dimension %d..." % (i + 1))
                self.theta0 = check_array(theta_iso)
                self.thetaL = check_array(thetaL[0, i])
                self.thetaU = check_array(thetaU[0, i])

                # One-dimensional cut of the correlation model: only the i-th
                # component of theta varies; the others stay at the current
                # optimum.
                def corr_cut(t, d):
                    return corr(check_array(np.hstack([optimal_theta[0][0:i],
                                                       t[0],
                                                       optimal_theta[0][(i +
                                                                         1)::]])),
                                d)

                self.corr = corr_cut
                optimal_theta[0, i], optimal_rlf_value, optimal_par = \
                    self._arg_max_reduced_likelihood_function()

            # Restore the given attributes
            self.theta0, self.thetaL, self.thetaU = theta0, thetaL, thetaU
            self.corr = corr
            self.optimizer = 'Welch'
            self.verbose = verbose

        else:
            raise NotImplementedError("This optimizer ('%s') is not "
                                      "implemented yet. Please contribute!"
                                      % self.optimizer)

        return optimal_theta, optimal_rlf_value, optimal_par
def _check_params(self, n_samples=None):
# Check regression model
if not callable(self.regr):
if self.regr in self._regression_types:
self.regr = self._regression_types[self.regr]
else:
raise ValueError("regr should be one of %s or callable, "
"%s was given."
% (self._regression_types.keys(), self.regr))
# Check regression weights if given (Ordinary Kriging)
if self.beta0 is not None:
self.beta0 = np.atleast_2d(self.beta0)
if self.beta0.shape[1] != 1:
# Force to column vector
self.beta0 = self.beta0.T
# Check correlation model
if not callable(self.corr):
if self.corr in self._correlation_types:
self.corr = self._correlation_types[self.corr]
else:
raise ValueError("corr should be one of %s or callable, "
"%s was given."
% (self._correlation_types.keys(), self.corr))
# Check storage mode
if self.storage_mode != 'full' and self.storage_mode != 'light':
raise ValueError("Storage mode should either be 'full' or "
"'light', %s was given." % self.storage_mode)
# Check correlation parameters
self.theta0 = np.atleast_2d(self.theta0)
lth = self.theta0.size
if self.thetaL is not None and self.thetaU is not None:
self.thetaL = np.atleast_2d(self.thetaL)
self.thetaU = np.atleast_2d(self.thetaU)
if self.thetaL.size != lth or self.thetaU.size != lth:
raise ValueError("theta0, thetaL and thetaU must have the "
"same length.")
if np.any(self.thetaL <= 0) or np.any(self.thetaU < self.thetaL):
raise ValueError("The bounds must satisfy O < thetaL <= "
"thetaU.")
elif self.thetaL is None and self.thetaU is None:
if np.any(self.theta0 <= 0):
raise ValueError("theta0 must be strictly positive.")
elif self.thetaL is None or self.thetaU is None:
raise ValueError("thetaL and thetaU should either be both or "
"neither specified.")
# Force verbose type to bool
self.verbose = bool(self.verbose)
# Force normalize type to bool
self.normalize = bool(self.normalize)
# Check nugget value
self.nugget = np.asarray(self.nugget)
if np.any(self.nugget) < 0.:
raise ValueError("nugget must be positive or zero.")
if (n_samples is not None
and self.nugget.shape not in [(), (n_samples,)]):
raise ValueError("nugget must be either a scalar "
"or array of length n_samples.")
# Check optimizer
if self.optimizer not in self._optimizer_types:
raise ValueError("optimizer should be one of %s"
% self._optimizer_types)
# Force random_start type to int
self.random_start = int(self.random_start)
| bsd-3-clause |
jpautom/scikit-learn | sklearn/feature_extraction/tests/test_feature_hasher.py | 258 | 2861 | from __future__ import unicode_literals
import numpy as np
from sklearn.feature_extraction import FeatureHasher
from nose.tools import assert_raises, assert_true
from numpy.testing import assert_array_equal, assert_equal
def test_feature_hasher_dicts():
    """Hashing dicts and the equivalent (key, value) pair streams must agree."""
    hasher = FeatureHasher(n_features=16)
    # "dict" is the default input type.
    assert_equal("dict", hasher.input_type)

    samples = [{"dada": 42, "tzara": 37}, {"gaga": 17}]
    from_dicts = FeatureHasher(n_features=16).transform(samples)
    pair_stream = (iter(sample.items()) for sample in samples)
    from_pairs = FeatureHasher(n_features=16,
                               input_type="pair").transform(pair_stream)
    assert_array_equal(from_dicts.toarray(), from_pairs.toarray())
def test_feature_hasher_strings():
    # mix byte and Unicode strings; note that "foo" is a duplicate in row 0
    raw_X = [["foo", "bar", "baz", "foo".encode("ascii")],
             ["bar".encode("ascii"), "baz", "quux"]]

    for lg_n_features in (7, 9, 11, 16, 22):
        n_features = 2 ** lg_n_features

        it = (x for x in raw_X)  # iterable

        h = FeatureHasher(n_features, non_negative=True, input_type="string")
        X = h.transform(it)

        assert_equal(X.shape[0], len(raw_X))
        assert_equal(X.shape[1], n_features)

        # With non_negative=True every stored value must be > 0.
        assert_true(np.all(X.data > 0))
        # Row 0 contains 4 tokens (including the duplicate "foo"), row 1
        # contains 3, so the per-row sums are 4 and 3; the 3 distinct
        # features per row give 6 stored values (assuming no hash
        # collisions at these table sizes).
        assert_equal(X[0].sum(), 4)
        assert_equal(X[1].sum(), 3)
        assert_equal(X.nnz, 6)
def test_feature_hasher_pairs():
    # Streams of (feature, value) pairs; "foo": -1 exercises negative values.
    raw_X = (iter(d.items()) for d in [{"foo": 1, "bar": 2},
                                       {"baz": 3, "quux": 4, "foo": -1}])
    h = FeatureHasher(n_features=16, input_type="pair")
    x1, x2 = h.transform(raw_X).toarray()
    # Compare magnitudes only (the stored sign depends on the hash), sorted
    # because the column order depends on the hashed indices.
    x1_nz = sorted(np.abs(x1[x1 != 0]))
    x2_nz = sorted(np.abs(x2[x2 != 0]))
    assert_equal([1, 2], x1_nz)
    assert_equal([1, 3, 4], x2_nz)
def test_hash_empty_input():
    """Empty samples of any iterable type must hash to all-zero rows."""
    n_features = 16
    empties = [[], (), iter(range(0))]
    hasher = FeatureHasher(n_features=n_features, input_type="string")
    hashed = hasher.transform(empties)
    assert_array_equal(hashed.A, np.zeros((len(empties), n_features)))
def test_hasher_invalid_input():
    # Constructor-time validation of input_type and n_features.
    assert_raises(ValueError, FeatureHasher, input_type="gobbledygook")
    assert_raises(ValueError, FeatureHasher, n_features=-1)
    assert_raises(ValueError, FeatureHasher, n_features=0)
    assert_raises(TypeError, FeatureHasher, n_features='ham')

    # A NumPy unsigned integer is accepted for n_features...
    h = FeatureHasher(n_features=np.uint16(2 ** 6))
    # ...but transform still rejects empty input and non-string features.
    assert_raises(ValueError, h.transform, [])
    assert_raises(Exception, h.transform, [[5.5]])
    assert_raises(Exception, h.transform, [[None]])
def test_hasher_set_params():
    # Test delayed input validation in fit (useful for grid search).
    hasher = FeatureHasher()
    # set_params accepts the bogus value without raising...
    hasher.set_params(n_features=np.inf)
    # ...validation only happens when fit is called.
    assert_raises(TypeError, hasher.fit)
def test_hasher_zeros():
    """Zero-valued features must not be materialized in the sparse output."""
    hashed = FeatureHasher().transform([{'foo': 0}])
    assert_equal(hashed.data.shape, (0,))
| bsd-3-clause |
linwoodc3/gdeltPyR | tests/test_info.py | 1 | 3552 | # #!/usr/bin/python
# # -*- coding: utf-8 -*-
#
# # Author:
# # Linwood Creekmore
# # Email: valinvescap@gmail.com
#
# ##############################
# # Standard Library Import
# ##############################
#
# import os
# from unittest import TestCase
#
# ##############################
# # Third Party Libraries
# ##############################
#
# import pandas as pd
# import numpy as np
# import coveralls
#
# ##############################
# # Custom Library Import
# ##############################
#
# import gdelt
# from gdelt.base import codes, BASE_DIR
#
#
# class testTableValues(TestCase):
# def test_codedataframe(self):
# """Test CAMEO Code dataframe."""
#
# f = os.path.join(BASE_DIR, 'data', 'cameoCodes.json')
# resp = pd.read_json(f, dtype={'cameoCode': 'str', "GoldsteinScale": np.float64})
# resp.set_index('cameoCode', drop=False, inplace=True)
# print("This is {}".format(gdelt.__file__))
# return (self.assertTrue(resp.equals(codes)))
#
# def test_events1_columns(self):
# """Test events 1 column descriptions"""
#
# f = os.path.join(BASE_DIR, 'data', 'events1.csv')
# resp = pd.read_csv(f)
# tabs = gdelt.tableInfo(version=1)
# ret = tabs.gettable('events')
# return (self.assertTrue(resp.equals(ret)))
#
# def test_events2_columns(self):
# """Test events version 2 column descriptions"""
#
# f = os.path.join(BASE_DIR, 'data', 'events2.csv')
# resp = pd.read_csv(f)
# tabs = gdelt.tableInfo(version=2)
# ret = tabs.gettable('events')
# return (self.assertTrue(resp.equals(ret)))
#
# def test_mentions_columns_pass(self):
# """Test mentions version 2 pass column descriptions"""
#
# f = os.path.join(BASE_DIR, 'data', 'mentions.csv')
# resp = pd.read_csv(f)
# tabs = gdelt.tableInfo(version=2)
# ret = tabs.gettable('mentions')
# return (self.assertTrue(resp.equals(ret)))
#
# def test_mentions_columns_fail(self):
# """Fail mentions version 2 pass column descriptions"""
#
# f = os.path.join(BASE_DIR, 'data', 'mentions.csv')
# resp = pd.read_csv(f)
# tabs = gdelt.tableInfo(version=1)
# exp = 'GDELT 1.0 does not have a mentions table.'
# with self.assertRaises(Exception) as context:
# checked = tabs.gettable('mentions')
# the_exception = context.exception
# return self.assertEqual(exp, str(the_exception), "Exception for wrong table name.")
#
# def test_gkg_columns_pass(self):
# """Test gkg version 2 pass column descriptions"""
#
# f = os.path.join(BASE_DIR, 'data', 'gkg2.csv')
# resp = pd.read_csv(f)
# tabs = gdelt.tableInfo(version=2)
# ret = tabs.gettable('gkg')
# return (self.assertTrue(resp.equals(ret)))
#
# def test_vgkg_columns_pass(self):
# """Test visual gkg version 2 pass column descriptions"""
#
# f = os.path.join(BASE_DIR, 'data', 'visualgkg.csv')
# resp = pd.read_csv(f)
# tabs = gdelt.tableInfo(version=2)
# ret = tabs.gettable('vgkg')
# return (self.assertTrue(resp.equals(ret)))
#
# def test_iatv_columns_pass(self):
# """Test iatv pass column descriptions"""
#
# f = os.path.join(BASE_DIR, 'data', 'iatv.csv')
# resp = pd.read_csv(f)
# tabs = gdelt.tableInfo(version=2)
# ret = tabs.gettable('iatv')
# return (self.assertTrue(resp.equals(ret)))
#
#
| gpl-3.0 |
quasars100/Resonance_testing_scripts | alice/plotdata.py | 1 | 2845 | import matplotlib.pyplot as plt
import pylab
import numpy
import rebound
import reboundxf
from pylab import *
# Load the integration output: unpack=True transposes the file's columns
# into rows, so data[k] is the full time series of the k-th quantity.
data = numpy.loadtxt('data.txt', unpack = True)
time = data[0]
e1 = data[1]
e2 = data[2]
pratio = data[3]
l1 = data[4]
l2 = data[5]
varpi1 = data[6]
varpi2 = data[7]
a1 = data[8]
a2 = data[9]
# Mean period ratio over the first 1000 samples.  The original accumulated
# the sum one element at a time in a Python loop; the vectorized sum is
# equivalent and avoids O(n) Python-level iteration.
avg = pratio[:1000].sum() / 1000
print(avg)
def anglerange(val):
    """Wrap an angle given in radians into [0, 2*pi] and return it in degrees.

    Values exactly equal to 0 or 2*pi are left untouched (so 2*pi maps to
    360 degrees, not 0), matching the behaviour of the original loop-based
    normalisation.
    """
    full_turn = 2 * np.pi
    # Shift by whole turns until the angle lies in the principal range.
    while val < 0:
        val += full_turn
    while val > full_turn:
        val -= full_turn
    return val * 180 / np.pi
# 3:2 mean-motion resonance angles (in degrees):
#   phi_i = 3*lambda_inner - 2*lambda_outer - varpi_i
# Libration of these angles indicates capture into the resonance.
phi1 = [anglerange(3*l1[pts] - 2*l2[pts] - varpi1[pts]) for pts in range(1000)]
phi2 = [anglerange(3*l1[pts2] - 2*l2[pts2] - varpi2[pts2]) for pts2 in range(1000)]
# --- Figure 1: eccentricity evolution of both planets ---
# NOTE(review): the data are plotted twice (plt.plot then pylab plot) so
# that the second pass attaches legend labels; the first pass draws the
# thicker background curves.
plt.figure()
plt.plot(time, e1, linewidth = 2.0, color = 'red')
plt.plot(time, e2, linewidth = 2.0, color = 'green')
plt.xlabel('Time (years)', fontsize = 12)
plt.ylabel('Eccentricity', fontsize = 12)
plot(time, e1, linewidth = 1.0, color = 'red', label = 'Smaller planet eccentricity')
plot(time, e2, linewidth = 1.0, color = 'green', label = 'Larger planet eccentricity')
legend(loc = 'upper right')
plt.savefig('old_E_outermass_1.pdf')
print('working')
# --- Figure 2: period ratio vs time ---
plt.figure()
plt.plot(time, pratio, linewidth = 2.0, color = 'blue')
plt.xlabel('Time (years)', fontsize = 12)
plt.ylabel('Period Ratio', fontsize = 12)
plt.savefig('old_outermass_PR_1.pdf')
print('working')
# --- Figure 3: mean longitudes (scatter; linestyle has no effect on scatter) ---
plt.figure()
plt.scatter(time, l1, color = 'purple', linestyle = 'dashed', s=10)
plt.scatter(time, l2, color = 'orange', linestyle = 'dashed', s=10)
plt.xlabel('Time (years)', fontsize = 12)
plt.ylabel('Longitude', fontsize = 12)
scatter(time, l1, color = 'purple', label = 'Smaller planet longitude', s = 10)
scatter(time, l2, color = 'orange', label = 'Larger planet longitude', s = 10)
legend(loc = 'upper right')
plt.savefig('old_longitude_outermass_1.pdf')
print('working')
# --- Figure 4: longitudes of pericenter (varpi) ---
# Bug fix: the y-axis label was a copy-paste of the x-axis label
# ('Time (years)'); it now names the quantity actually plotted.
plt.figure()
plt.scatter(time, varpi1, linewidth = 2.0, color = 'black', s=10)
plt.scatter(time, varpi2, linewidth = 2.0, color = 'pink', s=10)
plt.xlabel('Time (years)', fontsize = 12)
plt.ylabel('Varpi', fontsize = 12)
scatter(time, varpi1, color = 'black', label = 'Smaller planet varpi', s= 10)
scatter(time, varpi2, color = 'pink', label = 'Larger planet varpi', s =10)
legend(loc = 'upper right')
plt.savefig('old_varpi_outermass_1.pdf')
print('working')
# --- Figure 5: resonance angles phi1/phi2 ---
# NOTE(review): labels are attached but legend() is never called for this
# figure, so the labels are not rendered — presumably an oversight; confirm.
plt.figure()
plt.scatter(time, phi1, color = 'blue', s = 10)
plt.scatter(time, phi2, color = 'orange', s = 10)
plt.xlabel('Time (years)', fontsize = 12)
plt.ylabel('Resonance angles', fontsize = 12)
scatter(time, phi1, color = 'blue', s = 10, label = 'smaller planet')
scatter(time, phi2, color = 'orange', s = 10, label = 'larger planet')
plt.savefig('resonanceangle_outermass.pdf')
# --- Figure 6: semi-major axis of the inner planet ---
plt.figure()
plt.plot(time, a1)
plt.xlabel('Time (years)', fontsize = 12)
plt.ylabel('Axis', fontsize = 12)
plt.savefig('old_outermass_axis_1.pdf')
print('done')
| gpl-3.0 |
vinodkc/spark | python/pyspark/sql/session.py | 8 | 31156 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import sys
import warnings
from functools import reduce
from threading import RLock
from pyspark import since
from pyspark.rdd import RDD
from pyspark.sql.conf import RuntimeConfig
from pyspark.sql.dataframe import DataFrame
from pyspark.sql.pandas.conversion import SparkConversionMixin
from pyspark.sql.readwriter import DataFrameReader
from pyspark.sql.streaming import DataStreamReader
from pyspark.sql.types import DataType, StructType, \
_make_type_verifier, _infer_schema, _has_nulltype, _merge_type, _create_converter, \
_parse_datatype_string
from pyspark.sql.utils import install_exception_handler
__all__ = ["SparkSession"]
def _monkey_patch_RDD(sparkSession):
    # Attach a ``toDF`` convenience method to the RDD class so users can
    # write ``rdd.toDF(...)`` as a shorthand for ``sparkSession.createDataFrame``.
    # The closure captures the session used to build the DataFrame.
    def toDF(self, schema=None, sampleRatio=None):
        """
        Converts current :class:`RDD` into a :class:`DataFrame`
        This is a shorthand for ``spark.createDataFrame(rdd, schema, sampleRatio)``
        Parameters
        ----------
        schema : :class:`pyspark.sql.types.DataType`, str or list, optional
            a :class:`pyspark.sql.types.DataType` or a datatype string or a list of
            column names, default is None. The data type string format equals to
            :class:`pyspark.sql.types.DataType.simpleString`, except that top level struct type can
            omit the ``struct<>`` and atomic types use ``typeName()`` as their format, e.g. use
            ``byte`` instead of ``tinyint`` for :class:`pyspark.sql.types.ByteType`.
            We can also use ``int`` as a short name for :class:`pyspark.sql.types.IntegerType`.
        sampleRatio : float, optional
            the sample ratio of rows used for inferring
        Returns
        -------
        :class:`DataFrame`
        Examples
        --------
        >>> rdd.toDF().collect()
        [Row(name='Alice', age=1)]
        """
        return sparkSession.createDataFrame(self, schema, sampleRatio)
    # Monkey-patch applied class-wide: every RDD instance gains toDF.
    RDD.toDF = toDF
class SparkSession(SparkConversionMixin):
"""The entry point to programming Spark with the Dataset and DataFrame API.
A SparkSession can be used create :class:`DataFrame`, register :class:`DataFrame` as
tables, execute SQL over tables, cache tables, and read parquet files.
To create a SparkSession, use the following builder pattern:
.. autoattribute:: builder
:annotation:
Examples
--------
>>> spark = SparkSession.builder \\
... .master("local") \\
... .appName("Word Count") \\
... .config("spark.some.config.option", "some-value") \\
... .getOrCreate()
>>> from datetime import datetime
>>> from pyspark.sql import Row
>>> spark = SparkSession(sc)
>>> allTypes = sc.parallelize([Row(i=1, s="string", d=1.0, l=1,
... b=True, list=[1, 2, 3], dict={"s": 0}, row=Row(a=1),
... time=datetime(2014, 8, 1, 14, 1, 5))])
>>> df = allTypes.toDF()
>>> df.createOrReplaceTempView("allTypes")
>>> spark.sql('select i+1, d+1, not b, list[1], dict["s"], time, row.a '
... 'from allTypes where b and i > 0').collect()
[Row((i + 1)=2, (d + 1)=2.0, (NOT b)=False, list[1]=2, \
dict[s]=0, time=datetime.datetime(2014, 8, 1, 14, 1, 5), a=1)]
>>> df.rdd.map(lambda x: (x.i, x.s, x.d, x.l, x.b, x.time, x.row.a, x.list)).collect()
[(1, 'string', 1.0, 1, True, datetime.datetime(2014, 8, 1, 14, 1, 5), 1, [1, 2, 3])]
"""
class Builder(object):
"""Builder for :class:`SparkSession`.
"""
_lock = RLock()
_options = {}
_sc = None
        def config(self, key=None, value=None, conf=None):
            """Sets a config option. Options set using this method are automatically propagated to
            both :class:`SparkConf` and :class:`SparkSession`'s own configuration.
            .. versionadded:: 2.0.0
            Parameters
            ----------
            key : str, optional
                a key name string for configuration property
            value : str, optional
                a value for configuration property
            conf : :class:`SparkConf`, optional
                an instance of :class:`SparkConf`
            Examples
            --------
            For an existing SparkConf, use `conf` parameter.
            >>> from pyspark.conf import SparkConf
            >>> SparkSession.builder.config(conf=SparkConf())
            <pyspark.sql.session...
            For a (key, value) pair, you can omit parameter names.
            >>> SparkSession.builder.config("spark.some.config.option", "some-value")
            <pyspark.sql.session...
            """
            with self._lock:
                if conf is None:
                    # Single (key, value) pair; values are coerced to str.
                    self._options[key] = str(value)
                else:
                    # Copy every entry out of an existing SparkConf.
                    # NOTE(review): unlike the key/value path, values are NOT
                    # stringified here — presumably getAll already yields strings.
                    for (k, v) in conf.getAll():
                        self._options[k] = v
                return self
        def master(self, master):
            """Sets the Spark master URL to connect to, such as "local" to run locally, "local[4]"
            to run locally with 4 cores, or "spark://master:7077" to run on a Spark standalone
            cluster.
            .. versionadded:: 2.0.0
            Parameters
            ----------
            master : str
                a url for spark master
            """
            # Thin wrapper: stored as an ordinary config option.
            return self.config("spark.master", master)
        def appName(self, name):
            """Sets a name for the application, which will be shown in the Spark web UI.
            If no application name is set, a randomly generated name will be used.
            .. versionadded:: 2.0.0
            Parameters
            ----------
            name : str
                an application name
            """
            return self.config("spark.app.name", name)
        @since(2.0)
        def enableHiveSupport(self):
            """Enables Hive support, including connectivity to a persistent Hive metastore, support
            for Hive SerDes, and Hive user-defined functions.
            """
            return self.config("spark.sql.catalogImplementation", "hive")
        def _sparkContext(self, sc):
            # Internal hook: pin the builder to a pre-existing SparkContext so
            # getOrCreate() reuses it instead of creating one from the options.
            with self._lock:
                self._sc = sc
            return self
        def getOrCreate(self):
            """Gets an existing :class:`SparkSession` or, if there is no existing one, creates a
            new one based on the options set in this builder.
            .. versionadded:: 2.0.0
            Examples
            --------
            This method first checks whether there is a valid global default SparkSession, and if
            yes, return that one. If no valid global default SparkSession exists, the method
            creates a new SparkSession and assigns the newly created SparkSession as the global
            default.
            >>> s1 = SparkSession.builder.config("k1", "v1").getOrCreate()
            >>> s1.conf.get("k1") == "v1"
            True
            In case an existing SparkSession is returned, the config options specified
            in this builder will be applied to the existing SparkSession.
            >>> s2 = SparkSession.builder.config("k2", "v2").getOrCreate()
            >>> s1.conf.get("k1") == s2.conf.get("k1")
            True
            >>> s1.conf.get("k2") == s2.conf.get("k2")
            True
            """
            with self._lock:
                from pyspark.context import SparkContext
                from pyspark.conf import SparkConf
                session = SparkSession._instantiatedSession
                # A cached session is only reusable while its underlying
                # JVM SparkContext is still alive (_jsc is None once stopped).
                if session is None or session._sc._jsc is None:
                    if self._sc is not None:
                        # Context pinned via _sparkContext(); reuse as-is.
                        sc = self._sc
                    else:
                        sparkConf = SparkConf()
                        for key, value in self._options.items():
                            sparkConf.set(key, value)
                        # This SparkContext may be an existing one.
                        sc = SparkContext.getOrCreate(sparkConf)
                    # Do not update `SparkConf` for existing `SparkContext`, as it's shared
                    # by all sessions.
                    session = SparkSession(sc)
                # Builder options are always (re-)applied to the session's SQL conf,
                # even when an existing session is returned.
                for key, value in self._options.items():
                    session._jsparkSession.sessionState().conf().setConfString(key, value)
                return session
builder = Builder()
"""A class attribute having a :class:`Builder` to construct :class:`SparkSession` instances."""
_instantiatedSession = None
_activeSession = None
    def __init__(self, sparkContext, jsparkSession=None):
        """Create a SparkSession wrapping the given :class:`SparkContext`.

        Parameters
        ----------
        sparkContext : :class:`SparkContext`
            the Python SparkContext backing this session
        jsparkSession : py4j JavaObject, optional
            an existing JVM SparkSession to wrap; when None, the JVM default
            session is reused if still alive, otherwise a new one is created.
        """
        from pyspark.sql.context import SQLContext
        self._sc = sparkContext
        self._jsc = self._sc._jsc
        self._jvm = self._sc._jvm
        if jsparkSession is None:
            # Reuse the JVM-side default session only if it exists and its
            # SparkContext has not been stopped.
            if self._jvm.SparkSession.getDefaultSession().isDefined() \
                    and not self._jvm.SparkSession.getDefaultSession().get() \
                        .sparkContext().isStopped():
                jsparkSession = self._jvm.SparkSession.getDefaultSession().get()
            else:
                jsparkSession = self._jvm.SparkSession(self._jsc.sc())
        self._jsparkSession = jsparkSession
        self._jwrapped = self._jsparkSession.sqlContext()
        self._wrapped = SQLContext(self._sc, self, self._jwrapped)
        _monkey_patch_RDD(self)
        install_exception_handler()
        # If we had an instantiated SparkSession attached with a SparkContext
        # which is stopped now, we need to renew the instantiated SparkSession.
        # Otherwise, we will use invalid SparkSession when we call Builder.getOrCreate.
        if SparkSession._instantiatedSession is None \
                or SparkSession._instantiatedSession._sc._jsc is None:
            SparkSession._instantiatedSession = self
            SparkSession._activeSession = self
            # Mirror the Python-side registration on the JVM side.
            self._jvm.SparkSession.setDefaultSession(self._jsparkSession)
            self._jvm.SparkSession.setActiveSession(self._jsparkSession)
    def _repr_html_(self):
        # Rich HTML representation used by Jupyter: shows the catalog
        # implementation (hive / in-memory) and embeds the SparkContext's
        # own HTML widget.
        return """
            <div>
                <p><b>SparkSession - {catalogImplementation}</b></p>
                {sc_HTML}
            </div>
        """.format(
            catalogImplementation=self.conf.get("spark.sql.catalogImplementation"),
            sc_HTML=self.sparkContext._repr_html_()
        )
    @since(2.0)
    def newSession(self):
        """
        Returns a new SparkSession as new session, that has separate SQLConf,
        registered temporary views and UDFs, but shared SparkContext and
        table cache.
        """
        return self.__class__(self._sc, self._jsparkSession.newSession())
    @classmethod
    def getActiveSession(cls):
        """
        Returns the active SparkSession for the current thread, returned by the builder
        .. versionadded:: 3.0.0
        Returns
        -------
        :class:`SparkSession`
            Spark session if an active session exists for the current thread
        Examples
        --------
        >>> s = SparkSession.getActiveSession()
        >>> l = [('Alice', 1)]
        >>> rdd = s.sparkContext.parallelize(l)
        >>> df = s.createDataFrame(rdd, ['name', 'age'])
        >>> df.select("age").collect()
        [Row(age=1)]
        """
        from pyspark import SparkContext
        sc = SparkContext._active_spark_context
        if sc is None:
            return None
        else:
            if sc._jvm.SparkSession.getActiveSession().isDefined():
                # Constructed purely for its side effect: __init__ registers
                # the wrapper as SparkSession._activeSession, which is what
                # gets returned below.
                SparkSession(sc, sc._jvm.SparkSession.getActiveSession().get())
                return SparkSession._activeSession
            else:
                return None
    @property
    @since(2.0)
    def sparkContext(self):
        """Returns the underlying :class:`SparkContext`."""
        return self._sc
    @property
    @since(2.0)
    def version(self):
        """The version of Spark on which this application is running."""
        return self._jsparkSession.version()
    @property
    @since(2.0)
    def conf(self):
        """Runtime configuration interface for Spark.
        This is the interface through which the user can get and set all Spark and Hadoop
        configurations that are relevant to Spark SQL. When getting the value of a config,
        this defaults to the value set in the underlying :class:`SparkContext`, if any.
        Returns
        -------
        :class:`pyspark.sql.conf.RuntimeConfig`
        """
        # Lazily created and cached on first access.
        if not hasattr(self, "_conf"):
            self._conf = RuntimeConfig(self._jsparkSession.conf())
        return self._conf
    @property
    def catalog(self):
        """Interface through which the user may create, drop, alter or query underlying
        databases, tables, functions, etc.
        .. versionadded:: 2.0.0
        Returns
        -------
        :class:`Catalog`
        """
        # Import deferred to avoid a circular import at module load time.
        from pyspark.sql.catalog import Catalog
        if not hasattr(self, "_catalog"):
            self._catalog = Catalog(self)
        return self._catalog
    @property
    def udf(self):
        """Returns a :class:`UDFRegistration` for UDF registration.
        .. versionadded:: 2.0.0
        Returns
        -------
        :class:`UDFRegistration`
        """
        from pyspark.sql.udf import UDFRegistration
        return UDFRegistration(self)
    def range(self, start, end=None, step=1, numPartitions=None):
        """
        Create a :class:`DataFrame` with single :class:`pyspark.sql.types.LongType` column named
        ``id``, containing elements in a range from ``start`` to ``end`` (exclusive) with
        step value ``step``.
        .. versionadded:: 2.0.0
        Parameters
        ----------
        start : int
            the start value
        end : int, optional
            the end value (exclusive)
        step : int, optional
            the incremental step (default: 1)
        numPartitions : int, optional
            the number of partitions of the DataFrame
        Returns
        -------
        :class:`DataFrame`
        Examples
        --------
        >>> spark.range(1, 7, 2).collect()
        [Row(id=1), Row(id=3), Row(id=5)]
        If only one argument is specified, it will be used as the end value.
        >>> spark.range(3).collect()
        [Row(id=0), Row(id=1), Row(id=2)]
        """
        if numPartitions is None:
            # Default to one partition per available core.
            numPartitions = self._sc.defaultParallelism
        if end is None:
            # Single-argument form: range(n) means range(0, n).
            jdf = self._jsparkSession.range(0, int(start), int(step), int(numPartitions))
        else:
            jdf = self._jsparkSession.range(int(start), int(end), int(step), int(numPartitions))
        return DataFrame(jdf, self._wrapped)
def _inferSchemaFromList(self, data, names=None):
"""
Infer schema from list of Row, dict, or tuple.
Parameters
----------
data : iterable
list of Row, dict, or tuple
names : list, optional
list of column names
Returns
-------
:class:`pyspark.sql.types.StructType`
"""
if not data:
raise ValueError("can not infer schema from empty dataset")
schema = reduce(_merge_type, (_infer_schema(row, names) for row in data))
if _has_nulltype(schema):
raise ValueError("Some of types cannot be determined after inferring")
return schema
    def _inferSchema(self, rdd, samplingRatio=None, names=None):
        """
        Infer schema from an RDD of Row, dict, or tuple.
        Parameters
        ----------
        rdd : :class:`RDD`
            an RDD of Row, dict, or tuple
        samplingRatio : float, optional
            sampling ratio, or no sampling (default)
        names : list, optional
        Returns
        -------
        :class:`pyspark.sql.types.StructType`
        """
        first = rdd.first()
        if not first:
            raise ValueError("The first row in RDD is empty, "
                             "can not infer schema")
        if samplingRatio is None:
            # No sampling: start from the first row and merge in up to 99
            # more rows until every field type is resolved.
            schema = _infer_schema(first, names=names)
            if _has_nulltype(schema):
                for row in rdd.take(100)[1:]:
                    schema = _merge_type(schema, _infer_schema(row, names=names))
                    if not _has_nulltype(schema):
                        break
                else:
                    # for/else: only reached when no break occurred, i.e.
                    # some field type is still unknown after 100 rows.
                    raise ValueError("Some of types cannot be determined by the "
                                     "first 100 rows, please try again with sampling")
        else:
            # Sampling path: infer over a (possibly down-sampled) full scan.
            if samplingRatio < 0.99:
                rdd = rdd.sample(False, float(samplingRatio))
            schema = rdd.map(lambda row: _infer_schema(row, names)).reduce(_merge_type)
        return schema
    def _createFromRDD(self, rdd, schema, samplingRatio):
        """
        Create an RDD for DataFrame from an existing RDD, returns the RDD and schema.
        """
        if schema is None or isinstance(schema, (list, tuple)):
            # Infer field types; a list/tuple schema only supplies names.
            struct = self._inferSchema(rdd, samplingRatio, names=schema)
            converter = _create_converter(struct)
            rdd = rdd.map(converter)
            if isinstance(schema, (list, tuple)):
                # Overwrite inferred names with the user-supplied ones.
                for i, name in enumerate(schema):
                    struct.fields[i].name = name
                    struct.names[i] = name
            schema = struct
        elif not isinstance(schema, StructType):
            raise TypeError("schema should be StructType or list or None, but got: %s" % schema)
        # convert python objects to sql data
        rdd = rdd.map(schema.toInternal)
        return rdd, schema
    def _createFromLocal(self, data, schema):
        """
        Create an RDD for DataFrame from a list or pandas.DataFrame, returns
        the RDD and schema.
        """
        # make sure data could consumed multiple times
        if not isinstance(data, list):
            data = list(data)
        if schema is None or isinstance(schema, (list, tuple)):
            # Infer field types from the local rows; list/tuple only names columns.
            struct = self._inferSchemaFromList(data, names=schema)
            converter = _create_converter(struct)
            data = map(converter, data)
            if isinstance(schema, (list, tuple)):
                for i, name in enumerate(schema):
                    struct.fields[i].name = name
                    struct.names[i] = name
            schema = struct
        elif not isinstance(schema, StructType):
            raise TypeError("schema should be StructType or list or None, but got: %s" % schema)
        # convert python objects to sql data
        data = [schema.toInternal(row) for row in data]
        # Ship the converted rows to the cluster as an RDD.
        return self._sc.parallelize(data), schema
    @staticmethod
    def _create_shell_session():
        """
        Initialize a SparkSession for a pyspark shell session. This is called from shell.py
        to make error handling simpler without needing to declare local variables in that
        script, which would expose those to users.
        """
        import py4j
        from pyspark.conf import SparkConf
        from pyspark.context import SparkContext
        try:
            # Try to access HiveConf, it will raise exception if Hive is not added
            conf = SparkConf()
            if conf.get('spark.sql.catalogImplementation', 'hive').lower() == 'hive':
                SparkContext._jvm.org.apache.hadoop.hive.conf.HiveConf()
                return SparkSession.builder\
                    .enableHiveSupport()\
                    .getOrCreate()
            else:
                return SparkSession.builder.getOrCreate()
        except (py4j.protocol.Py4JError, TypeError):
            # NOTE(review): `conf` is referenced here; it is unbound only if
            # SparkConf() itself raised, which is assumed not to happen.
            if conf.get('spark.sql.catalogImplementation', '').lower() == 'hive':
                warnings.warn("Fall back to non-hive support because failing to access HiveConf, "
                              "please make sure you build spark with hive")
            return SparkSession.builder.getOrCreate()
    def createDataFrame(self, data, schema=None, samplingRatio=None, verifySchema=True):
        """
        Creates a :class:`DataFrame` from an :class:`RDD`, a list or a :class:`pandas.DataFrame`.
        When ``schema`` is a list of column names, the type of each column
        will be inferred from ``data``.
        When ``schema`` is ``None``, it will try to infer the schema (column names and types)
        from ``data``, which should be an RDD of either :class:`Row`,
        :class:`namedtuple`, or :class:`dict`.
        When ``schema`` is :class:`pyspark.sql.types.DataType` or a datatype string, it must match
        the real data, or an exception will be thrown at runtime. If the given schema is not
        :class:`pyspark.sql.types.StructType`, it will be wrapped into a
        :class:`pyspark.sql.types.StructType` as its only field, and the field name will be "value".
        Each record will also be wrapped into a tuple, which can be converted to row later.
        If schema inference is needed, ``samplingRatio`` is used to determined the ratio of
        rows used for schema inference. The first row will be used if ``samplingRatio`` is ``None``.
        .. versionadded:: 2.0.0
        .. versionchanged:: 2.1.0
           Added verifySchema.
        Parameters
        ----------
        data : :class:`RDD` or iterable
            an RDD of any kind of SQL data representation (:class:`Row`,
            :class:`tuple`, ``int``, ``boolean``, etc.), or :class:`list`, or
            :class:`pandas.DataFrame`.
        schema : :class:`pyspark.sql.types.DataType`, str or list, optional
            a :class:`pyspark.sql.types.DataType` or a datatype string or a list of
            column names, default is None. The data type string format equals to
            :class:`pyspark.sql.types.DataType.simpleString`, except that top level struct type can
            omit the ``struct<>`` and atomic types use ``typeName()`` as their format, e.g. use
            ``byte`` instead of ``tinyint`` for :class:`pyspark.sql.types.ByteType`.
            We can also use ``int`` as a short name for :class:`pyspark.sql.types.IntegerType`.
        samplingRatio : float, optional
            the sample ratio of rows used for inferring
        verifySchema : bool, optional
            verify data types of every row against schema. Enabled by default.
        Returns
        -------
        :class:`DataFrame`
        Notes
        -----
        Usage with spark.sql.execution.arrow.pyspark.enabled=True is experimental.
        Examples
        --------
        >>> l = [('Alice', 1)]
        >>> spark.createDataFrame(l).collect()
        [Row(_1='Alice', _2=1)]
        >>> spark.createDataFrame(l, ['name', 'age']).collect()
        [Row(name='Alice', age=1)]
        >>> d = [{'name': 'Alice', 'age': 1}]
        >>> spark.createDataFrame(d).collect()
        [Row(age=1, name='Alice')]
        >>> rdd = sc.parallelize(l)
        >>> spark.createDataFrame(rdd).collect()
        [Row(_1='Alice', _2=1)]
        >>> df = spark.createDataFrame(rdd, ['name', 'age'])
        >>> df.collect()
        [Row(name='Alice', age=1)]
        >>> from pyspark.sql import Row
        >>> Person = Row('name', 'age')
        >>> person = rdd.map(lambda r: Person(*r))
        >>> df2 = spark.createDataFrame(person)
        >>> df2.collect()
        [Row(name='Alice', age=1)]
        >>> from pyspark.sql.types import *
        >>> schema = StructType([
        ...    StructField("name", StringType(), True),
        ...    StructField("age", IntegerType(), True)])
        >>> df3 = spark.createDataFrame(rdd, schema)
        >>> df3.collect()
        [Row(name='Alice', age=1)]
        >>> spark.createDataFrame(df.toPandas()).collect()  # doctest: +SKIP
        [Row(name='Alice', age=1)]
        >>> spark.createDataFrame(pandas.DataFrame([[1, 2]])).collect()  # doctest: +SKIP
        [Row(0=1, 1=2)]
        >>> spark.createDataFrame(rdd, "a: string, b: int").collect()
        [Row(a='Alice', b=1)]
        >>> rdd = rdd.map(lambda row: row[1])
        >>> spark.createDataFrame(rdd, "int").collect()
        [Row(value=1)]
        >>> spark.createDataFrame(rdd, "boolean").collect() # doctest: +IGNORE_EXCEPTION_DETAIL
        Traceback (most recent call last):
            ...
        Py4JJavaError: ...
        """
        # Creating a DataFrame makes this session the active one.
        SparkSession._activeSession = self
        self._jvm.SparkSession.setActiveSession(self._jsparkSession)
        if isinstance(data, DataFrame):
            raise TypeError("data is already a DataFrame")
        if isinstance(schema, str):
            # Datatype string such as "a: string, b: int".
            schema = _parse_datatype_string(schema)
        elif isinstance(schema, (list, tuple)):
            # Must re-encode any unicode strings to be consistent with StructField names
            schema = [x.encode('utf-8') if not isinstance(x, str) else x for x in schema]
        try:
            import pandas
            has_pandas = True
        except Exception:
            # pandas is optional; fall back to the generic path below.
            has_pandas = False
        if has_pandas and isinstance(data, pandas.DataFrame):
            # Create a DataFrame from pandas DataFrame.
            return super(SparkSession, self).createDataFrame(
                data, schema, samplingRatio, verifySchema)
        return self._create_dataframe(data, schema, samplingRatio, verifySchema)
    def _create_dataframe(self, data, schema, samplingRatio, verifySchema):
        """Shared implementation behind createDataFrame for RDD/list input.

        Builds a row-preparation function from the schema (verifying rows if
        requested), converts the data to an RDD of internal rows, and hands
        it to the JVM to build the DataFrame.
        """
        if isinstance(schema, StructType):
            verify_func = _make_type_verifier(schema) if verifySchema else lambda _: True
            def prepare(obj):
                verify_func(obj)
                return obj
        elif isinstance(schema, DataType):
            # Single-type schema: wrap it into a one-field struct named "value".
            dataType = schema
            schema = StructType().add("value", schema)
            verify_func = _make_type_verifier(
                dataType, name="field value") if verifySchema else lambda _: True
            def prepare(obj):
                verify_func(obj)
                # Trailing comma: each bare value becomes a 1-tuple (one row).
                return obj,
        else:
            prepare = lambda obj: obj
        if isinstance(data, RDD):
            rdd, schema = self._createFromRDD(data.map(prepare), schema, samplingRatio)
        else:
            rdd, schema = self._createFromLocal(map(prepare, data), schema)
        jrdd = self._jvm.SerDeUtil.toJavaArray(rdd._to_java_object_rdd())
        jdf = self._jsparkSession.applySchemaToPythonRDD(jrdd.rdd(), schema.json())
        df = DataFrame(jdf, self._wrapped)
        # Cache the known schema so DataFrame.schema avoids a JVM round trip.
        df._schema = schema
        return df
    def sql(self, sqlQuery):
        """Returns a :class:`DataFrame` representing the result of the given query.
        .. versionadded:: 2.0.0
        Returns
        -------
        :class:`DataFrame`
        Examples
        --------
        >>> df.createOrReplaceTempView("table1")
        >>> df2 = spark.sql("SELECT field1 AS f1, field2 as f2 from table1")
        >>> df2.collect()
        [Row(f1=1, f2='row1'), Row(f1=2, f2='row2'), Row(f1=3, f2='row3')]
        """
        # Delegate parsing/execution to the JVM session; wrap the result.
        return DataFrame(self._jsparkSession.sql(sqlQuery), self._wrapped)
    def table(self, tableName):
        """Returns the specified table as a :class:`DataFrame`.
        .. versionadded:: 2.0.0
        Returns
        -------
        :class:`DataFrame`
        Examples
        --------
        >>> df.createOrReplaceTempView("table1")
        >>> df2 = spark.table("table1")
        >>> sorted(df.collect()) == sorted(df2.collect())
        True
        """
        return DataFrame(self._jsparkSession.table(tableName), self._wrapped)
    @property
    def read(self):
        """
        Returns a :class:`DataFrameReader` that can be used to read data
        in as a :class:`DataFrame`.
        .. versionadded:: 2.0.0
        Returns
        -------
        :class:`DataFrameReader`
        """
        # A fresh reader is created on every access (readers hold per-call options).
        return DataFrameReader(self._wrapped)
    @property
    def readStream(self):
        """
        Returns a :class:`DataStreamReader` that can be used to read data streams
        as a streaming :class:`DataFrame`.
        .. versionadded:: 2.0.0
        Notes
        -----
        This API is evolving.
        Returns
        -------
        :class:`DataStreamReader`
        """
        return DataStreamReader(self._wrapped)
    @property
    def streams(self):
        """Returns a :class:`StreamingQueryManager` that allows managing all the
        :class:`StreamingQuery` instances active on `this` context.
        .. versionadded:: 2.0.0
        Notes
        -----
        This API is evolving.
        Returns
        -------
        :class:`StreamingQueryManager`
        """
        from pyspark.sql.streaming import StreamingQueryManager
        return StreamingQueryManager(self._jsparkSession.streams())
    @since(2.0)
    def stop(self):
        """Stop the underlying :class:`SparkContext`.
        """
        from pyspark.sql.context import SQLContext
        self._sc.stop()
        # We should clean the default session up. See SPARK-23228.
        self._jvm.SparkSession.clearDefaultSession()
        self._jvm.SparkSession.clearActiveSession()
        # Drop all Python-side caches so a later Builder.getOrCreate builds anew.
        SparkSession._instantiatedSession = None
        SparkSession._activeSession = None
        SQLContext._instantiatedContext = None
    @since(2.0)
    def __enter__(self):
        """
        Enable 'with SparkSession.builder.(...).getOrCreate() as session: app' syntax.
        """
        return self
    @since(2.0)
    def __exit__(self, exc_type, exc_val, exc_tb):
        """
        Enable 'with SparkSession.builder.(...).getOrCreate() as session: app' syntax.
        Specifically stop the SparkSession on exit of the with block.
        """
        self.stop()
def _test():
    """Run this module's doctests against a local 4-core SparkContext."""
    import os
    import doctest
    from pyspark.context import SparkContext
    from pyspark.sql import Row
    import pyspark.sql.session
    os.chdir(os.environ["SPARK_HOME"])
    # Doctest globals: expose sc, spark, a sample rdd and df to the examples.
    globs = pyspark.sql.session.__dict__.copy()
    sc = SparkContext('local[4]', 'PythonTest')
    globs['sc'] = sc
    globs['spark'] = SparkSession(sc)
    globs['rdd'] = rdd = sc.parallelize(
        [Row(field1=1, field2="row1"),
         Row(field1=2, field2="row2"),
         Row(field1=3, field2="row3")])
    globs['df'] = rdd.toDF()
    (failure_count, test_count) = doctest.testmod(
        pyspark.sql.session, globs=globs,
        optionflags=doctest.ELLIPSIS | doctest.NORMALIZE_WHITESPACE)
    globs['sc'].stop()
    if failure_count:
        sys.exit(-1)
if __name__ == "__main__":
    _test()
| apache-2.0 |
Eric89GXL/scikit-learn | sklearn/decomposition/tests/test_truncated_svd.py | 8 | 2692 | """Test truncated SVD transformer."""
import numpy as np
import scipy.sparse as sp
from sklearn.decomposition import TruncatedSVD
from sklearn.utils import check_random_state
from sklearn.utils.testing import (assert_array_almost_equal, assert_equal,
assert_raises)
# Make an X that looks somewhat like a small tf-idf matrix.
# XXX newer versions of SciPy have scipy.sparse.rand for this.
shape = 60, 55
n_samples, n_features = shape
rng = check_random_state(42)
# np.prod replaces np.product: the latter is a deprecated alias that was
# removed in NumPy 2.0; np.prod is the long-standing canonical spelling.
X = rng.randint(-100, 20, np.prod(shape)).reshape(shape)
X = sp.csr_matrix(np.maximum(X, 0), dtype=np.float64)
X.data[:] = 1 + np.log(X.data)
Xdense = X.A
def test_algorithms():
    """ARPACK and randomized SVD agree on the leading components."""
    svd_a = TruncatedSVD(30, algorithm="arpack")
    svd_r = TruncatedSVD(30, algorithm="randomized", random_state=42)
    Xa = svd_a.fit_transform(X)[:, :6]
    Xr = svd_r.fit_transform(X)[:, :6]
    assert_array_almost_equal(Xa, Xr)
    # Signs are arbitrary per component, so compare absolute values.
    comp_a = np.abs(svd_a.components_)
    comp_r = np.abs(svd_r.components_)
    # All elements are equal, but some elements are more equal than others.
    assert_array_almost_equal(comp_a[:9], comp_r[:9])
    assert_array_almost_equal(comp_a[9:], comp_r[9:], decimal=3)
def test_attributes():
    """Fitted estimator exposes n_components and a matching components_ shape."""
    for n_components in (10, 25, 41):
        tsvd = TruncatedSVD(n_components).fit(X)
        assert_equal(tsvd.n_components, n_components)
        assert_equal(tsvd.components_.shape, (n_components, n_features))
def test_too_many_components():
    """Requesting >= n_features components must raise for both solvers."""
    for algorithm in ["arpack", "randomized"]:
        for n_components in (n_features, n_features+1):
            tsvd = TruncatedSVD(n_components=n_components, algorithm=algorithm)
            assert_raises(ValueError, tsvd.fit, X)
def test_sparse_formats():
    """fit_transform/transform accept a dense array and common sparse formats."""
    for fmt in ("array", "csr", "csc", "coo", "lil"):
        # Bug fix: the old code compared fmt against the literal "dense",
        # which never occurs in the tuple, so the Xdense branch was dead and
        # the "array" case went through getattr(X, "toarray")() instead.
        # Comparing against "array" exercises the precomputed dense matrix.
        Xfmt = Xdense if fmt == "array" else getattr(X, "to" + fmt)()
        tsvd = TruncatedSVD(n_components=11)
        Xtrans = tsvd.fit_transform(Xfmt)
        assert_equal(Xtrans.shape, (n_samples, 11))
        Xtrans = tsvd.transform(Xfmt)
        assert_equal(Xtrans.shape, (n_samples, 11))
def test_inverse_transform():
    """inverse_transform approximately reconstructs the dense input."""
    for algo in ("arpack", "randomized"):
        # We need a lot of components for the reconstruction to be "almost
        # equal" in all positions. XXX Test means or sums instead?
        tsvd = TruncatedSVD(n_components=52, random_state=42)
        Xt = tsvd.fit_transform(X)
        Xinv = tsvd.inverse_transform(Xt)
        assert_array_almost_equal(Xinv, Xdense, decimal=1)
def test_integers():
    """Integer input is accepted and produces the expected output shape."""
    Xint = X.astype(np.int64)
    tsvd = TruncatedSVD(n_components=6)
    Xtrans = tsvd.fit_transform(Xint)
    assert_equal(Xtrans.shape, (n_samples, tsvd.n_components))
| bsd-3-clause |
pwcazenave/PySeidon | pyseidon/adcpClass/plotsAdcp.py | 2 | 3698 | #!/usr/bin/python2.7
# encoding: utf-8
from __future__ import division
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.tri as Tri
import matplotlib.ticker as ticker
import seaborn
from windrose import WindroseAxes
from interpolation_utils import *
class PlotsAdcp:
"""'Plots' subset of FVCOM class gathers plotting functions"""
def __init__(self, variable, debug=False):
self._var = variable
def plot_xy(self, x, y, xerror=[], yerror=[], title=' ', xLabel=' ', yLabel=' '):
"""
Simple X vs Y plot
Inputs:
------
- x = 1D array
- y = 1D array
Keywords:
--------
- xerror = error on 'x', 1D array
- yerror = error on 'y', 1D array
- title = plot title, string
- xLabel = title of the x-axis, string
- yLabel = title of the y-axis, string
"""
fig = plt.figure(figsize=(18,10))
plt.rc('font',size='22')
self._fig = plt.plot(x, y, label=title)
scale = 1
ticks = ticker.FuncFormatter(lambda lon, pos: '{0:g}'.format(lon/scale))
plt.ylabel(yLabel)
plt.xlabel(xLabel)
if not yerror==[]:
#plt.errorbar(x, y, yerr=yerror, fmt='o', ecolor='k')
plt.fill_between(x, y-yerror, y+yerror,
alpha=0.2, edgecolor='#1B2ACC', facecolor='#089FFF', antialiased=True)
if not xerror==[]:
#plt.errorbar(x, y, xerr=xerror, fmt='o', ecolor='k')
plt.fill_betweenx(y, x-xerror, x+xerror,
alpha=0.2, edgecolor='#1B2ACC', facecolor='#089FFF', antialiased=True)
plt.show()
def Histogram(self, y, title=' ', xLabel=' ', yLabel=' '):
"""
Histogram plot
Inputs:
------
- bins = list of bin edges
- y = 1D array
Keywords:
--------
- title = plot title, string
- xLabel = title of the x-axis, string
- yLabel = title of the y-axis, string
"""
fig = plt.figure(figsize=(18,10))
density, bins = np.histogram(y, bins=50, normed=True, density=True)
unity_density = density / density.sum()
widths = bins[:-1] - bins[1:]
# To plot correct percentages in the y axis
plt.bar(bins[1:], unity_density, width=widths)
formatter = ticker.FuncFormatter(lambda v, pos: str(v * 100))
plt.gca().yaxis.set_major_formatter(formatter)
plt.ylabel(yLabel)
plt.xlabel(xLabel)
plt.show()
    def rose_diagram(self, direction, norm):
        """
        Plot rose diagram
        Inputs:
        ------
          - direction = directions in degrees, 1D array
          - norm = magnitudes associated with each direction, 1D array
        """
        #Convertion
        #TR: not quite sure here, seems to change from location to location
        #    express principal axis in compass
        # NOTE(review): converts from math convention (CCW from east) to
        # compass bearing (CW from north) — confirm against the data source.
        direction = np.mod(90.0 - direction, 360.0)
        #Create new figure
        fig = plt.figure(figsize=(18,10))
        plt.rc('font',size='22')
        rect = [0.1, 0.1, 0.8, 0.8]
        ax = WindroseAxes(fig, rect)#, axisbg='w')
        fig.add_axes(ax)
        #Rose
        ax.bar(direction, norm , normed=True, opening=0.8, edgecolor='white')
        #adjust legend
        l = ax.legend(shadow=True, bbox_to_anchor=[-0.1, 0], loc='lower left')
        plt.setp(l.get_texts(), fontsize=10)
        plt.xlabel('Rose diagram in % of occurrences - Colormap of norms')
        plt.show()
#TR_comments: templates
# def whatever(self, debug=False):
# if debug or self._debug:
# print 'Start whatever...'
#
# if debug or self._debug:
# print '...Passed'
| agpl-3.0 |
hbp-unibi/SNABSuite | source/SNABs/mnist/python/mnist_view.py | 1 | 1496 | from __future__ import print_function
from keras.datasets import mnist
import matplotlib
matplotlib.use('TkAgg')
import matplotlib.pyplot as plt
# the data, split between train and test sets
(x_train, y_train), (x_test, y_test) = mnist.load_data()
x_train = x_train.reshape(60000, 784)
x_test = x_test.reshape(10000, 784)
x_train = x_train.astype('float32')
x_test = x_test.astype('float32')
x_train /= 255
x_test /= 255

# Number of samples exported to the C++ initialiser files below.
nr_images = 5


def _export_samples(path, labels, images, count, final_newline):
    """Write ``count`` labelled images to ``path`` as C++ brace-initialisers.

    First a "{l0, l1, ...}" line with the labels, then one
    "{p0, p1, ...}," row per image holding its 784 normalised pixels.
    ``final_newline`` controls whether the closing brace is followed by a
    newline (train.txt historically has one, test.txt does not).
    """
    with open(path, 'w') as out:
        out.write("{")
        for i in range(count):
            out.write(str(labels[i]))
            out.write(", ")
        out.write("}\n")
        out.write("{")
        for i in range(count):
            out.write("{")
            for value in images[i]:
                out.write(str(value))
                out.write(", ")
            out.write("}, \n")
        out.write("}")
        if final_newline:
            out.write("\n")


# Echo the exported training labels for a quick sanity check.
for i in range(0, nr_images):
    print(y_train[i], ", ")

# The original duplicated this whole block for train and test; the helper
# reproduces the output files byte for byte (and avoids shadowing the name
# ``file`` as the loop variable did).
_export_samples("train.txt", y_train, x_train, nr_images, final_newline=True)
_export_samples("test.txt", y_test, x_test, nr_images, final_newline=False)
| gpl-3.0 |
tomkooij/sapphire | scripts/kascade/reconstruction_efficiency.py | 1 | 14972 | from __future__ import division
import tables
import numpy as np
import pylab as plt
from scipy import optimize, stats
from sapphire.analysis import landau
import utils
from artist import GraphArtist
import artist.utils
# Pulse-integral histogram settings: RANGE_MAX ADC units split over N_BINS bins.
RANGE_MAX = 40000
N_BINS = 400
# Fit window (pulse integral, ADC units) used by the spectrum fits below.
LOW, HIGH = 500, 5500
# Conversion factor from ADC pulse integral to volt-nanoseconds.
VNS = .57e-3 * 2.5
# Toggle LaTeX rendering / thesis-style figure geometry for matplotlib.
USE_TEX = True
# For matplotlib plots
if USE_TEX:
plt.rcParams['font.serif'] = 'Computer Modern'
plt.rcParams['font.sans-serif'] = 'Computer Modern'
plt.rcParams['font.family'] = 'sans-serif'
plt.rcParams['figure.figsize'] = [4 * x for x in (1, 2. / 3)]
plt.rcParams['figure.subplot.left'] = 0.175
plt.rcParams['figure.subplot.bottom'] = 0.175
plt.rcParams['font.size'] = 10
plt.rcParams['legend.fontsize'] = 'small'
plt.rcParams['text.usetex'] = True
# Fits the station-601 pulse-integral spectrum as a gamma power law plus a
# Landau/Gauss convolution, then derives the charged-particle detection
# efficiency as a function of KASCADE particle density.  Python 2 code
# (print statements); ``plt`` is pylab, so numpy names are available on it.
class ReconstructionEfficiency(object):
# Cache the Scintillator in module globals so repeated interactive runs
# (e.g. %run in ipython) reuse the expensive convolution machinery.
def __init__(self, data):
global scintillator
self.data = data
if 'scintillator' in globals():
self.scintillator = scintillator
else:
self.scintillator = landau.Scintillator()
scintillator = self.scintillator
# Produce the three analysis plots in sequence.
def main(self):
self.plot_spectrum_fit_chisq()
self.plot_gamma_landau_fit()
self.plot_detection_efficiency()
# Fraction of the spectrum attributed to charged particles (sum ratio).
def calc_charged_fraction(self, x, y, p_gamma, p_landau):
y_charged = self.calc_charged_spectrum(x, y, p_gamma, p_landau)
N_full = y.sum()
N_charged = y_charged.sum()
return N_charged / N_full
# Charged-particle spectrum: left of the Landau peak use the fitted Landau,
# right of it use data minus the (truncated) gamma component.
def calc_charged_spectrum(self, x, y, p_gamma, p_landau):
y_landau = self.scintillator.conv_landau_for_x(x, *p_landau)
max_pos = x[y_landau.argmax()]
y_gamma = self.gamma_func(x, *p_gamma)
# Gammas are only subtracted up to 3x the Landau peak position.
y_gamma_trunc = np.where(x <= 3 * max_pos, y_gamma, 0.)
y_reduced = y - y_gamma_trunc
# NOTE(review): mev_scale is assigned but never used below.
mev_scale = p_landau[1]
y_charged_left = y_landau.compress(x <= max_pos)
y_charged_right = y_reduced.compress(max_pos < x)
y_charged = np.array(y_charged_left.tolist() +
y_charged_right.tolist())
return y_charged
# Three-stage fit: gammas alone, then Landau on the residue, then both freely.
def full_spectrum_fit(self, x, y, p0_gamma, p0_landau):
p_gamma = self.fit_gammas_to_data(x, y, p0_gamma)
p_landau = self.fit_conv_landau_to_data(x, y - self.gamma_func(x, *p_gamma),
p0_landau)
p_gamma, p_landau = self.fit_complete(x, y, p_gamma, p_landau)
return p_gamma, p_landau
# Refit only the two normalisations, keeping shapes from a previous fit.
def constrained_full_spectrum_fit(self, x, y, p0_gamma, p0_landau):
p_gamma, p_landau = self.constrained_fit_complete(x, y, p0_gamma, p0_landau)
return p_gamma, p_landau
# Full fit of detector 1's pulse-integral spectrum, plotted with matplotlib
# and again as a GraphArtist figure for the thesis.
def plot_gamma_landau_fit(self):
events = self.data.root.hisparc.cluster_kascade.station_601.events
ph0 = events.col('integrals')[:, 0]
bins = np.linspace(0, RANGE_MAX, N_BINS + 1)
n, bins = np.histogram(ph0, bins=bins)
x = (bins[:-1] + bins[1:]) / 2
p_gamma, p_landau = self.full_spectrum_fit(x, n, (1., 1.),
(5e3 / .32, 3.38 / 5000, 1.))
print "FULL FIT"
print p_gamma, p_landau
# Scale the histogram down, then refit only the normalisations on it.
# NOTE(review): in-place true division on an integer histogram array —
# modern numpy raises here; presumably fine on the numpy of the time.
n /= 10
p_gamma, p_landau = self.constrained_full_spectrum_fit(x, n, p_gamma, p_landau)
print "CONSTRAINED FIT"
print p_gamma, p_landau
plt.figure()
print self.calc_charged_fraction(x, n, p_gamma, p_landau)
plt.plot(x * VNS, n)
self.plot_landau_and_gamma(x, p_gamma, p_landau)
#plt.plot(x, n - self.gamma_func(x, *p_gamma))
plt.xlabel("Pulse integral [V ns]")
plt.ylabel("Count")
plt.yscale('log')
plt.xlim(0, 30)
plt.ylim(1e1, 1e4)
plt.legend()
utils.saveplot()
graph = GraphArtist('semilogy')
graph.histogram(n, bins * VNS, linestyle='gray')
self.artistplot_landau_and_gamma(graph, x, p_gamma, p_landau)
graph.set_xlabel(r"Pulse integral [\si{\volt\nano\second}]")
graph.set_ylabel("Count")
graph.set_xlimits(0, 30)
graph.set_ylimits(1e1, 1e4)
artist.utils.save_graph(graph, dirname='plots')
# Fit the spectrum and report a reduced chi-square over the LOW..HIGH window,
# plus a residuals plot.  Caches the integrals column in module globals.
def plot_spectrum_fit_chisq(self):
global integrals
if 'integrals' not in globals():
events = self.data.root.hisparc.cluster_kascade.station_601.events
integrals = events.col('integrals')[:, 0]
bins = np.linspace(0, RANGE_MAX, N_BINS + 1)
n, bins = np.histogram(integrals, bins=bins)
x = (bins[:-1] + bins[1:]) / 2
p_gamma, p_landau = self.full_spectrum_fit(x, n, (1., 1.),
(5e3 / .32, 3.38 / 5000, 1.))
print "FULL FIT"
print p_gamma, p_landau
print "charged fraction:", self.calc_charged_fraction(x, n, p_gamma, p_landau)
landaus = scintillator.conv_landau_for_x(x, *p_landau)
gammas = self.gamma_func(x, *p_gamma)
fit = landaus + gammas
x_trunc = x.compress((LOW <= x) & (x < HIGH))
n_trunc = n.compress((LOW <= x) & (x < HIGH))
fit_trunc = fit.compress((LOW <= x) & (x < HIGH))
# ddof=5: five fitted parameters (2 gamma + 3 landau).
chisq, pvalue = stats.chisquare(n_trunc, fit_trunc, ddof=5)
chisq /= (len(n_trunc) - 1 - 5)
print "Chi-square statistic:", chisq, pvalue
plt.figure()
plt.plot(x * VNS, n)
self.plot_landau_and_gamma(x, p_gamma, p_landau)
#plt.plot(x_trunc * VNS, fit_trunc, linewidth=4)
plt.axvline(LOW * VNS)
plt.axvline(HIGH * VNS)
plt.xlabel("Pulse integral [V ns]")
plt.ylabel("Count")
plt.yscale('log')
plt.xlim(0, 20)
plt.ylim(1e2, 1e5)
plt.title(r"$\chi^2_{red}$: %.2f, p-value: %.2e" % (chisq, pvalue))
utils.saveplot()
plt.figure()
plt.plot(x_trunc * VNS, n_trunc - fit_trunc)
plt.axhline(0)
plt.xlabel("Pulse integral [V ns]")
plt.ylabel("Data - Fit")
plt.title(r"$\chi^2_{red}$: %.2f, p-value: %.2e" % (chisq, pvalue))
utils.saveplot(suffix='residuals')
# Overlay the fitted gamma (truncated at 21 V ns), Landau, and their sum.
def plot_landau_and_gamma(self, x, p_gamma, p_landau):
gammas = self.gamma_func(x, *p_gamma)
gamma_trunc = np.where(x * VNS <= 21, gammas, 0.)
plt.plot(x * VNS, gamma_trunc, label='gamma')
landaus = self.scintillator.conv_landau_for_x(x, *p_landau)
plt.plot(x * VNS, landaus, label='landau/gauss')
plt.plot(x * VNS, gamma_trunc + landaus, label='gamma + landau/gauss')
# Same overlay for a GraphArtist graph (1e-99 instead of 0 keeps the
# semilog plot happy).
def artistplot_landau_and_gamma(self, graph, x, p_gamma, p_landau):
gammas = self.gamma_func(x, *p_gamma)
gamma_trunc = np.where(x * VNS <= 21, gammas, 1e-99)
graph.plot(x * VNS, gamma_trunc, mark=None, linestyle='dashed')
landaus = self.scintillator.conv_landau_for_x(x, *p_landau)
graph.plot(x * VNS, landaus, mark=None, linestyle='dashdotted')
graph.plot(x * VNS, gamma_trunc + landaus, mark=None)
# Variant in gray, without the summed curve.
def artistplot_alt_landau_and_gamma(self, graph, x, p_gamma, p_landau):
gammas = self.gamma_func(x, *p_gamma)
gamma_trunc = np.where(x * VNS <= 21, gammas, 1e-99)
graph.plot(x * VNS, gamma_trunc, mark=None, linestyle='dashed,gray')
landaus = self.scintillator.conv_landau_for_x(x, *p_landau)
graph.plot(x * VNS, landaus, mark=None, linestyle='dashdotted,gray')
# Weighted power-law fit to the gamma-dominated region LOW..2000.
def fit_gammas_to_data(self, x, y, p0):
condition = (LOW <= x) & (x < 2000)
x_trunc = x.compress(condition)
y_trunc = y.compress(condition)
popt, pcov = optimize.curve_fit(self.gamma_func, x_trunc, y_trunc,
p0=p0, sigma=np.sqrt(y_trunc))
return popt
# Power law N * x^-a used for the gamma background.
def gamma_func(self, x, N, a):
return N * x ** -a
# Fit the convolved Landau (normalisation, MeV scale, gauss sigma) to the
# gamma-subtracted spectrum over the 4500..5500 window.
def fit_conv_landau_to_data(self, x, y, p0):
popt = optimize.fmin(self.scintillator.residuals, p0,
(x, y, 4500, 5500), disp=0)
return popt
# Simplex fit of all five parameters simultaneously over LOW..HIGH.
def fit_complete(self, x, y, p_gamma, p_landau):
p0 = list(p_gamma) + list(p_landau)
popt = optimize.fmin(self.complete_residuals, p0,
(self.scintillator, x, y, LOW, HIGH),
maxfun=100000, disp=0)
return popt[:2], popt[2:]
# Refit only the two normalisations; shapes stay fixed.  Note: mutates and
# returns the p_gamma / p_landau sequences passed in.
def constrained_fit_complete(self, x, y, p_gamma, p_landau):
N_gamma = p_gamma[0]
N_landau = p_landau[0]
popt = optimize.fmin(self.constrained_complete_residuals,
(N_gamma, N_landau),
(self.scintillator, x, y, p_gamma,
p_landau, LOW, HIGH),
maxfun=100000, disp=0)
p_gamma[0] = popt[0]
p_landau[0] = popt[1]
return p_gamma, p_landau
# Chi-square of (gamma + landau) model against data on the window [a, b).
def complete_residuals(self, par, scintillator, x, y, a, b):
landaus = scintillator.conv_landau_for_x(x, *par[2:])
gammas = self.gamma_func(x, *par[:2])
y_exp = landaus + gammas
y_trunc = y.compress((a <= x) & (x < b))
y_exp_trunc = y_exp.compress((a <= x) & (x < b))
# Make sure no zeroes end up in denominator of chi_squared
y_trunc = np.where(y_trunc != 0., y_trunc, 1.)
chisquared = ((y_trunc - y_exp_trunc) ** 2 / y_trunc).sum()
return chisquared
# Two-parameter wrapper: only the normalisations vary, shapes come from
# the previously fitted p_gamma / p_landau.
def constrained_complete_residuals(self, par, scintillator, x, y,
p_gamma, p_landau, a, b):
full_par = (par[0], p_gamma[1], par[1], p_landau[1], p_landau[2])
return self.complete_residuals(full_par, scintillator, x, y, a, b)
# Detector-1 pulse integrals paired (via the coincidence index) with the
# KASCADE charged-particle density projected onto the ground plane.
def get_integrals_and_densities(self):
hisparc = self.data.root.hisparc.cluster_kascade.station_601.events
kascade = self.data.root.kascade.events
c_index = self.data.root.kascade.c_index
h_index = c_index.col('h_idx')
k_index = c_index.col('k_idx')
intg = hisparc.read_coordinates(h_index, 'integrals')[:, 0]
dens_e = kascade.read_coordinates(k_index, 'dens_e')[:, 0]
dens_mu = kascade.read_coordinates(k_index, 'dens_mu')[:, 0]
theta = kascade.read_coordinates(k_index, 'zenith')
dens = dens_e + dens_mu
# Project the in-shower-plane density onto the ground.
dens_on_ground = dens * np.cos(theta)
return intg, dens_on_ground
# Histogram the integrals and run the full 5-parameter fit; returns the
# five fitted parameters as one flat list.
def full_fit_on_data(self, integrals, p0):
bins = np.linspace(0, RANGE_MAX, N_BINS + 1)
n, bins = np.histogram(integrals, bins=bins)
x = (bins[:-1] + bins[1:]) / 2
p_gamma, p_landau = self.full_spectrum_fit(x, n, p0[:2], p0[2:])
return list(p_gamma) + list(p_landau)
# Constrained refit on a data subset, returning its charged fraction.
def determine_charged_fraction(self, integrals, p0):
bins = np.linspace(0, RANGE_MAX, N_BINS + 1)
n, bins = np.histogram(integrals, bins=bins)
x = (bins[:-1] + bins[1:]) / 2
p_gamma, p_landau = self.constrained_full_spectrum_fit(x, n, p0[:2], p0[2:])
return self.calc_charged_fraction(x, n, p_gamma, p_landau)
# Detection probability vs density: per density bin, refit the spectrum and
# take the charged fraction; overlay the Poisson and Gauss-smeared models.
def plot_detection_efficiency(self):
integrals, dens = self.get_integrals_and_densities()
popt = self.full_fit_on_data(integrals,
(1., 1., 5e3 / .32, 3.38 / 5000, 1.))
x, y, yerr = [], [], []
dens_bins = np.linspace(0, 10, 51)
for low, high in zip(dens_bins[:-1], dens_bins[1:]):
sel = integrals.compress((low <= dens) & (dens < high))
x.append((low + high) / 2)
frac = self.determine_charged_fraction(sel, popt)
y.append(frac)
# Binomial-ish error estimate on the fraction.
yerr.append(np.sqrt(frac * len(sel)) / len(sel))
print (low + high) / 2, len(sel)
self.plot_full_spectrum_fit_in_density_range(sel, popt, low, high)
print
plt.figure()
plt.errorbar(x, y, yerr, fmt='o', label='data', markersize=3.)
# Fit the Gauss smearing width of the Poisson detection curve.
popt, pcov = optimize.curve_fit(self.conv_p_detection, x, y, p0=(1.,))
print "Sigma Gauss:", popt
# plt is pylab, which re-exports numpy's linspace.
x2 = plt.linspace(0, 10, 101)
plt.plot(x2, self.p_detection(x2), label='poisson')
plt.plot(x2, self.conv_p_detection(x2, *popt), label='poisson/gauss')
plt.xlabel("Charged particle density [$m^{-2}$]")
plt.ylabel("Detection probability")
plt.ylim(0, 1.)
plt.legend(loc='best')
utils.saveplot()
graph = GraphArtist()
graph.plot(x2, self.p_detection(x2), mark=None)
graph.plot(x2, self.conv_p_detection(x2, *popt), mark=None,
linestyle='dashed')
graph.plot(x, y, yerr=yerr, linestyle=None)
graph.set_xlabel(
r"Charged particle density [\si{\per\square\meter}]")
graph.set_ylabel("Detection probability")
graph.set_xlimits(min=0)
graph.set_ylimits(min=0)
artist.utils.save_graph(graph, dirname='plots')
# Per-density-bin spectrum plot with the constrained fit and the derived
# charged-particle spectrum, saved with a "low_high" filename suffix.
def plot_full_spectrum_fit_in_density_range(self, sel, popt, low, high):
bins = np.linspace(0, RANGE_MAX, N_BINS + 1)
n, bins = np.histogram(sel, bins=bins)
x = (bins[:-1] + bins[1:]) / 2
p_gamma, p_landau = self.constrained_full_spectrum_fit(x, n, popt[:2], popt[2:])
plt.figure()
plt.plot(x * VNS, n, label='data')
self.plot_landau_and_gamma(x, p_gamma, p_landau)
y_charged = self.calc_charged_spectrum(x, n, p_gamma, p_landau)
plt.plot(x * VNS, y_charged, label='charged particles')
plt.yscale('log')
plt.xlim(0, 50)
plt.ylim(ymin=1)
plt.xlabel("Pulse integral [V ns]")
plt.ylabel("Count")
plt.legend()
suffix = '%.1f-%.1f' % (low, high)
suffix = suffix.replace('.', '_')
utils.saveplot(suffix)
# Replace zeros so the semilog GraphArtist plot does not choke.
n = np.where(n > 0, n, 1e-99)
y_charged = np.where(y_charged > 0, y_charged, 1e-99)
graph = GraphArtist('semilogy')
graph.histogram(n, bins * VNS, linestyle='gray')
self.artistplot_alt_landau_and_gamma(graph, x, p_gamma, p_landau)
graph.histogram(y_charged, bins * VNS)
graph.set_xlabel(r"Pulse integral [\si{\volt\nano\second}]")
graph.set_ylabel("Count")
graph.set_title(r"$\SI{%.1f}{\per\square\meter} \leq \rho_\mathrm{charged}$ < $\SI{%.1f}{\per\square\meter}$" % (low, high))
graph.set_xlimits(0, 30)
graph.set_ylimits(1e0, 1e4)
artist.utils.save_graph(graph, suffix, dirname='plots')
# Poisson detection probability 1 - exp(-x/2), clipped to 0 for x < 0.
# NOTE(review): the factor .5 presumably encodes the detector area — confirm.
p_detection = np.vectorize(lambda x: 1 - np.exp(-.5 * x) if x >= 0 else 0.)
# p_detection convolved with a zero-mean Gauss of width sigma, evaluated by
# discrete convolution on an extended grid and interpolated back onto x.
def conv_p_detection(self, x, sigma):
x_step = x[-1] - x[-2]
x2 = np.arange(-2 * max(x), 2 * max(x) + x_step / 2, x_step)
g = stats.norm(scale=sigma).pdf
y2 = landau.discrete_convolution(self.p_detection, g, x2)
y = np.interp(x, x2, y2)
return y
if __name__ == '__main__':
# Silence numpy warnings triggered by log-scale plots and zero divisions.
np.seterr(invalid='ignore', divide='ignore')
# Keep the (slow to open) data file alive across interactive %run sessions;
# only open it on the first run.
if 'data' not in globals():
data = tables.open_file('kascade.h5', 'r')
# Prefix all saved plot filenames with 'EFF-'.
utils.set_prefix('EFF-')
artist.utils.set_prefix('EFF-')
efficiency = ReconstructionEfficiency(data)
efficiency.main()
| gpl-3.0 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.